xref: /openbmc/linux/drivers/gpu/drm/radeon/radeon_device.c (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
1771fe6b9SJerome Glisse /*
2771fe6b9SJerome Glisse  * Copyright 2008 Advanced Micro Devices, Inc.
3771fe6b9SJerome Glisse  * Copyright 2008 Red Hat Inc.
4771fe6b9SJerome Glisse  * Copyright 2009 Jerome Glisse.
5771fe6b9SJerome Glisse  *
6771fe6b9SJerome Glisse  * Permission is hereby granted, free of charge, to any person obtaining a
7771fe6b9SJerome Glisse  * copy of this software and associated documentation files (the "Software"),
8771fe6b9SJerome Glisse  * to deal in the Software without restriction, including without limitation
9771fe6b9SJerome Glisse  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10771fe6b9SJerome Glisse  * and/or sell copies of the Software, and to permit persons to whom the
11771fe6b9SJerome Glisse  * Software is furnished to do so, subject to the following conditions:
12771fe6b9SJerome Glisse  *
13771fe6b9SJerome Glisse  * The above copyright notice and this permission notice shall be included in
14771fe6b9SJerome Glisse  * all copies or substantial portions of the Software.
15771fe6b9SJerome Glisse  *
16771fe6b9SJerome Glisse  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17771fe6b9SJerome Glisse  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18771fe6b9SJerome Glisse  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19771fe6b9SJerome Glisse  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20771fe6b9SJerome Glisse  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21771fe6b9SJerome Glisse  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22771fe6b9SJerome Glisse  * OTHER DEALINGS IN THE SOFTWARE.
23771fe6b9SJerome Glisse  *
24771fe6b9SJerome Glisse  * Authors: Dave Airlie
25771fe6b9SJerome Glisse  *          Alex Deucher
26771fe6b9SJerome Glisse  *          Jerome Glisse
27771fe6b9SJerome Glisse  */
28f9183127SSam Ravnborg 
29771fe6b9SJerome Glisse #include <linux/console.h>
30bcc65fd8SMatthew Garrett #include <linux/efi.h>
312ef79416SThomas Zimmermann #include <linux/pci.h>
32f9183127SSam Ravnborg #include <linux/pm_runtime.h>
33f9183127SSam Ravnborg #include <linux/slab.h>
34f9183127SSam Ravnborg #include <linux/vga_switcheroo.h>
35f9183127SSam Ravnborg #include <linux/vgaarb.h>
36f9183127SSam Ravnborg 
37f9183127SSam Ravnborg #include <drm/drm_cache.h>
38f9183127SSam Ravnborg #include <drm/drm_crtc_helper.h>
39f9183127SSam Ravnborg #include <drm/drm_device.h>
40f9183127SSam Ravnborg #include <drm/drm_file.h>
41720cf96dSVille Syrjälä #include <drm/drm_framebuffer.h>
42f9183127SSam Ravnborg #include <drm/drm_probe_helper.h>
43f9183127SSam Ravnborg #include <drm/radeon_drm.h>
44f9183127SSam Ravnborg 
452aa3b7c8SLee Jones #include "radeon_device.h"
46771fe6b9SJerome Glisse #include "radeon_reg.h"
47771fe6b9SJerome Glisse #include "radeon.h"
48771fe6b9SJerome Glisse #include "atom.h"
49771fe6b9SJerome Glisse 
501b5331d9SJerome Glisse static const char radeon_family_name[][16] = {
511b5331d9SJerome Glisse 	"R100",
521b5331d9SJerome Glisse 	"RV100",
531b5331d9SJerome Glisse 	"RS100",
541b5331d9SJerome Glisse 	"RV200",
551b5331d9SJerome Glisse 	"RS200",
561b5331d9SJerome Glisse 	"R200",
571b5331d9SJerome Glisse 	"RV250",
581b5331d9SJerome Glisse 	"RS300",
591b5331d9SJerome Glisse 	"RV280",
601b5331d9SJerome Glisse 	"R300",
611b5331d9SJerome Glisse 	"R350",
621b5331d9SJerome Glisse 	"RV350",
631b5331d9SJerome Glisse 	"RV380",
641b5331d9SJerome Glisse 	"R420",
651b5331d9SJerome Glisse 	"R423",
661b5331d9SJerome Glisse 	"RV410",
671b5331d9SJerome Glisse 	"RS400",
681b5331d9SJerome Glisse 	"RS480",
691b5331d9SJerome Glisse 	"RS600",
701b5331d9SJerome Glisse 	"RS690",
711b5331d9SJerome Glisse 	"RS740",
721b5331d9SJerome Glisse 	"RV515",
731b5331d9SJerome Glisse 	"R520",
741b5331d9SJerome Glisse 	"RV530",
751b5331d9SJerome Glisse 	"RV560",
761b5331d9SJerome Glisse 	"RV570",
771b5331d9SJerome Glisse 	"R580",
781b5331d9SJerome Glisse 	"R600",
791b5331d9SJerome Glisse 	"RV610",
801b5331d9SJerome Glisse 	"RV630",
811b5331d9SJerome Glisse 	"RV670",
821b5331d9SJerome Glisse 	"RV620",
831b5331d9SJerome Glisse 	"RV635",
841b5331d9SJerome Glisse 	"RS780",
851b5331d9SJerome Glisse 	"RS880",
861b5331d9SJerome Glisse 	"RV770",
871b5331d9SJerome Glisse 	"RV730",
881b5331d9SJerome Glisse 	"RV710",
891b5331d9SJerome Glisse 	"RV740",
901b5331d9SJerome Glisse 	"CEDAR",
911b5331d9SJerome Glisse 	"REDWOOD",
921b5331d9SJerome Glisse 	"JUNIPER",
931b5331d9SJerome Glisse 	"CYPRESS",
941b5331d9SJerome Glisse 	"HEMLOCK",
95b08ebe7eSAlex Deucher 	"PALM",
964df64e65SAlex Deucher 	"SUMO",
974df64e65SAlex Deucher 	"SUMO2",
981fe18305SAlex Deucher 	"BARTS",
991fe18305SAlex Deucher 	"TURKS",
1001fe18305SAlex Deucher 	"CAICOS",
101b7cfc9feSAlex Deucher 	"CAYMAN",
1028848f759SAlex Deucher 	"ARUBA",
103cb28bb34SAlex Deucher 	"TAHITI",
104cb28bb34SAlex Deucher 	"PITCAIRN",
105cb28bb34SAlex Deucher 	"VERDE",
106624d3524SAlex Deucher 	"OLAND",
107b5d9d726SAlex Deucher 	"HAINAN",
1086eac752eSAlex Deucher 	"BONAIRE",
1096eac752eSAlex Deucher 	"KAVERI",
1106eac752eSAlex Deucher 	"KABINI",
1113bf599e8SAlex Deucher 	"HAWAII",
112b0a9f22aSSamuel Li 	"MULLINS",
1131b5331d9SJerome Glisse 	"LAST",
1141b5331d9SJerome Glisse };
1151b5331d9SJerome Glisse 
116066f1f0bSAlex Deucher #if defined(CONFIG_VGA_SWITCHEROO)
117066f1f0bSAlex Deucher bool radeon_has_atpx_dgpu_power_cntl(void);
118066f1f0bSAlex Deucher bool radeon_is_atpx_hybrid(void);
119066f1f0bSAlex Deucher #else
120066f1f0bSAlex Deucher static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
121066f1f0bSAlex Deucher static inline bool radeon_is_atpx_hybrid(void) { return false; }
122066f1f0bSAlex Deucher #endif
123066f1f0bSAlex Deucher 
1244807c5a8SAlex Deucher #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
1254807c5a8SAlex Deucher 
1264807c5a8SAlex Deucher struct radeon_px_quirk {
1274807c5a8SAlex Deucher 	u32 chip_vendor;
1284807c5a8SAlex Deucher 	u32 chip_device;
1294807c5a8SAlex Deucher 	u32 subsys_vendor;
1304807c5a8SAlex Deucher 	u32 subsys_device;
1314807c5a8SAlex Deucher 	u32 px_quirk_flags;
1324807c5a8SAlex Deucher };
1334807c5a8SAlex Deucher 
1344807c5a8SAlex Deucher static struct radeon_px_quirk radeon_px_quirk_list[] = {
1354807c5a8SAlex Deucher 	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
1364807c5a8SAlex Deucher 	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
1374807c5a8SAlex Deucher 	 */
1384807c5a8SAlex Deucher 	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
1394807c5a8SAlex Deucher 	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
1404807c5a8SAlex Deucher 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
1414807c5a8SAlex Deucher 	 */
1424807c5a8SAlex Deucher 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
143ff1b1294SAlex Deucher 	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
144ff1b1294SAlex Deucher 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
145ff1b1294SAlex Deucher 	 */
146ff1b1294SAlex Deucher 	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
1474eb59793SAlex Deucher 	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
1484eb59793SAlex Deucher 	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
1494eb59793SAlex Deucher 	 */
1504eb59793SAlex Deucher 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
151eb40c86aSNico Sneck 	/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
152eb40c86aSNico Sneck 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
153eb40c86aSNico Sneck 	 */
154eb40c86aSNico Sneck 	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
1554807c5a8SAlex Deucher 	{ 0, 0, 0, 0, 0 },
1564807c5a8SAlex Deucher };
1574807c5a8SAlex Deucher 
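/*
 * Illustrative note: a quirk entry matches on PCI vendor/device plus
 * subsystem vendor/device, so a new entry for a misbehaving laptop can be
 * built directly from `lspci -nn` output; e.g. a GPU reported as
 * [1002:6741] with subsystem [1043:108c] corresponds to the
 * { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX }
 * entry above.
 */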
15890c4cde9SAlex Deucher bool radeon_is_px(struct drm_device *dev)
15990c4cde9SAlex Deucher {
16090c4cde9SAlex Deucher 	struct radeon_device *rdev = dev->dev_private;
16190c4cde9SAlex Deucher 
16290c4cde9SAlex Deucher 	if (rdev->flags & RADEON_IS_PX)
16390c4cde9SAlex Deucher 		return true;
16490c4cde9SAlex Deucher 	return false;
16590c4cde9SAlex Deucher }
16610ebc0bcSDave Airlie 
1674807c5a8SAlex Deucher static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
1684807c5a8SAlex Deucher {
1694807c5a8SAlex Deucher 	struct radeon_px_quirk *p = radeon_px_quirk_list;
1704807c5a8SAlex Deucher 
1714807c5a8SAlex Deucher 	/* Apply PX quirks */
1724807c5a8SAlex Deucher 	while (p && p->chip_device != 0) {
1734807c5a8SAlex Deucher 		if (rdev->pdev->vendor == p->chip_vendor &&
1744807c5a8SAlex Deucher 		    rdev->pdev->device == p->chip_device &&
1754807c5a8SAlex Deucher 		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
1764807c5a8SAlex Deucher 		    rdev->pdev->subsystem_device == p->subsys_device) {
1774807c5a8SAlex Deucher 			rdev->px_quirk_flags = p->px_quirk_flags;
1784807c5a8SAlex Deucher 			break;
1794807c5a8SAlex Deucher 		}
1804807c5a8SAlex Deucher 		++p;
1814807c5a8SAlex Deucher 	}
1824807c5a8SAlex Deucher 
1834807c5a8SAlex Deucher 	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
1844807c5a8SAlex Deucher 		rdev->flags &= ~RADEON_IS_PX;
185066f1f0bSAlex Deucher 
186066f1f0bSAlex Deucher 	/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
187066f1f0bSAlex Deucher 	if (!radeon_is_atpx_hybrid() &&
188066f1f0bSAlex Deucher 	    !radeon_has_atpx_dgpu_power_cntl())
189066f1f0bSAlex Deucher 		rdev->flags &= ~RADEON_IS_PX;
1904807c5a8SAlex Deucher }
1914807c5a8SAlex Deucher 
1920c195119SAlex Deucher /**
1932e1b65f9SAlex Deucher  * radeon_program_register_sequence - program an array of registers.
1942e1b65f9SAlex Deucher  *
1952e1b65f9SAlex Deucher  * @rdev: radeon_device pointer
1962e1b65f9SAlex Deucher  * @registers: pointer to the register array
1972e1b65f9SAlex Deucher  * @array_size: size of the register array
1982e1b65f9SAlex Deucher  *
1992e1b65f9SAlex Deucher  * Programs an array of registers with AND and OR masks.
2002e1b65f9SAlex Deucher  * This is a helper for setting golden registers.
2012e1b65f9SAlex Deucher  */
2022e1b65f9SAlex Deucher void radeon_program_register_sequence(struct radeon_device *rdev,
2032e1b65f9SAlex Deucher 				      const u32 *registers,
2042e1b65f9SAlex Deucher 				      const u32 array_size)
2052e1b65f9SAlex Deucher {
2062e1b65f9SAlex Deucher 	u32 tmp, reg, and_mask, or_mask;
2072e1b65f9SAlex Deucher 	int i;
2082e1b65f9SAlex Deucher 
2092e1b65f9SAlex Deucher 	if (array_size % 3)
2102e1b65f9SAlex Deucher 		return;
2112e1b65f9SAlex Deucher 
2122e1b65f9SAlex Deucher 	for (i = 0; i < array_size; i += 3) {
2132e1b65f9SAlex Deucher 		reg = registers[i + 0];
2142e1b65f9SAlex Deucher 		and_mask = registers[i + 1];
2152e1b65f9SAlex Deucher 		or_mask = registers[i + 2];
2162e1b65f9SAlex Deucher 
2172e1b65f9SAlex Deucher 		if (and_mask == 0xffffffff) {
2182e1b65f9SAlex Deucher 			tmp = or_mask;
2192e1b65f9SAlex Deucher 		} else {
2202e1b65f9SAlex Deucher 			tmp = RREG32(reg);
2212e1b65f9SAlex Deucher 			tmp &= ~and_mask;
2222e1b65f9SAlex Deucher 			tmp |= or_mask;
2232e1b65f9SAlex Deucher 		}
2242e1b65f9SAlex Deucher 		WREG32(reg, tmp);
2252e1b65f9SAlex Deucher 	}
2262e1b65f9SAlex Deucher }
2272e1b65f9SAlex Deucher 
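/*
 * Illustrative sketch: a hypothetical "golden register" table as consumed by
 * radeon_program_register_sequence().  Entries are {offset, AND mask, OR
 * value} triplets; an AND mask of 0xffffffff means "write the OR value
 * directly" instead of a read-modify-write.  The register offsets below are
 * made up for the example.
 *
 *	static const u32 example_golden_registers[] = {
 *		0x9a10, 0x00010000, 0x00000000,	// RMW: clear bit 16
 *		0x3f90, 0xffffffff, 0x00000100,	// direct write
 *	};
 *
 *	radeon_program_register_sequence(rdev, example_golden_registers,
 *					 (u32)ARRAY_SIZE(example_golden_registers));
 */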
2281a0041b8SAlex Deucher void radeon_pci_config_reset(struct radeon_device *rdev)
2291a0041b8SAlex Deucher {
2301a0041b8SAlex Deucher 	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
2311a0041b8SAlex Deucher }
2321a0041b8SAlex Deucher 
2332e1b65f9SAlex Deucher /**
2340c195119SAlex Deucher  * radeon_surface_init - Clear GPU surface registers.
2350c195119SAlex Deucher  *
2360c195119SAlex Deucher  * @rdev: radeon_device pointer
2370c195119SAlex Deucher  *
2380c195119SAlex Deucher  * Clear GPU surface registers (r1xx-r5xx).
239b1e3a6d1SMichel Dänzer  */
2403ce0a23dSJerome Glisse void radeon_surface_init(struct radeon_device *rdev)
241b1e3a6d1SMichel Dänzer {
242b1e3a6d1SMichel Dänzer 	/* FIXME: check this out */
243b1e3a6d1SMichel Dänzer 	if (rdev->family < CHIP_R600) {
244b1e3a6d1SMichel Dänzer 		int i;
245b1e3a6d1SMichel Dänzer 
246550e2d92SDave Airlie 		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
247550e2d92SDave Airlie 			if (rdev->surface_regs[i].bo)
248550e2d92SDave Airlie 				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
249550e2d92SDave Airlie 			else
250550e2d92SDave Airlie 				radeon_clear_surface_reg(rdev, i);
251b1e3a6d1SMichel Dänzer 		}
252e024e110SDave Airlie 		/* enable surfaces */
253e024e110SDave Airlie 		WREG32(RADEON_SURFACE_CNTL, 0);
254b1e3a6d1SMichel Dänzer 	}
255b1e3a6d1SMichel Dänzer }
256b1e3a6d1SMichel Dänzer 
257b1e3a6d1SMichel Dänzer /*
258771fe6b9SJerome Glisse  * GPU scratch register helper functions.
259771fe6b9SJerome Glisse  */
2600c195119SAlex Deucher /**
2610c195119SAlex Deucher  * radeon_scratch_init - Init scratch register driver information.
2620c195119SAlex Deucher  *
2630c195119SAlex Deucher  * @rdev: radeon_device pointer
2640c195119SAlex Deucher  *
2650c195119SAlex Deucher  * Init CP scratch register driver information (r1xx-r5xx)
2660c195119SAlex Deucher  */
2673ce0a23dSJerome Glisse void radeon_scratch_init(struct radeon_device *rdev)
268771fe6b9SJerome Glisse {
269771fe6b9SJerome Glisse 	int i;
270771fe6b9SJerome Glisse 
271771fe6b9SJerome Glisse 	/* FIXME: check this out */
272771fe6b9SJerome Glisse 	if (rdev->family < CHIP_R300) {
273771fe6b9SJerome Glisse 		rdev->scratch.num_reg = 5;
274771fe6b9SJerome Glisse 	} else {
275771fe6b9SJerome Glisse 		rdev->scratch.num_reg = 7;
276771fe6b9SJerome Glisse 	}
277724c80e1SAlex Deucher 	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
278771fe6b9SJerome Glisse 	for (i = 0; i < rdev->scratch.num_reg; i++) {
279771fe6b9SJerome Glisse 		rdev->scratch.free[i] = true;
280724c80e1SAlex Deucher 		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
281771fe6b9SJerome Glisse 	}
282771fe6b9SJerome Glisse }
283771fe6b9SJerome Glisse 
2840c195119SAlex Deucher /**
2850c195119SAlex Deucher  * radeon_scratch_get - Allocate a scratch register
2860c195119SAlex Deucher  *
2870c195119SAlex Deucher  * @rdev: radeon_device pointer
2880c195119SAlex Deucher  * @reg: scratch register mmio offset
2890c195119SAlex Deucher  *
2900c195119SAlex Deucher  * Allocate a CP scratch register for use by the driver (all asics).
2910c195119SAlex Deucher  * Returns 0 on success or -EINVAL on failure.
2920c195119SAlex Deucher  */
293771fe6b9SJerome Glisse int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
294771fe6b9SJerome Glisse {
295771fe6b9SJerome Glisse 	int i;
296771fe6b9SJerome Glisse 
297771fe6b9SJerome Glisse 	for (i = 0; i < rdev->scratch.num_reg; i++) {
298771fe6b9SJerome Glisse 		if (rdev->scratch.free[i]) {
299771fe6b9SJerome Glisse 			rdev->scratch.free[i] = false;
300771fe6b9SJerome Glisse 			*reg = rdev->scratch.reg[i];
301771fe6b9SJerome Glisse 			return 0;
302771fe6b9SJerome Glisse 		}
303771fe6b9SJerome Glisse 	}
304771fe6b9SJerome Glisse 	return -EINVAL;
305771fe6b9SJerome Glisse }
306771fe6b9SJerome Glisse 
3070c195119SAlex Deucher /**
3080c195119SAlex Deucher  * radeon_scratch_free - Free a scratch register
3090c195119SAlex Deucher  *
3100c195119SAlex Deucher  * @rdev: radeon_device pointer
3110c195119SAlex Deucher  * @reg: scratch register mmio offset
3120c195119SAlex Deucher  *
3130c195119SAlex Deucher  * Free a CP scratch register allocated for use by the driver (all asics)
3140c195119SAlex Deucher  */
315771fe6b9SJerome Glisse void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
316771fe6b9SJerome Glisse {
317771fe6b9SJerome Glisse 	int i;
318771fe6b9SJerome Glisse 
319771fe6b9SJerome Glisse 	for (i = 0; i < rdev->scratch.num_reg; i++) {
320771fe6b9SJerome Glisse 		if (rdev->scratch.reg[i] == reg) {
321771fe6b9SJerome Glisse 			rdev->scratch.free[i] = true;
322771fe6b9SJerome Glisse 			return;
323771fe6b9SJerome Glisse 		}
324771fe6b9SJerome Glisse 	}
325771fe6b9SJerome Glisse }
326771fe6b9SJerome Glisse 
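/*
 * Typical usage of the scratch helpers (a sketch modeled on the ring test
 * pattern elsewhere in the driver, not code from this file): grab a scratch
 * register, have the CP write a marker into it, poll for it, then release
 * the register again.
 *
 *	uint32_t scratch;
 *	int r = radeon_scratch_get(rdev, &scratch);
 *	if (r) {
 *		DRM_ERROR("failed to get scratch reg (%d)\n", r);
 *		return r;
 *	}
 *	WREG32(scratch, 0xCAFEDEAD);
 *	// ... emit a packet that writes a new value to 'scratch', then poll ...
 *	radeon_scratch_free(rdev, scratch);
 */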
3270c195119SAlex Deucher /*
32875efdee1SAlex Deucher  * GPU doorbell aperture helper functions.
32975efdee1SAlex Deucher  */
33075efdee1SAlex Deucher /**
33175efdee1SAlex Deucher  * radeon_doorbell_init - Init doorbell driver information.
33275efdee1SAlex Deucher  *
33375efdee1SAlex Deucher  * @rdev: radeon_device pointer
33475efdee1SAlex Deucher  *
33575efdee1SAlex Deucher  * Init doorbell driver information (CIK)
33675efdee1SAlex Deucher  * Returns 0 on success, error on failure.
33775efdee1SAlex Deucher  */
33828f5a6cdSRashika Kheria static int radeon_doorbell_init(struct radeon_device *rdev)
33975efdee1SAlex Deucher {
34075efdee1SAlex Deucher 	/* doorbell bar mapping */
34175efdee1SAlex Deucher 	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
34275efdee1SAlex Deucher 	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
34375efdee1SAlex Deucher 
344d5754ab8SAndrew Lewycky 	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
345d5754ab8SAndrew Lewycky 	if (rdev->doorbell.num_doorbells == 0)
346d5754ab8SAndrew Lewycky 		return -EINVAL;
34775efdee1SAlex Deucher 
348d5754ab8SAndrew Lewycky 	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
34975efdee1SAlex Deucher 	if (rdev->doorbell.ptr == NULL) {
35075efdee1SAlex Deucher 		return -ENOMEM;
35175efdee1SAlex Deucher 	}
35275efdee1SAlex Deucher 	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
35375efdee1SAlex Deucher 	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
35475efdee1SAlex Deucher 
355d5754ab8SAndrew Lewycky 	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
35675efdee1SAlex Deucher 
35775efdee1SAlex Deucher 	return 0;
35875efdee1SAlex Deucher }
35975efdee1SAlex Deucher 
36075efdee1SAlex Deucher /**
36175efdee1SAlex Deucher  * radeon_doorbell_fini - Tear down doorbell driver information.
36275efdee1SAlex Deucher  *
36375efdee1SAlex Deucher  * @rdev: radeon_device pointer
36475efdee1SAlex Deucher  *
36575efdee1SAlex Deucher  * Tear down doorbell driver information (CIK)
36675efdee1SAlex Deucher  */
36728f5a6cdSRashika Kheria static void radeon_doorbell_fini(struct radeon_device *rdev)
36875efdee1SAlex Deucher {
36975efdee1SAlex Deucher 	iounmap(rdev->doorbell.ptr);
37075efdee1SAlex Deucher 	rdev->doorbell.ptr = NULL;
37175efdee1SAlex Deucher }
37275efdee1SAlex Deucher 
37375efdee1SAlex Deucher /**
374d5754ab8SAndrew Lewycky  * radeon_doorbell_get - Allocate a doorbell entry
37575efdee1SAlex Deucher  *
37675efdee1SAlex Deucher  * @rdev: radeon_device pointer
377d5754ab8SAndrew Lewycky  * @doorbell: doorbell index
37875efdee1SAlex Deucher  *
379d5754ab8SAndrew Lewycky  * Allocate a doorbell for use by the driver (all asics).
38075efdee1SAlex Deucher  * Returns 0 on success or -EINVAL on failure.
38175efdee1SAlex Deucher  */
38275efdee1SAlex Deucher int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
38375efdee1SAlex Deucher {
384d5754ab8SAndrew Lewycky 	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
385d5754ab8SAndrew Lewycky 	if (offset < rdev->doorbell.num_doorbells) {
386d5754ab8SAndrew Lewycky 		__set_bit(offset, rdev->doorbell.used);
387d5754ab8SAndrew Lewycky 		*doorbell = offset;
38875efdee1SAlex Deucher 		return 0;
389d5754ab8SAndrew Lewycky 	} else {
39075efdee1SAlex Deucher 		return -EINVAL;
39175efdee1SAlex Deucher 	}
392d5754ab8SAndrew Lewycky }
39375efdee1SAlex Deucher 
39475efdee1SAlex Deucher /**
395d5754ab8SAndrew Lewycky  * radeon_doorbell_free - Free a doorbell entry
39675efdee1SAlex Deucher  *
39775efdee1SAlex Deucher  * @rdev: radeon_device pointer
398d5754ab8SAndrew Lewycky  * @doorbell: doorbell index
39975efdee1SAlex Deucher  *
400d5754ab8SAndrew Lewycky  * Free a doorbell allocated for use by the driver (all asics)
40175efdee1SAlex Deucher  */
40275efdee1SAlex Deucher void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
40375efdee1SAlex Deucher {
404d5754ab8SAndrew Lewycky 	if (doorbell < rdev->doorbell.num_doorbells)
405d5754ab8SAndrew Lewycky 		__clear_bit(doorbell, rdev->doorbell.used);
40675efdee1SAlex Deucher }
40775efdee1SAlex Deucher 
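/*
 * Illustrative usage (a sketch, not code from this file): a ring that needs a
 * doorbell allocates an index at init time and releases it on teardown; the
 * index is later turned into an MMIO offset by the asic-specific code.
 *
 *	u32 doorbell_index;
 *	if (radeon_doorbell_get(rdev, &doorbell_index) == 0) {
 *		ring->doorbell_index = doorbell_index;
 *		// ... use the ring ...
 *		radeon_doorbell_free(rdev, ring->doorbell_index);
 *	}
 */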
40875efdee1SAlex Deucher /*
4090c195119SAlex Deucher  * radeon_wb_*()
41004f61f6cSCai Huoqing  * Writeback is the method by which the GPU updates special pages
4110c195119SAlex Deucher  * in memory with the status of certain GPU events (fences, ring pointers,
4120c195119SAlex Deucher  * etc.).
4130c195119SAlex Deucher  */
4140c195119SAlex Deucher 
4150c195119SAlex Deucher /**
4160c195119SAlex Deucher  * radeon_wb_disable - Disable Writeback
4170c195119SAlex Deucher  *
4180c195119SAlex Deucher  * @rdev: radeon_device pointer
4190c195119SAlex Deucher  *
4200c195119SAlex Deucher  * Disables Writeback (all asics).  Used for suspend.
4210c195119SAlex Deucher  */
422724c80e1SAlex Deucher void radeon_wb_disable(struct radeon_device *rdev)
423724c80e1SAlex Deucher {
424724c80e1SAlex Deucher 	rdev->wb.enabled = false;
425724c80e1SAlex Deucher }
426724c80e1SAlex Deucher 
4270c195119SAlex Deucher /**
4280c195119SAlex Deucher  * radeon_wb_fini - Disable Writeback and free memory
4290c195119SAlex Deucher  *
4300c195119SAlex Deucher  * @rdev: radeon_device pointer
4310c195119SAlex Deucher  *
4320c195119SAlex Deucher  * Disables Writeback and frees the Writeback memory (all asics).
4330c195119SAlex Deucher  * Used at driver shutdown.
4340c195119SAlex Deucher  */
435724c80e1SAlex Deucher void radeon_wb_fini(struct radeon_device *rdev)
436724c80e1SAlex Deucher {
437724c80e1SAlex Deucher 	radeon_wb_disable(rdev);
438724c80e1SAlex Deucher 	if (rdev->wb.wb_obj) {
439089920f2SJerome Glisse 		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
440089920f2SJerome Glisse 			radeon_bo_kunmap(rdev->wb.wb_obj);
441089920f2SJerome Glisse 			radeon_bo_unpin(rdev->wb.wb_obj);
442089920f2SJerome Glisse 			radeon_bo_unreserve(rdev->wb.wb_obj);
443089920f2SJerome Glisse 		}
444724c80e1SAlex Deucher 		radeon_bo_unref(&rdev->wb.wb_obj);
445724c80e1SAlex Deucher 		rdev->wb.wb = NULL;
446724c80e1SAlex Deucher 		rdev->wb.wb_obj = NULL;
447724c80e1SAlex Deucher 	}
448724c80e1SAlex Deucher }
449724c80e1SAlex Deucher 
4500c195119SAlex Deucher /**
4510c195119SAlex Deucher  * radeon_wb_init - Init Writeback driver info and allocate memory
4520c195119SAlex Deucher  *
4530c195119SAlex Deucher  * @rdev: radeon_device pointer
4540c195119SAlex Deucher  *
4550c195119SAlex Deucher  * Initializes writeback and allocates the writeback memory (all asics).
4560c195119SAlex Deucher  * Used at driver startup.
4570c195119SAlex Deucher  * Returns 0 on success or an -error on failure.
4580c195119SAlex Deucher  */
459724c80e1SAlex Deucher int radeon_wb_init(struct radeon_device *rdev)
460724c80e1SAlex Deucher {
461724c80e1SAlex Deucher 	int r;
462724c80e1SAlex Deucher 
463724c80e1SAlex Deucher 	if (rdev->wb.wb_obj == NULL) {
464441921d5SDaniel Vetter 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
465831b6966SMaarten Lankhorst 				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
46602376d82SMichel Dänzer 				     &rdev->wb.wb_obj);
467724c80e1SAlex Deucher 		if (r) {
468724c80e1SAlex Deucher 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
469724c80e1SAlex Deucher 			return r;
470724c80e1SAlex Deucher 		}
471724c80e1SAlex Deucher 		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
472724c80e1SAlex Deucher 		if (unlikely(r != 0)) {
473724c80e1SAlex Deucher 			radeon_wb_fini(rdev);
474724c80e1SAlex Deucher 			return r;
475724c80e1SAlex Deucher 		}
476724c80e1SAlex Deucher 		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
477724c80e1SAlex Deucher 				&rdev->wb.gpu_addr);
478724c80e1SAlex Deucher 		if (r) {
479724c80e1SAlex Deucher 			radeon_bo_unreserve(rdev->wb.wb_obj);
480724c80e1SAlex Deucher 			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
481724c80e1SAlex Deucher 			radeon_wb_fini(rdev);
482724c80e1SAlex Deucher 			return r;
483724c80e1SAlex Deucher 		}
484724c80e1SAlex Deucher 		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
485724c80e1SAlex Deucher 		radeon_bo_unreserve(rdev->wb.wb_obj);
486724c80e1SAlex Deucher 		if (r) {
487724c80e1SAlex Deucher 			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
488724c80e1SAlex Deucher 			radeon_wb_fini(rdev);
489724c80e1SAlex Deucher 			return r;
490724c80e1SAlex Deucher 		}
491089920f2SJerome Glisse 	}
492724c80e1SAlex Deucher 
493e6ba7599SAlex Deucher 	/* clear wb memory */
494e6ba7599SAlex Deucher 	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
495d0f8a854SAlex Deucher 	/* disable event_write fences */
496d0f8a854SAlex Deucher 	rdev->wb.use_event = false;
497724c80e1SAlex Deucher 	/* disabled via module param */
4983b7a2b24SJerome Glisse 	if (radeon_no_wb == 1) {
499724c80e1SAlex Deucher 		rdev->wb.enabled = false;
5003b7a2b24SJerome Glisse 	} else {
501724c80e1SAlex Deucher 		if (rdev->flags & RADEON_IS_AGP) {
50228eebb70SAlex Deucher 			/* often unreliable on AGP */
50328eebb70SAlex Deucher 			rdev->wb.enabled = false;
50428eebb70SAlex Deucher 		} else if (rdev->family < CHIP_R300) {
50528eebb70SAlex Deucher 			/* often unreliable on pre-r300 */
506724c80e1SAlex Deucher 			rdev->wb.enabled = false;
507d0f8a854SAlex Deucher 		} else {
508724c80e1SAlex Deucher 			rdev->wb.enabled = true;
509d0f8a854SAlex Deucher 			/* event_write fences are only available on r600+ */
5103b7a2b24SJerome Glisse 			if (rdev->family >= CHIP_R600) {
511d0f8a854SAlex Deucher 				rdev->wb.use_event = true;
512d0f8a854SAlex Deucher 			}
513724c80e1SAlex Deucher 		}
5143b7a2b24SJerome Glisse 	}
515c994ead6SAlex Deucher 	/* always use writeback/events on NI, APUs */
516c994ead6SAlex Deucher 	if (rdev->family >= CHIP_PALM) {
5177d52785dSAlex Deucher 		rdev->wb.enabled = true;
5187d52785dSAlex Deucher 		rdev->wb.use_event = true;
5197d52785dSAlex Deucher 	}
520724c80e1SAlex Deucher 
521724c80e1SAlex Deucher 	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
522724c80e1SAlex Deucher 
523724c80e1SAlex Deucher 	return 0;
524724c80e1SAlex Deucher }
525724c80e1SAlex Deucher 
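/*
 * Sketch of how the writeback page is consumed once enabled (modeled on the
 * ring read-pointer path elsewhere in the driver, not code from this file):
 * each consumer is assigned a dword offset into the page and reads its
 * status from there instead of polling an MMIO register.
 *
 *	if (rdev->wb.enabled)
 *		rptr = rdev->wb.wb[ring->rptr_offs / 4];
 *	else
 *		rptr = RREG32(CP_RB_RPTR);	// register name varies per asic
 */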
526d594e46aSJerome Glisse /**
527d594e46aSJerome Glisse  * radeon_vram_location - try to find VRAM location
528d594e46aSJerome Glisse  * @rdev: radeon device structure holding all necessary informations
529d594e46aSJerome Glisse  * @mc: memory controller structure holding memory informations
530d594e46aSJerome Glisse  * @base: base address at which to put VRAM
531d594e46aSJerome Glisse  *
532d594e46aSJerome Glisse  * Function will try to place VRAM at the base address provided
533d594e46aSJerome Glisse  * as a parameter (which is so far either the PCI aperture address or,
534d594e46aSJerome Glisse  * for IGP, the TOM base address).
535d594e46aSJerome Glisse  *
536d594e46aSJerome Glisse  * If there is not enough space to fit the invisible VRAM in the 32-bit
537d594e46aSJerome Glisse  * address space, then we limit the VRAM size to the aperture.
538d594e46aSJerome Glisse  *
539d594e46aSJerome Glisse  * If we are using AGP and if the AGP aperture doesn't allow us to have
540d594e46aSJerome Glisse  * room for all the VRAM then we restrict the VRAM to the PCI aperture
541d594e46aSJerome Glisse  * size and print a warning.
542d594e46aSJerome Glisse  *
543d594e46aSJerome Glisse  * This function never fails; the worst case is limiting VRAM.
544d594e46aSJerome Glisse  *
545d594e46aSJerome Glisse  * Note: GTT start, end, size should be initialized before calling this
546d594e46aSJerome Glisse  * function on AGP platform.
547d594e46aSJerome Glisse  *
548f017853eSLee Jones  * Note 1: We don't explicitly enforce VRAM start to be aligned on VRAM size,
549d594e46aSJerome Glisse  * this shouldn't be a problem as we are using the PCI aperture as a reference.
550d594e46aSJerome Glisse  * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
551d594e46aSJerome Glisse  * not IGP.
552d594e46aSJerome Glisse  *
553f017853eSLee Jones  * Note 2: we use mc_vram_size since on some boards we need to program the mc to
554d594e46aSJerome Glisse  * cover the whole aperture even if the VRAM size is smaller than the aperture size
555d594e46aSJerome Glisse  * (Novell bug 204882 along with lots of Ubuntu ones).
556d594e46aSJerome Glisse  *
557f017853eSLee Jones  * Note 3: when limiting vram it's safe to overwrite real_vram_size because
558d594e46aSJerome Glisse  * we are not in a case where real_vram_size is smaller than mc_vram_size (i.e.
559d594e46aSJerome Glisse  * not affected by the bogus hw of Novell bug 204882 along with lots of Ubuntu
560d594e46aSJerome Glisse  * ones).
561d594e46aSJerome Glisse  *
562f017853eSLee Jones  * Note 4: IGP TOM addr should be the same as the aperture addr, we don't
563d594e46aSJerome Glisse  * explicitly check for that, though.
564d594e46aSJerome Glisse  *
565d594e46aSJerome Glisse  * FIXME: when reducing VRAM size align new size on power of 2.
566771fe6b9SJerome Glisse  */
567d594e46aSJerome Glisse void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
568771fe6b9SJerome Glisse {
5691bcb04f7SChristian König 	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
5701bcb04f7SChristian König 
571d594e46aSJerome Glisse 	mc->vram_start = base;
5729ed8b1f9SAlex Deucher 	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
573d594e46aSJerome Glisse 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
574d594e46aSJerome Glisse 		mc->real_vram_size = mc->aper_size;
575d594e46aSJerome Glisse 		mc->mc_vram_size = mc->aper_size;
576771fe6b9SJerome Glisse 	}
577d594e46aSJerome Glisse 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
5782cbeb4efSJerome Glisse 	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
579d594e46aSJerome Glisse 		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
580d594e46aSJerome Glisse 		mc->real_vram_size = mc->aper_size;
581d594e46aSJerome Glisse 		mc->mc_vram_size = mc->aper_size;
582771fe6b9SJerome Glisse 	}
583d594e46aSJerome Glisse 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
5841bcb04f7SChristian König 	if (limit && limit < mc->real_vram_size)
5851bcb04f7SChristian König 		mc->real_vram_size = limit;
586dd7cc55aSAlex Deucher 	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
587d594e46aSJerome Glisse 			mc->mc_vram_size >> 20, mc->vram_start,
588d594e46aSJerome Glisse 			mc->vram_end, mc->real_vram_size >> 20);
589771fe6b9SJerome Glisse }
590771fe6b9SJerome Glisse 
591d594e46aSJerome Glisse /**
592d594e46aSJerome Glisse  * radeon_gtt_location - try to find GTT location
593d594e46aSJerome Glisse  * @rdev: radeon device structure holding all necessary informations
594d594e46aSJerome Glisse  * @mc: memory controller structure holding memory informations
595d594e46aSJerome Glisse  *
596d594e46aSJerome Glisse  * Function will try to place GTT before or after VRAM.
597d594e46aSJerome Glisse  *
598d594e46aSJerome Glisse  * If GTT size is bigger than the space left then we adjust GTT size.
599d594e46aSJerome Glisse  * Thus this function never fails.
600d594e46aSJerome Glisse  *
601d594e46aSJerome Glisse  * FIXME: when reducing GTT size align new size on power of 2.
602d594e46aSJerome Glisse  */
603d594e46aSJerome Glisse void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
604d594e46aSJerome Glisse {
605d594e46aSJerome Glisse 	u64 size_af, size_bf;
606d594e46aSJerome Glisse 
6079ed8b1f9SAlex Deucher 	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
6088d369bb1SAlex Deucher 	size_bf = mc->vram_start & ~mc->gtt_base_align;
609d594e46aSJerome Glisse 	if (size_bf > size_af) {
610d594e46aSJerome Glisse 		if (mc->gtt_size > size_bf) {
611d594e46aSJerome Glisse 			dev_warn(rdev->dev, "limiting GTT\n");
612d594e46aSJerome Glisse 			mc->gtt_size = size_bf;
613d594e46aSJerome Glisse 		}
6148d369bb1SAlex Deucher 		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
615d594e46aSJerome Glisse 	} else {
616d594e46aSJerome Glisse 		if (mc->gtt_size > size_af) {
617d594e46aSJerome Glisse 			dev_warn(rdev->dev, "limiting GTT\n");
618d594e46aSJerome Glisse 			mc->gtt_size = size_af;
619d594e46aSJerome Glisse 		}
6208d369bb1SAlex Deucher 		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
621d594e46aSJerome Glisse 	}
622d594e46aSJerome Glisse 	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
623dd7cc55aSAlex Deucher 	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
624d594e46aSJerome Glisse 			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
625d594e46aSJerome Glisse }
626771fe6b9SJerome Glisse 
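/*
 * Worked example with illustrative numbers (assuming a 32-bit mc_mask and
 * gtt_base_align == 0): 512M of VRAM placed at base 0xd0000000 gives
 * vram_start = 0xd0000000 and vram_end = 0xefffffff; radeon_gtt_location()
 * then compares the space below vram_start (~3.25G) with the space above
 * vram_end (256M), picks the larger gap, and a 512M GTT ends up at
 * gtt_start = 0xb0000000, gtt_end = 0xcfffffff.
 */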
627771fe6b9SJerome Glisse /*
628771fe6b9SJerome Glisse  * GPU helper functions.
629771fe6b9SJerome Glisse  */
63005082b8bSAlex Deucher 
631f017853eSLee Jones /*
63205082b8bSAlex Deucher  * radeon_device_is_virtual - check if we are running in a virtual environment
63305082b8bSAlex Deucher  *
63405082b8bSAlex Deucher  * Check if the asic has been passed through to a VM (all asics).
63505082b8bSAlex Deucher  * Used at driver startup.
63605082b8bSAlex Deucher  * Returns true if virtual or false if not.
63705082b8bSAlex Deucher  */
638a801abe4SAlex Deucher bool radeon_device_is_virtual(void)
63905082b8bSAlex Deucher {
64005082b8bSAlex Deucher #ifdef CONFIG_X86
64105082b8bSAlex Deucher 	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
64205082b8bSAlex Deucher #else
64305082b8bSAlex Deucher 	return false;
64405082b8bSAlex Deucher #endif
64505082b8bSAlex Deucher }
64605082b8bSAlex Deucher 
6470c195119SAlex Deucher /**
6480c195119SAlex Deucher  * radeon_card_posted - check if the hw has already been initialized
6490c195119SAlex Deucher  *
6500c195119SAlex Deucher  * @rdev: radeon_device pointer
6510c195119SAlex Deucher  *
6520c195119SAlex Deucher  * Check if the asic has been initialized (all asics).
6530c195119SAlex Deucher  * Used at driver startup.
6540c195119SAlex Deucher  * Returns true if initialized or false if not.
6550c195119SAlex Deucher  */
6569f022ddfSJerome Glisse bool radeon_card_posted(struct radeon_device *rdev)
657771fe6b9SJerome Glisse {
658771fe6b9SJerome Glisse 	uint32_t reg;
659771fe6b9SJerome Glisse 
660884031f0SAlex Deucher 	/* for pass through, always force asic_init for CI */
661884031f0SAlex Deucher 	if (rdev->family >= CHIP_BONAIRE &&
662884031f0SAlex Deucher 	    radeon_device_is_virtual())
66305082b8bSAlex Deucher 		return false;
66405082b8bSAlex Deucher 
66550a583f6SAlex Deucher 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
66683e68189SMatt Fleming 	if (efi_enabled(EFI_BOOT) &&
66750a583f6SAlex Deucher 	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
66850a583f6SAlex Deucher 	    (rdev->family < CHIP_R600))
669bcc65fd8SMatthew Garrett 		return false;
670bcc65fd8SMatthew Garrett 
6712cf3a4fcSAlex Deucher 	if (ASIC_IS_NODCE(rdev))
6722cf3a4fcSAlex Deucher 		goto check_memsize;
6732cf3a4fcSAlex Deucher 
674771fe6b9SJerome Glisse 	/* first check CRTCs */
67509fb8bd1SAlex Deucher 	if (ASIC_IS_DCE4(rdev)) {
67618007401SAlex Deucher 		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
67718007401SAlex Deucher 			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
67809fb8bd1SAlex Deucher 			if (rdev->num_crtc >= 4) {
67909fb8bd1SAlex Deucher 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
68009fb8bd1SAlex Deucher 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
68109fb8bd1SAlex Deucher 			}
68209fb8bd1SAlex Deucher 			if (rdev->num_crtc >= 6) {
68309fb8bd1SAlex Deucher 				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
684bcc1c2a1SAlex Deucher 					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
68509fb8bd1SAlex Deucher 			}
686bcc1c2a1SAlex Deucher 		if (reg & EVERGREEN_CRTC_MASTER_EN)
687bcc1c2a1SAlex Deucher 			return true;
688bcc1c2a1SAlex Deucher 	} else if (ASIC_IS_AVIVO(rdev)) {
689771fe6b9SJerome Glisse 		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
690771fe6b9SJerome Glisse 		      RREG32(AVIVO_D2CRTC_CONTROL);
691771fe6b9SJerome Glisse 		if (reg & AVIVO_CRTC_EN) {
692771fe6b9SJerome Glisse 			return true;
693771fe6b9SJerome Glisse 		}
694771fe6b9SJerome Glisse 	} else {
695771fe6b9SJerome Glisse 		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
696771fe6b9SJerome Glisse 		      RREG32(RADEON_CRTC2_GEN_CNTL);
697771fe6b9SJerome Glisse 		if (reg & RADEON_CRTC_EN) {
698771fe6b9SJerome Glisse 			return true;
699771fe6b9SJerome Glisse 		}
700771fe6b9SJerome Glisse 	}
701771fe6b9SJerome Glisse 
7022cf3a4fcSAlex Deucher check_memsize:
703771fe6b9SJerome Glisse 	/* then check MEM_SIZE, in case the crtcs are off */
704771fe6b9SJerome Glisse 	if (rdev->family >= CHIP_R600)
705771fe6b9SJerome Glisse 		reg = RREG32(R600_CONFIG_MEMSIZE);
706771fe6b9SJerome Glisse 	else
707771fe6b9SJerome Glisse 		reg = RREG32(RADEON_CONFIG_MEMSIZE);
708771fe6b9SJerome Glisse 
709771fe6b9SJerome Glisse 	if (reg)
710771fe6b9SJerome Glisse 		return true;
711771fe6b9SJerome Glisse 
712771fe6b9SJerome Glisse 	return false;
713771fe6b9SJerome Glisse 
714771fe6b9SJerome Glisse }
715771fe6b9SJerome Glisse 
7160c195119SAlex Deucher /**
7170c195119SAlex Deucher  * radeon_update_bandwidth_info - update display bandwidth params
7180c195119SAlex Deucher  *
7190c195119SAlex Deucher  * @rdev: radeon_device pointer
7200c195119SAlex Deucher  *
7210c195119SAlex Deucher  * Used when sclk/mclk are switched or display modes are set.
7220c195119SAlex Deucher  * params are used to calculate display watermarks (all asics)
7230c195119SAlex Deucher  */
724f47299c5SAlex Deucher void radeon_update_bandwidth_info(struct radeon_device *rdev)
725f47299c5SAlex Deucher {
726f47299c5SAlex Deucher 	fixed20_12 a;
7278807286eSAlex Deucher 	u32 sclk = rdev->pm.current_sclk;
7288807286eSAlex Deucher 	u32 mclk = rdev->pm.current_mclk;
729f47299c5SAlex Deucher 
7308807286eSAlex Deucher 	/* sclk/mclk in Mhz */
73168adac5eSBen Skeggs 	a.full = dfixed_const(100);
73268adac5eSBen Skeggs 	rdev->pm.sclk.full = dfixed_const(sclk);
73368adac5eSBen Skeggs 	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
73468adac5eSBen Skeggs 	rdev->pm.mclk.full = dfixed_const(mclk);
73568adac5eSBen Skeggs 	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
736f47299c5SAlex Deucher 
7378807286eSAlex Deucher 	if (rdev->flags & RADEON_IS_IGP) {
73868adac5eSBen Skeggs 		a.full = dfixed_const(16);
739f47299c5SAlex Deucher 		/* core_bandwidth = sclk(Mhz) * 16 */
74068adac5eSBen Skeggs 		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
741f47299c5SAlex Deucher 	}
742f47299c5SAlex Deucher }
743f47299c5SAlex Deucher 
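/*
 * Worked example (illustrative; assumes current_sclk/current_mclk are kept
 * in 10 kHz units as reported by the clock query code): current_sclk ==
 * 60000 corresponds to a 600 MHz engine clock, so after the divide by the
 * fixed-point constant 100 rdev->pm.sclk holds 600.0 in 20.12 fixed point,
 * and on an IGP core_bandwidth becomes 600 * 16 = 9600 for the display
 * watermark calculations.
 */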
7440c195119SAlex Deucher /**
7450c195119SAlex Deucher  * radeon_boot_test_post_card - check and possibly initialize the hw
7460c195119SAlex Deucher  *
7470c195119SAlex Deucher  * @rdev: radeon_device pointer
7480c195119SAlex Deucher  *
7490c195119SAlex Deucher  * Check if the asic is initialized and if not, attempt to initialize
7500c195119SAlex Deucher  * it (all asics).
7510c195119SAlex Deucher  * Returns true if initialized or false if not.
7520c195119SAlex Deucher  */
75372542d77SDave Airlie bool radeon_boot_test_post_card(struct radeon_device *rdev)
75472542d77SDave Airlie {
75572542d77SDave Airlie 	if (radeon_card_posted(rdev))
75672542d77SDave Airlie 		return true;
75772542d77SDave Airlie 
75872542d77SDave Airlie 	if (rdev->bios) {
75972542d77SDave Airlie 		DRM_INFO("GPU not posted. posting now...\n");
76072542d77SDave Airlie 		if (rdev->is_atom_bios)
76172542d77SDave Airlie 			atom_asic_init(rdev->mode_info.atom_context);
76272542d77SDave Airlie 		else
763*5e3a0f77SWu Hoi Pok 			radeon_combios_asic_init(rdev_to_drm(rdev));
76472542d77SDave Airlie 		return true;
76572542d77SDave Airlie 	} else {
76672542d77SDave Airlie 		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
76772542d77SDave Airlie 		return false;
76872542d77SDave Airlie 	}
76972542d77SDave Airlie }
77072542d77SDave Airlie 
7710c195119SAlex Deucher /**
7720c195119SAlex Deucher  * radeon_dummy_page_init - init dummy page used by the driver
7730c195119SAlex Deucher  *
7740c195119SAlex Deucher  * @rdev: radeon_device pointer
7750c195119SAlex Deucher  *
7760c195119SAlex Deucher  * Allocate the dummy page used by the driver (all asics).
7770c195119SAlex Deucher  * This dummy page is used by the driver as a filler for gart entries
7780c195119SAlex Deucher  * when pages are taken out of the GART
7790c195119SAlex Deucher  * Returns 0 on success, -ENOMEM on failure.
7800c195119SAlex Deucher  */
7813ce0a23dSJerome Glisse int radeon_dummy_page_init(struct radeon_device *rdev)
7823ce0a23dSJerome Glisse {
78382568565SDave Airlie 	if (rdev->dummy_page.page)
78482568565SDave Airlie 		return 0;
7853ce0a23dSJerome Glisse 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
7863ce0a23dSJerome Glisse 	if (rdev->dummy_page.page == NULL)
7873ce0a23dSJerome Glisse 		return -ENOMEM;
7887e7726ecSNirmoy Das 	rdev->dummy_page.addr = dma_map_page(&rdev->pdev->dev, rdev->dummy_page.page,
789a5f61dd4SChristophe JAILLET 					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
7907e7726ecSNirmoy Das 	if (dma_mapping_error(&rdev->pdev->dev, rdev->dummy_page.addr)) {
791a30f6fb7SBenjamin Herrenschmidt 		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
7923ce0a23dSJerome Glisse 		__free_page(rdev->dummy_page.page);
7933ce0a23dSJerome Glisse 		rdev->dummy_page.page = NULL;
7943ce0a23dSJerome Glisse 		return -ENOMEM;
7953ce0a23dSJerome Glisse 	}
796cb658906SMichel Dänzer 	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
797cb658906SMichel Dänzer 							    RADEON_GART_PAGE_DUMMY);
7983ce0a23dSJerome Glisse 	return 0;
7993ce0a23dSJerome Glisse }
8003ce0a23dSJerome Glisse 
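/*
 * Illustrative note: the precomputed dummy_page.entry is what the GART code
 * writes back into page-table slots when a page is unbound, so that stray
 * GPU accesses land in this page rather than in arbitrary system memory.
 */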
8010c195119SAlex Deucher /**
8020c195119SAlex Deucher  * radeon_dummy_page_fini - free dummy page used by the driver
8030c195119SAlex Deucher  *
8040c195119SAlex Deucher  * @rdev: radeon_device pointer
8050c195119SAlex Deucher  *
8060c195119SAlex Deucher  * Frees the dummy page used by the driver (all asics).
8070c195119SAlex Deucher  */
8083ce0a23dSJerome Glisse void radeon_dummy_page_fini(struct radeon_device *rdev)
8093ce0a23dSJerome Glisse {
8103ce0a23dSJerome Glisse 	if (rdev->dummy_page.page == NULL)
8113ce0a23dSJerome Glisse 		return;
812a5f61dd4SChristophe JAILLET 	dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE,
813a5f61dd4SChristophe JAILLET 		       DMA_BIDIRECTIONAL);
8143ce0a23dSJerome Glisse 	__free_page(rdev->dummy_page.page);
8153ce0a23dSJerome Glisse 	rdev->dummy_page.page = NULL;
8163ce0a23dSJerome Glisse }
8173ce0a23dSJerome Glisse 
818771fe6b9SJerome Glisse 
819771fe6b9SJerome Glisse /* ATOM accessor methods */
8200c195119SAlex Deucher /*
8210c195119SAlex Deucher  * ATOM is an interpreted byte code stored in tables in the vbios.  The
8220c195119SAlex Deucher  * driver registers callbacks to access registers and the interpreter
8230c195119SAlex Deucher  * in the driver parses the tables and executes then to program specific
8240c195119SAlex Deucher  * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
8250c195119SAlex Deucher  * atombios.h, and atom.c
8260c195119SAlex Deucher  */
8270c195119SAlex Deucher 
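/*
 * Sketch of how the callbacks below get used (modeled on the atombios
 * helpers elsewhere in the driver, not code from this file): once
 * atom_parse() has built a context, command tables are executed with
 * argument structures from atombios.h, e.g. setting the engine clock:
 *
 *	SET_ENGINE_CLOCK_PS_ALLOCATION args;
 *	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
 *
 *	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	// 10 kHz units
 *	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 *
 * The interpreter then calls back into cail_reg_read()/cail_reg_write() and
 * friends to touch the hardware.
 */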
8280c195119SAlex Deucher /**
8290c195119SAlex Deucher  * cail_pll_read - read PLL register
8300c195119SAlex Deucher  *
8310c195119SAlex Deucher  * @info: atom card_info pointer
8320c195119SAlex Deucher  * @reg: PLL register offset
8330c195119SAlex Deucher  *
8340c195119SAlex Deucher  * Provides a PLL register accessor for the atom interpreter (r4xx+).
8350c195119SAlex Deucher  * Returns the value of the PLL register.
8360c195119SAlex Deucher  */
837771fe6b9SJerome Glisse static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
838771fe6b9SJerome Glisse {
839771fe6b9SJerome Glisse 	struct radeon_device *rdev = info->dev->dev_private;
840771fe6b9SJerome Glisse 	uint32_t r;
841771fe6b9SJerome Glisse 
842771fe6b9SJerome Glisse 	r = rdev->pll_rreg(rdev, reg);
843771fe6b9SJerome Glisse 	return r;
844771fe6b9SJerome Glisse }
845771fe6b9SJerome Glisse 
8460c195119SAlex Deucher /**
8470c195119SAlex Deucher  * cail_pll_write - write PLL register
8480c195119SAlex Deucher  *
8490c195119SAlex Deucher  * @info: atom card_info pointer
8500c195119SAlex Deucher  * @reg: PLL register offset
8510c195119SAlex Deucher  * @val: value to write to the pll register
8520c195119SAlex Deucher  *
8530c195119SAlex Deucher  * Provides a PLL register accessor for the atom interpreter (r4xx+).
8540c195119SAlex Deucher  */
855771fe6b9SJerome Glisse static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
856771fe6b9SJerome Glisse {
857771fe6b9SJerome Glisse 	struct radeon_device *rdev = info->dev->dev_private;
858771fe6b9SJerome Glisse 
859771fe6b9SJerome Glisse 	rdev->pll_wreg(rdev, reg, val);
860771fe6b9SJerome Glisse }
861771fe6b9SJerome Glisse 
8620c195119SAlex Deucher /**
8630c195119SAlex Deucher  * cail_mc_read - read MC (Memory Controller) register
8640c195119SAlex Deucher  *
8650c195119SAlex Deucher  * @info: atom card_info pointer
8660c195119SAlex Deucher  * @reg: MC register offset
8670c195119SAlex Deucher  *
8680c195119SAlex Deucher  * Provides an MC register accessor for the atom interpreter (r4xx+).
8690c195119SAlex Deucher  * Returns the value of the MC register.
8700c195119SAlex Deucher  */
871771fe6b9SJerome Glisse static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
872771fe6b9SJerome Glisse {
873771fe6b9SJerome Glisse 	struct radeon_device *rdev = info->dev->dev_private;
874771fe6b9SJerome Glisse 	uint32_t r;
875771fe6b9SJerome Glisse 
876771fe6b9SJerome Glisse 	r = rdev->mc_rreg(rdev, reg);
877771fe6b9SJerome Glisse 	return r;
878771fe6b9SJerome Glisse }
879771fe6b9SJerome Glisse 
8800c195119SAlex Deucher /**
8810c195119SAlex Deucher  * cail_mc_write - write MC (Memory Controller) register
8820c195119SAlex Deucher  *
8830c195119SAlex Deucher  * @info: atom card_info pointer
8840c195119SAlex Deucher  * @reg: MC register offset
8850c195119SAlex Deucher  * @val: value to write to the MC register
8860c195119SAlex Deucher  *
8870c195119SAlex Deucher  * Provides an MC register accessor for the atom interpreter (r4xx+).
8880c195119SAlex Deucher  */
889771fe6b9SJerome Glisse static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
890771fe6b9SJerome Glisse {
891771fe6b9SJerome Glisse 	struct radeon_device *rdev = info->dev->dev_private;
892771fe6b9SJerome Glisse 
893771fe6b9SJerome Glisse 	rdev->mc_wreg(rdev, reg, val);
894771fe6b9SJerome Glisse }
895771fe6b9SJerome Glisse 
8960c195119SAlex Deucher /**
8970c195119SAlex Deucher  * cail_reg_write - write MMIO register
8980c195119SAlex Deucher  *
8990c195119SAlex Deucher  * @info: atom card_info pointer
9000c195119SAlex Deucher  * @reg: MMIO register offset
9010c195119SAlex Deucher  * @val: value to write to the MMIO register
9020c195119SAlex Deucher  *
9030c195119SAlex Deucher  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
9040c195119SAlex Deucher  */
905771fe6b9SJerome Glisse static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
906771fe6b9SJerome Glisse {
907771fe6b9SJerome Glisse 	struct radeon_device *rdev = info->dev->dev_private;
908771fe6b9SJerome Glisse 
909771fe6b9SJerome Glisse 	WREG32(reg*4, val);
910771fe6b9SJerome Glisse }
911771fe6b9SJerome Glisse 
9120c195119SAlex Deucher /**
9130c195119SAlex Deucher  * cail_reg_read - read MMIO register
9140c195119SAlex Deucher  *
9150c195119SAlex Deucher  * @info: atom card_info pointer
9160c195119SAlex Deucher  * @reg: MMIO register offset
9170c195119SAlex Deucher  *
9180c195119SAlex Deucher  * Provides an MMIO register accessor for the atom interpreter (r4xx+).
9190c195119SAlex Deucher  * Returns the value of the MMIO register.
9200c195119SAlex Deucher  */
921771fe6b9SJerome Glisse static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
922771fe6b9SJerome Glisse {
923771fe6b9SJerome Glisse 	struct radeon_device *rdev = info->dev->dev_private;
924771fe6b9SJerome Glisse 	uint32_t r;
925771fe6b9SJerome Glisse 
926771fe6b9SJerome Glisse 	r = RREG32(reg*4);
927771fe6b9SJerome Glisse 	return r;
928771fe6b9SJerome Glisse }
929771fe6b9SJerome Glisse 
9300c195119SAlex Deucher /**
9310c195119SAlex Deucher  * cail_ioreg_write - write IO register
9320c195119SAlex Deucher  *
9330c195119SAlex Deucher  * @info: atom card_info pointer
9340c195119SAlex Deucher  * @reg: IO register offset
9350c195119SAlex Deucher  * @val: value to write to the IO register
9360c195119SAlex Deucher  *
9370c195119SAlex Deucher  * Provides an IO register accessor for the atom interpreter (r4xx+).
9380c195119SAlex Deucher  */
939351a52a2SAlex Deucher static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
940351a52a2SAlex Deucher {
941351a52a2SAlex Deucher 	struct radeon_device *rdev = info->dev->dev_private;
942351a52a2SAlex Deucher 
943351a52a2SAlex Deucher 	WREG32_IO(reg*4, val);
944351a52a2SAlex Deucher }
945351a52a2SAlex Deucher 
9460c195119SAlex Deucher /**
9470c195119SAlex Deucher  * cail_ioreg_read - read IO register
9480c195119SAlex Deucher  *
9490c195119SAlex Deucher  * @info: atom card_info pointer
9500c195119SAlex Deucher  * @reg: IO register offset
9510c195119SAlex Deucher  *
9520c195119SAlex Deucher  * Provides an IO register accessor for the atom interpreter (r4xx+).
9530c195119SAlex Deucher  * Returns the value of the IO register.
9540c195119SAlex Deucher  */
955351a52a2SAlex Deucher static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
956351a52a2SAlex Deucher {
957351a52a2SAlex Deucher 	struct radeon_device *rdev = info->dev->dev_private;
958351a52a2SAlex Deucher 	uint32_t r;
959351a52a2SAlex Deucher 
960351a52a2SAlex Deucher 	r = RREG32_IO(reg*4);
961351a52a2SAlex Deucher 	return r;
962351a52a2SAlex Deucher }
963351a52a2SAlex Deucher 
9640c195119SAlex Deucher /**
9650c195119SAlex Deucher  * radeon_atombios_init - init the driver info and callbacks for atombios
9660c195119SAlex Deucher  *
9670c195119SAlex Deucher  * @rdev: radeon_device pointer
9680c195119SAlex Deucher  *
9690c195119SAlex Deucher  * Initializes the driver info and register access callbacks for the
9700c195119SAlex Deucher  * ATOM interpreter (r4xx+).
9710c195119SAlex Deucher  * Returns 0 on success, -ENOMEM on failure.
9720c195119SAlex Deucher  * Called at driver startup.
9730c195119SAlex Deucher  */
974771fe6b9SJerome Glisse int radeon_atombios_init(struct radeon_device *rdev)
975771fe6b9SJerome Glisse {
97661c4b24bSMathias Fröhlich 	struct card_info *atom_card_info =
97761c4b24bSMathias Fröhlich 	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
97861c4b24bSMathias Fröhlich 
97961c4b24bSMathias Fröhlich 	if (!atom_card_info)
98061c4b24bSMathias Fröhlich 		return -ENOMEM;
98161c4b24bSMathias Fröhlich 
98261c4b24bSMathias Fröhlich 	rdev->mode_info.atom_card_info = atom_card_info;
983*5e3a0f77SWu Hoi Pok 	atom_card_info->dev = rdev_to_drm(rdev);
98461c4b24bSMathias Fröhlich 	atom_card_info->reg_read = cail_reg_read;
98561c4b24bSMathias Fröhlich 	atom_card_info->reg_write = cail_reg_write;
986351a52a2SAlex Deucher 	/* needed for iio ops */
987351a52a2SAlex Deucher 	if (rdev->rio_mem) {
988351a52a2SAlex Deucher 		atom_card_info->ioreg_read = cail_ioreg_read;
989351a52a2SAlex Deucher 		atom_card_info->ioreg_write = cail_ioreg_write;
990351a52a2SAlex Deucher 	} else {
991351a52a2SAlex Deucher 		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
992351a52a2SAlex Deucher 		atom_card_info->ioreg_read = cail_reg_read;
993351a52a2SAlex Deucher 		atom_card_info->ioreg_write = cail_reg_write;
994351a52a2SAlex Deucher 	}
99561c4b24bSMathias Fröhlich 	atom_card_info->mc_read = cail_mc_read;
99661c4b24bSMathias Fröhlich 	atom_card_info->mc_write = cail_mc_write;
99761c4b24bSMathias Fröhlich 	atom_card_info->pll_read = cail_pll_read;
99861c4b24bSMathias Fröhlich 	atom_card_info->pll_write = cail_pll_write;
99961c4b24bSMathias Fröhlich 
100061c4b24bSMathias Fröhlich 	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
10010e34d094STim Gardner 	if (!rdev->mode_info.atom_context) {
10020e34d094STim Gardner 		radeon_atombios_fini(rdev);
10030e34d094STim Gardner 		return -ENOMEM;
10040e34d094STim Gardner 	}
10050e34d094STim Gardner 
1006c31ad97fSRafał Miłecki 	mutex_init(&rdev->mode_info.atom_context->mutex);
10071c949842SDave Airlie 	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
1008*5e3a0f77SWu Hoi Pok 	radeon_atom_initialize_bios_scratch_regs(rdev_to_drm(rdev));
1009d904ef9bSDave Airlie 	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
1010771fe6b9SJerome Glisse 	return 0;
1011771fe6b9SJerome Glisse }
1012771fe6b9SJerome Glisse 
10130c195119SAlex Deucher /**
10140c195119SAlex Deucher  * radeon_atombios_fini - free the driver info and callbacks for atombios
10150c195119SAlex Deucher  *
10160c195119SAlex Deucher  * @rdev: radeon_device pointer
10170c195119SAlex Deucher  *
10180c195119SAlex Deucher  * Frees the driver info and register access callbacks for the ATOM
10190c195119SAlex Deucher  * interpreter (r4xx+).
10200c195119SAlex Deucher  * Called at driver shutdown.
10210c195119SAlex Deucher  */
1022771fe6b9SJerome Glisse void radeon_atombios_fini(struct radeon_device *rdev)
1023771fe6b9SJerome Glisse {
10244a04a844SJerome Glisse 	if (rdev->mode_info.atom_context) {
1025d904ef9bSDave Airlie 		kfree(rdev->mode_info.atom_context->scratch);
10264773fadeSLiwei Song 		kfree(rdev->mode_info.atom_context->iio);
10274a04a844SJerome Glisse 	}
10280e34d094STim Gardner 	kfree(rdev->mode_info.atom_context);
10290e34d094STim Gardner 	rdev->mode_info.atom_context = NULL;
103061c4b24bSMathias Fröhlich 	kfree(rdev->mode_info.atom_card_info);
10310e34d094STim Gardner 	rdev->mode_info.atom_card_info = NULL;
1032771fe6b9SJerome Glisse }
1033771fe6b9SJerome Glisse 
10340c195119SAlex Deucher /* COMBIOS */
10350c195119SAlex Deucher /*
10360c195119SAlex Deucher  * COMBIOS is the bios format prior to ATOM. It provides
10370c195119SAlex Deucher  * command tables similar to ATOM, but doesn't have a unified
10380c195119SAlex Deucher  * parser.  See radeon_combios.c
10390c195119SAlex Deucher  */
10400c195119SAlex Deucher 
10410c195119SAlex Deucher /**
10420c195119SAlex Deucher  * radeon_combios_init - init the driver info for combios
10430c195119SAlex Deucher  *
10440c195119SAlex Deucher  * @rdev: radeon_device pointer
10450c195119SAlex Deucher  *
10460c195119SAlex Deucher  * Initializes the driver info for combios (r1xx-r3xx).
10470c195119SAlex Deucher  * Returns 0 on success.
10480c195119SAlex Deucher  * Called at driver startup.
10490c195119SAlex Deucher  */
1050771fe6b9SJerome Glisse int radeon_combios_init(struct radeon_device *rdev)
1051771fe6b9SJerome Glisse {
1052*5e3a0f77SWu Hoi Pok 	radeon_combios_initialize_bios_scratch_regs(rdev_to_drm(rdev));
1053771fe6b9SJerome Glisse 	return 0;
1054771fe6b9SJerome Glisse }
1055771fe6b9SJerome Glisse 
10560c195119SAlex Deucher /**
10570c195119SAlex Deucher  * radeon_combios_fini - free the driver info for combios
10580c195119SAlex Deucher  *
10590c195119SAlex Deucher  * @rdev: radeon_device pointer
10600c195119SAlex Deucher  *
10610c195119SAlex Deucher  * Frees the driver info for combios (r1xx-r3xx).
10620c195119SAlex Deucher  * Called at driver shutdown.
10630c195119SAlex Deucher  */
1064771fe6b9SJerome Glisse void radeon_combios_fini(struct radeon_device *rdev)
1065771fe6b9SJerome Glisse {
1066771fe6b9SJerome Glisse }
1067771fe6b9SJerome Glisse 
10680c195119SAlex Deucher /* if we get transitioned to only one device, take VGA back */
10690c195119SAlex Deucher /**
10700c195119SAlex Deucher  * radeon_vga_set_decode - enable/disable vga decode
10710c195119SAlex Deucher  *
1072bf44e8ceSChristoph Hellwig  * @pdev: PCI device
10730c195119SAlex Deucher  * @state: enable/disable vga decode
10740c195119SAlex Deucher  *
10750c195119SAlex Deucher  * Enable/disable vga decode (all asics).
10760c195119SAlex Deucher  * Returns VGA resource flags.
10770c195119SAlex Deucher  */
1078bf44e8ceSChristoph Hellwig static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state)
107928d52043SDave Airlie {
1080bf44e8ceSChristoph Hellwig 	struct drm_device *dev = pci_get_drvdata(pdev);
1081bf44e8ceSChristoph Hellwig 	struct radeon_device *rdev = dev->dev_private;
108228d52043SDave Airlie 	radeon_vga_set_state(rdev, state);
108328d52043SDave Airlie 	if (state)
108428d52043SDave Airlie 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
108528d52043SDave Airlie 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
108628d52043SDave Airlie 	else
108728d52043SDave Airlie 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
108828d52043SDave Airlie }
1089c1176d6fSDave Airlie 
10900c195119SAlex Deucher /**
1091d3da76aaSLee Jones  * radeon_gart_size_auto - Determine a sensible default GART size
1092d3da76aaSLee Jones  *                         according to ASIC family.
10935e3c4f90SGrigori Goronzy  *
1094f017853eSLee Jones  * @family: ASIC family name
10955e3c4f90SGrigori Goronzy  */
10965e3c4f90SGrigori Goronzy static int radeon_gart_size_auto(enum radeon_family family)
10975e3c4f90SGrigori Goronzy {
10985e3c4f90SGrigori Goronzy 	/* default to a larger gart size on newer asics */
10995e3c4f90SGrigori Goronzy 	if (family >= CHIP_TAHITI)
11005e3c4f90SGrigori Goronzy 		return 2048;
11015e3c4f90SGrigori Goronzy 	else if (family >= CHIP_RV770)
11025e3c4f90SGrigori Goronzy 		return 1024;
11035e3c4f90SGrigori Goronzy 	else
11045e3c4f90SGrigori Goronzy 		return 512;
11055e3c4f90SGrigori Goronzy }
11065e3c4f90SGrigori Goronzy 
11075e3c4f90SGrigori Goronzy /**
11080c195119SAlex Deucher  * radeon_check_arguments - validate module params
11090c195119SAlex Deucher  *
11100c195119SAlex Deucher  * @rdev: radeon_device pointer
11110c195119SAlex Deucher  *
11120c195119SAlex Deucher  * Validates certain module parameters and updates
11130c195119SAlex Deucher  * the associated values used by the driver (all asics).
11140c195119SAlex Deucher  */
11151109ca09SLauri Kasanen static void radeon_check_arguments(struct radeon_device *rdev)
111636421338SJerome Glisse {
111736421338SJerome Glisse 	/* vramlimit must be a power of two */
11189da29026SMateusz Jończyk 	if (radeon_vram_limit != 0 && !is_power_of_2(radeon_vram_limit)) {
111936421338SJerome Glisse 		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
112036421338SJerome Glisse 				radeon_vram_limit);
112136421338SJerome Glisse 		radeon_vram_limit = 0;
112236421338SJerome Glisse 	}
11231bcb04f7SChristian König 
1124edcd26e8SAlex Deucher 	if (radeon_gart_size == -1) {
11255e3c4f90SGrigori Goronzy 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
1126edcd26e8SAlex Deucher 	}
112736421338SJerome Glisse 	/* gtt size must be a power of two and greater than or equal to 32M */
11281bcb04f7SChristian König 	if (radeon_gart_size < 32) {
1129edcd26e8SAlex Deucher 		dev_warn(rdev->dev, "gart size (%d) too small\n",
113036421338SJerome Glisse 				radeon_gart_size);
11315e3c4f90SGrigori Goronzy 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
11328c2d34ebSJonathan Gray 	} else if (!is_power_of_2(radeon_gart_size)) {
113336421338SJerome Glisse 		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
113436421338SJerome Glisse 				radeon_gart_size);
11355e3c4f90SGrigori Goronzy 		radeon_gart_size = radeon_gart_size_auto(rdev->family);
113636421338SJerome Glisse 	}
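	/* radeon_gart_size is specified in MB; shifting by 20 converts it to bytes. */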
11371bcb04f7SChristian König 	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
11381bcb04f7SChristian König 
113936421338SJerome Glisse 	/* AGP mode can only be -1, 1, 2, 4, 8 */
114036421338SJerome Glisse 	switch (radeon_agpmode) {
114136421338SJerome Glisse 	case -1:
114236421338SJerome Glisse 	case 0:
114336421338SJerome Glisse 	case 1:
114436421338SJerome Glisse 	case 2:
114536421338SJerome Glisse 	case 4:
114636421338SJerome Glisse 	case 8:
114736421338SJerome Glisse 		break;
114836421338SJerome Glisse 	default:
114936421338SJerome Glisse 		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
115036421338SJerome Glisse 				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
115136421338SJerome Glisse 		radeon_agpmode = 0;
115236421338SJerome Glisse 		break;
115336421338SJerome Glisse 	}
1154c1c44132SChristian König 
11558c2d34ebSJonathan Gray 	if (!is_power_of_2(radeon_vm_size)) {
1156c1c44132SChristian König 		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1157c1c44132SChristian König 			 radeon_vm_size);
115820b2656dSChristian König 		radeon_vm_size = 4;
1159c1c44132SChristian König 	}
1160c1c44132SChristian König 
116120b2656dSChristian König 	if (radeon_vm_size < 1) {
116213c240efSAlexandre Demers 		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
1163c1c44132SChristian König 			 radeon_vm_size);
116420b2656dSChristian König 		radeon_vm_size = 4;
1165c1c44132SChristian König 	}
1166c1c44132SChristian König 
1167c1c44132SChristian König 	/*
1168c1c44132SChristian König 	 * Max GPUVM size for Cayman, SI and CI is 40 bits.
1169c1c44132SChristian König 	 */
117020b2656dSChristian König 	if (radeon_vm_size > 1024) {
117120b2656dSChristian König 		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1172c1c44132SChristian König 			 radeon_vm_size);
117320b2656dSChristian König 		radeon_vm_size = 4;
1174c1c44132SChristian König 	}
11754510fb98SChristian König 
11764510fb98SChristian König 	/* defines the number of bits in the page table versus the page directory;
11774510fb98SChristian König 	 * a page is 4KB so we have a 12-bit offset, a minimum of 9 bits in the
11784510fb98SChristian König 	 * page table, and the remaining bits in the page directory */
1179dfc230f9SChristian König 	if (radeon_vm_block_size == -1) {
1180dfc230f9SChristian König 
1181dfc230f9SChristian König 		/* Total bits covered by PD + PTs */
11828e66e134SAlex Deucher 		unsigned bits = ilog2(radeon_vm_size) + 18;
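		/* radeon_vm_size is in GB: ilog2(GB) + 30 address bits minus the
		 * 12-bit page offset leaves ilog2(radeon_vm_size) + 18 bits to be
		 * split between the page directory and the page tables. */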
1183dfc230f9SChristian König 
1184dfc230f9SChristian König 		/* Make sure the PD is 4K in size up to an 8GB address space.
1185dfc230f9SChristian König 		   Above that, split equally between PD and PTs */
1186dfc230f9SChristian König 		if (radeon_vm_size <= 8)
1187dfc230f9SChristian König 			radeon_vm_block_size = bits - 9;
1188dfc230f9SChristian König 		else
1189dfc230f9SChristian König 			radeon_vm_block_size = (bits + 3) / 2;
1190dfc230f9SChristian König 
1191dfc230f9SChristian König 	} else if (radeon_vm_block_size < 9) {
119220b2656dSChristian König 		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
11934510fb98SChristian König 			 radeon_vm_block_size);
11944510fb98SChristian König 		radeon_vm_block_size = 9;
11954510fb98SChristian König 	}
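	/* Worked example, assuming the default radeon_vm_size of 8 (GB):
	 * bits = ilog2(8) + 18 = 21 and radeon_vm_block_size = 21 - 9 = 12,
	 * leaving 9 bits (512 entries, a 4K page directory) for the PD. */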
11964510fb98SChristian König 
11974510fb98SChristian König 	if (radeon_vm_block_size > 24 ||
119820b2656dSChristian König 	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
119920b2656dSChristian König 		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
12004510fb98SChristian König 			 radeon_vm_block_size);
12014510fb98SChristian König 		radeon_vm_block_size = 9;
12024510fb98SChristian König 	}
120336421338SJerome Glisse }
120436421338SJerome Glisse 
12050c195119SAlex Deucher /**
12060c195119SAlex Deucher  * radeon_switcheroo_set_state - set switcheroo state
12070c195119SAlex Deucher  *
12080c195119SAlex Deucher  * @pdev: pci dev pointer
12098e5de1d8SLukas Wunner  * @state: vga_switcheroo state
12100c195119SAlex Deucher  *
121131bc2485Swangjianli  * Callback for the switcheroo driver.  Suspends or resumes
12120c195119SAlex Deucher  * the asic before or after it is powered up using ACPI methods.
12130c195119SAlex Deucher  */
12146a9ee8afSDave Airlie static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
12156a9ee8afSDave Airlie {
12166a9ee8afSDave Airlie 	struct drm_device *dev = pci_get_drvdata(pdev);
121710ebc0bcSDave Airlie 
121890c4cde9SAlex Deucher 	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
121910ebc0bcSDave Airlie 		return;
122010ebc0bcSDave Airlie 
12216a9ee8afSDave Airlie 	if (state == VGA_SWITCHEROO_ON) {
12227ca85295SJoe Perches 		pr_info("radeon: switched on\n");
12236a9ee8afSDave Airlie 		/* don't suspend or resume card normally */
12245bcf719bSDave Airlie 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1225d1f9809eSMaarten Lankhorst 
122610ebc0bcSDave Airlie 		radeon_resume_kms(dev, true, true);
1227d1f9809eSMaarten Lankhorst 
12285bcf719bSDave Airlie 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1229fbf81762SDave Airlie 		drm_kms_helper_poll_enable(dev);
12306a9ee8afSDave Airlie 	} else {
12317ca85295SJoe Perches 		pr_info("radeon: switched off\n");
1232fbf81762SDave Airlie 		drm_kms_helper_poll_disable(dev);
12335bcf719bSDave Airlie 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1234274ad65cSJérome Glisse 		radeon_suspend_kms(dev, true, true, false);
12355bcf719bSDave Airlie 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
12366a9ee8afSDave Airlie 	}
12376a9ee8afSDave Airlie }
12386a9ee8afSDave Airlie 
12390c195119SAlex Deucher /**
12400c195119SAlex Deucher  * radeon_switcheroo_can_switch - see if switcheroo state can change
12410c195119SAlex Deucher  *
12420c195119SAlex Deucher  * @pdev: pci dev pointer
12430c195119SAlex Deucher  *
12440c195119SAlex Deucher  * Callback for the switcheroo driver.  Check if the switcheroo
12450c195119SAlex Deucher  * state can be changed.
12460c195119SAlex Deucher  * Returns true if the state can be changed, false if not.
12470c195119SAlex Deucher  */
12486a9ee8afSDave Airlie static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
12496a9ee8afSDave Airlie {
12506a9ee8afSDave Airlie 	struct drm_device *dev = pci_get_drvdata(pdev);
12516a9ee8afSDave Airlie 
1252fc8fd40eSDaniel Vetter 	/*
1253fc8fd40eSDaniel Vetter 	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1254fc8fd40eSDaniel Vetter 	 * locking inversion with the driver load path. And the access here is
1255fc8fd40eSDaniel Vetter 	 * completely racy anyway. So don't bother with locking for now.
1256fc8fd40eSDaniel Vetter 	 */
12577e13ad89SChris Wilson 	return atomic_read(&dev->open_count) == 0;
12586a9ee8afSDave Airlie }
12596a9ee8afSDave Airlie 
126026ec685fSTakashi Iwai static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
126126ec685fSTakashi Iwai 	.set_gpu_state = radeon_switcheroo_set_state,
126226ec685fSTakashi Iwai 	.reprobe = NULL,
126326ec685fSTakashi Iwai 	.can_switch = radeon_switcheroo_can_switch,
126426ec685fSTakashi Iwai };
12656a9ee8afSDave Airlie 
12660c195119SAlex Deucher /**
12670c195119SAlex Deucher  * radeon_device_init - initialize the driver
12680c195119SAlex Deucher  *
12690c195119SAlex Deucher  * @rdev: radeon_device pointer
1270f017853eSLee Jones  * @ddev: drm dev pointer
12710c195119SAlex Deucher  * @pdev: pci dev pointer
12720c195119SAlex Deucher  * @flags: driver flags
12730c195119SAlex Deucher  *
12740c195119SAlex Deucher  * Initializes the driver info and hw (all asics).
12750c195119SAlex Deucher  * Returns 0 for success or an error on failure.
12760c195119SAlex Deucher  * Called at driver startup.
12770c195119SAlex Deucher  */
1278771fe6b9SJerome Glisse int radeon_device_init(struct radeon_device *rdev,
1279771fe6b9SJerome Glisse 		       struct drm_device *ddev,
1280771fe6b9SJerome Glisse 		       struct pci_dev *pdev,
1281771fe6b9SJerome Glisse 		       uint32_t flags)
1282771fe6b9SJerome Glisse {
1283351a52a2SAlex Deucher 	int r, i;
1284ad49f501SDave Airlie 	int dma_bits;
128510ebc0bcSDave Airlie 	bool runtime = false;
1286771fe6b9SJerome Glisse 
1287771fe6b9SJerome Glisse 	rdev->shutdown = false;
12889f022ddfSJerome Glisse 	rdev->dev = &pdev->dev;
1289771fe6b9SJerome Glisse 	rdev->ddev = ddev;
1290771fe6b9SJerome Glisse 	rdev->pdev = pdev;
1291771fe6b9SJerome Glisse 	rdev->flags = flags;
1292771fe6b9SJerome Glisse 	rdev->family = flags & RADEON_FAMILY_MASK;
1293771fe6b9SJerome Glisse 	rdev->is_atom_bios = false;
1294771fe6b9SJerome Glisse 	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1295edcd26e8SAlex Deucher 	rdev->mc.gtt_size = 512 * 1024 * 1024;
1296733289c2SJerome Glisse 	rdev->accel_working = false;
12978b25ed34SAlex Deucher 	/* set up ring ids */
12988b25ed34SAlex Deucher 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
12998b25ed34SAlex Deucher 		rdev->ring[i].idx = i;
13008b25ed34SAlex Deucher 	}
1301f54d1867SChris Wilson 	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
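	/* dma_fence_context_alloc() returns the first of RADEON_NUM_RINGS
	 * consecutive fence context numbers, one per ring. */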
13021b5331d9SJerome Glisse 
1303fe0d36e0SAlex Deucher 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1304d522d9ccSThomas Reim 		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1305fe0d36e0SAlex Deucher 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
13061b5331d9SJerome Glisse 
1307771fe6b9SJerome Glisse 	/* mutex initializations are all done here so we
1308771fe6b9SJerome Glisse 	 * can call these functions again without locking issues */
1309d6999bc7SChristian König 	mutex_init(&rdev->ring_lock);
131040bacf16SAlex Deucher 	mutex_init(&rdev->dc_hw_i2c_mutex);
1311c20dc369SChristian Koenig 	atomic_set(&rdev->ih.lock, 0);
13124c788679SJerome Glisse 	mutex_init(&rdev->gem.mutex);
1313c913e23aSRafał Miłecki 	mutex_init(&rdev->pm.mutex);
13146759a0a7SMarek Olšák 	mutex_init(&rdev->gpu_clock_mutex);
1315f61d5b46SAlex Deucher 	mutex_init(&rdev->srbm_mutex);
131620ea3471STakashi Iwai 	mutex_init(&rdev->audio.component_mutex);
1317db7fce39SChristian König 	init_rwsem(&rdev->pm.mclk_lock);
1318dee53e7fSJerome Glisse 	init_rwsem(&rdev->exclusive_lock);
131973a6d3fcSRafał Miłecki 	init_waitqueue_head(&rdev->irq.vblank_queue);
13201b9c3dd0SAlex Deucher 	r = radeon_gem_init(rdev);
13211b9c3dd0SAlex Deucher 	if (r)
13221b9c3dd0SAlex Deucher 		return r;
1323529364e0SChristian König 
1324c1c44132SChristian König 	radeon_check_arguments(rdev);
132523d4f1f2SAlex Deucher 	/* Adjust VM size here.
1326c1c44132SChristian König 	 * Max GPUVM size for cayman+ is 40 bits.
132723d4f1f2SAlex Deucher 	 */
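	/* radeon_vm_size is in GB and a GB holds 1 << 18 pages of 4KB,
	 * hence the shift by 18 to get the page frame count. */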
132820b2656dSChristian König 	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1329771fe6b9SJerome Glisse 
13304aac0473SJerome Glisse 	/* Set asic functions */
13314aac0473SJerome Glisse 	r = radeon_asic_init(rdev);
133236421338SJerome Glisse 	if (r)
13334aac0473SJerome Glisse 		return r;
13344aac0473SJerome Glisse 
1335f95df9caSAlex Deucher 	/* all of the newer IGP chips have an internal gart.
1336f95df9caSAlex Deucher 	 * However, some rs4xx report as AGP, so remove that flag here.
1337f95df9caSAlex Deucher 	 */
1338f95df9caSAlex Deucher 	if ((rdev->family >= CHIP_RS400) &&
1339f95df9caSAlex Deucher 	    (rdev->flags & RADEON_IS_IGP)) {
1340f95df9caSAlex Deucher 		rdev->flags &= ~RADEON_IS_AGP;
1341f95df9caSAlex Deucher 	}
1342f95df9caSAlex Deucher 
134330256a3fSJerome Glisse 	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1344b574f251SJerome Glisse 		radeon_agp_disable(rdev);
1345771fe6b9SJerome Glisse 	}
1346771fe6b9SJerome Glisse 
13479ed8b1f9SAlex Deucher 	/* Set the internal MC address mask
13489ed8b1f9SAlex Deucher 	 * This is the max address of the GPU's
13499ed8b1f9SAlex Deucher 	 * internal address space.
13509ed8b1f9SAlex Deucher 	 */
13519ed8b1f9SAlex Deucher 	if (rdev->family >= CHIP_CAYMAN)
13529ed8b1f9SAlex Deucher 		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
13539ed8b1f9SAlex Deucher 	else if (rdev->family >= CHIP_CEDAR)
13549ed8b1f9SAlex Deucher 		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
13559ed8b1f9SAlex Deucher 	else
13569ed8b1f9SAlex Deucher 		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
13579ed8b1f9SAlex Deucher 
135833b3ad37SChristoph Hellwig 	/* set DMA mask.
1359ad49f501SDave Airlie 	 * PCIE - can handle 40-bits.
1360005a83f1SAlex Deucher 	 * IGP - can handle 40-bits
1361ad49f501SDave Airlie 	 * AGP - generally dma32 is safest
1362005a83f1SAlex Deucher 	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1363ad49f501SDave Airlie 	 */
136433b3ad37SChristoph Hellwig 	dma_bits = 40;
1365ad49f501SDave Airlie 	if (rdev->flags & RADEON_IS_AGP)
136633b3ad37SChristoph Hellwig 		dma_bits = 32;
1367005a83f1SAlex Deucher 	if ((rdev->flags & RADEON_IS_PCI) &&
13684a2b6662SJerome Glisse 	    (rdev->family <= CHIP_RS740))
136933b3ad37SChristoph Hellwig 		dma_bits = 32;
1370bcb0b981SBen Crocker #ifdef CONFIG_PPC64
1371bcb0b981SBen Crocker 	if (rdev->family == CHIP_CEDAR)
137233b3ad37SChristoph Hellwig 		dma_bits = 32;
1373bcb0b981SBen Crocker #endif
1374ad49f501SDave Airlie 
137503127c58SChristoph Hellwig 	r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
1376771fe6b9SJerome Glisse 	if (r) {
13777ca85295SJoe Perches 		pr_warn("radeon: No suitable DMA available\n");
137803127c58SChristoph Hellwig 		return r;
1379c52494f6SKonrad Rzeszutek Wilk 	}
1380913b2cb7SMichael D Labriola 	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
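	/* Note whether swiotlb bounce buffering may be needed for buffers
	 * that fall outside the DMA mask chosen above. */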
1381771fe6b9SJerome Glisse 
1382771fe6b9SJerome Glisse 	/* Registers mapping */
1383771fe6b9SJerome Glisse 	/* TODO: block userspace mapping of io register */
13842c385151SDaniel Vetter 	spin_lock_init(&rdev->mmio_idx_lock);
1385fe78118cSAlex Deucher 	spin_lock_init(&rdev->smc_idx_lock);
13860a5b7b0bSAlex Deucher 	spin_lock_init(&rdev->pll_idx_lock);
13870a5b7b0bSAlex Deucher 	spin_lock_init(&rdev->mc_idx_lock);
13880a5b7b0bSAlex Deucher 	spin_lock_init(&rdev->pcie_idx_lock);
13890a5b7b0bSAlex Deucher 	spin_lock_init(&rdev->pciep_idx_lock);
13900a5b7b0bSAlex Deucher 	spin_lock_init(&rdev->pif_idx_lock);
13910a5b7b0bSAlex Deucher 	spin_lock_init(&rdev->cg_idx_lock);
13920a5b7b0bSAlex Deucher 	spin_lock_init(&rdev->uvd_idx_lock);
13930a5b7b0bSAlex Deucher 	spin_lock_init(&rdev->rcu_idx_lock);
13940a5b7b0bSAlex Deucher 	spin_lock_init(&rdev->didt_idx_lock);
13950a5b7b0bSAlex Deucher 	spin_lock_init(&rdev->end_idx_lock);
1396efad86dbSAlex Deucher 	if (rdev->family >= CHIP_BONAIRE) {
1397efad86dbSAlex Deucher 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1398efad86dbSAlex Deucher 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1399efad86dbSAlex Deucher 	} else {
140001d73a69SJordan Crouse 		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
140101d73a69SJordan Crouse 		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1402efad86dbSAlex Deucher 	}
1403771fe6b9SJerome Glisse 	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1404a33c1a82SAndy Shevchenko 	if (rdev->rmmio == NULL)
1405771fe6b9SJerome Glisse 		return -ENOMEM;
1406771fe6b9SJerome Glisse 
140775efdee1SAlex Deucher 	/* doorbell bar mapping */
140875efdee1SAlex Deucher 	if (rdev->family >= CHIP_BONAIRE)
140975efdee1SAlex Deucher 		radeon_doorbell_init(rdev);
141075efdee1SAlex Deucher 
1411351a52a2SAlex Deucher 	/* io port mapping */
1412351a52a2SAlex Deucher 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1413351a52a2SAlex Deucher 		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1414351a52a2SAlex Deucher 			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1415351a52a2SAlex Deucher 			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1416351a52a2SAlex Deucher 			break;
1417351a52a2SAlex Deucher 		}
1418351a52a2SAlex Deucher 	}
1419351a52a2SAlex Deucher 	if (rdev->rio_mem == NULL)
1420351a52a2SAlex Deucher 		DRM_ERROR("Unable to find PCI I/O BAR\n");
1421351a52a2SAlex Deucher 
14224807c5a8SAlex Deucher 	if (rdev->flags & RADEON_IS_PX)
14234807c5a8SAlex Deucher 		radeon_device_handle_px_quirks(rdev);
14244807c5a8SAlex Deucher 
142528d52043SDave Airlie 	/* if we have more than one VGA card, then disable the radeon VGA resources */
142693239ea1SDave Airlie 	/* this will fail for cards that aren't VGA class devices, just
142793239ea1SDave Airlie 	 * ignore it */
1428bf44e8ceSChristoph Hellwig 	vga_client_register(rdev->pdev, radeon_vga_set_decode);
142910ebc0bcSDave Airlie 
1430bfaddd9fSAlex Deucher 	if (rdev->flags & RADEON_IS_PX)
143110ebc0bcSDave Airlie 		runtime = true;
14327ffb0ce3SLukas Wunner 	if (!pci_is_thunderbolt_attached(rdev->pdev))
14337ffb0ce3SLukas Wunner 		vga_switcheroo_register_client(rdev->pdev,
14347ffb0ce3SLukas Wunner 					       &radeon_switcheroo_ops, runtime);
143510ebc0bcSDave Airlie 	if (runtime)
143610ebc0bcSDave Airlie 		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
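	/* On PX (hybrid graphics) systems the vga_switcheroo PM domain set up
	 * above lets runtime PM power the discrete GPU down while it is idle. */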
143728d52043SDave Airlie 
14383ce0a23dSJerome Glisse 	r = radeon_init(rdev);
1439b574f251SJerome Glisse 	if (r)
14402e97140dSAlex Deucher 		goto failed;
1441b1e3a6d1SMichel Dänzer 
14425b54d679SNirmoy Das 	radeon_gem_debugfs_init(rdev);
14439843ead0SDave Airlie 
1444b574f251SJerome Glisse 	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1445b574f251SJerome Glisse 		/* Acceleration not working on AGP card try again
1446b574f251SJerome Glisse 		 * with fallback to PCI or PCIE GART
1447b574f251SJerome Glisse 		 */
1448a2d07b74SJerome Glisse 		radeon_asic_reset(rdev);
1449b574f251SJerome Glisse 		radeon_fini(rdev);
1450b574f251SJerome Glisse 		radeon_agp_disable(rdev);
1451b574f251SJerome Glisse 		r = radeon_init(rdev);
14524aac0473SJerome Glisse 		if (r)
14532e97140dSAlex Deucher 			goto failed;
14543ce0a23dSJerome Glisse 	}
14556c7bcceaSAlex Deucher 
145620ea3471STakashi Iwai 	radeon_audio_component_init(rdev);
145720ea3471STakashi Iwai 
145813a7d299SChristian König 	r = radeon_ib_ring_tests(rdev);
145913a7d299SChristian König 	if (r)
146013a7d299SChristian König 		DRM_ERROR("ib ring test failed (%d).\n", r);
146113a7d299SChristian König 
14626dfd1972SJérôme Glisse 	/*
14636dfd1972SJérôme Glisse 	 * A Turks/Thames GPU will freeze the whole laptop if DPM is not restarted
14646dfd1972SJérôme Glisse 	 * after the CP ring has chewed at least one packet. Hence we stop
14656dfd1972SJérôme Glisse 	 * and restart DPM here, after radeon_ib_ring_tests().
14666dfd1972SJérôme Glisse 	 */
14676dfd1972SJérôme Glisse 	if (rdev->pm.dpm_enabled &&
14686dfd1972SJérôme Glisse 	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
14696dfd1972SJérôme Glisse 	    (rdev->family == CHIP_TURKS) &&
14706dfd1972SJérôme Glisse 	    (rdev->flags & RADEON_IS_MOBILITY)) {
14716dfd1972SJérôme Glisse 		mutex_lock(&rdev->pm.mutex);
14726dfd1972SJérôme Glisse 		radeon_dpm_disable(rdev);
14736dfd1972SJérôme Glisse 		radeon_dpm_enable(rdev);
14746dfd1972SJérôme Glisse 		mutex_unlock(&rdev->pm.mutex);
14756dfd1972SJérôme Glisse 	}
14766dfd1972SJérôme Glisse 
147760a7e396SChristian König 	if ((radeon_testing & 1)) {
14784a1132a0SAlex Deucher 		if (rdev->accel_working)
1479ecc0b326SMichel Dänzer 			radeon_test_moves(rdev);
14804a1132a0SAlex Deucher 		else
14814a1132a0SAlex Deucher 			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1482ecc0b326SMichel Dänzer 	}
148360a7e396SChristian König 	if ((radeon_testing & 2)) {
14844a1132a0SAlex Deucher 		if (rdev->accel_working)
148560a7e396SChristian König 			radeon_test_syncing(rdev);
14864a1132a0SAlex Deucher 		else
14874a1132a0SAlex Deucher 			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
148860a7e396SChristian König 	}
1489771fe6b9SJerome Glisse 	if (radeon_benchmarking) {
14904a1132a0SAlex Deucher 		if (rdev->accel_working)
1491638dd7dbSIlija Hadzic 			radeon_benchmark(rdev, radeon_benchmarking);
14924a1132a0SAlex Deucher 		else
14934a1132a0SAlex Deucher 			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1494771fe6b9SJerome Glisse 	}
14956cf8a3f5SJerome Glisse 	return 0;
14962e97140dSAlex Deucher 
14972e97140dSAlex Deucher failed:
1498b8751946SLukas Wunner 	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
1499b8751946SLukas Wunner 	if (radeon_is_px(ddev))
1500b8751946SLukas Wunner 		pm_runtime_put_noidle(ddev->dev);
15012e97140dSAlex Deucher 	if (runtime)
15022e97140dSAlex Deucher 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
15032e97140dSAlex Deucher 	return r;
1504771fe6b9SJerome Glisse }
1505771fe6b9SJerome Glisse 
15060c195119SAlex Deucher /**
15070c195119SAlex Deucher  * radeon_device_fini - tear down the driver
15080c195119SAlex Deucher  *
15090c195119SAlex Deucher  * @rdev: radeon_device pointer
15100c195119SAlex Deucher  *
15110c195119SAlex Deucher  * Tear down the driver info (all asics).
15120c195119SAlex Deucher  * Called at driver shutdown.
15130c195119SAlex Deucher  */
1514771fe6b9SJerome Glisse void radeon_device_fini(struct radeon_device *rdev)
1515771fe6b9SJerome Glisse {
1516771fe6b9SJerome Glisse 	DRM_INFO("radeon: finishing device.\n");
1517771fe6b9SJerome Glisse 	rdev->shutdown = true;
151890aca4d2SJerome Glisse 	/* evict vram memory */
151990aca4d2SJerome Glisse 	radeon_bo_evict_vram(rdev);
152020ea3471STakashi Iwai 	radeon_audio_component_fini(rdev);
15213ce0a23dSJerome Glisse 	radeon_fini(rdev);
15227ffb0ce3SLukas Wunner 	if (!pci_is_thunderbolt_attached(rdev->pdev))
15236a9ee8afSDave Airlie 		vga_switcheroo_unregister_client(rdev->pdev);
15242e97140dSAlex Deucher 	if (rdev->flags & RADEON_IS_PX)
15252e97140dSAlex Deucher 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1526b8779475SChristoph Hellwig 	vga_client_unregister(rdev->pdev);
1527e0a2ca73SAlex Deucher 	if (rdev->rio_mem)
1528351a52a2SAlex Deucher 		pci_iounmap(rdev->pdev, rdev->rio_mem);
1529351a52a2SAlex Deucher 	rdev->rio_mem = NULL;
1530771fe6b9SJerome Glisse 	iounmap(rdev->rmmio);
1531771fe6b9SJerome Glisse 	rdev->rmmio = NULL;
153275efdee1SAlex Deucher 	if (rdev->family >= CHIP_BONAIRE)
153375efdee1SAlex Deucher 		radeon_doorbell_fini(rdev);
1534771fe6b9SJerome Glisse }
1535771fe6b9SJerome Glisse 
1536771fe6b9SJerome Glisse 
1537771fe6b9SJerome Glisse /*
1538771fe6b9SJerome Glisse  * Suspend & resume.
1539771fe6b9SJerome Glisse  */
1540f017853eSLee Jones /*
15410c195119SAlex Deucher  * radeon_suspend_kms - initiate device suspend
15420c195119SAlex Deucher  *
15430c195119SAlex Deucher  * Puts the hw in the suspend state (all asics).
15440c195119SAlex Deucher  * Returns 0 for success or an error on failure.
15450c195119SAlex Deucher  * Called at driver suspend.
15460c195119SAlex Deucher  */
1547274ad65cSJérome Glisse int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1548274ad65cSJérome Glisse 		       bool fbcon, bool freeze)
1549771fe6b9SJerome Glisse {
1550875c1866SDarren Jenkins 	struct radeon_device *rdev;
1551d86a4126SThomas Zimmermann 	struct pci_dev *pdev;
1552771fe6b9SJerome Glisse 	struct drm_crtc *crtc;
1553d8dcaa1dSAlex Deucher 	struct drm_connector *connector;
15547465280cSAlex Deucher 	int i, r;
1555771fe6b9SJerome Glisse 
1556875c1866SDarren Jenkins 	if (dev == NULL || dev->dev_private == NULL) {
1557771fe6b9SJerome Glisse 		return -ENODEV;
1558771fe6b9SJerome Glisse 	}
15597473e830SDave Airlie 
1560875c1866SDarren Jenkins 	rdev = dev->dev_private;
1561d86a4126SThomas Zimmermann 	pdev = to_pci_dev(dev->dev);
1562875c1866SDarren Jenkins 
1563f2aba352SAlex Deucher 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
15646a9ee8afSDave Airlie 		return 0;
1565d8dcaa1dSAlex Deucher 
156686698c20SSeth Forshee 	drm_kms_helper_poll_disable(dev);
156786698c20SSeth Forshee 
15686adaed5bSDaniel Vetter 	drm_modeset_lock_all(dev);
1569d8dcaa1dSAlex Deucher 	/* turn off display hw */
1570d8dcaa1dSAlex Deucher 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1571d8dcaa1dSAlex Deucher 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1572d8dcaa1dSAlex Deucher 	}
15736adaed5bSDaniel Vetter 	drm_modeset_unlock_all(dev);
1574d8dcaa1dSAlex Deucher 
1575f3cbb17bSGrigori Goronzy 	/* unpin the front buffers and cursors */
1576771fe6b9SJerome Glisse 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1577f3cbb17bSGrigori Goronzy 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
15789a0f0c9dSDaniel Stone 		struct drm_framebuffer *fb = crtc->primary->fb;
15794c788679SJerome Glisse 		struct radeon_bo *robj;
1580771fe6b9SJerome Glisse 
1581f3cbb17bSGrigori Goronzy 		if (radeon_crtc->cursor_bo) {
1582f3cbb17bSGrigori Goronzy 			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1583f3cbb17bSGrigori Goronzy 			r = radeon_bo_reserve(robj, false);
1584f3cbb17bSGrigori Goronzy 			if (r == 0) {
1585f3cbb17bSGrigori Goronzy 				radeon_bo_unpin(robj);
1586f3cbb17bSGrigori Goronzy 				radeon_bo_unreserve(robj);
1587f3cbb17bSGrigori Goronzy 			}
1588f3cbb17bSGrigori Goronzy 		}
1589f3cbb17bSGrigori Goronzy 
15909a0f0c9dSDaniel Stone 		if (fb == NULL || fb->obj[0] == NULL) {
1591771fe6b9SJerome Glisse 			continue;
1592771fe6b9SJerome Glisse 		}
15939a0f0c9dSDaniel Stone 		robj = gem_to_radeon_bo(fb->obj[0]);
159438651674SDave Airlie 		/* don't unpin kernel fb objects */
159538651674SDave Airlie 		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
15964c788679SJerome Glisse 			r = radeon_bo_reserve(robj, false);
159738651674SDave Airlie 			if (r == 0) {
15984c788679SJerome Glisse 				radeon_bo_unpin(robj);
15994c788679SJerome Glisse 				radeon_bo_unreserve(robj);
16004c788679SJerome Glisse 			}
1601771fe6b9SJerome Glisse 		}
1602771fe6b9SJerome Glisse 	}
1603771fe6b9SJerome Glisse 	/* evict vram memory */
16044c788679SJerome Glisse 	radeon_bo_evict_vram(rdev);
16058a47cc9eSChristian König 
1606771fe6b9SJerome Glisse 	/* wait for gpu to finish processing current batch */
16075f8f635eSJerome Glisse 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
160837615527SChristian König 		r = radeon_fence_wait_empty(rdev, i);
16095f8f635eSJerome Glisse 		if (r) {
16105f8f635eSJerome Glisse 			/* delay GPU reset to resume */
1611eb98c709SChristian König 			radeon_fence_driver_force_completion(rdev, i);
1612d6c770d2SZhenneng Li 		} else {
1613d6c770d2SZhenneng Li 			/* finish executing delayed work */
1614d6c770d2SZhenneng Li 			flush_delayed_work(&rdev->fence_drv[i].lockup_work);
16155f8f635eSJerome Glisse 		}
16165f8f635eSJerome Glisse 	}
1617771fe6b9SJerome Glisse 
1618f657c2a7SYang Zhao 	radeon_save_bios_scratch_regs(rdev);
1619f657c2a7SYang Zhao 
16203ce0a23dSJerome Glisse 	radeon_suspend(rdev);
1621d4877cf2SAlex Deucher 	radeon_hpd_fini(rdev);
1622ec9aaaffSAlex Deucher 	/* evict remaining vram memory.
1623ec9aaaffSAlex Deucher 	 * This second call to evict vram is to evict the gart page table
1624ec9aaaffSAlex Deucher 	 * using the CPU.
1625ec9aaaffSAlex Deucher 	 */
16264c788679SJerome Glisse 	radeon_bo_evict_vram(rdev);
1627771fe6b9SJerome Glisse 
162810b06122SJerome Glisse 	radeon_agp_suspend(rdev);
162910b06122SJerome Glisse 
1630d86a4126SThomas Zimmermann 	pci_save_state(pdev);
163182060854SAlex Deucher 	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
1632274ad65cSJérome Glisse 		rdev->asic->asic_reset(rdev, true);
1633d86a4126SThomas Zimmermann 		pci_restore_state(pdev);
1634274ad65cSJérome Glisse 	} else if (suspend) {
1635771fe6b9SJerome Glisse 		/* Shut down the device */
1636d86a4126SThomas Zimmermann 		pci_disable_device(pdev);
1637d86a4126SThomas Zimmermann 		pci_set_power_state(pdev, PCI_D3hot);
1638771fe6b9SJerome Glisse 	}
163910ebc0bcSDave Airlie 
164010ebc0bcSDave Airlie 	if (fbcon) {
1641ac751efaSTorben Hohn 		console_lock();
164238651674SDave Airlie 		radeon_fbdev_set_suspend(rdev, 1);
1643ac751efaSTorben Hohn 		console_unlock();
164410ebc0bcSDave Airlie 	}
1645771fe6b9SJerome Glisse 	return 0;
1646771fe6b9SJerome Glisse }
1647771fe6b9SJerome Glisse 
1648f017853eSLee Jones /*
16490c195119SAlex Deucher  * radeon_resume_kms - initiate device resume
16500c195119SAlex Deucher  *
16510c195119SAlex Deucher  * Bring the hw back to operating state (all asics).
16520c195119SAlex Deucher  * Returns 0 for success or an error on failure.
16530c195119SAlex Deucher  * Called at driver resume.
16540c195119SAlex Deucher  */
165510ebc0bcSDave Airlie int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1656771fe6b9SJerome Glisse {
165709bdf591SCedric Godin 	struct drm_connector *connector;
1658771fe6b9SJerome Glisse 	struct radeon_device *rdev = dev->dev_private;
1659d86a4126SThomas Zimmermann 	struct pci_dev *pdev = to_pci_dev(dev->dev);
1660f3cbb17bSGrigori Goronzy 	struct drm_crtc *crtc;
166104eb2206SChristian König 	int r;
1662771fe6b9SJerome Glisse 
1663f2aba352SAlex Deucher 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
16646a9ee8afSDave Airlie 		return 0;
16656a9ee8afSDave Airlie 
166610ebc0bcSDave Airlie 	if (fbcon) {
1667ac751efaSTorben Hohn 		console_lock();
166810ebc0bcSDave Airlie 	}
16697473e830SDave Airlie 	if (resume) {
1670d86a4126SThomas Zimmermann 		pci_set_power_state(pdev, PCI_D0);
1671d86a4126SThomas Zimmermann 		pci_restore_state(pdev);
1672d86a4126SThomas Zimmermann 		if (pci_enable_device(pdev)) {
167310ebc0bcSDave Airlie 			if (fbcon)
1674ac751efaSTorben Hohn 				console_unlock();
1675771fe6b9SJerome Glisse 			return -1;
1676771fe6b9SJerome Glisse 		}
16777473e830SDave Airlie 	}
16780ebf1717SDave Airlie 	/* resume AGP if in use */
16790ebf1717SDave Airlie 	radeon_agp_resume(rdev);
16803ce0a23dSJerome Glisse 	radeon_resume(rdev);
168104eb2206SChristian König 
168204eb2206SChristian König 	r = radeon_ib_ring_tests(rdev);
168304eb2206SChristian König 	if (r)
168404eb2206SChristian König 		DRM_ERROR("ib ring test failed (%d).\n", r);
168504eb2206SChristian König 
1686bc6a6295SAlex Deucher 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
16876c7bcceaSAlex Deucher 		/* do dpm late init */
16886c7bcceaSAlex Deucher 		r = radeon_pm_late_init(rdev);
16896c7bcceaSAlex Deucher 		if (r) {
16906c7bcceaSAlex Deucher 			rdev->pm.dpm_enabled = false;
16916c7bcceaSAlex Deucher 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
16926c7bcceaSAlex Deucher 		}
1693bc6a6295SAlex Deucher 	} else {
1694bc6a6295SAlex Deucher 		/* resume old pm late */
1695bc6a6295SAlex Deucher 		radeon_pm_resume(rdev);
16966c7bcceaSAlex Deucher 	}
16976c7bcceaSAlex Deucher 
1698f657c2a7SYang Zhao 	radeon_restore_bios_scratch_regs(rdev);
169909bdf591SCedric Godin 
1700f3cbb17bSGrigori Goronzy 	/* pin cursors */
1701f3cbb17bSGrigori Goronzy 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1702f3cbb17bSGrigori Goronzy 		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1703f3cbb17bSGrigori Goronzy 
1704f3cbb17bSGrigori Goronzy 		if (radeon_crtc->cursor_bo) {
1705f3cbb17bSGrigori Goronzy 			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1706f3cbb17bSGrigori Goronzy 			r = radeon_bo_reserve(robj, false);
1707f3cbb17bSGrigori Goronzy 			if (r == 0) {
1708f3cbb17bSGrigori Goronzy 				/* Only 27 bit offset for legacy cursor */
1709f3cbb17bSGrigori Goronzy 				r = radeon_bo_pin_restricted(robj,
1710f3cbb17bSGrigori Goronzy 							     RADEON_GEM_DOMAIN_VRAM,
1711f3cbb17bSGrigori Goronzy 							     ASIC_IS_AVIVO(rdev) ?
1712f3cbb17bSGrigori Goronzy 							     0 : 1 << 27,
1713f3cbb17bSGrigori Goronzy 							     &radeon_crtc->cursor_addr);
1714f3cbb17bSGrigori Goronzy 				if (r != 0)
1715f3cbb17bSGrigori Goronzy 					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1716f3cbb17bSGrigori Goronzy 				radeon_bo_unreserve(robj);
1717f3cbb17bSGrigori Goronzy 			}
1718f3cbb17bSGrigori Goronzy 		}
1719f3cbb17bSGrigori Goronzy 	}
1720f3cbb17bSGrigori Goronzy 
17213fa47d9eSAlex Deucher 	/* init dig PHYs, disp eng pll */
17223fa47d9eSAlex Deucher 	if (rdev->is_atom_bios) {
1723ac89af1eSAlex Deucher 		radeon_atom_encoder_init(rdev);
1724f3f1f03eSAlex Deucher 		radeon_atom_disp_eng_pll_init(rdev);
1725bced76f2SAlex Deucher 		/* turn on the BL */
1726bced76f2SAlex Deucher 		if (rdev->mode_info.bl_encoder) {
1727bced76f2SAlex Deucher 			u8 bl_level = radeon_get_backlight_level(rdev,
1728bced76f2SAlex Deucher 								 rdev->mode_info.bl_encoder);
1729bced76f2SAlex Deucher 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1730bced76f2SAlex Deucher 						   bl_level);
1731bced76f2SAlex Deucher 		}
17323fa47d9eSAlex Deucher 	}
1733d4877cf2SAlex Deucher 	/* reset hpd state */
1734d4877cf2SAlex Deucher 	radeon_hpd_init(rdev);
1735771fe6b9SJerome Glisse 	/* blat the mode back in */
1736ec9954fcSDave Airlie 	if (fbcon) {
1737771fe6b9SJerome Glisse 		drm_helper_resume_force_mode(dev);
1738a93f344dSAlex Deucher 		/* turn on display hw */
17396adaed5bSDaniel Vetter 		drm_modeset_lock_all(dev);
1740a93f344dSAlex Deucher 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1741a93f344dSAlex Deucher 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1742a93f344dSAlex Deucher 		}
17436adaed5bSDaniel Vetter 		drm_modeset_unlock_all(dev);
1744ec9954fcSDave Airlie 	}
174586698c20SSeth Forshee 
174686698c20SSeth Forshee 	drm_kms_helper_poll_enable(dev);
174718ee37a4SDaniel Vetter 
17483640da2fSAlex Deucher 	/* set the power state here in case we are a PX system or headless */
17493640da2fSAlex Deucher 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
17503640da2fSAlex Deucher 		radeon_pm_compute_clocks(rdev);
17513640da2fSAlex Deucher 
175218ee37a4SDaniel Vetter 	if (fbcon) {
175318ee37a4SDaniel Vetter 		radeon_fbdev_set_suspend(rdev, 0);
175418ee37a4SDaniel Vetter 		console_unlock();
175518ee37a4SDaniel Vetter 	}
175618ee37a4SDaniel Vetter 
1757771fe6b9SJerome Glisse 	return 0;
1758771fe6b9SJerome Glisse }
1759771fe6b9SJerome Glisse 
17600c195119SAlex Deucher /**
17610c195119SAlex Deucher  * radeon_gpu_reset - reset the asic
17620c195119SAlex Deucher  *
17630c195119SAlex Deucher  * @rdev: radeon device pointer
17640c195119SAlex Deucher  *
17650c195119SAlex Deucher  * Attempt the reset the GPU if it has hung (all asics).
17660c195119SAlex Deucher  * Returns 0 for success or an error on failure.
17670c195119SAlex Deucher  */
176890aca4d2SJerome Glisse int radeon_gpu_reset(struct radeon_device *rdev)
176990aca4d2SJerome Glisse {
177055d7c221SChristian König 	unsigned ring_sizes[RADEON_NUM_RINGS];
177155d7c221SChristian König 	uint32_t *ring_data[RADEON_NUM_RINGS];
177255d7c221SChristian König 
177355d7c221SChristian König 	bool saved = false;
177455d7c221SChristian König 
177555d7c221SChristian König 	int i, r;
177690aca4d2SJerome Glisse 
1777dee53e7fSJerome Glisse 	down_write(&rdev->exclusive_lock);
1778f9eaf9aeSChristian König 
1779f9eaf9aeSChristian König 	if (!rdev->needs_reset) {
1780f9eaf9aeSChristian König 		up_write(&rdev->exclusive_lock);
1781f9eaf9aeSChristian König 		return 0;
1782f9eaf9aeSChristian König 	}
1783f9eaf9aeSChristian König 
178472b9076bSMarek Olšák 	atomic_inc(&rdev->gpu_reset_counter);
178572b9076bSMarek Olšák 
178690aca4d2SJerome Glisse 	radeon_save_bios_scratch_regs(rdev);
178790aca4d2SJerome Glisse 	radeon_suspend(rdev);
178873ef0e0dSAlex Deucher 	radeon_hpd_fini(rdev);
178990aca4d2SJerome Glisse 
179055d7c221SChristian König 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
179155d7c221SChristian König 		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
179255d7c221SChristian König 						   &ring_data[i]);
179355d7c221SChristian König 		if (ring_sizes[i]) {
179455d7c221SChristian König 			saved = true;
179555d7c221SChristian König 			dev_info(rdev->dev, "Saved %d dwords of commands "
179655d7c221SChristian König 				 "on ring %d.\n", ring_sizes[i], i);
179755d7c221SChristian König 		}
179855d7c221SChristian König 	}
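	/* The ring contents saved above are replayed further down if the reset
	 * succeeds, or freed after forcing fence completion if it fails. */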
179955d7c221SChristian König 
180090aca4d2SJerome Glisse 	r = radeon_asic_reset(rdev);
180190aca4d2SJerome Glisse 	if (!r) {
180255d7c221SChristian König 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
180390aca4d2SJerome Glisse 		radeon_resume(rdev);
180455d7c221SChristian König 	}
180504eb2206SChristian König 
180690aca4d2SJerome Glisse 	radeon_restore_bios_scratch_regs(rdev);
180755d7c221SChristian König 
180855d7c221SChristian König 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
18099bb39ff4SMaarten Lankhorst 		if (!r && ring_data[i]) {
181055d7c221SChristian König 			radeon_ring_restore(rdev, &rdev->ring[i],
181155d7c221SChristian König 					    ring_sizes[i], ring_data[i]);
181255d7c221SChristian König 		} else {
1813eb98c709SChristian König 			radeon_fence_driver_force_completion(rdev, i);
181455d7c221SChristian König 			kfree(ring_data[i]);
181555d7c221SChristian König 		}
181655d7c221SChristian König 	}
181755d7c221SChristian König 
1818c940b447SAlex Deucher 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
1819c940b447SAlex Deucher 		/* do dpm late init */
1820c940b447SAlex Deucher 		r = radeon_pm_late_init(rdev);
1821c940b447SAlex Deucher 		if (r) {
1822c940b447SAlex Deucher 			rdev->pm.dpm_enabled = false;
1823c940b447SAlex Deucher 			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1824c940b447SAlex Deucher 		}
1825c940b447SAlex Deucher 	} else {
1826c940b447SAlex Deucher 		/* resume old pm late */
182795f59509SAlex Deucher 		radeon_pm_resume(rdev);
1828c940b447SAlex Deucher 	}
1829c940b447SAlex Deucher 
183073ef0e0dSAlex Deucher 	/* init dig PHYs, disp eng pll */
183173ef0e0dSAlex Deucher 	if (rdev->is_atom_bios) {
183273ef0e0dSAlex Deucher 		radeon_atom_encoder_init(rdev);
183373ef0e0dSAlex Deucher 		radeon_atom_disp_eng_pll_init(rdev);
183473ef0e0dSAlex Deucher 		/* turn on the BL */
183573ef0e0dSAlex Deucher 		if (rdev->mode_info.bl_encoder) {
183673ef0e0dSAlex Deucher 			u8 bl_level = radeon_get_backlight_level(rdev,
183773ef0e0dSAlex Deucher 								 rdev->mode_info.bl_encoder);
183873ef0e0dSAlex Deucher 			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
183973ef0e0dSAlex Deucher 						   bl_level);
184073ef0e0dSAlex Deucher 		}
184173ef0e0dSAlex Deucher 	}
184273ef0e0dSAlex Deucher 	/* reset hpd state */
184373ef0e0dSAlex Deucher 	radeon_hpd_init(rdev);
184473ef0e0dSAlex Deucher 
18453c036389SChristian König 	rdev->in_reset = true;
18463c036389SChristian König 	rdev->needs_reset = false;
18473c036389SChristian König 
18489bb39ff4SMaarten Lankhorst 	downgrade_write(&rdev->exclusive_lock);
18499bb39ff4SMaarten Lankhorst 
1850*5e3a0f77SWu Hoi Pok 	drm_helper_resume_force_mode(rdev_to_drm(rdev));
1851d3493574SJerome Glisse 
1852c940b447SAlex Deucher 	/* set the power state here in case we are a PX system or headless */
1853c940b447SAlex Deucher 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1854c940b447SAlex Deucher 		radeon_pm_compute_clocks(rdev);
1855c940b447SAlex Deucher 
18569bb39ff4SMaarten Lankhorst 	if (!r) {
18579bb39ff4SMaarten Lankhorst 		r = radeon_ib_ring_tests(rdev);
18589bb39ff4SMaarten Lankhorst 		if (r && saved)
18599bb39ff4SMaarten Lankhorst 			r = -EAGAIN;
18609bb39ff4SMaarten Lankhorst 	} else {
186190aca4d2SJerome Glisse 		/* bad news, how to tell it to userspace? */
186290aca4d2SJerome Glisse 		dev_info(rdev->dev, "GPU reset failed\n");
18637a1619b9SMichel Dänzer 	}
18647a1619b9SMichel Dänzer 
18659bb39ff4SMaarten Lankhorst 	rdev->needs_reset = r == -EAGAIN;
18669bb39ff4SMaarten Lankhorst 	rdev->in_reset = false;
18679bb39ff4SMaarten Lankhorst 
18689bb39ff4SMaarten Lankhorst 	up_read(&rdev->exclusive_lock);
186990aca4d2SJerome Glisse 	return r;
187090aca4d2SJerome Glisse }
1871