/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif

#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)

struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* macbook pro 8.2 */
	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
	{ 0, 0, 0, 0, 0 },
};

bool radeon_is_px(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->flags & RADEON_IS_PX)
		return true;
	return false;
}

static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
	struct radeon_px_quirk *p = radeon_px_quirk_list;

	/* Apply PX quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			rdev->px_quirk_flags = p->px_quirk_flags;
			break;
		}
		++p;
	}

	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
		rdev->flags &= ~RADEON_IS_PX;

	/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
	if (!radeon_is_atpx_hybrid() &&
	    !radeon_has_atpx_dgpu_power_cntl())
		rdev->flags &= ~RADEON_IS_PX;
}

/**
 * radeon_program_register_sequence - program an array of registers.
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
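 *
 * The table is consumed as (reg, and_mask, or_mask) triplets, so
 * @array_size must be a multiple of 3. A minimal, purely illustrative
 * sketch (the register offsets below are made up, not taken from any real
 * golden-register list):
 *
 *   static const u32 fake_golden_regs[] = {
 *       0x1234, 0xffffffff, 0x00010000,  // and_mask == 0xffffffff: or_mask written directly
 *       0x5678, 0x0000000f, 0x00000001,  // otherwise: read-modify-write with the masks
 *   };
 *   radeon_program_register_sequence(rdev, fake_golden_regs,
 *                                    ARRAY_SIZE(fake_golden_regs));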
 */
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}

/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch registers helper functions.
 */
/**
 * radeon_scratch_init - Init scratch register driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init CP scratch register driver information (r1xx-r5xx)
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

/**
 * radeon_scratch_get - Allocate a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

/**
 * radeon_scratch_free - Free a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}

/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}

/**
 * radeon_doorbell_get - Allocate a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Allocate a doorbell for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
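 *
 * A small usage sketch (illustrative only; the consumer holding the index
 * is hypothetical):
 *
 *   u32 index;
 *
 *   if (radeon_doorbell_get(rdev, &index) == 0) {
 *       // hand "index" to whatever ring/queue needs a doorbell ...
 *       // ... and release it again when that consumer is torn down:
 *       radeon_doorbell_free(rdev, index);
 *   }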
 */
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
	if (offset < rdev->doorbell.num_doorbells) {
		__set_bit(offset, rdev->doorbell.used);
		*doorbell = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Free a doorbell allocated for use by the driver (all asics)
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}

/**
 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
 * setup KFD
 *
 * @rdev: radeon_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for radeon.
 *
 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
 * takes doorbells required for its own rings and reports the setup to KFD.
 * Radeon reserved doorbells are at the start of the doorbell aperture.
 */
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/* The first num_doorbells are used by radeon.
	 * KFD takes whatever's left in the aperture. */
	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = rdev->doorbell.base;
		*aperture_size = rdev->doorbell.size;
		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
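 *
 * A consumer typically reads a snapshot from the mapped writeback page,
 * for example (illustrative; rptr_offs is assumed to be that ring's byte
 * offset into the writeback page):
 *
 *   u32 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs / 4]);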
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}

/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates the Writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
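 *
 * Note: writeback can be forced off at module load time (the no_wb
 * parameter backing the radeon_no_wb variable checked below); the driver
 * then falls back to reading the corresponding registers directly.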
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail; the worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if the VRAM size is smaller than the
 * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and the Ubuntu
 * ones mentioned above).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
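 *
 * A typical MC setup in an asic init path looks roughly like this
 * (sketch only, not copied from a specific asic file):
 *
 *   radeon_vram_location(rdev, &rdev->mc, base);
 *   radeon_gtt_location(rdev, &rdev->mc);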
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */

/**
 * radeon_device_is_virtual - check if we are running in a virtual environment
 *
 * Check if the asic has been passed through to a VM (all asics).
 * Used at driver startup.
 * Returns true if virtual or false if not.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}

/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
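 *
 * The result is typically used to decide whether the vbios init tables
 * still have to be run; see radeon_boot_test_post_card() below.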
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* for pass through, always force asic_init for CI */
	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * The params are used to calculate display watermarks (all asics).
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize
 * it (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
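 *
 * Note (informational): the precomputed rdev->dummy_page.entry built below
 * is what the GART code can write into page-table slots that have no real
 * backing page bound.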
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}

/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
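 *
 * Once this succeeds, BIOS command tables are executed through the parsed
 * context, for example (as radeon_boot_test_post_card() above does):
 *
 *   atom_asic_init(rdev->mode_info.atom_context);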
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}

/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
/**
 * radeon_vga_set_decode - enable/disable vga decode
 *
 * @cookie: radeon_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

/**
 * Determine a sensible default GART size according to ASIC family.
11205e3c4f90SGrigori Goronzy * 11215e3c4f90SGrigori Goronzy * @family ASIC family name 11225e3c4f90SGrigori Goronzy */ 11235e3c4f90SGrigori Goronzy static int radeon_gart_size_auto(enum radeon_family family) 11245e3c4f90SGrigori Goronzy { 11255e3c4f90SGrigori Goronzy /* default to a larger gart size on newer asics */ 11265e3c4f90SGrigori Goronzy if (family >= CHIP_TAHITI) 11275e3c4f90SGrigori Goronzy return 2048; 11285e3c4f90SGrigori Goronzy else if (family >= CHIP_RV770) 11295e3c4f90SGrigori Goronzy return 1024; 11305e3c4f90SGrigori Goronzy else 11315e3c4f90SGrigori Goronzy return 512; 11325e3c4f90SGrigori Goronzy } 11335e3c4f90SGrigori Goronzy 11345e3c4f90SGrigori Goronzy /** 11350c195119SAlex Deucher * radeon_check_arguments - validate module params 11360c195119SAlex Deucher * 11370c195119SAlex Deucher * @rdev: radeon_device pointer 11380c195119SAlex Deucher * 11390c195119SAlex Deucher * Validates certain module parameters and updates 11400c195119SAlex Deucher * the associated values used by the driver (all asics). 11410c195119SAlex Deucher */ 11421109ca09SLauri Kasanen static void radeon_check_arguments(struct radeon_device *rdev) 114336421338SJerome Glisse { 114436421338SJerome Glisse /* vramlimit must be a power of two */ 11451bcb04f7SChristian König if (!radeon_check_pot_argument(radeon_vram_limit)) { 114636421338SJerome Glisse dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", 114736421338SJerome Glisse radeon_vram_limit); 114836421338SJerome Glisse radeon_vram_limit = 0; 114936421338SJerome Glisse } 11501bcb04f7SChristian König 1151edcd26e8SAlex Deucher if (radeon_gart_size == -1) { 11525e3c4f90SGrigori Goronzy radeon_gart_size = radeon_gart_size_auto(rdev->family); 1153edcd26e8SAlex Deucher } 115436421338SJerome Glisse /* gtt size must be power of two and greater or equal to 32M */ 11551bcb04f7SChristian König if (radeon_gart_size < 32) { 1156edcd26e8SAlex Deucher dev_warn(rdev->dev, "gart size (%d) too small\n", 115736421338SJerome Glisse radeon_gart_size); 11585e3c4f90SGrigori Goronzy radeon_gart_size = radeon_gart_size_auto(rdev->family); 11591bcb04f7SChristian König } else if (!radeon_check_pot_argument(radeon_gart_size)) { 116036421338SJerome Glisse dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", 116136421338SJerome Glisse radeon_gart_size); 11625e3c4f90SGrigori Goronzy radeon_gart_size = radeon_gart_size_auto(rdev->family); 116336421338SJerome Glisse } 11641bcb04f7SChristian König rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; 11651bcb04f7SChristian König 116636421338SJerome Glisse /* AGP mode can only be -1, 1, 2, 4, 8 */ 116736421338SJerome Glisse switch (radeon_agpmode) { 116836421338SJerome Glisse case -1: 116936421338SJerome Glisse case 0: 117036421338SJerome Glisse case 1: 117136421338SJerome Glisse case 2: 117236421338SJerome Glisse case 4: 117336421338SJerome Glisse case 8: 117436421338SJerome Glisse break; 117536421338SJerome Glisse default: 117636421338SJerome Glisse dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: " 117736421338SJerome Glisse "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode); 117836421338SJerome Glisse radeon_agpmode = 0; 117936421338SJerome Glisse break; 118036421338SJerome Glisse } 1181c1c44132SChristian König 1182c1c44132SChristian König if (!radeon_check_pot_argument(radeon_vm_size)) { 1183c1c44132SChristian König dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n", 1184c1c44132SChristian König radeon_vm_size); 118520b2656dSChristian König radeon_vm_size = 4; 1186c1c44132SChristian König } 
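	/* radeon_vm_size is specified in GB; out-of-range values fall back to
	 * 4 GB.  The checks below clamp it to [1, 1024] and then derive
	 * radeon_vm_block_size from it, e.g. for radeon_vm_size = 8:
	 * ilog2(8) + 18 = 21 bits total, block size 21 - 9 = 12, which keeps
	 * the page directory at 4 KB.
	 */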
1187c1c44132SChristian König
118820b2656dSChristian König if (radeon_vm_size < 1) {
118913c240efSAlexandre Demers dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
1190c1c44132SChristian König radeon_vm_size);
119120b2656dSChristian König radeon_vm_size = 4;
1192c1c44132SChristian König }
1193c1c44132SChristian König
1194c1c44132SChristian König /*
1195c1c44132SChristian König * Max GPUVM size for Cayman, SI and CI is 40 bits.
1196c1c44132SChristian König */
119720b2656dSChristian König if (radeon_vm_size > 1024) {
119820b2656dSChristian König dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1199c1c44132SChristian König radeon_vm_size);
120020b2656dSChristian König radeon_vm_size = 4;
1201c1c44132SChristian König }
12024510fb98SChristian König
12034510fb98SChristian König /* defines number of bits in page table versus page directory,
12044510fb98SChristian König * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
12054510fb98SChristian König * page table and the remaining bits are in the page directory */
1206dfc230f9SChristian König if (radeon_vm_block_size == -1) {
1207dfc230f9SChristian König
1208dfc230f9SChristian König /* Total bits covered by PD + PTs */
12098e66e134SAlex Deucher unsigned bits = ilog2(radeon_vm_size) + 18;
1210dfc230f9SChristian König
1211dfc230f9SChristian König /* Make sure the PD is 4K in size up to 8GB address space.
1212dfc230f9SChristian König Above that split equally between PD and PTs */
1213dfc230f9SChristian König if (radeon_vm_size <= 8)
1214dfc230f9SChristian König radeon_vm_block_size = bits - 9;
1215dfc230f9SChristian König else
1216dfc230f9SChristian König radeon_vm_block_size = (bits + 3) / 2;
1217dfc230f9SChristian König
1218dfc230f9SChristian König } else if (radeon_vm_block_size < 9) {
121920b2656dSChristian König dev_warn(rdev->dev, "VM page table size (%d) too small\n",
12204510fb98SChristian König radeon_vm_block_size);
12214510fb98SChristian König radeon_vm_block_size = 9;
12224510fb98SChristian König }
12234510fb98SChristian König
12244510fb98SChristian König if (radeon_vm_block_size > 24 ||
122520b2656dSChristian König (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
122620b2656dSChristian König dev_warn(rdev->dev, "VM page table size (%d) too large\n",
12274510fb98SChristian König radeon_vm_block_size);
12284510fb98SChristian König radeon_vm_block_size = 9;
12294510fb98SChristian König }
123036421338SJerome Glisse }
123136421338SJerome Glisse
12320c195119SAlex Deucher /**
12330c195119SAlex Deucher * radeon_switcheroo_set_state - set switcheroo state
12340c195119SAlex Deucher *
12350c195119SAlex Deucher * @pdev: pci dev pointer
12368e5de1d8SLukas Wunner * @state: vga_switcheroo state
12370c195119SAlex Deucher *
12380c195119SAlex Deucher * Callback for the switcheroo driver. Suspends or resumes the
12390c195119SAlex Deucher * asics before or after it is powered up using ACPI methods.
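 * On PX (hybrid graphics) systems the OFF transition is ignored here; the
 * dGPU is powered down through runtime PM instead.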
12400c195119SAlex Deucher */
12416a9ee8afSDave Airlie static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
12426a9ee8afSDave Airlie {
12436a9ee8afSDave Airlie struct drm_device *dev = pci_get_drvdata(pdev);
12444807c5a8SAlex Deucher struct radeon_device *rdev = dev->dev_private;
124510ebc0bcSDave Airlie
124690c4cde9SAlex Deucher if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
124710ebc0bcSDave Airlie return;
124810ebc0bcSDave Airlie
12496a9ee8afSDave Airlie if (state == VGA_SWITCHEROO_ON) {
1250d1f9809eSMaarten Lankhorst unsigned d3_delay = dev->pdev->d3_delay;
1251d1f9809eSMaarten Lankhorst
12527ca85295SJoe Perches pr_info("radeon: switched on\n");
12536a9ee8afSDave Airlie /* don't suspend or resume card normally */
12545bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1255d1f9809eSMaarten Lankhorst
12564807c5a8SAlex Deucher if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
1257d1f9809eSMaarten Lankhorst dev->pdev->d3_delay = 20;
1258d1f9809eSMaarten Lankhorst
125910ebc0bcSDave Airlie radeon_resume_kms(dev, true, true);
1260d1f9809eSMaarten Lankhorst
1261d1f9809eSMaarten Lankhorst dev->pdev->d3_delay = d3_delay;
1262d1f9809eSMaarten Lankhorst
12635bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_ON;
1264fbf81762SDave Airlie drm_kms_helper_poll_enable(dev);
12656a9ee8afSDave Airlie } else {
12667ca85295SJoe Perches pr_info("radeon: switched off\n");
1267fbf81762SDave Airlie drm_kms_helper_poll_disable(dev);
12685bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1269274ad65cSJérome Glisse radeon_suspend_kms(dev, true, true, false);
12705bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_OFF;
12716a9ee8afSDave Airlie }
12726a9ee8afSDave Airlie }
12736a9ee8afSDave Airlie
12740c195119SAlex Deucher /**
12750c195119SAlex Deucher * radeon_switcheroo_can_switch - see if switcheroo state can change
12760c195119SAlex Deucher *
12770c195119SAlex Deucher * @pdev: pci dev pointer
12780c195119SAlex Deucher *
12790c195119SAlex Deucher * Callback for the switcheroo driver. Check if the switcheroo
12800c195119SAlex Deucher * state can be changed.
12810c195119SAlex Deucher * Returns true if the state can be changed, false if not.
12820c195119SAlex Deucher */
12836a9ee8afSDave Airlie static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
12846a9ee8afSDave Airlie {
12856a9ee8afSDave Airlie struct drm_device *dev = pci_get_drvdata(pdev);
12866a9ee8afSDave Airlie
1287fc8fd40eSDaniel Vetter /*
1288fc8fd40eSDaniel Vetter * FIXME: open_count is protected by drm_global_mutex but that would lead to
1289fc8fd40eSDaniel Vetter * locking inversion with the driver load path. And the access here is
1290fc8fd40eSDaniel Vetter * completely racy anyway. So don't bother with locking for now.
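 * An open_count of zero means no userspace client currently has the device
 * open, so allowing the switch cannot disrupt an active client.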
1291fc8fd40eSDaniel Vetter */
1292fc8fd40eSDaniel Vetter return dev->open_count == 0;
12936a9ee8afSDave Airlie }
12946a9ee8afSDave Airlie
129526ec685fSTakashi Iwai static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
129626ec685fSTakashi Iwai .set_gpu_state = radeon_switcheroo_set_state,
129726ec685fSTakashi Iwai .reprobe = NULL,
129826ec685fSTakashi Iwai .can_switch = radeon_switcheroo_can_switch,
129926ec685fSTakashi Iwai };
13006a9ee8afSDave Airlie
13010c195119SAlex Deucher /**
13020c195119SAlex Deucher * radeon_device_init - initialize the driver
13030c195119SAlex Deucher *
13040c195119SAlex Deucher * @rdev: radeon_device pointer
13050c195119SAlex Deucher * @ddev: drm dev pointer
13060c195119SAlex Deucher * @pdev: pci dev pointer
13070c195119SAlex Deucher * @flags: driver flags
13080c195119SAlex Deucher *
13090c195119SAlex Deucher * Initializes the driver info and hw (all asics).
13100c195119SAlex Deucher * Returns 0 for success or an error on failure.
13110c195119SAlex Deucher * Called at driver startup.
13120c195119SAlex Deucher */
1313771fe6b9SJerome Glisse int radeon_device_init(struct radeon_device *rdev,
1314771fe6b9SJerome Glisse struct drm_device *ddev,
1315771fe6b9SJerome Glisse struct pci_dev *pdev,
1316771fe6b9SJerome Glisse uint32_t flags)
1317771fe6b9SJerome Glisse {
1318351a52a2SAlex Deucher int r, i;
1319ad49f501SDave Airlie int dma_bits;
132010ebc0bcSDave Airlie bool runtime = false;
1321771fe6b9SJerome Glisse
1322771fe6b9SJerome Glisse rdev->shutdown = false;
13239f022ddfSJerome Glisse rdev->dev = &pdev->dev;
1324771fe6b9SJerome Glisse rdev->ddev = ddev;
1325771fe6b9SJerome Glisse rdev->pdev = pdev;
1326771fe6b9SJerome Glisse rdev->flags = flags;
1327771fe6b9SJerome Glisse rdev->family = flags & RADEON_FAMILY_MASK;
1328771fe6b9SJerome Glisse rdev->is_atom_bios = false;
1329771fe6b9SJerome Glisse rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1330edcd26e8SAlex Deucher rdev->mc.gtt_size = 512 * 1024 * 1024;
1331733289c2SJerome Glisse rdev->accel_working = false;
13328b25ed34SAlex Deucher /* set up ring ids */
13338b25ed34SAlex Deucher for (i = 0; i < RADEON_NUM_RINGS; i++) {
13348b25ed34SAlex Deucher rdev->ring[i].idx = i;
13358b25ed34SAlex Deucher }
1336f54d1867SChris Wilson rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
13371b5331d9SJerome Glisse
1338fe0d36e0SAlex Deucher DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1339d522d9ccSThomas Reim radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1340fe0d36e0SAlex Deucher pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
13411b5331d9SJerome Glisse
1342771fe6b9SJerome Glisse /* mutex initializations are all done here so we
1343771fe6b9SJerome Glisse * can recall functions without having locking issues */
1344d6999bc7SChristian König mutex_init(&rdev->ring_lock);
134540bacf16SAlex Deucher mutex_init(&rdev->dc_hw_i2c_mutex);
1346c20dc369SChristian Koenig atomic_set(&rdev->ih.lock, 0);
13474c788679SJerome Glisse mutex_init(&rdev->gem.mutex);
1348c913e23aSRafał Miłecki mutex_init(&rdev->pm.mutex);
13496759a0a7SMarek Olšák mutex_init(&rdev->gpu_clock_mutex);
1350f61d5b46SAlex Deucher mutex_init(&rdev->srbm_mutex);
13511c0a4625SOded Gabbay mutex_init(&rdev->grbm_idx_mutex);
1352db7fce39SChristian König init_rwsem(&rdev->pm.mclk_lock);
1353dee53e7fSJerome Glisse init_rwsem(&rdev->exclusive_lock);
135473a6d3fcSRafał Miłecki init_waitqueue_head(&rdev->irq.vblank_queue);
1355341cb9e4SChristian König mutex_init(&rdev->mn_lock);
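	/* mn_lock protects mn_hash, which tracks the per-mm MMU notifiers
	 * used for userptr BOs (see radeon_mn.c)
	 */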
1356341cb9e4SChristian König hash_init(rdev->mn_hash); 13571b9c3dd0SAlex Deucher r = radeon_gem_init(rdev); 13581b9c3dd0SAlex Deucher if (r) 13591b9c3dd0SAlex Deucher return r; 1360529364e0SChristian König 1361c1c44132SChristian König radeon_check_arguments(rdev); 136223d4f1f2SAlex Deucher /* Adjust VM size here. 1363c1c44132SChristian König * Max GPUVM size for cayman+ is 40 bits. 136423d4f1f2SAlex Deucher */ 136520b2656dSChristian König rdev->vm_manager.max_pfn = radeon_vm_size << 18; 1366771fe6b9SJerome Glisse 13674aac0473SJerome Glisse /* Set asic functions */ 13684aac0473SJerome Glisse r = radeon_asic_init(rdev); 136936421338SJerome Glisse if (r) 13704aac0473SJerome Glisse return r; 13714aac0473SJerome Glisse 1372f95df9caSAlex Deucher /* all of the newer IGP chips have an internal gart 1373f95df9caSAlex Deucher * However some rs4xx report as AGP, so remove that here. 1374f95df9caSAlex Deucher */ 1375f95df9caSAlex Deucher if ((rdev->family >= CHIP_RS400) && 1376f95df9caSAlex Deucher (rdev->flags & RADEON_IS_IGP)) { 1377f95df9caSAlex Deucher rdev->flags &= ~RADEON_IS_AGP; 1378f95df9caSAlex Deucher } 1379f95df9caSAlex Deucher 138030256a3fSJerome Glisse if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { 1381b574f251SJerome Glisse radeon_agp_disable(rdev); 1382771fe6b9SJerome Glisse } 1383771fe6b9SJerome Glisse 13849ed8b1f9SAlex Deucher /* Set the internal MC address mask 13859ed8b1f9SAlex Deucher * This is the max address of the GPU's 13869ed8b1f9SAlex Deucher * internal address space. 13879ed8b1f9SAlex Deucher */ 13889ed8b1f9SAlex Deucher if (rdev->family >= CHIP_CAYMAN) 13899ed8b1f9SAlex Deucher rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ 13909ed8b1f9SAlex Deucher else if (rdev->family >= CHIP_CEDAR) 13919ed8b1f9SAlex Deucher rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */ 13929ed8b1f9SAlex Deucher else 13939ed8b1f9SAlex Deucher rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */ 13949ed8b1f9SAlex Deucher 1395ad49f501SDave Airlie /* set DMA mask + need_dma32 flags. 1396ad49f501SDave Airlie * PCIE - can handle 40-bits. 1397005a83f1SAlex Deucher * IGP - can handle 40-bits 1398ad49f501SDave Airlie * AGP - generally dma32 is safest 1399005a83f1SAlex Deucher * PCI - dma32 for legacy pci gart, 40 bits on newer asics 1400ad49f501SDave Airlie */ 1401ad49f501SDave Airlie rdev->need_dma32 = false; 1402ad49f501SDave Airlie if (rdev->flags & RADEON_IS_AGP) 1403ad49f501SDave Airlie rdev->need_dma32 = true; 1404005a83f1SAlex Deucher if ((rdev->flags & RADEON_IS_PCI) && 14054a2b6662SJerome Glisse (rdev->family <= CHIP_RS740)) 1406ad49f501SDave Airlie rdev->need_dma32 = true; 1407ad49f501SDave Airlie 1408ad49f501SDave Airlie dma_bits = rdev->need_dma32 ? 
32 : 40; 1409ad49f501SDave Airlie r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 1410771fe6b9SJerome Glisse if (r) { 141162fff811SDaniel Haid rdev->need_dma32 = true; 1412c52494f6SKonrad Rzeszutek Wilk dma_bits = 32; 14137ca85295SJoe Perches pr_warn("radeon: No suitable DMA available\n"); 1414771fe6b9SJerome Glisse } 1415c52494f6SKonrad Rzeszutek Wilk r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 1416c52494f6SKonrad Rzeszutek Wilk if (r) { 1417c52494f6SKonrad Rzeszutek Wilk pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); 14187ca85295SJoe Perches pr_warn("radeon: No coherent DMA available\n"); 1419c52494f6SKonrad Rzeszutek Wilk } 1420771fe6b9SJerome Glisse 1421771fe6b9SJerome Glisse /* Registers mapping */ 1422771fe6b9SJerome Glisse /* TODO: block userspace mapping of io register */ 14232c385151SDaniel Vetter spin_lock_init(&rdev->mmio_idx_lock); 1424fe78118cSAlex Deucher spin_lock_init(&rdev->smc_idx_lock); 14250a5b7b0bSAlex Deucher spin_lock_init(&rdev->pll_idx_lock); 14260a5b7b0bSAlex Deucher spin_lock_init(&rdev->mc_idx_lock); 14270a5b7b0bSAlex Deucher spin_lock_init(&rdev->pcie_idx_lock); 14280a5b7b0bSAlex Deucher spin_lock_init(&rdev->pciep_idx_lock); 14290a5b7b0bSAlex Deucher spin_lock_init(&rdev->pif_idx_lock); 14300a5b7b0bSAlex Deucher spin_lock_init(&rdev->cg_idx_lock); 14310a5b7b0bSAlex Deucher spin_lock_init(&rdev->uvd_idx_lock); 14320a5b7b0bSAlex Deucher spin_lock_init(&rdev->rcu_idx_lock); 14330a5b7b0bSAlex Deucher spin_lock_init(&rdev->didt_idx_lock); 14340a5b7b0bSAlex Deucher spin_lock_init(&rdev->end_idx_lock); 1435efad86dbSAlex Deucher if (rdev->family >= CHIP_BONAIRE) { 1436efad86dbSAlex Deucher rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); 1437efad86dbSAlex Deucher rdev->rmmio_size = pci_resource_len(rdev->pdev, 5); 1438efad86dbSAlex Deucher } else { 143901d73a69SJordan Crouse rdev->rmmio_base = pci_resource_start(rdev->pdev, 2); 144001d73a69SJordan Crouse rdev->rmmio_size = pci_resource_len(rdev->pdev, 2); 1441efad86dbSAlex Deucher } 1442771fe6b9SJerome Glisse rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size); 1443*a33c1a82SAndy Shevchenko if (rdev->rmmio == NULL) 1444771fe6b9SJerome Glisse return -ENOMEM; 1445771fe6b9SJerome Glisse 144675efdee1SAlex Deucher /* doorbell bar mapping */ 144775efdee1SAlex Deucher if (rdev->family >= CHIP_BONAIRE) 144875efdee1SAlex Deucher radeon_doorbell_init(rdev); 144975efdee1SAlex Deucher 1450351a52a2SAlex Deucher /* io port mapping */ 1451351a52a2SAlex Deucher for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 1452351a52a2SAlex Deucher if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) { 1453351a52a2SAlex Deucher rdev->rio_mem_size = pci_resource_len(rdev->pdev, i); 1454351a52a2SAlex Deucher rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size); 1455351a52a2SAlex Deucher break; 1456351a52a2SAlex Deucher } 1457351a52a2SAlex Deucher } 1458351a52a2SAlex Deucher if (rdev->rio_mem == NULL) 1459351a52a2SAlex Deucher DRM_ERROR("Unable to find PCI I/O BAR\n"); 1460351a52a2SAlex Deucher 14614807c5a8SAlex Deucher if (rdev->flags & RADEON_IS_PX) 14624807c5a8SAlex Deucher radeon_device_handle_px_quirks(rdev); 14634807c5a8SAlex Deucher 146428d52043SDave Airlie /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 146593239ea1SDave Airlie /* this will fail for cards that aren't VGA class devices, just 146693239ea1SDave Airlie * ignore it */ 146793239ea1SDave Airlie vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 146810ebc0bcSDave Airlie 
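	/* register with vga_switcheroo; PX (hybrid graphics) systems also get
	 * runtime PM hooks so the dGPU can be powered down while unused
	 */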
1469bfaddd9fSAlex Deucher if (rdev->flags & RADEON_IS_PX)
147010ebc0bcSDave Airlie runtime = true;
147110ebc0bcSDave Airlie vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
147210ebc0bcSDave Airlie if (runtime)
147310ebc0bcSDave Airlie vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
147428d52043SDave Airlie
14753ce0a23dSJerome Glisse r = radeon_init(rdev);
1476b574f251SJerome Glisse if (r)
14772e97140dSAlex Deucher goto failed;
1478b1e3a6d1SMichel Dänzer
1479409851f4SJerome Glisse r = radeon_gem_debugfs_init(rdev);
1480409851f4SJerome Glisse if (r) {
1481409851f4SJerome Glisse DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1482409851f4SJerome Glisse }
1483409851f4SJerome Glisse
14849843ead0SDave Airlie r = radeon_mst_debugfs_init(rdev);
14859843ead0SDave Airlie if (r) {
14869843ead0SDave Airlie DRM_ERROR("registering mst debugfs failed (%d).\n", r);
14879843ead0SDave Airlie }
14889843ead0SDave Airlie
1489b574f251SJerome Glisse if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1490b574f251SJerome Glisse /* Acceleration not working on AGP card, try again
1491b574f251SJerome Glisse * with fallback to PCI or PCIE GART
1492b574f251SJerome Glisse */
1493a2d07b74SJerome Glisse radeon_asic_reset(rdev);
1494b574f251SJerome Glisse radeon_fini(rdev);
1495b574f251SJerome Glisse radeon_agp_disable(rdev);
1496b574f251SJerome Glisse r = radeon_init(rdev);
14974aac0473SJerome Glisse if (r)
14982e97140dSAlex Deucher goto failed;
14993ce0a23dSJerome Glisse }
15006c7bcceaSAlex Deucher
150113a7d299SChristian König r = radeon_ib_ring_tests(rdev);
150213a7d299SChristian König if (r)
150313a7d299SChristian König DRM_ERROR("ib ring test failed (%d).\n", r);
150413a7d299SChristian König
15056dfd1972SJérôme Glisse /*
15066dfd1972SJérôme Glisse * Turks/Thames GPUs will freeze the whole laptop if DPM is not restarted
15076dfd1972SJérôme Glisse * after the CP ring has chewed on at least one packet. Hence here we stop
15086dfd1972SJérôme Glisse * and restart DPM after the radeon_ib_ring_tests().
15096dfd1972SJérôme Glisse */ 15106dfd1972SJérôme Glisse if (rdev->pm.dpm_enabled && 15116dfd1972SJérôme Glisse (rdev->pm.pm_method == PM_METHOD_DPM) && 15126dfd1972SJérôme Glisse (rdev->family == CHIP_TURKS) && 15136dfd1972SJérôme Glisse (rdev->flags & RADEON_IS_MOBILITY)) { 15146dfd1972SJérôme Glisse mutex_lock(&rdev->pm.mutex); 15156dfd1972SJérôme Glisse radeon_dpm_disable(rdev); 15166dfd1972SJérôme Glisse radeon_dpm_enable(rdev); 15176dfd1972SJérôme Glisse mutex_unlock(&rdev->pm.mutex); 15186dfd1972SJérôme Glisse } 15196dfd1972SJérôme Glisse 152060a7e396SChristian König if ((radeon_testing & 1)) { 15214a1132a0SAlex Deucher if (rdev->accel_working) 1522ecc0b326SMichel Dänzer radeon_test_moves(rdev); 15234a1132a0SAlex Deucher else 15244a1132a0SAlex Deucher DRM_INFO("radeon: acceleration disabled, skipping move tests\n"); 1525ecc0b326SMichel Dänzer } 152660a7e396SChristian König if ((radeon_testing & 2)) { 15274a1132a0SAlex Deucher if (rdev->accel_working) 152860a7e396SChristian König radeon_test_syncing(rdev); 15294a1132a0SAlex Deucher else 15304a1132a0SAlex Deucher DRM_INFO("radeon: acceleration disabled, skipping sync tests\n"); 153160a7e396SChristian König } 1532771fe6b9SJerome Glisse if (radeon_benchmarking) { 15334a1132a0SAlex Deucher if (rdev->accel_working) 1534638dd7dbSIlija Hadzic radeon_benchmark(rdev, radeon_benchmarking); 15354a1132a0SAlex Deucher else 15364a1132a0SAlex Deucher DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n"); 1537771fe6b9SJerome Glisse } 15386cf8a3f5SJerome Glisse return 0; 15392e97140dSAlex Deucher 15402e97140dSAlex Deucher failed: 1541b8751946SLukas Wunner /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */ 1542b8751946SLukas Wunner if (radeon_is_px(ddev)) 1543b8751946SLukas Wunner pm_runtime_put_noidle(ddev->dev); 15442e97140dSAlex Deucher if (runtime) 15452e97140dSAlex Deucher vga_switcheroo_fini_domain_pm_ops(rdev->dev); 15462e97140dSAlex Deucher return r; 1547771fe6b9SJerome Glisse } 1548771fe6b9SJerome Glisse 15490c195119SAlex Deucher /** 15500c195119SAlex Deucher * radeon_device_fini - tear down the driver 15510c195119SAlex Deucher * 15520c195119SAlex Deucher * @rdev: radeon_device pointer 15530c195119SAlex Deucher * 15540c195119SAlex Deucher * Tear down the driver info (all asics). 15550c195119SAlex Deucher * Called at driver shutdown. 
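 * Evicts VRAM, runs the ASIC fini path, unregisters the vga_switcheroo and
 * VGA arbiter clients, and unmaps the I/O, MMIO and doorbell BARs.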
15560c195119SAlex Deucher */ 1557771fe6b9SJerome Glisse void radeon_device_fini(struct radeon_device *rdev) 1558771fe6b9SJerome Glisse { 1559771fe6b9SJerome Glisse DRM_INFO("radeon: finishing device.\n"); 1560771fe6b9SJerome Glisse rdev->shutdown = true; 156190aca4d2SJerome Glisse /* evict vram memory */ 156290aca4d2SJerome Glisse radeon_bo_evict_vram(rdev); 15633ce0a23dSJerome Glisse radeon_fini(rdev); 15646a9ee8afSDave Airlie vga_switcheroo_unregister_client(rdev->pdev); 15652e97140dSAlex Deucher if (rdev->flags & RADEON_IS_PX) 15662e97140dSAlex Deucher vga_switcheroo_fini_domain_pm_ops(rdev->dev); 1567c1176d6fSDave Airlie vga_client_register(rdev->pdev, NULL, NULL, NULL); 1568e0a2ca73SAlex Deucher if (rdev->rio_mem) 1569351a52a2SAlex Deucher pci_iounmap(rdev->pdev, rdev->rio_mem); 1570351a52a2SAlex Deucher rdev->rio_mem = NULL; 1571771fe6b9SJerome Glisse iounmap(rdev->rmmio); 1572771fe6b9SJerome Glisse rdev->rmmio = NULL; 157375efdee1SAlex Deucher if (rdev->family >= CHIP_BONAIRE) 157475efdee1SAlex Deucher radeon_doorbell_fini(rdev); 1575771fe6b9SJerome Glisse } 1576771fe6b9SJerome Glisse 1577771fe6b9SJerome Glisse 1578771fe6b9SJerome Glisse /* 1579771fe6b9SJerome Glisse * Suspend & resume. 1580771fe6b9SJerome Glisse */ 15810c195119SAlex Deucher /** 15820c195119SAlex Deucher * radeon_suspend_kms - initiate device suspend 15830c195119SAlex Deucher * 15840c195119SAlex Deucher * @pdev: drm dev pointer 15850c195119SAlex Deucher * @state: suspend state 15860c195119SAlex Deucher * 15870c195119SAlex Deucher * Puts the hw in the suspend state (all asics). 15880c195119SAlex Deucher * Returns 0 for success or an error on failure. 15890c195119SAlex Deucher * Called at driver suspend. 15900c195119SAlex Deucher */ 1591274ad65cSJérome Glisse int radeon_suspend_kms(struct drm_device *dev, bool suspend, 1592274ad65cSJérome Glisse bool fbcon, bool freeze) 1593771fe6b9SJerome Glisse { 1594875c1866SDarren Jenkins struct radeon_device *rdev; 1595771fe6b9SJerome Glisse struct drm_crtc *crtc; 1596d8dcaa1dSAlex Deucher struct drm_connector *connector; 15977465280cSAlex Deucher int i, r; 1598771fe6b9SJerome Glisse 1599875c1866SDarren Jenkins if (dev == NULL || dev->dev_private == NULL) { 1600771fe6b9SJerome Glisse return -ENODEV; 1601771fe6b9SJerome Glisse } 16027473e830SDave Airlie 1603875c1866SDarren Jenkins rdev = dev->dev_private; 1604875c1866SDarren Jenkins 1605f2aba352SAlex Deucher if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 16066a9ee8afSDave Airlie return 0; 1607d8dcaa1dSAlex Deucher 160886698c20SSeth Forshee drm_kms_helper_poll_disable(dev); 160986698c20SSeth Forshee 16106adaed5bSDaniel Vetter drm_modeset_lock_all(dev); 1611d8dcaa1dSAlex Deucher /* turn off display hw */ 1612d8dcaa1dSAlex Deucher list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1613d8dcaa1dSAlex Deucher drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1614d8dcaa1dSAlex Deucher } 16156adaed5bSDaniel Vetter drm_modeset_unlock_all(dev); 1616d8dcaa1dSAlex Deucher 1617f3cbb17bSGrigori Goronzy /* unpin the front buffers and cursors */ 1618771fe6b9SJerome Glisse list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1619f3cbb17bSGrigori Goronzy struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1620f4510a27SMatt Roper struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb); 16214c788679SJerome Glisse struct radeon_bo *robj; 1622771fe6b9SJerome Glisse 1623f3cbb17bSGrigori Goronzy if (radeon_crtc->cursor_bo) { 1624f3cbb17bSGrigori Goronzy struct radeon_bo *robj = 
gem_to_radeon_bo(radeon_crtc->cursor_bo); 1625f3cbb17bSGrigori Goronzy r = radeon_bo_reserve(robj, false); 1626f3cbb17bSGrigori Goronzy if (r == 0) { 1627f3cbb17bSGrigori Goronzy radeon_bo_unpin(robj); 1628f3cbb17bSGrigori Goronzy radeon_bo_unreserve(robj); 1629f3cbb17bSGrigori Goronzy } 1630f3cbb17bSGrigori Goronzy } 1631f3cbb17bSGrigori Goronzy 1632771fe6b9SJerome Glisse if (rfb == NULL || rfb->obj == NULL) { 1633771fe6b9SJerome Glisse continue; 1634771fe6b9SJerome Glisse } 16357e4d15d9SDaniel Vetter robj = gem_to_radeon_bo(rfb->obj); 163638651674SDave Airlie /* don't unpin kernel fb objects */ 163738651674SDave Airlie if (!radeon_fbdev_robj_is_fb(rdev, robj)) { 16384c788679SJerome Glisse r = radeon_bo_reserve(robj, false); 163938651674SDave Airlie if (r == 0) { 16404c788679SJerome Glisse radeon_bo_unpin(robj); 16414c788679SJerome Glisse radeon_bo_unreserve(robj); 16424c788679SJerome Glisse } 1643771fe6b9SJerome Glisse } 1644771fe6b9SJerome Glisse } 1645771fe6b9SJerome Glisse /* evict vram memory */ 16464c788679SJerome Glisse radeon_bo_evict_vram(rdev); 16478a47cc9eSChristian König 1648771fe6b9SJerome Glisse /* wait for gpu to finish processing current batch */ 16495f8f635eSJerome Glisse for (i = 0; i < RADEON_NUM_RINGS; i++) { 165037615527SChristian König r = radeon_fence_wait_empty(rdev, i); 16515f8f635eSJerome Glisse if (r) { 16525f8f635eSJerome Glisse /* delay GPU reset to resume */ 1653eb98c709SChristian König radeon_fence_driver_force_completion(rdev, i); 16545f8f635eSJerome Glisse } 16555f8f635eSJerome Glisse } 1656771fe6b9SJerome Glisse 1657f657c2a7SYang Zhao radeon_save_bios_scratch_regs(rdev); 1658f657c2a7SYang Zhao 16593ce0a23dSJerome Glisse radeon_suspend(rdev); 1660d4877cf2SAlex Deucher radeon_hpd_fini(rdev); 1661ec9aaaffSAlex Deucher /* evict remaining vram memory 1662ec9aaaffSAlex Deucher * This second call to evict vram is to evict the gart page table 1663ec9aaaffSAlex Deucher * using the CPU. 1664ec9aaaffSAlex Deucher */ 16654c788679SJerome Glisse radeon_bo_evict_vram(rdev); 1666771fe6b9SJerome Glisse 166710b06122SJerome Glisse radeon_agp_suspend(rdev); 166810b06122SJerome Glisse 1669771fe6b9SJerome Glisse pci_save_state(dev->pdev); 1670ccaa2c12SJérôme Glisse if (freeze && rdev->family >= CHIP_CEDAR) { 1671274ad65cSJérome Glisse rdev->asic->asic_reset(rdev, true); 1672274ad65cSJérome Glisse pci_restore_state(dev->pdev); 1673274ad65cSJérome Glisse } else if (suspend) { 1674771fe6b9SJerome Glisse /* Shut down the device */ 1675771fe6b9SJerome Glisse pci_disable_device(dev->pdev); 1676771fe6b9SJerome Glisse pci_set_power_state(dev->pdev, PCI_D3hot); 1677771fe6b9SJerome Glisse } 167810ebc0bcSDave Airlie 167910ebc0bcSDave Airlie if (fbcon) { 1680ac751efaSTorben Hohn console_lock(); 168138651674SDave Airlie radeon_fbdev_set_suspend(rdev, 1); 1682ac751efaSTorben Hohn console_unlock(); 168310ebc0bcSDave Airlie } 1684771fe6b9SJerome Glisse return 0; 1685771fe6b9SJerome Glisse } 1686771fe6b9SJerome Glisse 16870c195119SAlex Deucher /** 16880c195119SAlex Deucher * radeon_resume_kms - initiate device resume 16890c195119SAlex Deucher * 16900c195119SAlex Deucher * @pdev: drm dev pointer 16910c195119SAlex Deucher * 16920c195119SAlex Deucher * Bring the hw back to operating state (all asics). 16930c195119SAlex Deucher * Returns 0 for success or an error on failure. 16940c195119SAlex Deucher * Called at driver resume. 
16950c195119SAlex Deucher */ 169610ebc0bcSDave Airlie int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) 1697771fe6b9SJerome Glisse { 169809bdf591SCedric Godin struct drm_connector *connector; 1699771fe6b9SJerome Glisse struct radeon_device *rdev = dev->dev_private; 1700f3cbb17bSGrigori Goronzy struct drm_crtc *crtc; 170104eb2206SChristian König int r; 1702771fe6b9SJerome Glisse 1703f2aba352SAlex Deucher if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 17046a9ee8afSDave Airlie return 0; 17056a9ee8afSDave Airlie 170610ebc0bcSDave Airlie if (fbcon) { 1707ac751efaSTorben Hohn console_lock(); 170810ebc0bcSDave Airlie } 17097473e830SDave Airlie if (resume) { 1710771fe6b9SJerome Glisse pci_set_power_state(dev->pdev, PCI_D0); 1711771fe6b9SJerome Glisse pci_restore_state(dev->pdev); 1712771fe6b9SJerome Glisse if (pci_enable_device(dev->pdev)) { 171310ebc0bcSDave Airlie if (fbcon) 1714ac751efaSTorben Hohn console_unlock(); 1715771fe6b9SJerome Glisse return -1; 1716771fe6b9SJerome Glisse } 17177473e830SDave Airlie } 17180ebf1717SDave Airlie /* resume AGP if in use */ 17190ebf1717SDave Airlie radeon_agp_resume(rdev); 17203ce0a23dSJerome Glisse radeon_resume(rdev); 172104eb2206SChristian König 172204eb2206SChristian König r = radeon_ib_ring_tests(rdev); 172304eb2206SChristian König if (r) 172404eb2206SChristian König DRM_ERROR("ib ring test failed (%d).\n", r); 172504eb2206SChristian König 1726bc6a6295SAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 17276c7bcceaSAlex Deucher /* do dpm late init */ 17286c7bcceaSAlex Deucher r = radeon_pm_late_init(rdev); 17296c7bcceaSAlex Deucher if (r) { 17306c7bcceaSAlex Deucher rdev->pm.dpm_enabled = false; 17316c7bcceaSAlex Deucher DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); 17326c7bcceaSAlex Deucher } 1733bc6a6295SAlex Deucher } else { 1734bc6a6295SAlex Deucher /* resume old pm late */ 1735bc6a6295SAlex Deucher radeon_pm_resume(rdev); 17366c7bcceaSAlex Deucher } 17376c7bcceaSAlex Deucher 1738f657c2a7SYang Zhao radeon_restore_bios_scratch_regs(rdev); 173909bdf591SCedric Godin 1740f3cbb17bSGrigori Goronzy /* pin cursors */ 1741f3cbb17bSGrigori Goronzy list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1742f3cbb17bSGrigori Goronzy struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1743f3cbb17bSGrigori Goronzy 1744f3cbb17bSGrigori Goronzy if (radeon_crtc->cursor_bo) { 1745f3cbb17bSGrigori Goronzy struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); 1746f3cbb17bSGrigori Goronzy r = radeon_bo_reserve(robj, false); 1747f3cbb17bSGrigori Goronzy if (r == 0) { 1748f3cbb17bSGrigori Goronzy /* Only 27 bit offset for legacy cursor */ 1749f3cbb17bSGrigori Goronzy r = radeon_bo_pin_restricted(robj, 1750f3cbb17bSGrigori Goronzy RADEON_GEM_DOMAIN_VRAM, 1751f3cbb17bSGrigori Goronzy ASIC_IS_AVIVO(rdev) ? 
1752f3cbb17bSGrigori Goronzy 0 : 1 << 27,
1753f3cbb17bSGrigori Goronzy &radeon_crtc->cursor_addr);
1754f3cbb17bSGrigori Goronzy if (r != 0)
1755f3cbb17bSGrigori Goronzy DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1756f3cbb17bSGrigori Goronzy radeon_bo_unreserve(robj);
1757f3cbb17bSGrigori Goronzy }
1758f3cbb17bSGrigori Goronzy }
1759f3cbb17bSGrigori Goronzy }
1760f3cbb17bSGrigori Goronzy
17613fa47d9eSAlex Deucher /* init dig PHYs, disp eng pll */
17623fa47d9eSAlex Deucher if (rdev->is_atom_bios) {
1763ac89af1eSAlex Deucher radeon_atom_encoder_init(rdev);
1764f3f1f03eSAlex Deucher radeon_atom_disp_eng_pll_init(rdev);
1765bced76f2SAlex Deucher /* turn on the BL */
1766bced76f2SAlex Deucher if (rdev->mode_info.bl_encoder) {
1767bced76f2SAlex Deucher u8 bl_level = radeon_get_backlight_level(rdev,
1768bced76f2SAlex Deucher rdev->mode_info.bl_encoder);
1769bced76f2SAlex Deucher radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1770bced76f2SAlex Deucher bl_level);
1771bced76f2SAlex Deucher }
17723fa47d9eSAlex Deucher }
1773d4877cf2SAlex Deucher /* reset hpd state */
1774d4877cf2SAlex Deucher radeon_hpd_init(rdev);
1775771fe6b9SJerome Glisse /* blat the mode back in */
1776ec9954fcSDave Airlie if (fbcon) {
1777771fe6b9SJerome Glisse drm_helper_resume_force_mode(dev);
1778a93f344dSAlex Deucher /* turn on display hw */
17796adaed5bSDaniel Vetter drm_modeset_lock_all(dev);
1780a93f344dSAlex Deucher list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1781a93f344dSAlex Deucher drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1782a93f344dSAlex Deucher }
17836adaed5bSDaniel Vetter drm_modeset_unlock_all(dev);
1784ec9954fcSDave Airlie }
178586698c20SSeth Forshee
178686698c20SSeth Forshee drm_kms_helper_poll_enable(dev);
178718ee37a4SDaniel Vetter
17883640da2fSAlex Deucher /* set the power state here in case we are a PX system or headless */
17893640da2fSAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
17903640da2fSAlex Deucher radeon_pm_compute_clocks(rdev);
17913640da2fSAlex Deucher
179218ee37a4SDaniel Vetter if (fbcon) {
179318ee37a4SDaniel Vetter radeon_fbdev_set_suspend(rdev, 0);
179418ee37a4SDaniel Vetter console_unlock();
179518ee37a4SDaniel Vetter }
179618ee37a4SDaniel Vetter
1797771fe6b9SJerome Glisse return 0;
1798771fe6b9SJerome Glisse }
1799771fe6b9SJerome Glisse
18000c195119SAlex Deucher /**
18010c195119SAlex Deucher * radeon_gpu_reset - reset the asic
18020c195119SAlex Deucher *
18030c195119SAlex Deucher * @rdev: radeon device pointer
18040c195119SAlex Deucher *
18050c195119SAlex Deucher * Attempt to reset the GPU if it has hung (all asics).
18060c195119SAlex Deucher * Returns 0 for success or an error on failure.
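 * Saves any commands still queued on the rings, resets and resumes the ASIC,
 * then replays the saved ring contents; rings that cannot be replayed have
 * their pending fences force-completed.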
18070c195119SAlex Deucher */ 180890aca4d2SJerome Glisse int radeon_gpu_reset(struct radeon_device *rdev) 180990aca4d2SJerome Glisse { 181055d7c221SChristian König unsigned ring_sizes[RADEON_NUM_RINGS]; 181155d7c221SChristian König uint32_t *ring_data[RADEON_NUM_RINGS]; 181255d7c221SChristian König 181355d7c221SChristian König bool saved = false; 181455d7c221SChristian König 181555d7c221SChristian König int i, r; 18168fd1b84cSDave Airlie int resched; 181790aca4d2SJerome Glisse 1818dee53e7fSJerome Glisse down_write(&rdev->exclusive_lock); 1819f9eaf9aeSChristian König 1820f9eaf9aeSChristian König if (!rdev->needs_reset) { 1821f9eaf9aeSChristian König up_write(&rdev->exclusive_lock); 1822f9eaf9aeSChristian König return 0; 1823f9eaf9aeSChristian König } 1824f9eaf9aeSChristian König 182572b9076bSMarek Olšák atomic_inc(&rdev->gpu_reset_counter); 182672b9076bSMarek Olšák 182790aca4d2SJerome Glisse radeon_save_bios_scratch_regs(rdev); 18288fd1b84cSDave Airlie /* block TTM */ 18298fd1b84cSDave Airlie resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 183090aca4d2SJerome Glisse radeon_suspend(rdev); 183173ef0e0dSAlex Deucher radeon_hpd_fini(rdev); 183290aca4d2SJerome Glisse 183355d7c221SChristian König for (i = 0; i < RADEON_NUM_RINGS; ++i) { 183455d7c221SChristian König ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], 183555d7c221SChristian König &ring_data[i]); 183655d7c221SChristian König if (ring_sizes[i]) { 183755d7c221SChristian König saved = true; 183855d7c221SChristian König dev_info(rdev->dev, "Saved %d dwords of commands " 183955d7c221SChristian König "on ring %d.\n", ring_sizes[i], i); 184055d7c221SChristian König } 184155d7c221SChristian König } 184255d7c221SChristian König 184390aca4d2SJerome Glisse r = radeon_asic_reset(rdev); 184490aca4d2SJerome Glisse if (!r) { 184555d7c221SChristian König dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n"); 184690aca4d2SJerome Glisse radeon_resume(rdev); 184755d7c221SChristian König } 184804eb2206SChristian König 184990aca4d2SJerome Glisse radeon_restore_bios_scratch_regs(rdev); 185055d7c221SChristian König 185155d7c221SChristian König for (i = 0; i < RADEON_NUM_RINGS; ++i) { 18529bb39ff4SMaarten Lankhorst if (!r && ring_data[i]) { 185355d7c221SChristian König radeon_ring_restore(rdev, &rdev->ring[i], 185455d7c221SChristian König ring_sizes[i], ring_data[i]); 185555d7c221SChristian König } else { 1856eb98c709SChristian König radeon_fence_driver_force_completion(rdev, i); 185755d7c221SChristian König kfree(ring_data[i]); 185855d7c221SChristian König } 185955d7c221SChristian König } 186055d7c221SChristian König 1861c940b447SAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 1862c940b447SAlex Deucher /* do dpm late init */ 1863c940b447SAlex Deucher r = radeon_pm_late_init(rdev); 1864c940b447SAlex Deucher if (r) { 1865c940b447SAlex Deucher rdev->pm.dpm_enabled = false; 1866c940b447SAlex Deucher DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); 1867c940b447SAlex Deucher } 1868c940b447SAlex Deucher } else { 1869c940b447SAlex Deucher /* resume old pm late */ 187095f59509SAlex Deucher radeon_pm_resume(rdev); 1871c940b447SAlex Deucher } 1872c940b447SAlex Deucher 187373ef0e0dSAlex Deucher /* init dig PHYs, disp eng pll */ 187473ef0e0dSAlex Deucher if (rdev->is_atom_bios) { 187573ef0e0dSAlex Deucher radeon_atom_encoder_init(rdev); 187673ef0e0dSAlex Deucher radeon_atom_disp_eng_pll_init(rdev); 187773ef0e0dSAlex Deucher /* turn on the BL */ 187873ef0e0dSAlex Deucher if (rdev->mode_info.bl_encoder) 
{ 187973ef0e0dSAlex Deucher u8 bl_level = radeon_get_backlight_level(rdev, 188073ef0e0dSAlex Deucher rdev->mode_info.bl_encoder); 188173ef0e0dSAlex Deucher radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, 188273ef0e0dSAlex Deucher bl_level); 188373ef0e0dSAlex Deucher } 188473ef0e0dSAlex Deucher } 188573ef0e0dSAlex Deucher /* reset hpd state */ 188673ef0e0dSAlex Deucher radeon_hpd_init(rdev); 188773ef0e0dSAlex Deucher 18889bb39ff4SMaarten Lankhorst ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 18893c036389SChristian König 18903c036389SChristian König rdev->in_reset = true; 18913c036389SChristian König rdev->needs_reset = false; 18923c036389SChristian König 18939bb39ff4SMaarten Lankhorst downgrade_write(&rdev->exclusive_lock); 18949bb39ff4SMaarten Lankhorst 1895d3493574SJerome Glisse drm_helper_resume_force_mode(rdev->ddev); 1896d3493574SJerome Glisse 1897c940b447SAlex Deucher /* set the power state here in case we are a PX system or headless */ 1898c940b447SAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) 1899c940b447SAlex Deucher radeon_pm_compute_clocks(rdev); 1900c940b447SAlex Deucher 19019bb39ff4SMaarten Lankhorst if (!r) { 19029bb39ff4SMaarten Lankhorst r = radeon_ib_ring_tests(rdev); 19039bb39ff4SMaarten Lankhorst if (r && saved) 19049bb39ff4SMaarten Lankhorst r = -EAGAIN; 19059bb39ff4SMaarten Lankhorst } else { 190690aca4d2SJerome Glisse /* bad news, how to tell it to userspace ? */ 190790aca4d2SJerome Glisse dev_info(rdev->dev, "GPU reset failed\n"); 19087a1619b9SMichel Dänzer } 19097a1619b9SMichel Dänzer 19109bb39ff4SMaarten Lankhorst rdev->needs_reset = r == -EAGAIN; 19119bb39ff4SMaarten Lankhorst rdev->in_reset = false; 19129bb39ff4SMaarten Lankhorst 19139bb39ff4SMaarten Lankhorst up_read(&rdev->exclusive_lock); 191490aca4d2SJerome Glisse return r; 191590aca4d2SJerome Glisse } 191690aca4d2SJerome Glisse 1917771fe6b9SJerome Glisse 1918771fe6b9SJerome Glisse /* 1919771fe6b9SJerome Glisse * Debugfs 1920771fe6b9SJerome Glisse */ 1921771fe6b9SJerome Glisse int radeon_debugfs_add_files(struct radeon_device *rdev, 1922771fe6b9SJerome Glisse struct drm_info_list *files, 1923771fe6b9SJerome Glisse unsigned nfiles) 1924771fe6b9SJerome Glisse { 1925771fe6b9SJerome Glisse unsigned i; 1926771fe6b9SJerome Glisse 19274d8bf9aeSChristian König for (i = 0; i < rdev->debugfs_count; i++) { 19284d8bf9aeSChristian König if (rdev->debugfs[i].files == files) { 1929771fe6b9SJerome Glisse /* Already registered */ 1930771fe6b9SJerome Glisse return 0; 1931771fe6b9SJerome Glisse } 1932771fe6b9SJerome Glisse } 1933c245cb9eSMichael Witten 19344d8bf9aeSChristian König i = rdev->debugfs_count + 1; 1935c245cb9eSMichael Witten if (i > RADEON_DEBUGFS_MAX_COMPONENTS) { 1936c245cb9eSMichael Witten DRM_ERROR("Reached maximum number of debugfs components.\n"); 1937c245cb9eSMichael Witten DRM_ERROR("Report so we increase " 1938c245cb9eSMichael Witten "RADEON_DEBUGFS_MAX_COMPONENTS.\n"); 1939771fe6b9SJerome Glisse return -EINVAL; 1940771fe6b9SJerome Glisse } 19414d8bf9aeSChristian König rdev->debugfs[rdev->debugfs_count].files = files; 19424d8bf9aeSChristian König rdev->debugfs[rdev->debugfs_count].num_files = nfiles; 19434d8bf9aeSChristian König rdev->debugfs_count = i; 1944771fe6b9SJerome Glisse #if defined(CONFIG_DEBUG_FS) 1945771fe6b9SJerome Glisse drm_debugfs_create_files(files, nfiles, 1946771fe6b9SJerome Glisse rdev->ddev->primary->debugfs_root, 1947771fe6b9SJerome Glisse rdev->ddev->primary); 1948771fe6b9SJerome Glisse #endif 1949771fe6b9SJerome Glisse 
return 0; 1950771fe6b9SJerome Glisse } 1951
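
/*
 * A minimal usage sketch of radeon_debugfs_add_files(), following the
 * drm_info_node/seq_file pattern used by the driver's other debugfs hooks.
 * The "radeon_debugfs_example" names below are illustrative only and are
 * not part of the driver.
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_example_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	/* dump a trivial piece of device state */
	seq_printf(m, "accel working: %d\n", rdev->accel_working);
	return 0;
}

static struct drm_info_list radeon_debugfs_example_list[] = {
	{ "radeon_example_info", radeon_debugfs_example_info, 0, NULL },
};
#endif

/* a component would register its files once, typically from its init path */
static int radeon_debugfs_example_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_example_list,
					ARRAY_SIZE(radeon_debugfs_example_list));
#else
	return 0;
#endif
}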