/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx_dgpu_power_cntl(void);
bool radeon_is_atpx_hybrid(void);
#else
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif

#define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)

struct radeon_px_quirk {
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 px_quirk_flags;
};

static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	{ 0, 0, 0, 0, 0 },
};

bool radeon_is_px(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->flags & RADEON_IS_PX)
		return true;
	return false;
}

static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
	struct radeon_px_quirk *p = radeon_px_quirk_list;

	/* Apply PX quirks */
	while (p && p->chip_device != 0) {
		if (rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			rdev->px_quirk_flags = p->px_quirk_flags;
			break;
		}
		++p;
	}

	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
		rdev->flags &= ~RADEON_IS_PX;

	/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
	if (!radeon_is_atpx_hybrid() &&
	    !radeon_has_atpx_dgpu_power_cntl())
		rdev->flags &= ~RADEON_IS_PX;
}

/**
 * radeon_program_register_sequence - program an array of registers.
 *
 * @rdev: radeon_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void radeon_program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
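
/*
 * Illustrative sketch, not part of the driver: golden register tables are
 * flat arrays of {offset, and_mask, or_mask} triplets.  The offsets and
 * masks below are placeholder values, not real golden settings for any asic.
 */
#if 0
static const u32 example_golden_registers[] =
{
	/* and_mask == 0xffffffff: or_mask is written to the register as-is */
	0x9a10, 0xffffffff, 0x00018208,
	/* otherwise: read, clear the and_mask bits, set the or_mask bits */
	0x3f90, 0xffff0000, 0xff000000,
};

static void example_init_golden_registers(struct radeon_device *rdev)
{
	radeon_program_register_sequence(rdev,
					 example_golden_registers,
					 (const u32)ARRAY_SIZE(example_golden_registers));
}
#endif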

void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}

/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch registers helpers function.
 */
/**
 * radeon_scratch_init - Init scratch register driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init CP scratch register driver information (r1xx-r5xx)
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

/**
 * radeon_scratch_get - Allocate a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

/**
 * radeon_scratch_free - Free a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}
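
/*
 * Illustrative sketch, not part of the driver: a typical scratch register
 * round trip of the kind the ring tests use.  The 0xCAFEDEAD marker value
 * is arbitrary.
 */
#if 0
static int example_scratch_round_trip(struct radeon_device *rdev)
{
	uint32_t scratch;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r)
		return r;	/* all scratch registers are in use */
	WREG32(scratch, 0xCAFEDEAD);
	/* ... submit work that writes a new value to the register ... */
	radeon_scratch_free(rdev, scratch);
	return 0;
}
#endif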

/*
 * GPU doorbell aperture helpers function.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}

/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}

/**
 * radeon_doorbell_get - Allocate a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Allocate a doorbell for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
	if (offset < rdev->doorbell.num_doorbells) {
		__set_bit(offset, rdev->doorbell.used);
		*doorbell = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * radeon_doorbell_free - Free a doorbell entry
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell index
 *
 * Free a doorbell allocated for use by the driver (all asics)
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}

/**
 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup KFD
 *
 * @rdev: radeon_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for radeon.
 *
 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
 * takes doorbells required for its own rings and reports the setup to KFD.
 * Radeon reserved doorbells are at the start of the doorbell aperture.
 */
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/* The first num_doorbells are used by radeon.
	 * KFD takes whatever's left in the aperture. */
	if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = rdev->doorbell.base;
		*aperture_size = rdev->doorbell.size;
		*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
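
/*
 * Illustrative sketch, not part of the driver: allocating and releasing a
 * doorbell slot.  It assumes the index is stashed in the ring structure's
 * doorbell_index field, as done for CIK compute rings; error handling is
 * abbreviated.
 */
#if 0
static int example_doorbell_usage(struct radeon_device *rdev,
				  struct radeon_ring *ring)
{
	int r;

	r = radeon_doorbell_get(rdev, &ring->doorbell_index);
	if (r)
		return r;	/* doorbell aperture exhausted */
	/* ... ring setup; writes to the doorbell later notify the GPU
	 * that the ring's write pointer has moved ... */
	radeon_doorbell_free(rdev, ring->doorbell_index);
	return 0;
}
#endif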

/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}

/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes Writeback and allocates the Writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
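
/*
 * Illustrative sketch, not part of the driver: when writeback is enabled,
 * consumers read status values from the writeback page instead of an MMIO
 * register.  The index below is a byte offset into the page (e.g. a ring's
 * rptr slot); the MMIO fallback register shown is only an example.
 */
#if 0
static u32 example_read_status(struct radeon_device *rdev, unsigned index)
{
	if (rdev->wb.enabled)
		return le32_to_cpu(rdev->wb.wb[index / 4]);
	return RREG32(RADEON_SCRATCH_REG0);	/* example MMIO fallback */
}
#endif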

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882 along with lots of ubuntu ones)
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of ubuntu ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
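
/*
 * Worked example (illustrative numbers, gtt_base_align taken as 0): with a
 * 32-bit MC address mask, 1024M of VRAM placed at base 0 and a 512M GTT
 * request, radeon_vram_location() gives
 *   vram_start = 0x00000000, vram_end = 0x3FFFFFFF
 * and radeon_gtt_location() sees size_bf = 0, size_af = 3072M, so the GTT
 * is placed after VRAM:
 *   gtt_start = 0x40000000, gtt_end = 0x5FFFFFFF
 * Had less than 512M been free, gtt_size would have been clamped with a
 * warning instead.
 */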

/*
 * GPU helpers function.
 */

/**
 * radeon_device_is_virtual - check if we are running in a virtual environment
 *
 * Check if the asic has been passed through to a VM (all asics).
 * Used at driver startup.
 * Returns true if virtual or false if not.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}

/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* for pass through, always force asic_init for CI */
	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize
 * it (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}

/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}

/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
/**
 * radeon_vga_set_decode - enable/disable vga decode
 *
 * @cookie: radeon_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}
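
/*
 * Example: 512 & 511 == 0, so 512 passes the check above, while
 * 768 & 767 == 512, so 768 is rejected.  Zero also passes, which lets
 * the "0 means unlimited/default" module parameters through without a
 * warning.
 */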
11215e3c4f90SGrigori Goronzy * 11225e3c4f90SGrigori Goronzy * @family ASIC family name 11235e3c4f90SGrigori Goronzy */ 11245e3c4f90SGrigori Goronzy static int radeon_gart_size_auto(enum radeon_family family) 11255e3c4f90SGrigori Goronzy { 11265e3c4f90SGrigori Goronzy /* default to a larger gart size on newer asics */ 11275e3c4f90SGrigori Goronzy if (family >= CHIP_TAHITI) 11285e3c4f90SGrigori Goronzy return 2048; 11295e3c4f90SGrigori Goronzy else if (family >= CHIP_RV770) 11305e3c4f90SGrigori Goronzy return 1024; 11315e3c4f90SGrigori Goronzy else 11325e3c4f90SGrigori Goronzy return 512; 11335e3c4f90SGrigori Goronzy } 11345e3c4f90SGrigori Goronzy 11355e3c4f90SGrigori Goronzy /** 11360c195119SAlex Deucher * radeon_check_arguments - validate module params 11370c195119SAlex Deucher * 11380c195119SAlex Deucher * @rdev: radeon_device pointer 11390c195119SAlex Deucher * 11400c195119SAlex Deucher * Validates certain module parameters and updates 11410c195119SAlex Deucher * the associated values used by the driver (all asics). 11420c195119SAlex Deucher */ 11431109ca09SLauri Kasanen static void radeon_check_arguments(struct radeon_device *rdev) 114436421338SJerome Glisse { 114536421338SJerome Glisse /* vramlimit must be a power of two */ 11461bcb04f7SChristian König if (!radeon_check_pot_argument(radeon_vram_limit)) { 114736421338SJerome Glisse dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", 114836421338SJerome Glisse radeon_vram_limit); 114936421338SJerome Glisse radeon_vram_limit = 0; 115036421338SJerome Glisse } 11511bcb04f7SChristian König 1152edcd26e8SAlex Deucher if (radeon_gart_size == -1) { 11535e3c4f90SGrigori Goronzy radeon_gart_size = radeon_gart_size_auto(rdev->family); 1154edcd26e8SAlex Deucher } 115536421338SJerome Glisse /* gtt size must be power of two and greater or equal to 32M */ 11561bcb04f7SChristian König if (radeon_gart_size < 32) { 1157edcd26e8SAlex Deucher dev_warn(rdev->dev, "gart size (%d) too small\n", 115836421338SJerome Glisse radeon_gart_size); 11595e3c4f90SGrigori Goronzy radeon_gart_size = radeon_gart_size_auto(rdev->family); 11601bcb04f7SChristian König } else if (!radeon_check_pot_argument(radeon_gart_size)) { 116136421338SJerome Glisse dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", 116236421338SJerome Glisse radeon_gart_size); 11635e3c4f90SGrigori Goronzy radeon_gart_size = radeon_gart_size_auto(rdev->family); 116436421338SJerome Glisse } 11651bcb04f7SChristian König rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; 11661bcb04f7SChristian König 116736421338SJerome Glisse /* AGP mode can only be -1, 1, 2, 4, 8 */ 116836421338SJerome Glisse switch (radeon_agpmode) { 116936421338SJerome Glisse case -1: 117036421338SJerome Glisse case 0: 117136421338SJerome Glisse case 1: 117236421338SJerome Glisse case 2: 117336421338SJerome Glisse case 4: 117436421338SJerome Glisse case 8: 117536421338SJerome Glisse break; 117636421338SJerome Glisse default: 117736421338SJerome Glisse dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: " 117836421338SJerome Glisse "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode); 117936421338SJerome Glisse radeon_agpmode = 0; 118036421338SJerome Glisse break; 118136421338SJerome Glisse } 1182c1c44132SChristian König 1183c1c44132SChristian König if (!radeon_check_pot_argument(radeon_vm_size)) { 1184c1c44132SChristian König dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n", 1185c1c44132SChristian König radeon_vm_size); 118620b2656dSChristian König radeon_vm_size = 4; 1187c1c44132SChristian König } 
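	/*
	 * Editor's note (illustrative worked example, not part of the original
	 * source): for radeon_vm_size = 8 (GB) the checks below keep the value
	 * unchanged and the automatic path computes bits = ilog2(8) + 18 = 21.
	 * Since 8 <= 8, radeon_vm_block_size becomes 21 - 9 = 12, which leaves
	 * 9 bits (512 entries) to the page directory, the "PD is 4K" case
	 * described in the comment below.
	 */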
1188c1c44132SChristian König 118920b2656dSChristian König if (radeon_vm_size < 1) { 119013c240efSAlexandre Demers dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n", 1191c1c44132SChristian König radeon_vm_size); 119220b2656dSChristian König radeon_vm_size = 4; 1193c1c44132SChristian König } 1194c1c44132SChristian König 1195c1c44132SChristian König /* 1196c1c44132SChristian König * Max GPUVM size for Cayman, SI and CI is 40 bits. 1197c1c44132SChristian König */ 119820b2656dSChristian König if (radeon_vm_size > 1024) { 119920b2656dSChristian König dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n", 1200c1c44132SChristian König radeon_vm_size); 120120b2656dSChristian König radeon_vm_size = 4; 1202c1c44132SChristian König } 12034510fb98SChristian König 12044510fb98SChristian König /* defines the number of bits in the page table versus the page directory, 12054510fb98SChristian König * a page is 4KB so we have 12 bits of offset, minimum 9 bits in the 12064510fb98SChristian König * page table and the remaining bits are in the page directory */ 1207dfc230f9SChristian König if (radeon_vm_block_size == -1) { 1208dfc230f9SChristian König 1209dfc230f9SChristian König /* Total bits covered by PD + PTs */ 12108e66e134SAlex Deucher unsigned bits = ilog2(radeon_vm_size) + 18; 1211dfc230f9SChristian König 1212dfc230f9SChristian König /* Make sure the PD is 4K in size up to 8GB address space. 1213dfc230f9SChristian König Above that split equally between PD and PTs */ 1214dfc230f9SChristian König if (radeon_vm_size <= 8) 1215dfc230f9SChristian König radeon_vm_block_size = bits - 9; 1216dfc230f9SChristian König else 1217dfc230f9SChristian König radeon_vm_block_size = (bits + 3) / 2; 1218dfc230f9SChristian König 1219dfc230f9SChristian König } else if (radeon_vm_block_size < 9) { 122020b2656dSChristian König dev_warn(rdev->dev, "VM page table size (%d) too small\n", 12214510fb98SChristian König radeon_vm_block_size); 12224510fb98SChristian König radeon_vm_block_size = 9; 12234510fb98SChristian König } 12244510fb98SChristian König 12254510fb98SChristian König if (radeon_vm_block_size > 24 || 122620b2656dSChristian König (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) { 122720b2656dSChristian König dev_warn(rdev->dev, "VM page table size (%d) too large\n", 12284510fb98SChristian König radeon_vm_block_size); 12294510fb98SChristian König radeon_vm_block_size = 9; 12304510fb98SChristian König } 123136421338SJerome Glisse } 123236421338SJerome Glisse 12330c195119SAlex Deucher /** 12340c195119SAlex Deucher * radeon_switcheroo_set_state - set switcheroo state 12350c195119SAlex Deucher * 12360c195119SAlex Deucher * @pdev: pci dev pointer 12378e5de1d8SLukas Wunner * @state: vga_switcheroo state 12380c195119SAlex Deucher * 12390c195119SAlex Deucher * Callback for the switcheroo driver. Suspends or resumes the 12400c195119SAlex Deucher * asics before or after they are powered up using ACPI methods.
12410c195119SAlex Deucher */ 12426a9ee8afSDave Airlie static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 12436a9ee8afSDave Airlie { 12446a9ee8afSDave Airlie struct drm_device *dev = pci_get_drvdata(pdev); 124510ebc0bcSDave Airlie 124690c4cde9SAlex Deucher if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF) 124710ebc0bcSDave Airlie return; 124810ebc0bcSDave Airlie 12496a9ee8afSDave Airlie if (state == VGA_SWITCHEROO_ON) { 12507ca85295SJoe Perches pr_info("radeon: switched on\n"); 12516a9ee8afSDave Airlie /* don't suspend or resume card normally */ 12525bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1253d1f9809eSMaarten Lankhorst 125410ebc0bcSDave Airlie radeon_resume_kms(dev, true, true); 1255d1f9809eSMaarten Lankhorst 12565bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_ON; 1257fbf81762SDave Airlie drm_kms_helper_poll_enable(dev); 12586a9ee8afSDave Airlie } else { 12597ca85295SJoe Perches pr_info("radeon: switched off\n"); 1260fbf81762SDave Airlie drm_kms_helper_poll_disable(dev); 12615bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1262274ad65cSJérome Glisse radeon_suspend_kms(dev, true, true, false); 12635bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_OFF; 12646a9ee8afSDave Airlie } 12656a9ee8afSDave Airlie } 12666a9ee8afSDave Airlie 12670c195119SAlex Deucher /** 12680c195119SAlex Deucher * radeon_switcheroo_can_switch - see if switcheroo state can change 12690c195119SAlex Deucher * 12700c195119SAlex Deucher * @pdev: pci dev pointer 12710c195119SAlex Deucher * 12720c195119SAlex Deucher * Callback for the switcheroo driver. Checks whether the switcheroo 12730c195119SAlex Deucher * state can be changed. 12740c195119SAlex Deucher * Returns true if the state can be changed, false if not. 12750c195119SAlex Deucher */ 12766a9ee8afSDave Airlie static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) 12776a9ee8afSDave Airlie { 12786a9ee8afSDave Airlie struct drm_device *dev = pci_get_drvdata(pdev); 12796a9ee8afSDave Airlie 1280fc8fd40eSDaniel Vetter /* 1281fc8fd40eSDaniel Vetter * FIXME: open_count is protected by drm_global_mutex but that would lead to 1282fc8fd40eSDaniel Vetter * locking inversion with the driver load path. And the access here is 1283fc8fd40eSDaniel Vetter * completely racy anyway. So don't bother with locking for now. 1284fc8fd40eSDaniel Vetter */ 1285fc8fd40eSDaniel Vetter return dev->open_count == 0; 12866a9ee8afSDave Airlie } 12876a9ee8afSDave Airlie 128826ec685fSTakashi Iwai static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = { 128926ec685fSTakashi Iwai .set_gpu_state = radeon_switcheroo_set_state, 129026ec685fSTakashi Iwai .reprobe = NULL, 129126ec685fSTakashi Iwai .can_switch = radeon_switcheroo_can_switch, 129226ec685fSTakashi Iwai }; 12936a9ee8afSDave Airlie 12940c195119SAlex Deucher /** 12950c195119SAlex Deucher * radeon_device_init - initialize the driver 12960c195119SAlex Deucher * 12970c195119SAlex Deucher * @rdev: radeon_device pointer 12980c195119SAlex Deucher * @ddev: drm dev pointer 12990c195119SAlex Deucher * @pdev: pci dev pointer 13000c195119SAlex Deucher * @flags: driver flags 13010c195119SAlex Deucher * 13020c195119SAlex Deucher * Initializes the driver info and hw (all asics). 13030c195119SAlex Deucher * Returns 0 for success or an error on failure. 13040c195119SAlex Deucher * Called at driver startup.
13050c195119SAlex Deucher */ 1306771fe6b9SJerome Glisse int radeon_device_init(struct radeon_device *rdev, 1307771fe6b9SJerome Glisse struct drm_device *ddev, 1308771fe6b9SJerome Glisse struct pci_dev *pdev, 1309771fe6b9SJerome Glisse uint32_t flags) 1310771fe6b9SJerome Glisse { 1311351a52a2SAlex Deucher int r, i; 1312ad49f501SDave Airlie int dma_bits; 131310ebc0bcSDave Airlie bool runtime = false; 1314771fe6b9SJerome Glisse 1315771fe6b9SJerome Glisse rdev->shutdown = false; 13169f022ddfSJerome Glisse rdev->dev = &pdev->dev; 1317771fe6b9SJerome Glisse rdev->ddev = ddev; 1318771fe6b9SJerome Glisse rdev->pdev = pdev; 1319771fe6b9SJerome Glisse rdev->flags = flags; 1320771fe6b9SJerome Glisse rdev->family = flags & RADEON_FAMILY_MASK; 1321771fe6b9SJerome Glisse rdev->is_atom_bios = false; 1322771fe6b9SJerome Glisse rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; 1323edcd26e8SAlex Deucher rdev->mc.gtt_size = 512 * 1024 * 1024; 1324733289c2SJerome Glisse rdev->accel_working = false; 13258b25ed34SAlex Deucher /* set up ring ids */ 13268b25ed34SAlex Deucher for (i = 0; i < RADEON_NUM_RINGS; i++) { 13278b25ed34SAlex Deucher rdev->ring[i].idx = i; 13288b25ed34SAlex Deucher } 1329f54d1867SChris Wilson rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS); 13301b5331d9SJerome Glisse 1331fe0d36e0SAlex Deucher DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 1332d522d9ccSThomas Reim radeon_family_name[rdev->family], pdev->vendor, pdev->device, 1333fe0d36e0SAlex Deucher pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 13341b5331d9SJerome Glisse 1335771fe6b9SJerome Glisse /* mutex initialization are all done here so we 1336771fe6b9SJerome Glisse * can recall function without having locking issues */ 1337d6999bc7SChristian König mutex_init(&rdev->ring_lock); 133840bacf16SAlex Deucher mutex_init(&rdev->dc_hw_i2c_mutex); 1339c20dc369SChristian Koenig atomic_set(&rdev->ih.lock, 0); 13404c788679SJerome Glisse mutex_init(&rdev->gem.mutex); 1341c913e23aSRafał Miłecki mutex_init(&rdev->pm.mutex); 13426759a0a7SMarek Olšák mutex_init(&rdev->gpu_clock_mutex); 1343f61d5b46SAlex Deucher mutex_init(&rdev->srbm_mutex); 13441c0a4625SOded Gabbay mutex_init(&rdev->grbm_idx_mutex); 1345db7fce39SChristian König init_rwsem(&rdev->pm.mclk_lock); 1346dee53e7fSJerome Glisse init_rwsem(&rdev->exclusive_lock); 134773a6d3fcSRafał Miłecki init_waitqueue_head(&rdev->irq.vblank_queue); 1348341cb9e4SChristian König mutex_init(&rdev->mn_lock); 1349341cb9e4SChristian König hash_init(rdev->mn_hash); 13501b9c3dd0SAlex Deucher r = radeon_gem_init(rdev); 13511b9c3dd0SAlex Deucher if (r) 13521b9c3dd0SAlex Deucher return r; 1353529364e0SChristian König 1354c1c44132SChristian König radeon_check_arguments(rdev); 135523d4f1f2SAlex Deucher /* Adjust VM size here. 1356c1c44132SChristian König * Max GPUVM size for cayman+ is 40 bits. 135723d4f1f2SAlex Deucher */ 135820b2656dSChristian König rdev->vm_manager.max_pfn = radeon_vm_size << 18; 1359771fe6b9SJerome Glisse 13604aac0473SJerome Glisse /* Set asic functions */ 13614aac0473SJerome Glisse r = radeon_asic_init(rdev); 136236421338SJerome Glisse if (r) 13634aac0473SJerome Glisse return r; 13644aac0473SJerome Glisse 1365f95df9caSAlex Deucher /* all of the newer IGP chips have an internal gart 1366f95df9caSAlex Deucher * However some rs4xx report as AGP, so remove that here. 
1367f95df9caSAlex Deucher */ 1368f95df9caSAlex Deucher if ((rdev->family >= CHIP_RS400) && 1369f95df9caSAlex Deucher (rdev->flags & RADEON_IS_IGP)) { 1370f95df9caSAlex Deucher rdev->flags &= ~RADEON_IS_AGP; 1371f95df9caSAlex Deucher } 1372f95df9caSAlex Deucher 137330256a3fSJerome Glisse if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { 1374b574f251SJerome Glisse radeon_agp_disable(rdev); 1375771fe6b9SJerome Glisse } 1376771fe6b9SJerome Glisse 13779ed8b1f9SAlex Deucher /* Set the internal MC address mask 13789ed8b1f9SAlex Deucher * This is the max address of the GPU's 13799ed8b1f9SAlex Deucher * internal address space. 13809ed8b1f9SAlex Deucher */ 13819ed8b1f9SAlex Deucher if (rdev->family >= CHIP_CAYMAN) 13829ed8b1f9SAlex Deucher rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ 13839ed8b1f9SAlex Deucher else if (rdev->family >= CHIP_CEDAR) 13849ed8b1f9SAlex Deucher rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */ 13859ed8b1f9SAlex Deucher else 13869ed8b1f9SAlex Deucher rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */ 13879ed8b1f9SAlex Deucher 1388ad49f501SDave Airlie /* set DMA mask + need_dma32 flags. 1389ad49f501SDave Airlie * PCIE - can handle 40-bits. 1390005a83f1SAlex Deucher * IGP - can handle 40-bits 1391ad49f501SDave Airlie * AGP - generally dma32 is safest 1392005a83f1SAlex Deucher * PCI - dma32 for legacy pci gart, 40 bits on newer asics 1393ad49f501SDave Airlie */ 1394ad49f501SDave Airlie rdev->need_dma32 = false; 1395ad49f501SDave Airlie if (rdev->flags & RADEON_IS_AGP) 1396ad49f501SDave Airlie rdev->need_dma32 = true; 1397005a83f1SAlex Deucher if ((rdev->flags & RADEON_IS_PCI) && 13984a2b6662SJerome Glisse (rdev->family <= CHIP_RS740)) 1399ad49f501SDave Airlie rdev->need_dma32 = true; 1400ad49f501SDave Airlie 1401ad49f501SDave Airlie dma_bits = rdev->need_dma32 ? 
32 : 40; 1402ad49f501SDave Airlie r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 1403771fe6b9SJerome Glisse if (r) { 140462fff811SDaniel Haid rdev->need_dma32 = true; 1405c52494f6SKonrad Rzeszutek Wilk dma_bits = 32; 14067ca85295SJoe Perches pr_warn("radeon: No suitable DMA available\n"); 1407771fe6b9SJerome Glisse } 1408c52494f6SKonrad Rzeszutek Wilk r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 1409c52494f6SKonrad Rzeszutek Wilk if (r) { 1410c52494f6SKonrad Rzeszutek Wilk pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); 14117ca85295SJoe Perches pr_warn("radeon: No coherent DMA available\n"); 1412c52494f6SKonrad Rzeszutek Wilk } 1413771fe6b9SJerome Glisse 1414771fe6b9SJerome Glisse /* Registers mapping */ 1415771fe6b9SJerome Glisse /* TODO: block userspace mapping of io register */ 14162c385151SDaniel Vetter spin_lock_init(&rdev->mmio_idx_lock); 1417fe78118cSAlex Deucher spin_lock_init(&rdev->smc_idx_lock); 14180a5b7b0bSAlex Deucher spin_lock_init(&rdev->pll_idx_lock); 14190a5b7b0bSAlex Deucher spin_lock_init(&rdev->mc_idx_lock); 14200a5b7b0bSAlex Deucher spin_lock_init(&rdev->pcie_idx_lock); 14210a5b7b0bSAlex Deucher spin_lock_init(&rdev->pciep_idx_lock); 14220a5b7b0bSAlex Deucher spin_lock_init(&rdev->pif_idx_lock); 14230a5b7b0bSAlex Deucher spin_lock_init(&rdev->cg_idx_lock); 14240a5b7b0bSAlex Deucher spin_lock_init(&rdev->uvd_idx_lock); 14250a5b7b0bSAlex Deucher spin_lock_init(&rdev->rcu_idx_lock); 14260a5b7b0bSAlex Deucher spin_lock_init(&rdev->didt_idx_lock); 14270a5b7b0bSAlex Deucher spin_lock_init(&rdev->end_idx_lock); 1428efad86dbSAlex Deucher if (rdev->family >= CHIP_BONAIRE) { 1429efad86dbSAlex Deucher rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); 1430efad86dbSAlex Deucher rdev->rmmio_size = pci_resource_len(rdev->pdev, 5); 1431efad86dbSAlex Deucher } else { 143201d73a69SJordan Crouse rdev->rmmio_base = pci_resource_start(rdev->pdev, 2); 143301d73a69SJordan Crouse rdev->rmmio_size = pci_resource_len(rdev->pdev, 2); 1434efad86dbSAlex Deucher } 1435771fe6b9SJerome Glisse rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size); 1436a33c1a82SAndy Shevchenko if (rdev->rmmio == NULL) 1437771fe6b9SJerome Glisse return -ENOMEM; 1438771fe6b9SJerome Glisse 143975efdee1SAlex Deucher /* doorbell bar mapping */ 144075efdee1SAlex Deucher if (rdev->family >= CHIP_BONAIRE) 144175efdee1SAlex Deucher radeon_doorbell_init(rdev); 144275efdee1SAlex Deucher 1443351a52a2SAlex Deucher /* io port mapping */ 1444351a52a2SAlex Deucher for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 1445351a52a2SAlex Deucher if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) { 1446351a52a2SAlex Deucher rdev->rio_mem_size = pci_resource_len(rdev->pdev, i); 1447351a52a2SAlex Deucher rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size); 1448351a52a2SAlex Deucher break; 1449351a52a2SAlex Deucher } 1450351a52a2SAlex Deucher } 1451351a52a2SAlex Deucher if (rdev->rio_mem == NULL) 1452351a52a2SAlex Deucher DRM_ERROR("Unable to find PCI I/O BAR\n"); 1453351a52a2SAlex Deucher 14544807c5a8SAlex Deucher if (rdev->flags & RADEON_IS_PX) 14554807c5a8SAlex Deucher radeon_device_handle_px_quirks(rdev); 14564807c5a8SAlex Deucher 145728d52043SDave Airlie /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 145893239ea1SDave Airlie /* this will fail for cards that aren't VGA class devices, just 145993239ea1SDave Airlie * ignore it */ 146093239ea1SDave Airlie vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 146110ebc0bcSDave Airlie 
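	/*
	 * Editor's note (illustrative, not part of the original source): the
	 * address masks chosen above map to address-space sizes as follows: a
	 * 40-bit MC mask (0xffffffffffULL) covers 1 TB, a 36-bit mask 64 GB and
	 * a 32-bit mask 4 GB; likewise dma_bits = 32 restricts DMA to the low
	 * 4 GB, which is why AGP and older PCI GART parts set need_dma32.
	 */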
1462bfaddd9fSAlex Deucher if (rdev->flags & RADEON_IS_PX) 146310ebc0bcSDave Airlie runtime = true; 14647ffb0ce3SLukas Wunner if (!pci_is_thunderbolt_attached(rdev->pdev)) 14657ffb0ce3SLukas Wunner vga_switcheroo_register_client(rdev->pdev, 14667ffb0ce3SLukas Wunner &radeon_switcheroo_ops, runtime); 146710ebc0bcSDave Airlie if (runtime) 146810ebc0bcSDave Airlie vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain); 146928d52043SDave Airlie 14703ce0a23dSJerome Glisse r = radeon_init(rdev); 1471b574f251SJerome Glisse if (r) 14722e97140dSAlex Deucher goto failed; 1473b1e3a6d1SMichel Dänzer 1474409851f4SJerome Glisse r = radeon_gem_debugfs_init(rdev); 1475409851f4SJerome Glisse if (r) { 1476409851f4SJerome Glisse DRM_ERROR("registering gem debugfs failed (%d).\n", r); 1477409851f4SJerome Glisse } 1478409851f4SJerome Glisse 14799843ead0SDave Airlie r = radeon_mst_debugfs_init(rdev); 14809843ead0SDave Airlie if (r) { 14819843ead0SDave Airlie DRM_ERROR("registering mst debugfs failed (%d).\n", r); 14829843ead0SDave Airlie } 14839843ead0SDave Airlie 1484b574f251SJerome Glisse if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { 1485b574f251SJerome Glisse /* Acceleration not working on AGP card, try again 1486b574f251SJerome Glisse * with fallback to PCI or PCIE GART 1487b574f251SJerome Glisse */ 1488a2d07b74SJerome Glisse radeon_asic_reset(rdev); 1489b574f251SJerome Glisse radeon_fini(rdev); 1490b574f251SJerome Glisse radeon_agp_disable(rdev); 1491b574f251SJerome Glisse r = radeon_init(rdev); 14924aac0473SJerome Glisse if (r) 14932e97140dSAlex Deucher goto failed; 14943ce0a23dSJerome Glisse } 14956c7bcceaSAlex Deucher 149613a7d299SChristian König r = radeon_ib_ring_tests(rdev); 149713a7d299SChristian König if (r) 149813a7d299SChristian König DRM_ERROR("ib ring test failed (%d).\n", r); 149913a7d299SChristian König 15006dfd1972SJérôme Glisse /* 15016dfd1972SJérôme Glisse * Turks/Thames GPUs will freeze the whole laptop if DPM is not restarted 15026dfd1972SJérôme Glisse * after the CP ring has chewed through at least one packet. Hence we stop 15036dfd1972SJérôme Glisse * and restart DPM here, after radeon_ib_ring_tests().
15046dfd1972SJérôme Glisse */ 15056dfd1972SJérôme Glisse if (rdev->pm.dpm_enabled && 15066dfd1972SJérôme Glisse (rdev->pm.pm_method == PM_METHOD_DPM) && 15076dfd1972SJérôme Glisse (rdev->family == CHIP_TURKS) && 15086dfd1972SJérôme Glisse (rdev->flags & RADEON_IS_MOBILITY)) { 15096dfd1972SJérôme Glisse mutex_lock(&rdev->pm.mutex); 15106dfd1972SJérôme Glisse radeon_dpm_disable(rdev); 15116dfd1972SJérôme Glisse radeon_dpm_enable(rdev); 15126dfd1972SJérôme Glisse mutex_unlock(&rdev->pm.mutex); 15136dfd1972SJérôme Glisse } 15146dfd1972SJérôme Glisse 151560a7e396SChristian König if ((radeon_testing & 1)) { 15164a1132a0SAlex Deucher if (rdev->accel_working) 1517ecc0b326SMichel Dänzer radeon_test_moves(rdev); 15184a1132a0SAlex Deucher else 15194a1132a0SAlex Deucher DRM_INFO("radeon: acceleration disabled, skipping move tests\n"); 1520ecc0b326SMichel Dänzer } 152160a7e396SChristian König if ((radeon_testing & 2)) { 15224a1132a0SAlex Deucher if (rdev->accel_working) 152360a7e396SChristian König radeon_test_syncing(rdev); 15244a1132a0SAlex Deucher else 15254a1132a0SAlex Deucher DRM_INFO("radeon: acceleration disabled, skipping sync tests\n"); 152660a7e396SChristian König } 1527771fe6b9SJerome Glisse if (radeon_benchmarking) { 15284a1132a0SAlex Deucher if (rdev->accel_working) 1529638dd7dbSIlija Hadzic radeon_benchmark(rdev, radeon_benchmarking); 15304a1132a0SAlex Deucher else 15314a1132a0SAlex Deucher DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n"); 1532771fe6b9SJerome Glisse } 15336cf8a3f5SJerome Glisse return 0; 15342e97140dSAlex Deucher 15352e97140dSAlex Deucher failed: 1536b8751946SLukas Wunner /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */ 1537b8751946SLukas Wunner if (radeon_is_px(ddev)) 1538b8751946SLukas Wunner pm_runtime_put_noidle(ddev->dev); 15392e97140dSAlex Deucher if (runtime) 15402e97140dSAlex Deucher vga_switcheroo_fini_domain_pm_ops(rdev->dev); 15412e97140dSAlex Deucher return r; 1542771fe6b9SJerome Glisse } 1543771fe6b9SJerome Glisse 15440c195119SAlex Deucher /** 15450c195119SAlex Deucher * radeon_device_fini - tear down the driver 15460c195119SAlex Deucher * 15470c195119SAlex Deucher * @rdev: radeon_device pointer 15480c195119SAlex Deucher * 15490c195119SAlex Deucher * Tear down the driver info (all asics). 15500c195119SAlex Deucher * Called at driver shutdown. 
15510c195119SAlex Deucher */ 1552771fe6b9SJerome Glisse void radeon_device_fini(struct radeon_device *rdev) 1553771fe6b9SJerome Glisse { 1554771fe6b9SJerome Glisse DRM_INFO("radeon: finishing device.\n"); 1555771fe6b9SJerome Glisse rdev->shutdown = true; 155690aca4d2SJerome Glisse /* evict vram memory */ 155790aca4d2SJerome Glisse radeon_bo_evict_vram(rdev); 15583ce0a23dSJerome Glisse radeon_fini(rdev); 15597ffb0ce3SLukas Wunner if (!pci_is_thunderbolt_attached(rdev->pdev)) 15606a9ee8afSDave Airlie vga_switcheroo_unregister_client(rdev->pdev); 15612e97140dSAlex Deucher if (rdev->flags & RADEON_IS_PX) 15622e97140dSAlex Deucher vga_switcheroo_fini_domain_pm_ops(rdev->dev); 1563c1176d6fSDave Airlie vga_client_register(rdev->pdev, NULL, NULL, NULL); 1564e0a2ca73SAlex Deucher if (rdev->rio_mem) 1565351a52a2SAlex Deucher pci_iounmap(rdev->pdev, rdev->rio_mem); 1566351a52a2SAlex Deucher rdev->rio_mem = NULL; 1567771fe6b9SJerome Glisse iounmap(rdev->rmmio); 1568771fe6b9SJerome Glisse rdev->rmmio = NULL; 156975efdee1SAlex Deucher if (rdev->family >= CHIP_BONAIRE) 157075efdee1SAlex Deucher radeon_doorbell_fini(rdev); 1571771fe6b9SJerome Glisse } 1572771fe6b9SJerome Glisse 1573771fe6b9SJerome Glisse 1574771fe6b9SJerome Glisse /* 1575771fe6b9SJerome Glisse * Suspend & resume. 1576771fe6b9SJerome Glisse */ 15770c195119SAlex Deucher /** 15780c195119SAlex Deucher * radeon_suspend_kms - initiate device suspend 15790c195119SAlex Deucher * 15800c195119SAlex Deucher * @pdev: drm dev pointer 15810c195119SAlex Deucher * @state: suspend state 15820c195119SAlex Deucher * 15830c195119SAlex Deucher * Puts the hw in the suspend state (all asics). 15840c195119SAlex Deucher * Returns 0 for success or an error on failure. 15850c195119SAlex Deucher * Called at driver suspend. 15860c195119SAlex Deucher */ 1587274ad65cSJérome Glisse int radeon_suspend_kms(struct drm_device *dev, bool suspend, 1588274ad65cSJérome Glisse bool fbcon, bool freeze) 1589771fe6b9SJerome Glisse { 1590875c1866SDarren Jenkins struct radeon_device *rdev; 1591771fe6b9SJerome Glisse struct drm_crtc *crtc; 1592d8dcaa1dSAlex Deucher struct drm_connector *connector; 15937465280cSAlex Deucher int i, r; 1594771fe6b9SJerome Glisse 1595875c1866SDarren Jenkins if (dev == NULL || dev->dev_private == NULL) { 1596771fe6b9SJerome Glisse return -ENODEV; 1597771fe6b9SJerome Glisse } 15987473e830SDave Airlie 1599875c1866SDarren Jenkins rdev = dev->dev_private; 1600875c1866SDarren Jenkins 1601f2aba352SAlex Deucher if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 16026a9ee8afSDave Airlie return 0; 1603d8dcaa1dSAlex Deucher 160486698c20SSeth Forshee drm_kms_helper_poll_disable(dev); 160586698c20SSeth Forshee 16066adaed5bSDaniel Vetter drm_modeset_lock_all(dev); 1607d8dcaa1dSAlex Deucher /* turn off display hw */ 1608d8dcaa1dSAlex Deucher list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1609d8dcaa1dSAlex Deucher drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1610d8dcaa1dSAlex Deucher } 16116adaed5bSDaniel Vetter drm_modeset_unlock_all(dev); 1612d8dcaa1dSAlex Deucher 1613f3cbb17bSGrigori Goronzy /* unpin the front buffers and cursors */ 1614771fe6b9SJerome Glisse list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1615f3cbb17bSGrigori Goronzy struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1616f4510a27SMatt Roper struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb); 16174c788679SJerome Glisse struct radeon_bo *robj; 1618771fe6b9SJerome Glisse 1619f3cbb17bSGrigori Goronzy if 
(radeon_crtc->cursor_bo) { 1620f3cbb17bSGrigori Goronzy struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); 1621f3cbb17bSGrigori Goronzy r = radeon_bo_reserve(robj, false); 1622f3cbb17bSGrigori Goronzy if (r == 0) { 1623f3cbb17bSGrigori Goronzy radeon_bo_unpin(robj); 1624f3cbb17bSGrigori Goronzy radeon_bo_unreserve(robj); 1625f3cbb17bSGrigori Goronzy } 1626f3cbb17bSGrigori Goronzy } 1627f3cbb17bSGrigori Goronzy 1628771fe6b9SJerome Glisse if (rfb == NULL || rfb->obj == NULL) { 1629771fe6b9SJerome Glisse continue; 1630771fe6b9SJerome Glisse } 16317e4d15d9SDaniel Vetter robj = gem_to_radeon_bo(rfb->obj); 163238651674SDave Airlie /* don't unpin kernel fb objects */ 163338651674SDave Airlie if (!radeon_fbdev_robj_is_fb(rdev, robj)) { 16344c788679SJerome Glisse r = radeon_bo_reserve(robj, false); 163538651674SDave Airlie if (r == 0) { 16364c788679SJerome Glisse radeon_bo_unpin(robj); 16374c788679SJerome Glisse radeon_bo_unreserve(robj); 16384c788679SJerome Glisse } 1639771fe6b9SJerome Glisse } 1640771fe6b9SJerome Glisse } 1641771fe6b9SJerome Glisse /* evict vram memory */ 16424c788679SJerome Glisse radeon_bo_evict_vram(rdev); 16438a47cc9eSChristian König 1644771fe6b9SJerome Glisse /* wait for gpu to finish processing current batch */ 16455f8f635eSJerome Glisse for (i = 0; i < RADEON_NUM_RINGS; i++) { 164637615527SChristian König r = radeon_fence_wait_empty(rdev, i); 16475f8f635eSJerome Glisse if (r) { 16485f8f635eSJerome Glisse /* delay GPU reset to resume */ 1649eb98c709SChristian König radeon_fence_driver_force_completion(rdev, i); 16505f8f635eSJerome Glisse } 16515f8f635eSJerome Glisse } 1652771fe6b9SJerome Glisse 1653f657c2a7SYang Zhao radeon_save_bios_scratch_regs(rdev); 1654f657c2a7SYang Zhao 16553ce0a23dSJerome Glisse radeon_suspend(rdev); 1656d4877cf2SAlex Deucher radeon_hpd_fini(rdev); 1657ec9aaaffSAlex Deucher /* evict remaining vram memory 1658ec9aaaffSAlex Deucher * This second call to evict vram is to evict the gart page table 1659ec9aaaffSAlex Deucher * using the CPU. 1660ec9aaaffSAlex Deucher */ 16614c788679SJerome Glisse radeon_bo_evict_vram(rdev); 1662771fe6b9SJerome Glisse 166310b06122SJerome Glisse radeon_agp_suspend(rdev); 166410b06122SJerome Glisse 1665771fe6b9SJerome Glisse pci_save_state(dev->pdev); 1666*82060854SAlex Deucher if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) { 1667274ad65cSJérome Glisse rdev->asic->asic_reset(rdev, true); 1668274ad65cSJérome Glisse pci_restore_state(dev->pdev); 1669274ad65cSJérome Glisse } else if (suspend) { 1670771fe6b9SJerome Glisse /* Shut down the device */ 1671771fe6b9SJerome Glisse pci_disable_device(dev->pdev); 1672771fe6b9SJerome Glisse pci_set_power_state(dev->pdev, PCI_D3hot); 1673771fe6b9SJerome Glisse } 167410ebc0bcSDave Airlie 167510ebc0bcSDave Airlie if (fbcon) { 1676ac751efaSTorben Hohn console_lock(); 167738651674SDave Airlie radeon_fbdev_set_suspend(rdev, 1); 1678ac751efaSTorben Hohn console_unlock(); 167910ebc0bcSDave Airlie } 1680771fe6b9SJerome Glisse return 0; 1681771fe6b9SJerome Glisse } 1682771fe6b9SJerome Glisse 16830c195119SAlex Deucher /** 16840c195119SAlex Deucher * radeon_resume_kms - initiate device resume 16850c195119SAlex Deucher * 16860c195119SAlex Deucher * @pdev: drm dev pointer 16870c195119SAlex Deucher * 16880c195119SAlex Deucher * Bring the hw back to operating state (all asics). 16890c195119SAlex Deucher * Returns 0 for success or an error on failure. 16900c195119SAlex Deucher * Called at driver resume. 
16910c195119SAlex Deucher */ 169210ebc0bcSDave Airlie int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) 1693771fe6b9SJerome Glisse { 169409bdf591SCedric Godin struct drm_connector *connector; 1695771fe6b9SJerome Glisse struct radeon_device *rdev = dev->dev_private; 1696f3cbb17bSGrigori Goronzy struct drm_crtc *crtc; 169704eb2206SChristian König int r; 1698771fe6b9SJerome Glisse 1699f2aba352SAlex Deucher if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 17006a9ee8afSDave Airlie return 0; 17016a9ee8afSDave Airlie 170210ebc0bcSDave Airlie if (fbcon) { 1703ac751efaSTorben Hohn console_lock(); 170410ebc0bcSDave Airlie } 17057473e830SDave Airlie if (resume) { 1706771fe6b9SJerome Glisse pci_set_power_state(dev->pdev, PCI_D0); 1707771fe6b9SJerome Glisse pci_restore_state(dev->pdev); 1708771fe6b9SJerome Glisse if (pci_enable_device(dev->pdev)) { 170910ebc0bcSDave Airlie if (fbcon) 1710ac751efaSTorben Hohn console_unlock(); 1711771fe6b9SJerome Glisse return -1; 1712771fe6b9SJerome Glisse } 17137473e830SDave Airlie } 17140ebf1717SDave Airlie /* resume AGP if in use */ 17150ebf1717SDave Airlie radeon_agp_resume(rdev); 17163ce0a23dSJerome Glisse radeon_resume(rdev); 171704eb2206SChristian König 171804eb2206SChristian König r = radeon_ib_ring_tests(rdev); 171904eb2206SChristian König if (r) 172004eb2206SChristian König DRM_ERROR("ib ring test failed (%d).\n", r); 172104eb2206SChristian König 1722bc6a6295SAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 17236c7bcceaSAlex Deucher /* do dpm late init */ 17246c7bcceaSAlex Deucher r = radeon_pm_late_init(rdev); 17256c7bcceaSAlex Deucher if (r) { 17266c7bcceaSAlex Deucher rdev->pm.dpm_enabled = false; 17276c7bcceaSAlex Deucher DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); 17286c7bcceaSAlex Deucher } 1729bc6a6295SAlex Deucher } else { 1730bc6a6295SAlex Deucher /* resume old pm late */ 1731bc6a6295SAlex Deucher radeon_pm_resume(rdev); 17326c7bcceaSAlex Deucher } 17336c7bcceaSAlex Deucher 1734f657c2a7SYang Zhao radeon_restore_bios_scratch_regs(rdev); 173509bdf591SCedric Godin 1736f3cbb17bSGrigori Goronzy /* pin cursors */ 1737f3cbb17bSGrigori Goronzy list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1738f3cbb17bSGrigori Goronzy struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1739f3cbb17bSGrigori Goronzy 1740f3cbb17bSGrigori Goronzy if (radeon_crtc->cursor_bo) { 1741f3cbb17bSGrigori Goronzy struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); 1742f3cbb17bSGrigori Goronzy r = radeon_bo_reserve(robj, false); 1743f3cbb17bSGrigori Goronzy if (r == 0) { 1744f3cbb17bSGrigori Goronzy /* Only 27 bit offset for legacy cursor */ 1745f3cbb17bSGrigori Goronzy r = radeon_bo_pin_restricted(robj, 1746f3cbb17bSGrigori Goronzy RADEON_GEM_DOMAIN_VRAM, 1747f3cbb17bSGrigori Goronzy ASIC_IS_AVIVO(rdev) ? 
1748f3cbb17bSGrigori Goronzy 0 : 1 << 27, 1749f3cbb17bSGrigori Goronzy &radeon_crtc->cursor_addr); 1750f3cbb17bSGrigori Goronzy if (r != 0) 1751f3cbb17bSGrigori Goronzy DRM_ERROR("Failed to pin cursor BO (%d)\n", r); 1752f3cbb17bSGrigori Goronzy radeon_bo_unreserve(robj); 1753f3cbb17bSGrigori Goronzy } 1754f3cbb17bSGrigori Goronzy } 1755f3cbb17bSGrigori Goronzy } 1756f3cbb17bSGrigori Goronzy 17573fa47d9eSAlex Deucher /* init dig PHYs, disp eng pll */ 17583fa47d9eSAlex Deucher if (rdev->is_atom_bios) { 1759ac89af1eSAlex Deucher radeon_atom_encoder_init(rdev); 1760f3f1f03eSAlex Deucher radeon_atom_disp_eng_pll_init(rdev); 1761bced76f2SAlex Deucher /* turn on the BL */ 1762bced76f2SAlex Deucher if (rdev->mode_info.bl_encoder) { 1763bced76f2SAlex Deucher u8 bl_level = radeon_get_backlight_level(rdev, 1764bced76f2SAlex Deucher rdev->mode_info.bl_encoder); 1765bced76f2SAlex Deucher radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, 1766bced76f2SAlex Deucher bl_level); 1767bced76f2SAlex Deucher } 17683fa47d9eSAlex Deucher } 1769d4877cf2SAlex Deucher /* reset hpd state */ 1770d4877cf2SAlex Deucher radeon_hpd_init(rdev); 1771771fe6b9SJerome Glisse /* blat the mode back in */ 1772ec9954fcSDave Airlie if (fbcon) { 1773771fe6b9SJerome Glisse drm_helper_resume_force_mode(dev); 1774a93f344dSAlex Deucher /* turn on display hw */ 17756adaed5bSDaniel Vetter drm_modeset_lock_all(dev); 1776a93f344dSAlex Deucher list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1777a93f344dSAlex Deucher drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 1778a93f344dSAlex Deucher } 17796adaed5bSDaniel Vetter drm_modeset_unlock_all(dev); 1780ec9954fcSDave Airlie } 178186698c20SSeth Forshee 178286698c20SSeth Forshee drm_kms_helper_poll_enable(dev); 178318ee37a4SDaniel Vetter 17843640da2fSAlex Deucher /* set the power state here in case we are a PX system or headless */ 17853640da2fSAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) 17863640da2fSAlex Deucher radeon_pm_compute_clocks(rdev); 17873640da2fSAlex Deucher 178818ee37a4SDaniel Vetter if (fbcon) { 178918ee37a4SDaniel Vetter radeon_fbdev_set_suspend(rdev, 0); 179018ee37a4SDaniel Vetter console_unlock(); 179118ee37a4SDaniel Vetter } 179218ee37a4SDaniel Vetter 1793771fe6b9SJerome Glisse return 0; 1794771fe6b9SJerome Glisse } 1795771fe6b9SJerome Glisse 17960c195119SAlex Deucher /** 17970c195119SAlex Deucher * radeon_gpu_reset - reset the asic 17980c195119SAlex Deucher * 17990c195119SAlex Deucher * @rdev: radeon device pointer 18000c195119SAlex Deucher * 18010c195119SAlex Deucher * Attempt to reset the GPU if it has hung (all asics). 18020c195119SAlex Deucher * Returns 0 for success or an error on failure.
18030c195119SAlex Deucher */ 180490aca4d2SJerome Glisse int radeon_gpu_reset(struct radeon_device *rdev) 180590aca4d2SJerome Glisse { 180655d7c221SChristian König unsigned ring_sizes[RADEON_NUM_RINGS]; 180755d7c221SChristian König uint32_t *ring_data[RADEON_NUM_RINGS]; 180855d7c221SChristian König 180955d7c221SChristian König bool saved = false; 181055d7c221SChristian König 181155d7c221SChristian König int i, r; 18128fd1b84cSDave Airlie int resched; 181390aca4d2SJerome Glisse 1814dee53e7fSJerome Glisse down_write(&rdev->exclusive_lock); 1815f9eaf9aeSChristian König 1816f9eaf9aeSChristian König if (!rdev->needs_reset) { 1817f9eaf9aeSChristian König up_write(&rdev->exclusive_lock); 1818f9eaf9aeSChristian König return 0; 1819f9eaf9aeSChristian König } 1820f9eaf9aeSChristian König 182172b9076bSMarek Olšák atomic_inc(&rdev->gpu_reset_counter); 182272b9076bSMarek Olšák 182390aca4d2SJerome Glisse radeon_save_bios_scratch_regs(rdev); 18248fd1b84cSDave Airlie /* block TTM */ 18258fd1b84cSDave Airlie resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 182690aca4d2SJerome Glisse radeon_suspend(rdev); 182773ef0e0dSAlex Deucher radeon_hpd_fini(rdev); 182890aca4d2SJerome Glisse 182955d7c221SChristian König for (i = 0; i < RADEON_NUM_RINGS; ++i) { 183055d7c221SChristian König ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], 183155d7c221SChristian König &ring_data[i]); 183255d7c221SChristian König if (ring_sizes[i]) { 183355d7c221SChristian König saved = true; 183455d7c221SChristian König dev_info(rdev->dev, "Saved %d dwords of commands " 183555d7c221SChristian König "on ring %d.\n", ring_sizes[i], i); 183655d7c221SChristian König } 183755d7c221SChristian König } 183855d7c221SChristian König 183990aca4d2SJerome Glisse r = radeon_asic_reset(rdev); 184090aca4d2SJerome Glisse if (!r) { 184155d7c221SChristian König dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n"); 184290aca4d2SJerome Glisse radeon_resume(rdev); 184355d7c221SChristian König } 184404eb2206SChristian König 184590aca4d2SJerome Glisse radeon_restore_bios_scratch_regs(rdev); 184655d7c221SChristian König 184755d7c221SChristian König for (i = 0; i < RADEON_NUM_RINGS; ++i) { 18489bb39ff4SMaarten Lankhorst if (!r && ring_data[i]) { 184955d7c221SChristian König radeon_ring_restore(rdev, &rdev->ring[i], 185055d7c221SChristian König ring_sizes[i], ring_data[i]); 185155d7c221SChristian König } else { 1852eb98c709SChristian König radeon_fence_driver_force_completion(rdev, i); 185355d7c221SChristian König kfree(ring_data[i]); 185455d7c221SChristian König } 185555d7c221SChristian König } 185655d7c221SChristian König 1857c940b447SAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 1858c940b447SAlex Deucher /* do dpm late init */ 1859c940b447SAlex Deucher r = radeon_pm_late_init(rdev); 1860c940b447SAlex Deucher if (r) { 1861c940b447SAlex Deucher rdev->pm.dpm_enabled = false; 1862c940b447SAlex Deucher DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); 1863c940b447SAlex Deucher } 1864c940b447SAlex Deucher } else { 1865c940b447SAlex Deucher /* resume old pm late */ 186695f59509SAlex Deucher radeon_pm_resume(rdev); 1867c940b447SAlex Deucher } 1868c940b447SAlex Deucher 186973ef0e0dSAlex Deucher /* init dig PHYs, disp eng pll */ 187073ef0e0dSAlex Deucher if (rdev->is_atom_bios) { 187173ef0e0dSAlex Deucher radeon_atom_encoder_init(rdev); 187273ef0e0dSAlex Deucher radeon_atom_disp_eng_pll_init(rdev); 187373ef0e0dSAlex Deucher /* turn on the BL */ 187473ef0e0dSAlex Deucher if (rdev->mode_info.bl_encoder) 
{ 187573ef0e0dSAlex Deucher u8 bl_level = radeon_get_backlight_level(rdev, 187673ef0e0dSAlex Deucher rdev->mode_info.bl_encoder); 187773ef0e0dSAlex Deucher radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, 187873ef0e0dSAlex Deucher bl_level); 187973ef0e0dSAlex Deucher } 188073ef0e0dSAlex Deucher } 188173ef0e0dSAlex Deucher /* reset hpd state */ 188273ef0e0dSAlex Deucher radeon_hpd_init(rdev); 188373ef0e0dSAlex Deucher 18849bb39ff4SMaarten Lankhorst ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 18853c036389SChristian König 18863c036389SChristian König rdev->in_reset = true; 18873c036389SChristian König rdev->needs_reset = false; 18883c036389SChristian König 18899bb39ff4SMaarten Lankhorst downgrade_write(&rdev->exclusive_lock); 18909bb39ff4SMaarten Lankhorst 1891d3493574SJerome Glisse drm_helper_resume_force_mode(rdev->ddev); 1892d3493574SJerome Glisse 1893c940b447SAlex Deucher /* set the power state here in case we are a PX system or headless */ 1894c940b447SAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) 1895c940b447SAlex Deucher radeon_pm_compute_clocks(rdev); 1896c940b447SAlex Deucher 18979bb39ff4SMaarten Lankhorst if (!r) { 18989bb39ff4SMaarten Lankhorst r = radeon_ib_ring_tests(rdev); 18999bb39ff4SMaarten Lankhorst if (r && saved) 19009bb39ff4SMaarten Lankhorst r = -EAGAIN; 19019bb39ff4SMaarten Lankhorst } else { 190290aca4d2SJerome Glisse /* bad news, how to tell it to userspace ? */ 190390aca4d2SJerome Glisse dev_info(rdev->dev, "GPU reset failed\n"); 19047a1619b9SMichel Dänzer } 19057a1619b9SMichel Dänzer 19069bb39ff4SMaarten Lankhorst rdev->needs_reset = r == -EAGAIN; 19079bb39ff4SMaarten Lankhorst rdev->in_reset = false; 19089bb39ff4SMaarten Lankhorst 19099bb39ff4SMaarten Lankhorst up_read(&rdev->exclusive_lock); 191090aca4d2SJerome Glisse return r; 191190aca4d2SJerome Glisse } 191290aca4d2SJerome Glisse 1913771fe6b9SJerome Glisse 1914771fe6b9SJerome Glisse /* 1915771fe6b9SJerome Glisse * Debugfs 1916771fe6b9SJerome Glisse */ 1917771fe6b9SJerome Glisse int radeon_debugfs_add_files(struct radeon_device *rdev, 1918771fe6b9SJerome Glisse struct drm_info_list *files, 1919771fe6b9SJerome Glisse unsigned nfiles) 1920771fe6b9SJerome Glisse { 1921771fe6b9SJerome Glisse unsigned i; 1922771fe6b9SJerome Glisse 19234d8bf9aeSChristian König for (i = 0; i < rdev->debugfs_count; i++) { 19244d8bf9aeSChristian König if (rdev->debugfs[i].files == files) { 1925771fe6b9SJerome Glisse /* Already registered */ 1926771fe6b9SJerome Glisse return 0; 1927771fe6b9SJerome Glisse } 1928771fe6b9SJerome Glisse } 1929c245cb9eSMichael Witten 19304d8bf9aeSChristian König i = rdev->debugfs_count + 1; 1931c245cb9eSMichael Witten if (i > RADEON_DEBUGFS_MAX_COMPONENTS) { 1932c245cb9eSMichael Witten DRM_ERROR("Reached maximum number of debugfs components.\n"); 1933c245cb9eSMichael Witten DRM_ERROR("Report so we increase " 1934c245cb9eSMichael Witten "RADEON_DEBUGFS_MAX_COMPONENTS.\n"); 1935771fe6b9SJerome Glisse return -EINVAL; 1936771fe6b9SJerome Glisse } 19374d8bf9aeSChristian König rdev->debugfs[rdev->debugfs_count].files = files; 19384d8bf9aeSChristian König rdev->debugfs[rdev->debugfs_count].num_files = nfiles; 19394d8bf9aeSChristian König rdev->debugfs_count = i; 1940771fe6b9SJerome Glisse #if defined(CONFIG_DEBUG_FS) 1941771fe6b9SJerome Glisse drm_debugfs_create_files(files, nfiles, 1942771fe6b9SJerome Glisse rdev->ddev->primary->debugfs_root, 1943771fe6b9SJerome Glisse rdev->ddev->primary); 1944771fe6b9SJerome Glisse #endif 1945771fe6b9SJerome Glisse 
return 0; 1946771fe6b9SJerome Glisse } 1947
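/*
 * Editor's note: a minimal usage sketch for radeon_debugfs_add_files(), not
 * taken from this file; the "radeon_foo" names are hypothetical placeholders.
 * A component typically declares a static drm_info_list table and registers
 * it once:
 *
 *	static struct drm_info_list radeon_foo_debugfs_list[] = {
 *		{ "radeon_foo_info", radeon_debugfs_foo_info, 0, NULL },
 *	};
 *
 *	r = radeon_debugfs_add_files(rdev, radeon_foo_debugfs_list,
 *				     ARRAY_SIZE(radeon_foo_debugfs_list));
 *
 * Registering the same table twice is harmless, since the loop above returns
 * early when the files pointer is already known.
 */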