1771fe6b9SJerome Glisse /* 2771fe6b9SJerome Glisse * Copyright 2008 Advanced Micro Devices, Inc. 3771fe6b9SJerome Glisse * Copyright 2008 Red Hat Inc. 4771fe6b9SJerome Glisse * Copyright 2009 Jerome Glisse. 5771fe6b9SJerome Glisse * 6771fe6b9SJerome Glisse * Permission is hereby granted, free of charge, to any person obtaining a 7771fe6b9SJerome Glisse * copy of this software and associated documentation files (the "Software"), 8771fe6b9SJerome Glisse * to deal in the Software without restriction, including without limitation 9771fe6b9SJerome Glisse * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10771fe6b9SJerome Glisse * and/or sell copies of the Software, and to permit persons to whom the 11771fe6b9SJerome Glisse * Software is furnished to do so, subject to the following conditions: 12771fe6b9SJerome Glisse * 13771fe6b9SJerome Glisse * The above copyright notice and this permission notice shall be included in 14771fe6b9SJerome Glisse * all copies or substantial portions of the Software. 15771fe6b9SJerome Glisse * 16771fe6b9SJerome Glisse * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17771fe6b9SJerome Glisse * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18771fe6b9SJerome Glisse * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19771fe6b9SJerome Glisse * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20771fe6b9SJerome Glisse * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21771fe6b9SJerome Glisse * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22771fe6b9SJerome Glisse * OTHER DEALINGS IN THE SOFTWARE. 23771fe6b9SJerome Glisse * 24771fe6b9SJerome Glisse * Authors: Dave Airlie 25771fe6b9SJerome Glisse * Alex Deucher 26771fe6b9SJerome Glisse * Jerome Glisse 27771fe6b9SJerome Glisse */ 28f9183127SSam Ravnborg 29771fe6b9SJerome Glisse #include <linux/console.h> 30bcc65fd8SMatthew Garrett #include <linux/efi.h> 312ef79416SThomas Zimmermann #include <linux/pci.h> 32f9183127SSam Ravnborg #include <linux/pm_runtime.h> 33f9183127SSam Ravnborg #include <linux/slab.h> 34f9183127SSam Ravnborg #include <linux/vga_switcheroo.h> 35f9183127SSam Ravnborg #include <linux/vgaarb.h> 36f9183127SSam Ravnborg 37f9183127SSam Ravnborg #include <drm/drm_cache.h> 38f9183127SSam Ravnborg #include <drm/drm_crtc_helper.h> 39f9183127SSam Ravnborg #include <drm/drm_device.h> 40f9183127SSam Ravnborg #include <drm/drm_file.h> 41f9183127SSam Ravnborg #include <drm/drm_probe_helper.h> 42f9183127SSam Ravnborg #include <drm/radeon_drm.h> 43f9183127SSam Ravnborg 442aa3b7c8SLee Jones #include "radeon_device.h" 45771fe6b9SJerome Glisse #include "radeon_reg.h" 46771fe6b9SJerome Glisse #include "radeon.h" 47771fe6b9SJerome Glisse #include "atom.h" 48771fe6b9SJerome Glisse 491b5331d9SJerome Glisse static const char radeon_family_name[][16] = { 501b5331d9SJerome Glisse "R100", 511b5331d9SJerome Glisse "RV100", 521b5331d9SJerome Glisse "RS100", 531b5331d9SJerome Glisse "RV200", 541b5331d9SJerome Glisse "RS200", 551b5331d9SJerome Glisse "R200", 561b5331d9SJerome Glisse "RV250", 571b5331d9SJerome Glisse "RS300", 581b5331d9SJerome Glisse "RV280", 591b5331d9SJerome Glisse "R300", 601b5331d9SJerome Glisse "R350", 611b5331d9SJerome Glisse "RV350", 621b5331d9SJerome Glisse "RV380", 631b5331d9SJerome Glisse "R420", 641b5331d9SJerome Glisse "R423", 651b5331d9SJerome Glisse "RV410", 661b5331d9SJerome Glisse "RS400", 671b5331d9SJerome Glisse "RS480", 
681b5331d9SJerome Glisse "RS600", 691b5331d9SJerome Glisse "RS690", 701b5331d9SJerome Glisse "RS740", 711b5331d9SJerome Glisse "RV515", 721b5331d9SJerome Glisse "R520", 731b5331d9SJerome Glisse "RV530", 741b5331d9SJerome Glisse "RV560", 751b5331d9SJerome Glisse "RV570", 761b5331d9SJerome Glisse "R580", 771b5331d9SJerome Glisse "R600", 781b5331d9SJerome Glisse "RV610", 791b5331d9SJerome Glisse "RV630", 801b5331d9SJerome Glisse "RV670", 811b5331d9SJerome Glisse "RV620", 821b5331d9SJerome Glisse "RV635", 831b5331d9SJerome Glisse "RS780", 841b5331d9SJerome Glisse "RS880", 851b5331d9SJerome Glisse "RV770", 861b5331d9SJerome Glisse "RV730", 871b5331d9SJerome Glisse "RV710", 881b5331d9SJerome Glisse "RV740", 891b5331d9SJerome Glisse "CEDAR", 901b5331d9SJerome Glisse "REDWOOD", 911b5331d9SJerome Glisse "JUNIPER", 921b5331d9SJerome Glisse "CYPRESS", 931b5331d9SJerome Glisse "HEMLOCK", 94b08ebe7eSAlex Deucher "PALM", 954df64e65SAlex Deucher "SUMO", 964df64e65SAlex Deucher "SUMO2", 971fe18305SAlex Deucher "BARTS", 981fe18305SAlex Deucher "TURKS", 991fe18305SAlex Deucher "CAICOS", 100b7cfc9feSAlex Deucher "CAYMAN", 1018848f759SAlex Deucher "ARUBA", 102cb28bb34SAlex Deucher "TAHITI", 103cb28bb34SAlex Deucher "PITCAIRN", 104cb28bb34SAlex Deucher "VERDE", 105624d3524SAlex Deucher "OLAND", 106b5d9d726SAlex Deucher "HAINAN", 1076eac752eSAlex Deucher "BONAIRE", 1086eac752eSAlex Deucher "KAVERI", 1096eac752eSAlex Deucher "KABINI", 1103bf599e8SAlex Deucher "HAWAII", 111b0a9f22aSSamuel Li "MULLINS", 1121b5331d9SJerome Glisse "LAST", 1131b5331d9SJerome Glisse }; 1141b5331d9SJerome Glisse 115066f1f0bSAlex Deucher #if defined(CONFIG_VGA_SWITCHEROO) 116066f1f0bSAlex Deucher bool radeon_has_atpx_dgpu_power_cntl(void); 117066f1f0bSAlex Deucher bool radeon_is_atpx_hybrid(void); 118066f1f0bSAlex Deucher #else 119066f1f0bSAlex Deucher static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } 120066f1f0bSAlex Deucher static inline bool radeon_is_atpx_hybrid(void) { return false; } 121066f1f0bSAlex Deucher #endif 122066f1f0bSAlex Deucher 1234807c5a8SAlex Deucher #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) 1244807c5a8SAlex Deucher 1254807c5a8SAlex Deucher struct radeon_px_quirk { 1264807c5a8SAlex Deucher u32 chip_vendor; 1274807c5a8SAlex Deucher u32 chip_device; 1284807c5a8SAlex Deucher u32 subsys_vendor; 1294807c5a8SAlex Deucher u32 subsys_device; 1304807c5a8SAlex Deucher u32 px_quirk_flags; 1314807c5a8SAlex Deucher }; 1324807c5a8SAlex Deucher 1334807c5a8SAlex Deucher static struct radeon_px_quirk radeon_px_quirk_list[] = { 1344807c5a8SAlex Deucher /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m) 1354807c5a8SAlex Deucher * https://bugzilla.kernel.org/show_bug.cgi?id=74551 1364807c5a8SAlex Deucher */ 1374807c5a8SAlex Deucher { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX }, 1384807c5a8SAlex Deucher /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU 1394807c5a8SAlex Deucher * https://bugzilla.kernel.org/show_bug.cgi?id=51381 1404807c5a8SAlex Deucher */ 1414807c5a8SAlex Deucher { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX }, 142ff1b1294SAlex Deucher /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU 143ff1b1294SAlex Deucher * https://bugzilla.kernel.org/show_bug.cgi?id=51381 144ff1b1294SAlex Deucher */ 145ff1b1294SAlex Deucher { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, 1464eb59793SAlex Deucher /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU 1474eb59793SAlex 
Deucher * https://bugs.freedesktop.org/show_bug.cgi?id=101491 1484eb59793SAlex Deucher */ 1494eb59793SAlex Deucher { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, 150eb40c86aSNico Sneck /* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU 151eb40c86aSNico Sneck * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52 152eb40c86aSNico Sneck */ 153eb40c86aSNico Sneck { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX }, 1544807c5a8SAlex Deucher { 0, 0, 0, 0, 0 }, 1554807c5a8SAlex Deucher }; 1564807c5a8SAlex Deucher 15790c4cde9SAlex Deucher bool radeon_is_px(struct drm_device *dev) 15890c4cde9SAlex Deucher { 15990c4cde9SAlex Deucher struct radeon_device *rdev = dev->dev_private; 16090c4cde9SAlex Deucher 16190c4cde9SAlex Deucher if (rdev->flags & RADEON_IS_PX) 16290c4cde9SAlex Deucher return true; 16390c4cde9SAlex Deucher return false; 16490c4cde9SAlex Deucher } 16510ebc0bcSDave Airlie 1664807c5a8SAlex Deucher static void radeon_device_handle_px_quirks(struct radeon_device *rdev) 1674807c5a8SAlex Deucher { 1684807c5a8SAlex Deucher struct radeon_px_quirk *p = radeon_px_quirk_list; 1694807c5a8SAlex Deucher 1704807c5a8SAlex Deucher /* Apply PX quirks */ 1714807c5a8SAlex Deucher while (p && p->chip_device != 0) { 1724807c5a8SAlex Deucher if (rdev->pdev->vendor == p->chip_vendor && 1734807c5a8SAlex Deucher rdev->pdev->device == p->chip_device && 1744807c5a8SAlex Deucher rdev->pdev->subsystem_vendor == p->subsys_vendor && 1754807c5a8SAlex Deucher rdev->pdev->subsystem_device == p->subsys_device) { 1764807c5a8SAlex Deucher rdev->px_quirk_flags = p->px_quirk_flags; 1774807c5a8SAlex Deucher break; 1784807c5a8SAlex Deucher } 1794807c5a8SAlex Deucher ++p; 1804807c5a8SAlex Deucher } 1814807c5a8SAlex Deucher 1824807c5a8SAlex Deucher if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX) 1834807c5a8SAlex Deucher rdev->flags &= ~RADEON_IS_PX; 184066f1f0bSAlex Deucher 185066f1f0bSAlex Deucher /* disable PX if the system doesn't support dGPU power control or hybrid gfx */ 186066f1f0bSAlex Deucher if (!radeon_is_atpx_hybrid() && 187066f1f0bSAlex Deucher !radeon_has_atpx_dgpu_power_cntl()) 188066f1f0bSAlex Deucher rdev->flags &= ~RADEON_IS_PX; 1894807c5a8SAlex Deucher } 1904807c5a8SAlex Deucher 1910c195119SAlex Deucher /** 1922e1b65f9SAlex Deucher * radeon_program_register_sequence - program an array of registers. 1932e1b65f9SAlex Deucher * 1942e1b65f9SAlex Deucher * @rdev: radeon_device pointer 1952e1b65f9SAlex Deucher * @registers: pointer to the register array 1962e1b65f9SAlex Deucher * @array_size: size of the register array 1972e1b65f9SAlex Deucher * 1982e1b65f9SAlex Deucher * Programs an array of registers with AND and OR masks. 1992e1b65f9SAlex Deucher * This is a helper for setting golden registers.
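 *
 * Illustrative sketch (not a table from this driver; the register offsets
 * below are made up): each entry is an {offset, AND mask, OR mask} triplet,
 * so array_size must be a multiple of 3. An AND mask of 0xffffffff writes
 * the OR value directly; otherwise the bits set in the AND mask are cleared
 * and the OR value is merged into the current register contents:
 *
 *   static const u32 example_golden_regs[] = {
 *           0x9a10, 0x00ff0000, 0x00070000,   <- read/modify/write
 *           0x8a14, 0xffffffff, 0x00000007,   <- direct write of 0x7
 *   };
 *   radeon_program_register_sequence(rdev, example_golden_regs,
 *                                    (u32)ARRAY_SIZE(example_golden_regs));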
2002e1b65f9SAlex Deucher */ 2012e1b65f9SAlex Deucher void radeon_program_register_sequence(struct radeon_device *rdev, 2022e1b65f9SAlex Deucher const u32 *registers, 2032e1b65f9SAlex Deucher const u32 array_size) 2042e1b65f9SAlex Deucher { 2052e1b65f9SAlex Deucher u32 tmp, reg, and_mask, or_mask; 2062e1b65f9SAlex Deucher int i; 2072e1b65f9SAlex Deucher 2082e1b65f9SAlex Deucher if (array_size % 3) 2092e1b65f9SAlex Deucher return; 2102e1b65f9SAlex Deucher 2112e1b65f9SAlex Deucher for (i = 0; i < array_size; i +=3) { 2122e1b65f9SAlex Deucher reg = registers[i + 0]; 2132e1b65f9SAlex Deucher and_mask = registers[i + 1]; 2142e1b65f9SAlex Deucher or_mask = registers[i + 2]; 2152e1b65f9SAlex Deucher 2162e1b65f9SAlex Deucher if (and_mask == 0xffffffff) { 2172e1b65f9SAlex Deucher tmp = or_mask; 2182e1b65f9SAlex Deucher } else { 2192e1b65f9SAlex Deucher tmp = RREG32(reg); 2202e1b65f9SAlex Deucher tmp &= ~and_mask; 2212e1b65f9SAlex Deucher tmp |= or_mask; 2222e1b65f9SAlex Deucher } 2232e1b65f9SAlex Deucher WREG32(reg, tmp); 2242e1b65f9SAlex Deucher } 2252e1b65f9SAlex Deucher } 2262e1b65f9SAlex Deucher 2271a0041b8SAlex Deucher void radeon_pci_config_reset(struct radeon_device *rdev) 2281a0041b8SAlex Deucher { 2291a0041b8SAlex Deucher pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA); 2301a0041b8SAlex Deucher } 2311a0041b8SAlex Deucher 2322e1b65f9SAlex Deucher /** 2330c195119SAlex Deucher * radeon_surface_init - Clear GPU surface registers. 2340c195119SAlex Deucher * 2350c195119SAlex Deucher * @rdev: radeon_device pointer 2360c195119SAlex Deucher * 2370c195119SAlex Deucher * Clear GPU surface registers (r1xx-r5xx). 238b1e3a6d1SMichel Dänzer */ 2393ce0a23dSJerome Glisse void radeon_surface_init(struct radeon_device *rdev) 240b1e3a6d1SMichel Dänzer { 241b1e3a6d1SMichel Dänzer /* FIXME: check this out */ 242b1e3a6d1SMichel Dänzer if (rdev->family < CHIP_R600) { 243b1e3a6d1SMichel Dänzer int i; 244b1e3a6d1SMichel Dänzer 245550e2d92SDave Airlie for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 246550e2d92SDave Airlie if (rdev->surface_regs[i].bo) 247550e2d92SDave Airlie radeon_bo_get_surface_reg(rdev->surface_regs[i].bo); 248550e2d92SDave Airlie else 249550e2d92SDave Airlie radeon_clear_surface_reg(rdev, i); 250b1e3a6d1SMichel Dänzer } 251e024e110SDave Airlie /* enable surfaces */ 252e024e110SDave Airlie WREG32(RADEON_SURFACE_CNTL, 0); 253b1e3a6d1SMichel Dänzer } 254b1e3a6d1SMichel Dänzer } 255b1e3a6d1SMichel Dänzer 256b1e3a6d1SMichel Dänzer /* 257771fe6b9SJerome Glisse * GPU scratch registers helpers function. 258771fe6b9SJerome Glisse */ 2590c195119SAlex Deucher /** 2600c195119SAlex Deucher * radeon_scratch_init - Init scratch register driver information. 
2610c195119SAlex Deucher * 2620c195119SAlex Deucher * @rdev: radeon_device pointer 2630c195119SAlex Deucher * 2640c195119SAlex Deucher * Init CP scratch register driver information (r1xx-r5xx) 2650c195119SAlex Deucher */ 2663ce0a23dSJerome Glisse void radeon_scratch_init(struct radeon_device *rdev) 267771fe6b9SJerome Glisse { 268771fe6b9SJerome Glisse int i; 269771fe6b9SJerome Glisse 270771fe6b9SJerome Glisse /* FIXME: check this out */ 271771fe6b9SJerome Glisse if (rdev->family < CHIP_R300) { 272771fe6b9SJerome Glisse rdev->scratch.num_reg = 5; 273771fe6b9SJerome Glisse } else { 274771fe6b9SJerome Glisse rdev->scratch.num_reg = 7; 275771fe6b9SJerome Glisse } 276724c80e1SAlex Deucher rdev->scratch.reg_base = RADEON_SCRATCH_REG0; 277771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 278771fe6b9SJerome Glisse rdev->scratch.free[i] = true; 279724c80e1SAlex Deucher rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 280771fe6b9SJerome Glisse } 281771fe6b9SJerome Glisse } 282771fe6b9SJerome Glisse 2830c195119SAlex Deucher /** 2840c195119SAlex Deucher * radeon_scratch_get - Allocate a scratch register 2850c195119SAlex Deucher * 2860c195119SAlex Deucher * @rdev: radeon_device pointer 2870c195119SAlex Deucher * @reg: scratch register mmio offset 2880c195119SAlex Deucher * 2890c195119SAlex Deucher * Allocate a CP scratch register for use by the driver (all asics). 2900c195119SAlex Deucher * Returns 0 on success or -EINVAL on failure. 2910c195119SAlex Deucher */ 292771fe6b9SJerome Glisse int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) 293771fe6b9SJerome Glisse { 294771fe6b9SJerome Glisse int i; 295771fe6b9SJerome Glisse 296771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 297771fe6b9SJerome Glisse if (rdev->scratch.free[i]) { 298771fe6b9SJerome Glisse rdev->scratch.free[i] = false; 299771fe6b9SJerome Glisse *reg = rdev->scratch.reg[i]; 300771fe6b9SJerome Glisse return 0; 301771fe6b9SJerome Glisse } 302771fe6b9SJerome Glisse } 303771fe6b9SJerome Glisse return -EINVAL; 304771fe6b9SJerome Glisse } 305771fe6b9SJerome Glisse 3060c195119SAlex Deucher /** 3070c195119SAlex Deucher * radeon_scratch_free - Free a scratch register 3080c195119SAlex Deucher * 3090c195119SAlex Deucher * @rdev: radeon_device pointer 3100c195119SAlex Deucher * @reg: scratch register mmio offset 3110c195119SAlex Deucher * 3120c195119SAlex Deucher * Free a CP scratch register allocated for use by the driver (all asics) 3130c195119SAlex Deucher */ 314771fe6b9SJerome Glisse void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) 315771fe6b9SJerome Glisse { 316771fe6b9SJerome Glisse int i; 317771fe6b9SJerome Glisse 318771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 319771fe6b9SJerome Glisse if (rdev->scratch.reg[i] == reg) { 320771fe6b9SJerome Glisse rdev->scratch.free[i] = true; 321771fe6b9SJerome Glisse return; 322771fe6b9SJerome Glisse } 323771fe6b9SJerome Glisse } 324771fe6b9SJerome Glisse } 325771fe6b9SJerome Glisse 3260c195119SAlex Deucher /* 32775efdee1SAlex Deucher * GPU doorbell aperture helpers function. 32875efdee1SAlex Deucher */ 32975efdee1SAlex Deucher /** 33075efdee1SAlex Deucher * radeon_doorbell_init - Init doorbell driver information. 33175efdee1SAlex Deucher * 33275efdee1SAlex Deucher * @rdev: radeon_device pointer 33375efdee1SAlex Deucher * 33475efdee1SAlex Deucher * Init doorbell driver information (CIK) 33575efdee1SAlex Deucher * Returns 0 on success, error on failure. 
33675efdee1SAlex Deucher */ 33728f5a6cdSRashika Kheria static int radeon_doorbell_init(struct radeon_device *rdev) 33875efdee1SAlex Deucher { 33975efdee1SAlex Deucher /* doorbell bar mapping */ 34075efdee1SAlex Deucher rdev->doorbell.base = pci_resource_start(rdev->pdev, 2); 34175efdee1SAlex Deucher rdev->doorbell.size = pci_resource_len(rdev->pdev, 2); 34275efdee1SAlex Deucher 343d5754ab8SAndrew Lewycky rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS); 344d5754ab8SAndrew Lewycky if (rdev->doorbell.num_doorbells == 0) 345d5754ab8SAndrew Lewycky return -EINVAL; 34675efdee1SAlex Deucher 347d5754ab8SAndrew Lewycky rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32)); 34875efdee1SAlex Deucher if (rdev->doorbell.ptr == NULL) { 34975efdee1SAlex Deucher return -ENOMEM; 35075efdee1SAlex Deucher } 35175efdee1SAlex Deucher DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base); 35275efdee1SAlex Deucher DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size); 35375efdee1SAlex Deucher 354d5754ab8SAndrew Lewycky memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used)); 35575efdee1SAlex Deucher 35675efdee1SAlex Deucher return 0; 35775efdee1SAlex Deucher } 35875efdee1SAlex Deucher 35975efdee1SAlex Deucher /** 36075efdee1SAlex Deucher * radeon_doorbell_fini - Tear down doorbell driver information. 36175efdee1SAlex Deucher * 36275efdee1SAlex Deucher * @rdev: radeon_device pointer 36375efdee1SAlex Deucher * 36475efdee1SAlex Deucher * Tear down doorbell driver information (CIK) 36575efdee1SAlex Deucher */ 36628f5a6cdSRashika Kheria static void radeon_doorbell_fini(struct radeon_device *rdev) 36775efdee1SAlex Deucher { 36875efdee1SAlex Deucher iounmap(rdev->doorbell.ptr); 36975efdee1SAlex Deucher rdev->doorbell.ptr = NULL; 37075efdee1SAlex Deucher } 37175efdee1SAlex Deucher 37275efdee1SAlex Deucher /** 373d5754ab8SAndrew Lewycky * radeon_doorbell_get - Allocate a doorbell entry 37475efdee1SAlex Deucher * 37575efdee1SAlex Deucher * @rdev: radeon_device pointer 376d5754ab8SAndrew Lewycky * @doorbell: doorbell index 37775efdee1SAlex Deucher * 378d5754ab8SAndrew Lewycky * Allocate a doorbell for use by the driver (all asics). 37975efdee1SAlex Deucher * Returns 0 on success or -EINVAL on failure. 
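 *
 * Typical usage pattern (a hedged sketch, not code lifted from this file;
 * the consumer of the index is hypothetical):
 *
 *   u32 index;
 *
 *   if (radeon_doorbell_get(rdev, &index) == 0) {
 *           ... use index as the doorbell slot for a ring or queue ...
 *           radeon_doorbell_free(rdev, index);
 *   }
 *
 * The returned value is an index into the doorbell BAR (one u32 per slot),
 * not a byte offset.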
38075efdee1SAlex Deucher */ 38175efdee1SAlex Deucher int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell) 38275efdee1SAlex Deucher { 383d5754ab8SAndrew Lewycky unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells); 384d5754ab8SAndrew Lewycky if (offset < rdev->doorbell.num_doorbells) { 385d5754ab8SAndrew Lewycky __set_bit(offset, rdev->doorbell.used); 386d5754ab8SAndrew Lewycky *doorbell = offset; 38775efdee1SAlex Deucher return 0; 388d5754ab8SAndrew Lewycky } else { 38975efdee1SAlex Deucher return -EINVAL; 39075efdee1SAlex Deucher } 391d5754ab8SAndrew Lewycky } 39275efdee1SAlex Deucher 39375efdee1SAlex Deucher /** 394d5754ab8SAndrew Lewycky * radeon_doorbell_free - Free a doorbell entry 39575efdee1SAlex Deucher * 39675efdee1SAlex Deucher * @rdev: radeon_device pointer 397d5754ab8SAndrew Lewycky * @doorbell: doorbell index 39875efdee1SAlex Deucher * 399d5754ab8SAndrew Lewycky * Free a doorbell allocated for use by the driver (all asics) 40075efdee1SAlex Deucher */ 40175efdee1SAlex Deucher void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell) 40275efdee1SAlex Deucher { 403d5754ab8SAndrew Lewycky if (doorbell < rdev->doorbell.num_doorbells) 404d5754ab8SAndrew Lewycky __clear_bit(doorbell, rdev->doorbell.used); 40575efdee1SAlex Deucher } 40675efdee1SAlex Deucher 40775efdee1SAlex Deucher /* 4080c195119SAlex Deucher * radeon_wb_*() 40904f61f6cSCai Huoqing * Writeback is the method by which the GPU updates special pages 4100c195119SAlex Deucher * in memory with the status of certain GPU events (fences, ring pointers, 4110c195119SAlex Deucher * etc.). 4120c195119SAlex Deucher */ 4130c195119SAlex Deucher 4140c195119SAlex Deucher /** 4150c195119SAlex Deucher * radeon_wb_disable - Disable Writeback 4160c195119SAlex Deucher * 4170c195119SAlex Deucher * @rdev: radeon_device pointer 4180c195119SAlex Deucher * 4190c195119SAlex Deucher * Disables Writeback (all asics). Used for suspend. 4200c195119SAlex Deucher */ 421724c80e1SAlex Deucher void radeon_wb_disable(struct radeon_device *rdev) 422724c80e1SAlex Deucher { 423724c80e1SAlex Deucher rdev->wb.enabled = false; 424724c80e1SAlex Deucher } 425724c80e1SAlex Deucher 4260c195119SAlex Deucher /** 4270c195119SAlex Deucher * radeon_wb_fini - Disable Writeback and free memory 4280c195119SAlex Deucher * 4290c195119SAlex Deucher * @rdev: radeon_device pointer 4300c195119SAlex Deucher * 4310c195119SAlex Deucher * Disables Writeback and frees the Writeback memory (all asics). 4320c195119SAlex Deucher * Used at driver shutdown. 
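 *
 * For context on the radeon_wb_*() family as a whole: once writeback is
 * enabled, consumers read GPU status out of the CPU-visible page instead of
 * an MMIO register, along the lines of this illustrative snippet (the offset
 * name is made up):
 *
 *   u32 rptr = le32_to_cpu(rdev->wb.wb[EXAMPLE_RPTR_OFFSET / 4]);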
4330c195119SAlex Deucher */ 434724c80e1SAlex Deucher void radeon_wb_fini(struct radeon_device *rdev) 435724c80e1SAlex Deucher { 436724c80e1SAlex Deucher radeon_wb_disable(rdev); 437724c80e1SAlex Deucher if (rdev->wb.wb_obj) { 438089920f2SJerome Glisse if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) { 439089920f2SJerome Glisse radeon_bo_kunmap(rdev->wb.wb_obj); 440089920f2SJerome Glisse radeon_bo_unpin(rdev->wb.wb_obj); 441089920f2SJerome Glisse radeon_bo_unreserve(rdev->wb.wb_obj); 442089920f2SJerome Glisse } 443724c80e1SAlex Deucher radeon_bo_unref(&rdev->wb.wb_obj); 444724c80e1SAlex Deucher rdev->wb.wb = NULL; 445724c80e1SAlex Deucher rdev->wb.wb_obj = NULL; 446724c80e1SAlex Deucher } 447724c80e1SAlex Deucher } 448724c80e1SAlex Deucher 4490c195119SAlex Deucher /** 4500c195119SAlex Deucher * radeon_wb_init - Init Writeback driver info and allocate memory 4510c195119SAlex Deucher * 4520c195119SAlex Deucher * @rdev: radeon_device pointer 4530c195119SAlex Deucher * 4540c195119SAlex Deucher * Enables Writeback and allocates the Writeback memory (all asics). 4550c195119SAlex Deucher * Used at driver startup. 4560c195119SAlex Deucher * Returns 0 on success or a negative error code on failure. 4570c195119SAlex Deucher */ 458724c80e1SAlex Deucher int radeon_wb_init(struct radeon_device *rdev) 459724c80e1SAlex Deucher { 460724c80e1SAlex Deucher int r; 461724c80e1SAlex Deucher 462724c80e1SAlex Deucher if (rdev->wb.wb_obj == NULL) { 463441921d5SDaniel Vetter r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, 464831b6966SMaarten Lankhorst RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, 46502376d82SMichel Dänzer &rdev->wb.wb_obj); 466724c80e1SAlex Deucher if (r) { 467724c80e1SAlex Deucher dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); 468724c80e1SAlex Deucher return r; 469724c80e1SAlex Deucher } 470724c80e1SAlex Deucher r = radeon_bo_reserve(rdev->wb.wb_obj, false); 471724c80e1SAlex Deucher if (unlikely(r != 0)) { 472724c80e1SAlex Deucher radeon_wb_fini(rdev); 473724c80e1SAlex Deucher return r; 474724c80e1SAlex Deucher } 475724c80e1SAlex Deucher r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 476724c80e1SAlex Deucher &rdev->wb.gpu_addr); 477724c80e1SAlex Deucher if (r) { 478724c80e1SAlex Deucher radeon_bo_unreserve(rdev->wb.wb_obj); 479724c80e1SAlex Deucher dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); 480724c80e1SAlex Deucher radeon_wb_fini(rdev); 481724c80e1SAlex Deucher return r; 482724c80e1SAlex Deucher } 483724c80e1SAlex Deucher r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 484724c80e1SAlex Deucher radeon_bo_unreserve(rdev->wb.wb_obj); 485724c80e1SAlex Deucher if (r) { 486724c80e1SAlex Deucher dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); 487724c80e1SAlex Deucher radeon_wb_fini(rdev); 488724c80e1SAlex Deucher return r; 489724c80e1SAlex Deucher } 490089920f2SJerome Glisse } 491724c80e1SAlex Deucher 492e6ba7599SAlex Deucher /* clear wb memory */ 493e6ba7599SAlex Deucher memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE); 494d0f8a854SAlex Deucher /* disable event_write fences */ 495d0f8a854SAlex Deucher rdev->wb.use_event = false; 496724c80e1SAlex Deucher /* disabled via module param */ 4973b7a2b24SJerome Glisse if (radeon_no_wb == 1) { 498724c80e1SAlex Deucher rdev->wb.enabled = false; 4993b7a2b24SJerome Glisse } else { 500724c80e1SAlex Deucher if (rdev->flags & RADEON_IS_AGP) { 50128eebb70SAlex Deucher /* often unreliable on AGP */ 50228eebb70SAlex Deucher rdev->wb.enabled = false; 50328eebb70SAlex Deucher } else if (rdev->family < CHIP_R300) { 50428eebb70SAlex
Deucher /* often unreliable on pre-r300 */ 505724c80e1SAlex Deucher rdev->wb.enabled = false; 506d0f8a854SAlex Deucher } else { 507724c80e1SAlex Deucher rdev->wb.enabled = true; 508d0f8a854SAlex Deucher /* event_write fences are only available on r600+ */ 5093b7a2b24SJerome Glisse if (rdev->family >= CHIP_R600) { 510d0f8a854SAlex Deucher rdev->wb.use_event = true; 511d0f8a854SAlex Deucher } 512724c80e1SAlex Deucher } 5133b7a2b24SJerome Glisse } 514c994ead6SAlex Deucher /* always use writeback/events on NI, APUs */ 515c994ead6SAlex Deucher if (rdev->family >= CHIP_PALM) { 5167d52785dSAlex Deucher rdev->wb.enabled = true; 5177d52785dSAlex Deucher rdev->wb.use_event = true; 5187d52785dSAlex Deucher } 519724c80e1SAlex Deucher 520724c80e1SAlex Deucher dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis"); 521724c80e1SAlex Deucher 522724c80e1SAlex Deucher return 0; 523724c80e1SAlex Deucher } 524724c80e1SAlex Deucher 525d594e46aSJerome Glisse /** 526d594e46aSJerome Glisse * radeon_vram_location - try to find VRAM location 527d594e46aSJerome Glisse * @rdev: radeon device structure holding all necessary information 528d594e46aSJerome Glisse * @mc: memory controller structure holding memory information 529d594e46aSJerome Glisse * @base: base address at which to put VRAM 530d594e46aSJerome Glisse * 531d594e46aSJerome Glisse * Function will try to place VRAM at the base address provided 532d594e46aSJerome Glisse * as parameter (which is so far either the PCI aperture address or, 533d594e46aSJerome Glisse * for IGP, the TOM base address). 534d594e46aSJerome Glisse * 535d594e46aSJerome Glisse * If there is not enough space to fit the invisible VRAM in the 32-bit 536d594e46aSJerome Glisse * address space then we limit the VRAM size to the aperture. 537d594e46aSJerome Glisse * 538d594e46aSJerome Glisse * If we are using AGP and if the AGP aperture doesn't allow us to have 539d594e46aSJerome Glisse * room for all the VRAM then we restrict the VRAM to the PCI aperture 540d594e46aSJerome Glisse * size and print a warning. 541d594e46aSJerome Glisse * 542d594e46aSJerome Glisse * This function will never fail; the worst case is limiting VRAM. 543d594e46aSJerome Glisse * 544d594e46aSJerome Glisse * Note: GTT start, end, size should be initialized before calling this 545d594e46aSJerome Glisse * function on AGP platforms. 546d594e46aSJerome Glisse * 547f017853eSLee Jones * Note 1: We don't explicitly enforce VRAM start to be aligned on VRAM size, 548d594e46aSJerome Glisse * this shouldn't be a problem as we are using the PCI aperture as a reference. 549d594e46aSJerome Glisse * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but 550d594e46aSJerome Glisse * not IGP.
551d594e46aSJerome Glisse * 552f017853eSLee Jones * Note 2: we use mc_vram_size because on some boards we need to program the mc to 553d594e46aSJerome Glisse * cover the whole aperture even if the VRAM size is smaller than the aperture size, 554d594e46aSJerome Glisse * see Novell bug 204882 along with lots of Ubuntu ones 555d594e46aSJerome Glisse * 556f017853eSLee Jones * Note 3: when limiting vram it's safe to overwrite real_vram_size because 557d594e46aSJerome Glisse * we are not in the case where real_vram_size is smaller than mc_vram_size (i.e. 558d594e46aSJerome Glisse * not affected by the bogus hw of Novell bug 204882 along with lots of Ubuntu 559d594e46aSJerome Glisse * ones) 560d594e46aSJerome Glisse * 561f017853eSLee Jones * Note 4: IGP TOM addr should be the same as the aperture addr, we don't 562d594e46aSJerome Glisse * explicitly check for that though. 563d594e46aSJerome Glisse * 564d594e46aSJerome Glisse * FIXME: when reducing VRAM size align new size on power of 2. 565771fe6b9SJerome Glisse */ 566d594e46aSJerome Glisse void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base) 567771fe6b9SJerome Glisse { 5681bcb04f7SChristian König uint64_t limit = (uint64_t)radeon_vram_limit << 20; 5691bcb04f7SChristian König 570d594e46aSJerome Glisse mc->vram_start = base; 5719ed8b1f9SAlex Deucher if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) { 572d594e46aSJerome Glisse dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 573d594e46aSJerome Glisse mc->real_vram_size = mc->aper_size; 574d594e46aSJerome Glisse mc->mc_vram_size = mc->aper_size; 575771fe6b9SJerome Glisse } 576d594e46aSJerome Glisse mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 5772cbeb4efSJerome Glisse if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) { 578d594e46aSJerome Glisse dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 579d594e46aSJerome Glisse mc->real_vram_size = mc->aper_size; 580d594e46aSJerome Glisse mc->mc_vram_size = mc->aper_size; 581771fe6b9SJerome Glisse } 582d594e46aSJerome Glisse mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 5831bcb04f7SChristian König if (limit && limit < mc->real_vram_size) 5841bcb04f7SChristian König mc->real_vram_size = limit; 585dd7cc55aSAlex Deucher dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", 586d594e46aSJerome Glisse mc->mc_vram_size >> 20, mc->vram_start, 587d594e46aSJerome Glisse mc->vram_end, mc->real_vram_size >> 20); 588771fe6b9SJerome Glisse } 589771fe6b9SJerome Glisse 590d594e46aSJerome Glisse /** 591d594e46aSJerome Glisse * radeon_gtt_location - try to find GTT location 592d594e46aSJerome Glisse * @rdev: radeon device structure holding all necessary information 593d594e46aSJerome Glisse * @mc: memory controller structure holding memory information 594d594e46aSJerome Glisse * 595d594e46aSJerome Glisse * Function will try to place GTT before or after VRAM. 596d594e46aSJerome Glisse * 597d594e46aSJerome Glisse * If the GTT size is bigger than the space left then we adjust the GTT size. 598d594e46aSJerome Glisse * Thus this function will never fail. 599d594e46aSJerome Glisse * 600d594e46aSJerome Glisse * FIXME: when reducing GTT size align new size on power of 2.
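 *
 * Worked example (illustrative numbers, assuming gtt_base_align is 0 and a
 * 32-bit mc_mask): with vram_start = 0x0 and vram_end = 0x0FFFFFFF (256M of
 * VRAM), the space before VRAM is 0 and the space after it is 0xF0000000
 * bytes, so a 1024M GTT is placed after VRAM:
 *
 *   gtt_start = vram_end + 1             = 0x10000000
 *   gtt_end   = gtt_start + gtt_size - 1 = 0x4FFFFFFF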
601d594e46aSJerome Glisse */ 602d594e46aSJerome Glisse void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) 603d594e46aSJerome Glisse { 604d594e46aSJerome Glisse u64 size_af, size_bf; 605d594e46aSJerome Glisse 6069ed8b1f9SAlex Deucher size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align; 6078d369bb1SAlex Deucher size_bf = mc->vram_start & ~mc->gtt_base_align; 608d594e46aSJerome Glisse if (size_bf > size_af) { 609d594e46aSJerome Glisse if (mc->gtt_size > size_bf) { 610d594e46aSJerome Glisse dev_warn(rdev->dev, "limiting GTT\n"); 611d594e46aSJerome Glisse mc->gtt_size = size_bf; 612d594e46aSJerome Glisse } 6138d369bb1SAlex Deucher mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size; 614d594e46aSJerome Glisse } else { 615d594e46aSJerome Glisse if (mc->gtt_size > size_af) { 616d594e46aSJerome Glisse dev_warn(rdev->dev, "limiting GTT\n"); 617d594e46aSJerome Glisse mc->gtt_size = size_af; 618d594e46aSJerome Glisse } 6198d369bb1SAlex Deucher mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; 620d594e46aSJerome Glisse } 621d594e46aSJerome Glisse mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; 622dd7cc55aSAlex Deucher dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", 623d594e46aSJerome Glisse mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); 624d594e46aSJerome Glisse } 625771fe6b9SJerome Glisse 626771fe6b9SJerome Glisse /* 627771fe6b9SJerome Glisse * GPU helpers function. 628771fe6b9SJerome Glisse */ 62905082b8bSAlex Deucher 630f017853eSLee Jones /* 63105082b8bSAlex Deucher * radeon_device_is_virtual - check if we are running is a virtual environment 63205082b8bSAlex Deucher * 63305082b8bSAlex Deucher * Check if the asic has been passed through to a VM (all asics). 63405082b8bSAlex Deucher * Used at driver startup. 63505082b8bSAlex Deucher * Returns true if virtual or false if not. 63605082b8bSAlex Deucher */ 637a801abe4SAlex Deucher bool radeon_device_is_virtual(void) 63805082b8bSAlex Deucher { 63905082b8bSAlex Deucher #ifdef CONFIG_X86 64005082b8bSAlex Deucher return boot_cpu_has(X86_FEATURE_HYPERVISOR); 64105082b8bSAlex Deucher #else 64205082b8bSAlex Deucher return false; 64305082b8bSAlex Deucher #endif 64405082b8bSAlex Deucher } 64505082b8bSAlex Deucher 6460c195119SAlex Deucher /** 6470c195119SAlex Deucher * radeon_card_posted - check if the hw has already been initialized 6480c195119SAlex Deucher * 6490c195119SAlex Deucher * @rdev: radeon_device pointer 6500c195119SAlex Deucher * 6510c195119SAlex Deucher * Check if the asic has been initialized (all asics). 6520c195119SAlex Deucher * Used at driver startup. 6530c195119SAlex Deucher * Returns true if initialized or false if not. 
6540c195119SAlex Deucher */ 6559f022ddfSJerome Glisse bool radeon_card_posted(struct radeon_device *rdev) 656771fe6b9SJerome Glisse { 657771fe6b9SJerome Glisse uint32_t reg; 658771fe6b9SJerome Glisse 659884031f0SAlex Deucher /* for pass through, always force asic_init for CI */ 660884031f0SAlex Deucher if (rdev->family >= CHIP_BONAIRE && 661884031f0SAlex Deucher radeon_device_is_virtual()) 66205082b8bSAlex Deucher return false; 66305082b8bSAlex Deucher 66450a583f6SAlex Deucher /* required for EFI mode on macbook2,1 which uses an r5xx asic */ 66583e68189SMatt Fleming if (efi_enabled(EFI_BOOT) && 66650a583f6SAlex Deucher (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && 66750a583f6SAlex Deucher (rdev->family < CHIP_R600)) 668bcc65fd8SMatthew Garrett return false; 669bcc65fd8SMatthew Garrett 6702cf3a4fcSAlex Deucher if (ASIC_IS_NODCE(rdev)) 6712cf3a4fcSAlex Deucher goto check_memsize; 6722cf3a4fcSAlex Deucher 673771fe6b9SJerome Glisse /* first check CRTCs */ 67409fb8bd1SAlex Deucher if (ASIC_IS_DCE4(rdev)) { 67518007401SAlex Deucher reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 67618007401SAlex Deucher RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); 67709fb8bd1SAlex Deucher if (rdev->num_crtc >= 4) { 67809fb8bd1SAlex Deucher reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | 67909fb8bd1SAlex Deucher RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); 68009fb8bd1SAlex Deucher } 68109fb8bd1SAlex Deucher if (rdev->num_crtc >= 6) { 68209fb8bd1SAlex Deucher reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | 683bcc1c2a1SAlex Deucher RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); 68409fb8bd1SAlex Deucher } 685bcc1c2a1SAlex Deucher if (reg & EVERGREEN_CRTC_MASTER_EN) 686bcc1c2a1SAlex Deucher return true; 687bcc1c2a1SAlex Deucher } else if (ASIC_IS_AVIVO(rdev)) { 688771fe6b9SJerome Glisse reg = RREG32(AVIVO_D1CRTC_CONTROL) | 689771fe6b9SJerome Glisse RREG32(AVIVO_D2CRTC_CONTROL); 690771fe6b9SJerome Glisse if (reg & AVIVO_CRTC_EN) { 691771fe6b9SJerome Glisse return true; 692771fe6b9SJerome Glisse } 693771fe6b9SJerome Glisse } else { 694771fe6b9SJerome Glisse reg = RREG32(RADEON_CRTC_GEN_CNTL) | 695771fe6b9SJerome Glisse RREG32(RADEON_CRTC2_GEN_CNTL); 696771fe6b9SJerome Glisse if (reg & RADEON_CRTC_EN) { 697771fe6b9SJerome Glisse return true; 698771fe6b9SJerome Glisse } 699771fe6b9SJerome Glisse } 700771fe6b9SJerome Glisse 7012cf3a4fcSAlex Deucher check_memsize: 702771fe6b9SJerome Glisse /* then check MEM_SIZE, in case the crtcs are off */ 703771fe6b9SJerome Glisse if (rdev->family >= CHIP_R600) 704771fe6b9SJerome Glisse reg = RREG32(R600_CONFIG_MEMSIZE); 705771fe6b9SJerome Glisse else 706771fe6b9SJerome Glisse reg = RREG32(RADEON_CONFIG_MEMSIZE); 707771fe6b9SJerome Glisse 708771fe6b9SJerome Glisse if (reg) 709771fe6b9SJerome Glisse return true; 710771fe6b9SJerome Glisse 711771fe6b9SJerome Glisse return false; 712771fe6b9SJerome Glisse 713771fe6b9SJerome Glisse } 714771fe6b9SJerome Glisse 7150c195119SAlex Deucher /** 7160c195119SAlex Deucher * radeon_update_bandwidth_info - update display bandwidth params 7170c195119SAlex Deucher * 7180c195119SAlex Deucher * @rdev: radeon_device pointer 7190c195119SAlex Deucher * 7200c195119SAlex Deucher * Used when sclk/mclk are switched or display modes are set. 
7210c195119SAlex Deucher * params are used to calculate display watermarks (all asics) 7220c195119SAlex Deucher */ 723f47299c5SAlex Deucher void radeon_update_bandwidth_info(struct radeon_device *rdev) 724f47299c5SAlex Deucher { 725f47299c5SAlex Deucher fixed20_12 a; 7268807286eSAlex Deucher u32 sclk = rdev->pm.current_sclk; 7278807286eSAlex Deucher u32 mclk = rdev->pm.current_mclk; 728f47299c5SAlex Deucher 7298807286eSAlex Deucher /* sclk/mclk in Mhz */ 73068adac5eSBen Skeggs a.full = dfixed_const(100); 73168adac5eSBen Skeggs rdev->pm.sclk.full = dfixed_const(sclk); 73268adac5eSBen Skeggs rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a); 73368adac5eSBen Skeggs rdev->pm.mclk.full = dfixed_const(mclk); 73468adac5eSBen Skeggs rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a); 735f47299c5SAlex Deucher 7368807286eSAlex Deucher if (rdev->flags & RADEON_IS_IGP) { 73768adac5eSBen Skeggs a.full = dfixed_const(16); 738f47299c5SAlex Deucher /* core_bandwidth = sclk(Mhz) * 16 */ 73968adac5eSBen Skeggs rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); 740f47299c5SAlex Deucher } 741f47299c5SAlex Deucher } 742f47299c5SAlex Deucher 7430c195119SAlex Deucher /** 7440c195119SAlex Deucher * radeon_boot_test_post_card - check and possibly initialize the hw 7450c195119SAlex Deucher * 7460c195119SAlex Deucher * @rdev: radeon_device pointer 7470c195119SAlex Deucher * 7480c195119SAlex Deucher * Check if the asic is initialized and if not, attempt to initialize 7490c195119SAlex Deucher * it (all asics). 7500c195119SAlex Deucher * Returns true if initialized or false if not. 7510c195119SAlex Deucher */ 75272542d77SDave Airlie bool radeon_boot_test_post_card(struct radeon_device *rdev) 75372542d77SDave Airlie { 75472542d77SDave Airlie if (radeon_card_posted(rdev)) 75572542d77SDave Airlie return true; 75672542d77SDave Airlie 75772542d77SDave Airlie if (rdev->bios) { 75872542d77SDave Airlie DRM_INFO("GPU not posted. posting now...\n"); 75972542d77SDave Airlie if (rdev->is_atom_bios) 76072542d77SDave Airlie atom_asic_init(rdev->mode_info.atom_context); 76172542d77SDave Airlie else 76272542d77SDave Airlie radeon_combios_asic_init(rdev->ddev); 76372542d77SDave Airlie return true; 76472542d77SDave Airlie } else { 76572542d77SDave Airlie dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 76672542d77SDave Airlie return false; 76772542d77SDave Airlie } 76872542d77SDave Airlie } 76972542d77SDave Airlie 7700c195119SAlex Deucher /** 7710c195119SAlex Deucher * radeon_dummy_page_init - init dummy page used by the driver 7720c195119SAlex Deucher * 7730c195119SAlex Deucher * @rdev: radeon_device pointer 7740c195119SAlex Deucher * 7750c195119SAlex Deucher * Allocate the dummy page used by the driver (all asics). 7760c195119SAlex Deucher * This dummy page is used by the driver as a filler for gart entries 7770c195119SAlex Deucher * when pages are taken out of the GART 7780c195119SAlex Deucher * Returns 0 on sucess, -ENOMEM on failure. 
7790c195119SAlex Deucher */ 7803ce0a23dSJerome Glisse int radeon_dummy_page_init(struct radeon_device *rdev) 7813ce0a23dSJerome Glisse { 78282568565SDave Airlie if (rdev->dummy_page.page) 78382568565SDave Airlie return 0; 7843ce0a23dSJerome Glisse rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); 7853ce0a23dSJerome Glisse if (rdev->dummy_page.page == NULL) 7863ce0a23dSJerome Glisse return -ENOMEM; 7877e7726ecSNirmoy Das rdev->dummy_page.addr = dma_map_page(&rdev->pdev->dev, rdev->dummy_page.page, 788a5f61dd4SChristophe JAILLET 0, PAGE_SIZE, DMA_BIDIRECTIONAL); 7897e7726ecSNirmoy Das if (dma_mapping_error(&rdev->pdev->dev, rdev->dummy_page.addr)) { 790a30f6fb7SBenjamin Herrenschmidt dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n"); 7913ce0a23dSJerome Glisse __free_page(rdev->dummy_page.page); 7923ce0a23dSJerome Glisse rdev->dummy_page.page = NULL; 7933ce0a23dSJerome Glisse return -ENOMEM; 7943ce0a23dSJerome Glisse } 795cb658906SMichel Dänzer rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr, 796cb658906SMichel Dänzer RADEON_GART_PAGE_DUMMY); 7973ce0a23dSJerome Glisse return 0; 7983ce0a23dSJerome Glisse } 7993ce0a23dSJerome Glisse 8000c195119SAlex Deucher /** 8010c195119SAlex Deucher * radeon_dummy_page_fini - free dummy page used by the driver 8020c195119SAlex Deucher * 8030c195119SAlex Deucher * @rdev: radeon_device pointer 8040c195119SAlex Deucher * 8050c195119SAlex Deucher * Frees the dummy page used by the driver (all asics). 8060c195119SAlex Deucher */ 8073ce0a23dSJerome Glisse void radeon_dummy_page_fini(struct radeon_device *rdev) 8083ce0a23dSJerome Glisse { 8093ce0a23dSJerome Glisse if (rdev->dummy_page.page == NULL) 8103ce0a23dSJerome Glisse return; 811a5f61dd4SChristophe JAILLET dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE, 812a5f61dd4SChristophe JAILLET DMA_BIDIRECTIONAL); 8133ce0a23dSJerome Glisse __free_page(rdev->dummy_page.page); 8143ce0a23dSJerome Glisse rdev->dummy_page.page = NULL; 8153ce0a23dSJerome Glisse } 8163ce0a23dSJerome Glisse 817771fe6b9SJerome Glisse 818771fe6b9SJerome Glisse /* ATOM accessor methods */ 8190c195119SAlex Deucher /* 8200c195119SAlex Deucher * ATOM is an interpreted byte code stored in tables in the vbios. The 8210c195119SAlex Deucher * driver registers callbacks to access registers and the interpreter 8220c195119SAlex Deucher * in the driver parses the tables and executes then to program specific 8230c195119SAlex Deucher * actions (set display modes, asic init, etc.). See radeon_atombios.c, 8240c195119SAlex Deucher * atombios.h, and atom.c 8250c195119SAlex Deucher */ 8260c195119SAlex Deucher 8270c195119SAlex Deucher /** 8280c195119SAlex Deucher * cail_pll_read - read PLL register 8290c195119SAlex Deucher * 8300c195119SAlex Deucher * @info: atom card_info pointer 8310c195119SAlex Deucher * @reg: PLL register offset 8320c195119SAlex Deucher * 8330c195119SAlex Deucher * Provides a PLL register accessor for the atom interpreter (r4xx+). 8340c195119SAlex Deucher * Returns the value of the PLL register. 
8350c195119SAlex Deucher */ 836771fe6b9SJerome Glisse static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) 837771fe6b9SJerome Glisse { 838771fe6b9SJerome Glisse struct radeon_device *rdev = info->dev->dev_private; 839771fe6b9SJerome Glisse uint32_t r; 840771fe6b9SJerome Glisse 841771fe6b9SJerome Glisse r = rdev->pll_rreg(rdev, reg); 842771fe6b9SJerome Glisse return r; 843771fe6b9SJerome Glisse } 844771fe6b9SJerome Glisse 8450c195119SAlex Deucher /** 8460c195119SAlex Deucher * cail_pll_write - write PLL register 8470c195119SAlex Deucher * 8480c195119SAlex Deucher * @info: atom card_info pointer 8490c195119SAlex Deucher * @reg: PLL register offset 8500c195119SAlex Deucher * @val: value to write to the pll register 8510c195119SAlex Deucher * 8520c195119SAlex Deucher * Provides a PLL register accessor for the atom interpreter (r4xx+). 8530c195119SAlex Deucher */ 854771fe6b9SJerome Glisse static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) 855771fe6b9SJerome Glisse { 856771fe6b9SJerome Glisse struct radeon_device *rdev = info->dev->dev_private; 857771fe6b9SJerome Glisse 858771fe6b9SJerome Glisse rdev->pll_wreg(rdev, reg, val); 859771fe6b9SJerome Glisse } 860771fe6b9SJerome Glisse 8610c195119SAlex Deucher /** 8620c195119SAlex Deucher * cail_mc_read - read MC (Memory Controller) register 8630c195119SAlex Deucher * 8640c195119SAlex Deucher * @info: atom card_info pointer 8650c195119SAlex Deucher * @reg: MC register offset 8660c195119SAlex Deucher * 8670c195119SAlex Deucher * Provides an MC register accessor for the atom interpreter (r4xx+). 8680c195119SAlex Deucher * Returns the value of the MC register. 8690c195119SAlex Deucher */ 870771fe6b9SJerome Glisse static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) 871771fe6b9SJerome Glisse { 872771fe6b9SJerome Glisse struct radeon_device *rdev = info->dev->dev_private; 873771fe6b9SJerome Glisse uint32_t r; 874771fe6b9SJerome Glisse 875771fe6b9SJerome Glisse r = rdev->mc_rreg(rdev, reg); 876771fe6b9SJerome Glisse return r; 877771fe6b9SJerome Glisse } 878771fe6b9SJerome Glisse 8790c195119SAlex Deucher /** 8800c195119SAlex Deucher * cail_mc_write - write MC (Memory Controller) register 8810c195119SAlex Deucher * 8820c195119SAlex Deucher * @info: atom card_info pointer 8830c195119SAlex Deucher * @reg: MC register offset 8840c195119SAlex Deucher * @val: value to write to the pll register 8850c195119SAlex Deucher * 8860c195119SAlex Deucher * Provides a MC register accessor for the atom interpreter (r4xx+). 8870c195119SAlex Deucher */ 888771fe6b9SJerome Glisse static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) 889771fe6b9SJerome Glisse { 890771fe6b9SJerome Glisse struct radeon_device *rdev = info->dev->dev_private; 891771fe6b9SJerome Glisse 892771fe6b9SJerome Glisse rdev->mc_wreg(rdev, reg, val); 893771fe6b9SJerome Glisse } 894771fe6b9SJerome Glisse 8950c195119SAlex Deucher /** 8960c195119SAlex Deucher * cail_reg_write - write MMIO register 8970c195119SAlex Deucher * 8980c195119SAlex Deucher * @info: atom card_info pointer 8990c195119SAlex Deucher * @reg: MMIO register offset 9000c195119SAlex Deucher * @val: value to write to the pll register 9010c195119SAlex Deucher * 9020c195119SAlex Deucher * Provides a MMIO register accessor for the atom interpreter (r4xx+). 
9030c195119SAlex Deucher */ 904771fe6b9SJerome Glisse static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) 905771fe6b9SJerome Glisse { 906771fe6b9SJerome Glisse struct radeon_device *rdev = info->dev->dev_private; 907771fe6b9SJerome Glisse 908771fe6b9SJerome Glisse WREG32(reg*4, val); 909771fe6b9SJerome Glisse } 910771fe6b9SJerome Glisse 9110c195119SAlex Deucher /** 9120c195119SAlex Deucher * cail_reg_read - read MMIO register 9130c195119SAlex Deucher * 9140c195119SAlex Deucher * @info: atom card_info pointer 9150c195119SAlex Deucher * @reg: MMIO register offset 9160c195119SAlex Deucher * 9170c195119SAlex Deucher * Provides an MMIO register accessor for the atom interpreter (r4xx+). 9180c195119SAlex Deucher * Returns the value of the MMIO register. 9190c195119SAlex Deucher */ 920771fe6b9SJerome Glisse static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) 921771fe6b9SJerome Glisse { 922771fe6b9SJerome Glisse struct radeon_device *rdev = info->dev->dev_private; 923771fe6b9SJerome Glisse uint32_t r; 924771fe6b9SJerome Glisse 925771fe6b9SJerome Glisse r = RREG32(reg*4); 926771fe6b9SJerome Glisse return r; 927771fe6b9SJerome Glisse } 928771fe6b9SJerome Glisse 9290c195119SAlex Deucher /** 9300c195119SAlex Deucher * cail_ioreg_write - write IO register 9310c195119SAlex Deucher * 9320c195119SAlex Deucher * @info: atom card_info pointer 9330c195119SAlex Deucher * @reg: IO register offset 9340c195119SAlex Deucher * @val: value to write to the pll register 9350c195119SAlex Deucher * 9360c195119SAlex Deucher * Provides a IO register accessor for the atom interpreter (r4xx+). 9370c195119SAlex Deucher */ 938351a52a2SAlex Deucher static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) 939351a52a2SAlex Deucher { 940351a52a2SAlex Deucher struct radeon_device *rdev = info->dev->dev_private; 941351a52a2SAlex Deucher 942351a52a2SAlex Deucher WREG32_IO(reg*4, val); 943351a52a2SAlex Deucher } 944351a52a2SAlex Deucher 9450c195119SAlex Deucher /** 9460c195119SAlex Deucher * cail_ioreg_read - read IO register 9470c195119SAlex Deucher * 9480c195119SAlex Deucher * @info: atom card_info pointer 9490c195119SAlex Deucher * @reg: IO register offset 9500c195119SAlex Deucher * 9510c195119SAlex Deucher * Provides an IO register accessor for the atom interpreter (r4xx+). 9520c195119SAlex Deucher * Returns the value of the IO register. 9530c195119SAlex Deucher */ 954351a52a2SAlex Deucher static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) 955351a52a2SAlex Deucher { 956351a52a2SAlex Deucher struct radeon_device *rdev = info->dev->dev_private; 957351a52a2SAlex Deucher uint32_t r; 958351a52a2SAlex Deucher 959351a52a2SAlex Deucher r = RREG32_IO(reg*4); 960351a52a2SAlex Deucher return r; 961351a52a2SAlex Deucher } 962351a52a2SAlex Deucher 9630c195119SAlex Deucher /** 9640c195119SAlex Deucher * radeon_atombios_init - init the driver info and callbacks for atombios 9650c195119SAlex Deucher * 9660c195119SAlex Deucher * @rdev: radeon_device pointer 9670c195119SAlex Deucher * 9680c195119SAlex Deucher * Initializes the driver info and register access callbacks for the 9690c195119SAlex Deucher * ATOM interpreter (r4xx+). 9700c195119SAlex Deucher * Returns 0 on sucess, -ENOMEM on failure. 9710c195119SAlex Deucher * Called at driver startup. 
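 *
 * Once initialized, this context is what the rest of the driver hands to the
 * ATOM interpreter. A hedged sketch of executing a command table (the
 * command-specific args struct is omitted; GetIndexIntoMasterTable comes
 * from atombios.h):
 *
 *   int index = GetIndexIntoMasterTable(COMMAND, ASIC_Init);
 *
 *   atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);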
9720c195119SAlex Deucher */ 973771fe6b9SJerome Glisse int radeon_atombios_init(struct radeon_device *rdev) 974771fe6b9SJerome Glisse { 97561c4b24bSMathias Fröhlich struct card_info *atom_card_info = 97661c4b24bSMathias Fröhlich kzalloc(sizeof(struct card_info), GFP_KERNEL); 97761c4b24bSMathias Fröhlich 97861c4b24bSMathias Fröhlich if (!atom_card_info) 97961c4b24bSMathias Fröhlich return -ENOMEM; 98061c4b24bSMathias Fröhlich 98161c4b24bSMathias Fröhlich rdev->mode_info.atom_card_info = atom_card_info; 98261c4b24bSMathias Fröhlich atom_card_info->dev = rdev->ddev; 98361c4b24bSMathias Fröhlich atom_card_info->reg_read = cail_reg_read; 98461c4b24bSMathias Fröhlich atom_card_info->reg_write = cail_reg_write; 985351a52a2SAlex Deucher /* needed for iio ops */ 986351a52a2SAlex Deucher if (rdev->rio_mem) { 987351a52a2SAlex Deucher atom_card_info->ioreg_read = cail_ioreg_read; 988351a52a2SAlex Deucher atom_card_info->ioreg_write = cail_ioreg_write; 989351a52a2SAlex Deucher } else { 990351a52a2SAlex Deucher DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n"); 991351a52a2SAlex Deucher atom_card_info->ioreg_read = cail_reg_read; 992351a52a2SAlex Deucher atom_card_info->ioreg_write = cail_reg_write; 993351a52a2SAlex Deucher } 99461c4b24bSMathias Fröhlich atom_card_info->mc_read = cail_mc_read; 99561c4b24bSMathias Fröhlich atom_card_info->mc_write = cail_mc_write; 99661c4b24bSMathias Fröhlich atom_card_info->pll_read = cail_pll_read; 99761c4b24bSMathias Fröhlich atom_card_info->pll_write = cail_pll_write; 99861c4b24bSMathias Fröhlich 99961c4b24bSMathias Fröhlich rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); 10000e34d094STim Gardner if (!rdev->mode_info.atom_context) { 10010e34d094STim Gardner radeon_atombios_fini(rdev); 10020e34d094STim Gardner return -ENOMEM; 10030e34d094STim Gardner } 10040e34d094STim Gardner 1005c31ad97fSRafał Miłecki mutex_init(&rdev->mode_info.atom_context->mutex); 10061c949842SDave Airlie mutex_init(&rdev->mode_info.atom_context->scratch_mutex); 1007771fe6b9SJerome Glisse radeon_atom_initialize_bios_scratch_regs(rdev->ddev); 1008d904ef9bSDave Airlie atom_allocate_fb_scratch(rdev->mode_info.atom_context); 1009771fe6b9SJerome Glisse return 0; 1010771fe6b9SJerome Glisse } 1011771fe6b9SJerome Glisse 10120c195119SAlex Deucher /** 10130c195119SAlex Deucher * radeon_atombios_fini - free the driver info and callbacks for atombios 10140c195119SAlex Deucher * 10150c195119SAlex Deucher * @rdev: radeon_device pointer 10160c195119SAlex Deucher * 10170c195119SAlex Deucher * Frees the driver info and register access callbacks for the ATOM 10180c195119SAlex Deucher * interpreter (r4xx+). 10190c195119SAlex Deucher * Called at driver shutdown. 10200c195119SAlex Deucher */ 1021771fe6b9SJerome Glisse void radeon_atombios_fini(struct radeon_device *rdev) 1022771fe6b9SJerome Glisse { 10234a04a844SJerome Glisse if (rdev->mode_info.atom_context) { 1024d904ef9bSDave Airlie kfree(rdev->mode_info.atom_context->scratch); 10254a04a844SJerome Glisse } 10260e34d094STim Gardner kfree(rdev->mode_info.atom_context); 10270e34d094STim Gardner rdev->mode_info.atom_context = NULL; 102861c4b24bSMathias Fröhlich kfree(rdev->mode_info.atom_card_info); 10290e34d094STim Gardner rdev->mode_info.atom_card_info = NULL; 1030771fe6b9SJerome Glisse } 1031771fe6b9SJerome Glisse 10320c195119SAlex Deucher /* COMBIOS */ 10330c195119SAlex Deucher /* 10340c195119SAlex Deucher * COMBIOS is the bios format prior to ATOM. 
It provides 10350c195119SAlex Deucher * command tables similar to ATOM, but doesn't have a unified 10360c195119SAlex Deucher * parser. See radeon_combios.c 10370c195119SAlex Deucher */ 10380c195119SAlex Deucher 10390c195119SAlex Deucher /** 10400c195119SAlex Deucher * radeon_combios_init - init the driver info for combios 10410c195119SAlex Deucher * 10420c195119SAlex Deucher * @rdev: radeon_device pointer 10430c195119SAlex Deucher * 10440c195119SAlex Deucher * Initializes the driver info for combios (r1xx-r3xx). 10450c195119SAlex Deucher * Returns 0 on sucess. 10460c195119SAlex Deucher * Called at driver startup. 10470c195119SAlex Deucher */ 1048771fe6b9SJerome Glisse int radeon_combios_init(struct radeon_device *rdev) 1049771fe6b9SJerome Glisse { 1050771fe6b9SJerome Glisse radeon_combios_initialize_bios_scratch_regs(rdev->ddev); 1051771fe6b9SJerome Glisse return 0; 1052771fe6b9SJerome Glisse } 1053771fe6b9SJerome Glisse 10540c195119SAlex Deucher /** 10550c195119SAlex Deucher * radeon_combios_fini - free the driver info for combios 10560c195119SAlex Deucher * 10570c195119SAlex Deucher * @rdev: radeon_device pointer 10580c195119SAlex Deucher * 10590c195119SAlex Deucher * Frees the driver info for combios (r1xx-r3xx). 10600c195119SAlex Deucher * Called at driver shutdown. 10610c195119SAlex Deucher */ 1062771fe6b9SJerome Glisse void radeon_combios_fini(struct radeon_device *rdev) 1063771fe6b9SJerome Glisse { 1064771fe6b9SJerome Glisse } 1065771fe6b9SJerome Glisse 10660c195119SAlex Deucher /* if we get transitioned to only one device, take VGA back */ 10670c195119SAlex Deucher /** 10680c195119SAlex Deucher * radeon_vga_set_decode - enable/disable vga decode 10690c195119SAlex Deucher * 1070bf44e8ceSChristoph Hellwig * @pdev: PCI device 10710c195119SAlex Deucher * @state: enable/disable vga decode 10720c195119SAlex Deucher * 10730c195119SAlex Deucher * Enable/disable vga decode (all asics). 10740c195119SAlex Deucher * Returns VGA resource flags. 10750c195119SAlex Deucher */ 1076bf44e8ceSChristoph Hellwig static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state) 107728d52043SDave Airlie { 1078bf44e8ceSChristoph Hellwig struct drm_device *dev = pci_get_drvdata(pdev); 1079bf44e8ceSChristoph Hellwig struct radeon_device *rdev = dev->dev_private; 108028d52043SDave Airlie radeon_vga_set_state(rdev, state); 108128d52043SDave Airlie if (state) 108228d52043SDave Airlie return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 108328d52043SDave Airlie VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 108428d52043SDave Airlie else 108528d52043SDave Airlie return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 108628d52043SDave Airlie } 1087c1176d6fSDave Airlie 10880c195119SAlex Deucher /** 1089d3da76aaSLee Jones * radeon_gart_size_auto - Determine a sensible default GART size 1090d3da76aaSLee Jones * according to ASIC family. 
10915e3c4f90SGrigori Goronzy * 1092f017853eSLee Jones * @family: ASIC family name 10935e3c4f90SGrigori Goronzy */ 10945e3c4f90SGrigori Goronzy static int radeon_gart_size_auto(enum radeon_family family) 10955e3c4f90SGrigori Goronzy { 10965e3c4f90SGrigori Goronzy /* default to a larger gart size on newer asics */ 10975e3c4f90SGrigori Goronzy if (family >= CHIP_TAHITI) 10985e3c4f90SGrigori Goronzy return 2048; 10995e3c4f90SGrigori Goronzy else if (family >= CHIP_RV770) 11005e3c4f90SGrigori Goronzy return 1024; 11015e3c4f90SGrigori Goronzy else 11025e3c4f90SGrigori Goronzy return 512; 11035e3c4f90SGrigori Goronzy } 11045e3c4f90SGrigori Goronzy 11055e3c4f90SGrigori Goronzy /** 11060c195119SAlex Deucher * radeon_check_arguments - validate module params 11070c195119SAlex Deucher * 11080c195119SAlex Deucher * @rdev: radeon_device pointer 11090c195119SAlex Deucher * 11100c195119SAlex Deucher * Validates certain module parameters and updates 11110c195119SAlex Deucher * the associated values used by the driver (all asics). 11120c195119SAlex Deucher */ 11131109ca09SLauri Kasanen static void radeon_check_arguments(struct radeon_device *rdev) 111436421338SJerome Glisse { 111536421338SJerome Glisse /* vramlimit must be a power of two */ 1116*8c2d34ebSJonathan Gray if (!is_power_of_2(radeon_vram_limit)) { 111736421338SJerome Glisse dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", 111836421338SJerome Glisse radeon_vram_limit); 111936421338SJerome Glisse radeon_vram_limit = 0; 112036421338SJerome Glisse } 11211bcb04f7SChristian König 1122edcd26e8SAlex Deucher if (radeon_gart_size == -1) { 11235e3c4f90SGrigori Goronzy radeon_gart_size = radeon_gart_size_auto(rdev->family); 1124edcd26e8SAlex Deucher } 112536421338SJerome Glisse /* gtt size must be power of two and greater or equal to 32M */ 11261bcb04f7SChristian König if (radeon_gart_size < 32) { 1127edcd26e8SAlex Deucher dev_warn(rdev->dev, "gart size (%d) too small\n", 112836421338SJerome Glisse radeon_gart_size); 11295e3c4f90SGrigori Goronzy radeon_gart_size = radeon_gart_size_auto(rdev->family); 1130*8c2d34ebSJonathan Gray } else if (!is_power_of_2(radeon_gart_size)) { 113136421338SJerome Glisse dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", 113236421338SJerome Glisse radeon_gart_size); 11335e3c4f90SGrigori Goronzy radeon_gart_size = radeon_gart_size_auto(rdev->family); 113436421338SJerome Glisse } 11351bcb04f7SChristian König rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; 11361bcb04f7SChristian König 113736421338SJerome Glisse /* AGP mode can only be -1, 1, 2, 4, 8 */ 113836421338SJerome Glisse switch (radeon_agpmode) { 113936421338SJerome Glisse case -1: 114036421338SJerome Glisse case 0: 114136421338SJerome Glisse case 1: 114236421338SJerome Glisse case 2: 114336421338SJerome Glisse case 4: 114436421338SJerome Glisse case 8: 114536421338SJerome Glisse break; 114636421338SJerome Glisse default: 114736421338SJerome Glisse dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: " 114836421338SJerome Glisse "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode); 114936421338SJerome Glisse radeon_agpmode = 0; 115036421338SJerome Glisse break; 115136421338SJerome Glisse } 1152c1c44132SChristian König 1153*8c2d34ebSJonathan Gray if (!is_power_of_2(radeon_vm_size)) { 1154c1c44132SChristian König dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n", 1155c1c44132SChristian König radeon_vm_size); 115620b2656dSChristian König radeon_vm_size = 4; 1157c1c44132SChristian König } 1158c1c44132SChristian König 115920b2656dSChristian König if 
(radeon_vm_size < 1) { 116013c240efSAlexandre Demers dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n", 1161c1c44132SChristian König radeon_vm_size); 116220b2656dSChristian König radeon_vm_size = 4; 1163c1c44132SChristian König } 1164c1c44132SChristian König 1165c1c44132SChristian König /* 1166c1c44132SChristian König * The max GPUVM size for Cayman, SI and CI is 40 bits. 1167c1c44132SChristian König */ 116820b2656dSChristian König if (radeon_vm_size > 1024) { 116920b2656dSChristian König dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n", 1170c1c44132SChristian König radeon_vm_size); 117120b2656dSChristian König radeon_vm_size = 4; 1172c1c44132SChristian König } 11734510fb98SChristian König 11744510fb98SChristian König /* defines the number of bits in the page table versus the page directory; 11754510fb98SChristian König * a page is 4KB so we have a 12-bit offset, at least 9 bits in the 11764510fb98SChristian König * page table, and the remaining bits in the page directory */ 1177dfc230f9SChristian König if (radeon_vm_block_size == -1) { 1178dfc230f9SChristian König 1179dfc230f9SChristian König /* Total bits covered by PD + PTs, e.g. 20 bits for a 4GB VM (2^20 4KB pages) */ 11808e66e134SAlex Deucher unsigned bits = ilog2(radeon_vm_size) + 18; 1181dfc230f9SChristian König 1182dfc230f9SChristian König /* Make sure the PD is 4K in size up to an 8GB address space. 1183dfc230f9SChristian König Above that, split the bits equally between PD and PTs */ 1184dfc230f9SChristian König if (radeon_vm_size <= 8) 1185dfc230f9SChristian König radeon_vm_block_size = bits - 9; 1186dfc230f9SChristian König else 1187dfc230f9SChristian König radeon_vm_block_size = (bits + 3) / 2; 1188dfc230f9SChristian König 1189dfc230f9SChristian König } else if (radeon_vm_block_size < 9) { 119020b2656dSChristian König dev_warn(rdev->dev, "VM page table size (%d) too small\n", 11914510fb98SChristian König radeon_vm_block_size); 11924510fb98SChristian König radeon_vm_block_size = 9; 11934510fb98SChristian König } 11944510fb98SChristian König 11954510fb98SChristian König if (radeon_vm_block_size > 24 || 119620b2656dSChristian König (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) { 119720b2656dSChristian König dev_warn(rdev->dev, "VM page table size (%d) too large\n", 11984510fb98SChristian König radeon_vm_block_size); 11994510fb98SChristian König radeon_vm_block_size = 9; 12004510fb98SChristian König } 120136421338SJerome Glisse } 120236421338SJerome Glisse 12030c195119SAlex Deucher /** 12040c195119SAlex Deucher * radeon_switcheroo_set_state - set switcheroo state 12050c195119SAlex Deucher * 12060c195119SAlex Deucher * @pdev: pci dev pointer 12078e5de1d8SLukas Wunner * @state: vga_switcheroo state 12080c195119SAlex Deucher * 12090c195119SAlex Deucher * Callback for the switcheroo driver. Suspends or resumes the 12100c195119SAlex Deucher * asic before or after it is powered up using ACPI methods.
12110c195119SAlex Deucher */ 12126a9ee8afSDave Airlie static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 12136a9ee8afSDave Airlie { 12146a9ee8afSDave Airlie struct drm_device *dev = pci_get_drvdata(pdev); 121510ebc0bcSDave Airlie 121690c4cde9SAlex Deucher if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF) 121710ebc0bcSDave Airlie return; 121810ebc0bcSDave Airlie 12196a9ee8afSDave Airlie if (state == VGA_SWITCHEROO_ON) { 12207ca85295SJoe Perches pr_info("radeon: switched on\n"); 12216a9ee8afSDave Airlie /* don't suspend or resume card normally */ 12225bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1223d1f9809eSMaarten Lankhorst 122410ebc0bcSDave Airlie radeon_resume_kms(dev, true, true); 1225d1f9809eSMaarten Lankhorst 12265bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_ON; 1227fbf81762SDave Airlie drm_kms_helper_poll_enable(dev); 12286a9ee8afSDave Airlie } else { 12297ca85295SJoe Perches pr_info("radeon: switched off\n"); 1230fbf81762SDave Airlie drm_kms_helper_poll_disable(dev); 12315bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1232274ad65cSJérome Glisse radeon_suspend_kms(dev, true, true, false); 12335bcf719bSDave Airlie dev->switch_power_state = DRM_SWITCH_POWER_OFF; 12346a9ee8afSDave Airlie } 12356a9ee8afSDave Airlie } 12366a9ee8afSDave Airlie 12370c195119SAlex Deucher /** 12380c195119SAlex Deucher * radeon_switcheroo_can_switch - see if switcheroo state can change 12390c195119SAlex Deucher * 12400c195119SAlex Deucher * @pdev: pci dev pointer 12410c195119SAlex Deucher * 12420c195119SAlex Deucher * Callback for the switcheroo driver. Check if the switcheroo 12430c195119SAlex Deucher * state can be changed. 12440c195119SAlex Deucher * Returns true if the state can be changed, false if not. 12450c195119SAlex Deucher */ 12466a9ee8afSDave Airlie static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) 12476a9ee8afSDave Airlie { 12486a9ee8afSDave Airlie struct drm_device *dev = pci_get_drvdata(pdev); 12496a9ee8afSDave Airlie 1250fc8fd40eSDaniel Vetter /* 1251fc8fd40eSDaniel Vetter * FIXME: open_count is protected by drm_global_mutex but that would lead to 1252fc8fd40eSDaniel Vetter * locking inversion with the driver load path. And the access here is 1253fc8fd40eSDaniel Vetter * completely racy anyway. So don't bother with locking for now. 1254fc8fd40eSDaniel Vetter */ 12557e13ad89SChris Wilson return atomic_read(&dev->open_count) == 0; 12566a9ee8afSDave Airlie } 12576a9ee8afSDave Airlie 125826ec685fSTakashi Iwai static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = { 125926ec685fSTakashi Iwai .set_gpu_state = radeon_switcheroo_set_state, 126026ec685fSTakashi Iwai .reprobe = NULL, 126126ec685fSTakashi Iwai .can_switch = radeon_switcheroo_can_switch, 126226ec685fSTakashi Iwai }; 12636a9ee8afSDave Airlie 12640c195119SAlex Deucher /** 12650c195119SAlex Deucher * radeon_device_init - initialize the driver 12660c195119SAlex Deucher * 12670c195119SAlex Deucher * @rdev: radeon_device pointer 1268f017853eSLee Jones * @ddev: drm dev pointer 12690c195119SAlex Deucher * @pdev: pci dev pointer 12700c195119SAlex Deucher * @flags: driver flags 12710c195119SAlex Deucher * 12720c195119SAlex Deucher * Initializes the driver info and hw (all asics). 12730c195119SAlex Deucher * Returns 0 for success or an error on failure. 12740c195119SAlex Deucher * Called at driver startup.
12750c195119SAlex Deucher */ 1276771fe6b9SJerome Glisse int radeon_device_init(struct radeon_device *rdev, 1277771fe6b9SJerome Glisse struct drm_device *ddev, 1278771fe6b9SJerome Glisse struct pci_dev *pdev, 1279771fe6b9SJerome Glisse uint32_t flags) 1280771fe6b9SJerome Glisse { 1281351a52a2SAlex Deucher int r, i; 1282ad49f501SDave Airlie int dma_bits; 128310ebc0bcSDave Airlie bool runtime = false; 1284771fe6b9SJerome Glisse 1285771fe6b9SJerome Glisse rdev->shutdown = false; 12869f022ddfSJerome Glisse rdev->dev = &pdev->dev; 1287771fe6b9SJerome Glisse rdev->ddev = ddev; 1288771fe6b9SJerome Glisse rdev->pdev = pdev; 1289771fe6b9SJerome Glisse rdev->flags = flags; 1290771fe6b9SJerome Glisse rdev->family = flags & RADEON_FAMILY_MASK; 1291771fe6b9SJerome Glisse rdev->is_atom_bios = false; 1292771fe6b9SJerome Glisse rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; 1293edcd26e8SAlex Deucher rdev->mc.gtt_size = 512 * 1024 * 1024; 1294733289c2SJerome Glisse rdev->accel_working = false; 12958b25ed34SAlex Deucher /* set up ring ids */ 12968b25ed34SAlex Deucher for (i = 0; i < RADEON_NUM_RINGS; i++) { 12978b25ed34SAlex Deucher rdev->ring[i].idx = i; 12988b25ed34SAlex Deucher } 1299f54d1867SChris Wilson rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS); 13001b5331d9SJerome Glisse 1301fe0d36e0SAlex Deucher DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 1302d522d9ccSThomas Reim radeon_family_name[rdev->family], pdev->vendor, pdev->device, 1303fe0d36e0SAlex Deucher pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 13041b5331d9SJerome Glisse 1305771fe6b9SJerome Glisse /* all mutex initialization is done here so we 1306771fe6b9SJerome Glisse * can call these functions later without locking issues */ 1307d6999bc7SChristian König mutex_init(&rdev->ring_lock); 130840bacf16SAlex Deucher mutex_init(&rdev->dc_hw_i2c_mutex); 1309c20dc369SChristian Koenig atomic_set(&rdev->ih.lock, 0); 13104c788679SJerome Glisse mutex_init(&rdev->gem.mutex); 1311c913e23aSRafał Miłecki mutex_init(&rdev->pm.mutex); 13126759a0a7SMarek Olšák mutex_init(&rdev->gpu_clock_mutex); 1313f61d5b46SAlex Deucher mutex_init(&rdev->srbm_mutex); 1314db7fce39SChristian König init_rwsem(&rdev->pm.mclk_lock); 1315dee53e7fSJerome Glisse init_rwsem(&rdev->exclusive_lock); 131673a6d3fcSRafał Miłecki init_waitqueue_head(&rdev->irq.vblank_queue); 13171b9c3dd0SAlex Deucher r = radeon_gem_init(rdev); 13181b9c3dd0SAlex Deucher if (r) 13191b9c3dd0SAlex Deucher return r; 1320529364e0SChristian König 1321c1c44132SChristian König radeon_check_arguments(rdev); 132223d4f1f2SAlex Deucher /* Adjust VM size here. 1323c1c44132SChristian König * Max GPUVM size for cayman+ is 40 bits. 132423d4f1f2SAlex Deucher */ 132520b2656dSChristian König rdev->vm_manager.max_pfn = radeon_vm_size << 18; 1326771fe6b9SJerome Glisse 13274aac0473SJerome Glisse /* Set asic functions */ 13284aac0473SJerome Glisse r = radeon_asic_init(rdev); 132936421338SJerome Glisse if (r) 13304aac0473SJerome Glisse return r; 13314aac0473SJerome Glisse 1332f95df9caSAlex Deucher /* all of the newer IGP chips have an internal gart. 1333f95df9caSAlex Deucher * However, some rs4xx report as AGP, so remove that here.
1334f95df9caSAlex Deucher */ 1335f95df9caSAlex Deucher if ((rdev->family >= CHIP_RS400) && 1336f95df9caSAlex Deucher (rdev->flags & RADEON_IS_IGP)) { 1337f95df9caSAlex Deucher rdev->flags &= ~RADEON_IS_AGP; 1338f95df9caSAlex Deucher } 1339f95df9caSAlex Deucher 134030256a3fSJerome Glisse if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { 1341b574f251SJerome Glisse radeon_agp_disable(rdev); 1342771fe6b9SJerome Glisse } 1343771fe6b9SJerome Glisse 13449ed8b1f9SAlex Deucher /* Set the internal MC address mask 13459ed8b1f9SAlex Deucher * This is the max address of the GPU's 13469ed8b1f9SAlex Deucher * internal address space. 13479ed8b1f9SAlex Deucher */ 13489ed8b1f9SAlex Deucher if (rdev->family >= CHIP_CAYMAN) 13499ed8b1f9SAlex Deucher rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ 13509ed8b1f9SAlex Deucher else if (rdev->family >= CHIP_CEDAR) 13519ed8b1f9SAlex Deucher rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */ 13529ed8b1f9SAlex Deucher else 13539ed8b1f9SAlex Deucher rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */ 13549ed8b1f9SAlex Deucher 135533b3ad37SChristoph Hellwig /* set DMA mask. 1356ad49f501SDave Airlie * PCIE - can handle 40-bits. 1357005a83f1SAlex Deucher * IGP - can handle 40-bits 1358ad49f501SDave Airlie * AGP - generally dma32 is safest 1359005a83f1SAlex Deucher * PCI - dma32 for legacy pci gart, 40 bits on newer asics 1360ad49f501SDave Airlie */ 136133b3ad37SChristoph Hellwig dma_bits = 40; 1362ad49f501SDave Airlie if (rdev->flags & RADEON_IS_AGP) 136333b3ad37SChristoph Hellwig dma_bits = 32; 1364005a83f1SAlex Deucher if ((rdev->flags & RADEON_IS_PCI) && 13654a2b6662SJerome Glisse (rdev->family <= CHIP_RS740)) 136633b3ad37SChristoph Hellwig dma_bits = 32; 1367bcb0b981SBen Crocker #ifdef CONFIG_PPC64 1368bcb0b981SBen Crocker if (rdev->family == CHIP_CEDAR) 136933b3ad37SChristoph Hellwig dma_bits = 32; 1370bcb0b981SBen Crocker #endif 1371ad49f501SDave Airlie 137203127c58SChristoph Hellwig r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits)); 1373771fe6b9SJerome Glisse if (r) { 13747ca85295SJoe Perches pr_warn("radeon: No suitable DMA available\n"); 137503127c58SChristoph Hellwig return r; 1376c52494f6SKonrad Rzeszutek Wilk } 1377913b2cb7SMichael D Labriola rdev->need_swiotlb = drm_need_swiotlb(dma_bits); 1378771fe6b9SJerome Glisse 1379771fe6b9SJerome Glisse /* Registers mapping */ 1380771fe6b9SJerome Glisse /* TODO: block userspace mapping of io register */ 13812c385151SDaniel Vetter spin_lock_init(&rdev->mmio_idx_lock); 1382fe78118cSAlex Deucher spin_lock_init(&rdev->smc_idx_lock); 13830a5b7b0bSAlex Deucher spin_lock_init(&rdev->pll_idx_lock); 13840a5b7b0bSAlex Deucher spin_lock_init(&rdev->mc_idx_lock); 13850a5b7b0bSAlex Deucher spin_lock_init(&rdev->pcie_idx_lock); 13860a5b7b0bSAlex Deucher spin_lock_init(&rdev->pciep_idx_lock); 13870a5b7b0bSAlex Deucher spin_lock_init(&rdev->pif_idx_lock); 13880a5b7b0bSAlex Deucher spin_lock_init(&rdev->cg_idx_lock); 13890a5b7b0bSAlex Deucher spin_lock_init(&rdev->uvd_idx_lock); 13900a5b7b0bSAlex Deucher spin_lock_init(&rdev->rcu_idx_lock); 13910a5b7b0bSAlex Deucher spin_lock_init(&rdev->didt_idx_lock); 13920a5b7b0bSAlex Deucher spin_lock_init(&rdev->end_idx_lock); 1393efad86dbSAlex Deucher if (rdev->family >= CHIP_BONAIRE) { 1394efad86dbSAlex Deucher rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); 1395efad86dbSAlex Deucher rdev->rmmio_size = pci_resource_len(rdev->pdev, 5); 1396efad86dbSAlex Deucher } else { 139701d73a69SJordan Crouse rdev->rmmio_base = pci_resource_start(rdev->pdev, 2); 
139801d73a69SJordan Crouse rdev->rmmio_size = pci_resource_len(rdev->pdev, 2); 1399efad86dbSAlex Deucher } 1400771fe6b9SJerome Glisse rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size); 1401a33c1a82SAndy Shevchenko if (rdev->rmmio == NULL) 1402771fe6b9SJerome Glisse return -ENOMEM; 1403771fe6b9SJerome Glisse 140475efdee1SAlex Deucher /* doorbell bar mapping */ 140575efdee1SAlex Deucher if (rdev->family >= CHIP_BONAIRE) 140675efdee1SAlex Deucher radeon_doorbell_init(rdev); 140775efdee1SAlex Deucher 1408351a52a2SAlex Deucher /* io port mapping */ 1409351a52a2SAlex Deucher for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 1410351a52a2SAlex Deucher if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) { 1411351a52a2SAlex Deucher rdev->rio_mem_size = pci_resource_len(rdev->pdev, i); 1412351a52a2SAlex Deucher rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size); 1413351a52a2SAlex Deucher break; 1414351a52a2SAlex Deucher } 1415351a52a2SAlex Deucher } 1416351a52a2SAlex Deucher if (rdev->rio_mem == NULL) 1417351a52a2SAlex Deucher DRM_ERROR("Unable to find PCI I/O BAR\n"); 1418351a52a2SAlex Deucher 14194807c5a8SAlex Deucher if (rdev->flags & RADEON_IS_PX) 14204807c5a8SAlex Deucher radeon_device_handle_px_quirks(rdev); 14214807c5a8SAlex Deucher 142228d52043SDave Airlie /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 142393239ea1SDave Airlie /* this will fail for cards that aren't VGA class devices, just 142493239ea1SDave Airlie * ignore it */ 1425bf44e8ceSChristoph Hellwig vga_client_register(rdev->pdev, radeon_vga_set_decode); 142610ebc0bcSDave Airlie 1427bfaddd9fSAlex Deucher if (rdev->flags & RADEON_IS_PX) 142810ebc0bcSDave Airlie runtime = true; 14297ffb0ce3SLukas Wunner if (!pci_is_thunderbolt_attached(rdev->pdev)) 14307ffb0ce3SLukas Wunner vga_switcheroo_register_client(rdev->pdev, 14317ffb0ce3SLukas Wunner &radeon_switcheroo_ops, runtime); 143210ebc0bcSDave Airlie if (runtime) 143310ebc0bcSDave Airlie vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain); 143428d52043SDave Airlie 14353ce0a23dSJerome Glisse r = radeon_init(rdev); 1436b574f251SJerome Glisse if (r) 14372e97140dSAlex Deucher goto failed; 1438b1e3a6d1SMichel Dänzer 14395b54d679SNirmoy Das radeon_gem_debugfs_init(rdev); 14405b54d679SNirmoy Das radeon_mst_debugfs_init(rdev); 14419843ead0SDave Airlie 1442b574f251SJerome Glisse if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { 1443b574f251SJerome Glisse /* Acceleration not working on AGP card, try again 1444b574f251SJerome Glisse * with fallback to PCI or PCIE GART 1445b574f251SJerome Glisse */ 1446a2d07b74SJerome Glisse radeon_asic_reset(rdev); 1447b574f251SJerome Glisse radeon_fini(rdev); 1448b574f251SJerome Glisse radeon_agp_disable(rdev); 1449b574f251SJerome Glisse r = radeon_init(rdev); 14504aac0473SJerome Glisse if (r) 14512e97140dSAlex Deucher goto failed; 14523ce0a23dSJerome Glisse } 14536c7bcceaSAlex Deucher 145413a7d299SChristian König r = radeon_ib_ring_tests(rdev); 145513a7d299SChristian König if (r) 145613a7d299SChristian König DRM_ERROR("ib ring test failed (%d).\n", r); 145713a7d299SChristian König 14586dfd1972SJérôme Glisse /* 14596dfd1972SJérôme Glisse * Turks/Thames GPUs will freeze the whole laptop if DPM is not restarted 14606dfd1972SJérôme Glisse * after the CP ring has chewed on at least one packet. Hence we stop 14616dfd1972SJérôme Glisse * and restart DPM here, after radeon_ib_ring_tests().
14626dfd1972SJérôme Glisse */ 14636dfd1972SJérôme Glisse if (rdev->pm.dpm_enabled && 14646dfd1972SJérôme Glisse (rdev->pm.pm_method == PM_METHOD_DPM) && 14656dfd1972SJérôme Glisse (rdev->family == CHIP_TURKS) && 14666dfd1972SJérôme Glisse (rdev->flags & RADEON_IS_MOBILITY)) { 14676dfd1972SJérôme Glisse mutex_lock(&rdev->pm.mutex); 14686dfd1972SJérôme Glisse radeon_dpm_disable(rdev); 14696dfd1972SJérôme Glisse radeon_dpm_enable(rdev); 14706dfd1972SJérôme Glisse mutex_unlock(&rdev->pm.mutex); 14716dfd1972SJérôme Glisse } 14726dfd1972SJérôme Glisse 147360a7e396SChristian König if ((radeon_testing & 1)) { 14744a1132a0SAlex Deucher if (rdev->accel_working) 1475ecc0b326SMichel Dänzer radeon_test_moves(rdev); 14764a1132a0SAlex Deucher else 14774a1132a0SAlex Deucher DRM_INFO("radeon: acceleration disabled, skipping move tests\n"); 1478ecc0b326SMichel Dänzer } 147960a7e396SChristian König if ((radeon_testing & 2)) { 14804a1132a0SAlex Deucher if (rdev->accel_working) 148160a7e396SChristian König radeon_test_syncing(rdev); 14824a1132a0SAlex Deucher else 14834a1132a0SAlex Deucher DRM_INFO("radeon: acceleration disabled, skipping sync tests\n"); 148460a7e396SChristian König } 1485771fe6b9SJerome Glisse if (radeon_benchmarking) { 14864a1132a0SAlex Deucher if (rdev->accel_working) 1487638dd7dbSIlija Hadzic radeon_benchmark(rdev, radeon_benchmarking); 14884a1132a0SAlex Deucher else 14894a1132a0SAlex Deucher DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n"); 1490771fe6b9SJerome Glisse } 14916cf8a3f5SJerome Glisse return 0; 14922e97140dSAlex Deucher 14932e97140dSAlex Deucher failed: 1494b8751946SLukas Wunner /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */ 1495b8751946SLukas Wunner if (radeon_is_px(ddev)) 1496b8751946SLukas Wunner pm_runtime_put_noidle(ddev->dev); 14972e97140dSAlex Deucher if (runtime) 14982e97140dSAlex Deucher vga_switcheroo_fini_domain_pm_ops(rdev->dev); 14992e97140dSAlex Deucher return r; 1500771fe6b9SJerome Glisse } 1501771fe6b9SJerome Glisse 15020c195119SAlex Deucher /** 15030c195119SAlex Deucher * radeon_device_fini - tear down the driver 15040c195119SAlex Deucher * 15050c195119SAlex Deucher * @rdev: radeon_device pointer 15060c195119SAlex Deucher * 15070c195119SAlex Deucher * Tear down the driver info (all asics). 15080c195119SAlex Deucher * Called at driver shutdown. 
15090c195119SAlex Deucher */ 1510771fe6b9SJerome Glisse void radeon_device_fini(struct radeon_device *rdev) 1511771fe6b9SJerome Glisse { 1512771fe6b9SJerome Glisse DRM_INFO("radeon: finishing device.\n"); 1513771fe6b9SJerome Glisse rdev->shutdown = true; 151490aca4d2SJerome Glisse /* evict vram memory */ 151590aca4d2SJerome Glisse radeon_bo_evict_vram(rdev); 15163ce0a23dSJerome Glisse radeon_fini(rdev); 15177ffb0ce3SLukas Wunner if (!pci_is_thunderbolt_attached(rdev->pdev)) 15186a9ee8afSDave Airlie vga_switcheroo_unregister_client(rdev->pdev); 15192e97140dSAlex Deucher if (rdev->flags & RADEON_IS_PX) 15202e97140dSAlex Deucher vga_switcheroo_fini_domain_pm_ops(rdev->dev); 1521b8779475SChristoph Hellwig vga_client_unregister(rdev->pdev); 1522e0a2ca73SAlex Deucher if (rdev->rio_mem) 1523351a52a2SAlex Deucher pci_iounmap(rdev->pdev, rdev->rio_mem); 1524351a52a2SAlex Deucher rdev->rio_mem = NULL; 1525771fe6b9SJerome Glisse iounmap(rdev->rmmio); 1526771fe6b9SJerome Glisse rdev->rmmio = NULL; 152775efdee1SAlex Deucher if (rdev->family >= CHIP_BONAIRE) 152875efdee1SAlex Deucher radeon_doorbell_fini(rdev); 1529771fe6b9SJerome Glisse } 1530771fe6b9SJerome Glisse 1531771fe6b9SJerome Glisse 1532771fe6b9SJerome Glisse /* 1533771fe6b9SJerome Glisse * Suspend & resume. 1534771fe6b9SJerome Glisse */ 1535f017853eSLee Jones /* 15360c195119SAlex Deucher * radeon_suspend_kms - initiate device suspend 15370c195119SAlex Deucher * 15380c195119SAlex Deucher * Puts the hw in the suspend state (all asics). 15390c195119SAlex Deucher * Returns 0 for success or an error on failure. 15400c195119SAlex Deucher * Called at driver suspend. 15410c195119SAlex Deucher */ 1542274ad65cSJérome Glisse int radeon_suspend_kms(struct drm_device *dev, bool suspend, 1543274ad65cSJérome Glisse bool fbcon, bool freeze) 1544771fe6b9SJerome Glisse { 1545875c1866SDarren Jenkins struct radeon_device *rdev; 1546d86a4126SThomas Zimmermann struct pci_dev *pdev; 1547771fe6b9SJerome Glisse struct drm_crtc *crtc; 1548d8dcaa1dSAlex Deucher struct drm_connector *connector; 15497465280cSAlex Deucher int i, r; 1550771fe6b9SJerome Glisse 1551875c1866SDarren Jenkins if (dev == NULL || dev->dev_private == NULL) { 1552771fe6b9SJerome Glisse return -ENODEV; 1553771fe6b9SJerome Glisse } 15547473e830SDave Airlie 1555875c1866SDarren Jenkins rdev = dev->dev_private; 1556d86a4126SThomas Zimmermann pdev = to_pci_dev(dev->dev); 1557875c1866SDarren Jenkins 1558f2aba352SAlex Deucher if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 15596a9ee8afSDave Airlie return 0; 1560d8dcaa1dSAlex Deucher 156186698c20SSeth Forshee drm_kms_helper_poll_disable(dev); 156286698c20SSeth Forshee 15636adaed5bSDaniel Vetter drm_modeset_lock_all(dev); 1564d8dcaa1dSAlex Deucher /* turn off display hw */ 1565d8dcaa1dSAlex Deucher list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1566d8dcaa1dSAlex Deucher drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1567d8dcaa1dSAlex Deucher } 15686adaed5bSDaniel Vetter drm_modeset_unlock_all(dev); 1569d8dcaa1dSAlex Deucher 1570f3cbb17bSGrigori Goronzy /* unpin the front buffers and cursors */ 1571771fe6b9SJerome Glisse list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1572f3cbb17bSGrigori Goronzy struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 15739a0f0c9dSDaniel Stone struct drm_framebuffer *fb = crtc->primary->fb; 15744c788679SJerome Glisse struct radeon_bo *robj; 1575771fe6b9SJerome Glisse 1576f3cbb17bSGrigori Goronzy if (radeon_crtc->cursor_bo) { 1577f3cbb17bSGrigori Goronzy struct radeon_bo 
*robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); 1578f3cbb17bSGrigori Goronzy r = radeon_bo_reserve(robj, false); 1579f3cbb17bSGrigori Goronzy if (r == 0) { 1580f3cbb17bSGrigori Goronzy radeon_bo_unpin(robj); 1581f3cbb17bSGrigori Goronzy radeon_bo_unreserve(robj); 1582f3cbb17bSGrigori Goronzy } 1583f3cbb17bSGrigori Goronzy } 1584f3cbb17bSGrigori Goronzy 15859a0f0c9dSDaniel Stone if (fb == NULL || fb->obj[0] == NULL) { 1586771fe6b9SJerome Glisse continue; 1587771fe6b9SJerome Glisse } 15889a0f0c9dSDaniel Stone robj = gem_to_radeon_bo(fb->obj[0]); 158938651674SDave Airlie /* don't unpin kernel fb objects */ 159038651674SDave Airlie if (!radeon_fbdev_robj_is_fb(rdev, robj)) { 15914c788679SJerome Glisse r = radeon_bo_reserve(robj, false); 159238651674SDave Airlie if (r == 0) { 15934c788679SJerome Glisse radeon_bo_unpin(robj); 15944c788679SJerome Glisse radeon_bo_unreserve(robj); 15954c788679SJerome Glisse } 1596771fe6b9SJerome Glisse } 1597771fe6b9SJerome Glisse } 1598771fe6b9SJerome Glisse /* evict vram memory */ 15994c788679SJerome Glisse radeon_bo_evict_vram(rdev); 16008a47cc9eSChristian König 1601771fe6b9SJerome Glisse /* wait for gpu to finish processing current batch */ 16025f8f635eSJerome Glisse for (i = 0; i < RADEON_NUM_RINGS; i++) { 160337615527SChristian König r = radeon_fence_wait_empty(rdev, i); 16045f8f635eSJerome Glisse if (r) { 16055f8f635eSJerome Glisse /* delay GPU reset to resume */ 1606eb98c709SChristian König radeon_fence_driver_force_completion(rdev, i); 16075f8f635eSJerome Glisse } 16085f8f635eSJerome Glisse } 1609771fe6b9SJerome Glisse 1610f657c2a7SYang Zhao radeon_save_bios_scratch_regs(rdev); 1611f657c2a7SYang Zhao 16123ce0a23dSJerome Glisse radeon_suspend(rdev); 1613d4877cf2SAlex Deucher radeon_hpd_fini(rdev); 1614ec9aaaffSAlex Deucher /* evict remaining vram memory 1615ec9aaaffSAlex Deucher * This second call to evict vram is to evict the gart page table 1616ec9aaaffSAlex Deucher * using the CPU. 1617ec9aaaffSAlex Deucher */ 16184c788679SJerome Glisse radeon_bo_evict_vram(rdev); 1619771fe6b9SJerome Glisse 162010b06122SJerome Glisse radeon_agp_suspend(rdev); 162110b06122SJerome Glisse 1622d86a4126SThomas Zimmermann pci_save_state(pdev); 162382060854SAlex Deucher if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) { 1624274ad65cSJérome Glisse rdev->asic->asic_reset(rdev, true); 1625d86a4126SThomas Zimmermann pci_restore_state(pdev); 1626274ad65cSJérome Glisse } else if (suspend) { 1627771fe6b9SJerome Glisse /* Shut down the device */ 1628d86a4126SThomas Zimmermann pci_disable_device(pdev); 1629d86a4126SThomas Zimmermann pci_set_power_state(pdev, PCI_D3hot); 1630771fe6b9SJerome Glisse } 163110ebc0bcSDave Airlie 163210ebc0bcSDave Airlie if (fbcon) { 1633ac751efaSTorben Hohn console_lock(); 163438651674SDave Airlie radeon_fbdev_set_suspend(rdev, 1); 1635ac751efaSTorben Hohn console_unlock(); 163610ebc0bcSDave Airlie } 1637771fe6b9SJerome Glisse return 0; 1638771fe6b9SJerome Glisse } 1639771fe6b9SJerome Glisse 1640f017853eSLee Jones /* 16410c195119SAlex Deucher * radeon_resume_kms - initiate device resume 16420c195119SAlex Deucher * 16430c195119SAlex Deucher * Bring the hw back to operating state (all asics). 16440c195119SAlex Deucher * Returns 0 for success or an error on failure. 16450c195119SAlex Deucher * Called at driver resume. 
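 * @dev: drm dev pointer
 * @resume: put the PCI device back in D0, restore its state and re-enable it
 * @fbcon: also resume the fbdev console and force a modeset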
16460c195119SAlex Deucher */ 164710ebc0bcSDave Airlie int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) 1648771fe6b9SJerome Glisse { 164909bdf591SCedric Godin struct drm_connector *connector; 1650771fe6b9SJerome Glisse struct radeon_device *rdev = dev->dev_private; 1651d86a4126SThomas Zimmermann struct pci_dev *pdev = to_pci_dev(dev->dev); 1652f3cbb17bSGrigori Goronzy struct drm_crtc *crtc; 165304eb2206SChristian König int r; 1654771fe6b9SJerome Glisse 1655f2aba352SAlex Deucher if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 16566a9ee8afSDave Airlie return 0; 16576a9ee8afSDave Airlie 165810ebc0bcSDave Airlie if (fbcon) { 1659ac751efaSTorben Hohn console_lock(); 166010ebc0bcSDave Airlie } 16617473e830SDave Airlie if (resume) { 1662d86a4126SThomas Zimmermann pci_set_power_state(pdev, PCI_D0); 1663d86a4126SThomas Zimmermann pci_restore_state(pdev); 1664d86a4126SThomas Zimmermann if (pci_enable_device(pdev)) { 166510ebc0bcSDave Airlie if (fbcon) 1666ac751efaSTorben Hohn console_unlock(); 1667771fe6b9SJerome Glisse return -1; 1668771fe6b9SJerome Glisse } 16697473e830SDave Airlie } 16700ebf1717SDave Airlie /* resume AGP if in use */ 16710ebf1717SDave Airlie radeon_agp_resume(rdev); 16723ce0a23dSJerome Glisse radeon_resume(rdev); 167304eb2206SChristian König 167404eb2206SChristian König r = radeon_ib_ring_tests(rdev); 167504eb2206SChristian König if (r) 167604eb2206SChristian König DRM_ERROR("ib ring test failed (%d).\n", r); 167704eb2206SChristian König 1678bc6a6295SAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 16796c7bcceaSAlex Deucher /* do dpm late init */ 16806c7bcceaSAlex Deucher r = radeon_pm_late_init(rdev); 16816c7bcceaSAlex Deucher if (r) { 16826c7bcceaSAlex Deucher rdev->pm.dpm_enabled = false; 16836c7bcceaSAlex Deucher DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); 16846c7bcceaSAlex Deucher } 1685bc6a6295SAlex Deucher } else { 1686bc6a6295SAlex Deucher /* resume old pm late */ 1687bc6a6295SAlex Deucher radeon_pm_resume(rdev); 16886c7bcceaSAlex Deucher } 16896c7bcceaSAlex Deucher 1690f657c2a7SYang Zhao radeon_restore_bios_scratch_regs(rdev); 169109bdf591SCedric Godin 1692f3cbb17bSGrigori Goronzy /* pin cursors */ 1693f3cbb17bSGrigori Goronzy list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1694f3cbb17bSGrigori Goronzy struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1695f3cbb17bSGrigori Goronzy 1696f3cbb17bSGrigori Goronzy if (radeon_crtc->cursor_bo) { 1697f3cbb17bSGrigori Goronzy struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); 1698f3cbb17bSGrigori Goronzy r = radeon_bo_reserve(robj, false); 1699f3cbb17bSGrigori Goronzy if (r == 0) { 1700f3cbb17bSGrigori Goronzy /* Only 27 bit offset for legacy cursor */ 1701f3cbb17bSGrigori Goronzy r = radeon_bo_pin_restricted(robj, 1702f3cbb17bSGrigori Goronzy RADEON_GEM_DOMAIN_VRAM, 1703f3cbb17bSGrigori Goronzy ASIC_IS_AVIVO(rdev) ? 
1704f3cbb17bSGrigori Goronzy 0 : 1 << 27, 1705f3cbb17bSGrigori Goronzy &radeon_crtc->cursor_addr); 1706f3cbb17bSGrigori Goronzy if (r != 0) 1707f3cbb17bSGrigori Goronzy DRM_ERROR("Failed to pin cursor BO (%d)\n", r); 1708f3cbb17bSGrigori Goronzy radeon_bo_unreserve(robj); 1709f3cbb17bSGrigori Goronzy } 1710f3cbb17bSGrigori Goronzy } 1711f3cbb17bSGrigori Goronzy } 1712f3cbb17bSGrigori Goronzy 17133fa47d9eSAlex Deucher /* init dig PHYs, disp eng pll */ 17143fa47d9eSAlex Deucher if (rdev->is_atom_bios) { 1715ac89af1eSAlex Deucher radeon_atom_encoder_init(rdev); 1716f3f1f03eSAlex Deucher radeon_atom_disp_eng_pll_init(rdev); 1717bced76f2SAlex Deucher /* turn on the BL */ 1718bced76f2SAlex Deucher if (rdev->mode_info.bl_encoder) { 1719bced76f2SAlex Deucher u8 bl_level = radeon_get_backlight_level(rdev, 1720bced76f2SAlex Deucher rdev->mode_info.bl_encoder); 1721bced76f2SAlex Deucher radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, 1722bced76f2SAlex Deucher bl_level); 1723bced76f2SAlex Deucher } 17243fa47d9eSAlex Deucher } 1725d4877cf2SAlex Deucher /* reset hpd state */ 1726d4877cf2SAlex Deucher radeon_hpd_init(rdev); 1727771fe6b9SJerome Glisse /* blat the mode back in */ 1728ec9954fcSDave Airlie if (fbcon) { 1729771fe6b9SJerome Glisse drm_helper_resume_force_mode(dev); 1730a93f344dSAlex Deucher /* turn on display hw */ 17316adaed5bSDaniel Vetter drm_modeset_lock_all(dev); 1732a93f344dSAlex Deucher list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1733a93f344dSAlex Deucher drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 1734a93f344dSAlex Deucher } 17356adaed5bSDaniel Vetter drm_modeset_unlock_all(dev); 1736ec9954fcSDave Airlie } 173786698c20SSeth Forshee 173886698c20SSeth Forshee drm_kms_helper_poll_enable(dev); 173918ee37a4SDaniel Vetter 17403640da2fSAlex Deucher /* set the power state here in case we are a PX system or headless */ 17413640da2fSAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) 17423640da2fSAlex Deucher radeon_pm_compute_clocks(rdev); 17433640da2fSAlex Deucher 174418ee37a4SDaniel Vetter if (fbcon) { 174518ee37a4SDaniel Vetter radeon_fbdev_set_suspend(rdev, 0); 174618ee37a4SDaniel Vetter console_unlock(); 174718ee37a4SDaniel Vetter } 174818ee37a4SDaniel Vetter 1749771fe6b9SJerome Glisse return 0; 1750771fe6b9SJerome Glisse } 1751771fe6b9SJerome Glisse 17520c195119SAlex Deucher /** 17530c195119SAlex Deucher * radeon_gpu_reset - reset the asic 17540c195119SAlex Deucher * 17550c195119SAlex Deucher * @rdev: radeon device pointer 17560c195119SAlex Deucher * 17570c195119SAlex Deucher * Attempt to reset the GPU if it has hung (all asics). 17580c195119SAlex Deucher * Returns 0 for success or an error on failure.
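 * May return -EAGAIN if ring contents were saved but the IB ring tests fail after the reset; in that case rdev->needs_reset stays set so the reset is retried.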
17590c195119SAlex Deucher */ 176090aca4d2SJerome Glisse int radeon_gpu_reset(struct radeon_device *rdev) 176190aca4d2SJerome Glisse { 176255d7c221SChristian König unsigned ring_sizes[RADEON_NUM_RINGS]; 176355d7c221SChristian König uint32_t *ring_data[RADEON_NUM_RINGS]; 176455d7c221SChristian König 176555d7c221SChristian König bool saved = false; 176655d7c221SChristian König 176755d7c221SChristian König int i, r; 17688fd1b84cSDave Airlie int resched; 176990aca4d2SJerome Glisse 1770dee53e7fSJerome Glisse down_write(&rdev->exclusive_lock); 1771f9eaf9aeSChristian König 1772f9eaf9aeSChristian König if (!rdev->needs_reset) { 1773f9eaf9aeSChristian König up_write(&rdev->exclusive_lock); 1774f9eaf9aeSChristian König return 0; 1775f9eaf9aeSChristian König } 1776f9eaf9aeSChristian König 177772b9076bSMarek Olšák atomic_inc(&rdev->gpu_reset_counter); 177872b9076bSMarek Olšák 177990aca4d2SJerome Glisse radeon_save_bios_scratch_regs(rdev); 17808fd1b84cSDave Airlie /* block TTM */ 17818fd1b84cSDave Airlie resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 178290aca4d2SJerome Glisse radeon_suspend(rdev); 178373ef0e0dSAlex Deucher radeon_hpd_fini(rdev); 178490aca4d2SJerome Glisse 178555d7c221SChristian König for (i = 0; i < RADEON_NUM_RINGS; ++i) { 178655d7c221SChristian König ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], 178755d7c221SChristian König &ring_data[i]); 178855d7c221SChristian König if (ring_sizes[i]) { 178955d7c221SChristian König saved = true; 179055d7c221SChristian König dev_info(rdev->dev, "Saved %d dwords of commands " 179155d7c221SChristian König "on ring %d.\n", ring_sizes[i], i); 179255d7c221SChristian König } 179355d7c221SChristian König } 179455d7c221SChristian König 179590aca4d2SJerome Glisse r = radeon_asic_reset(rdev); 179690aca4d2SJerome Glisse if (!r) { 179755d7c221SChristian König dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n"); 179890aca4d2SJerome Glisse radeon_resume(rdev); 179955d7c221SChristian König } 180004eb2206SChristian König 180190aca4d2SJerome Glisse radeon_restore_bios_scratch_regs(rdev); 180255d7c221SChristian König 180355d7c221SChristian König for (i = 0; i < RADEON_NUM_RINGS; ++i) { 18049bb39ff4SMaarten Lankhorst if (!r && ring_data[i]) { 180555d7c221SChristian König radeon_ring_restore(rdev, &rdev->ring[i], 180655d7c221SChristian König ring_sizes[i], ring_data[i]); 180755d7c221SChristian König } else { 1808eb98c709SChristian König radeon_fence_driver_force_completion(rdev, i); 180955d7c221SChristian König kfree(ring_data[i]); 181055d7c221SChristian König } 181155d7c221SChristian König } 181255d7c221SChristian König 1813c940b447SAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 1814c940b447SAlex Deucher /* do dpm late init */ 1815c940b447SAlex Deucher r = radeon_pm_late_init(rdev); 1816c940b447SAlex Deucher if (r) { 1817c940b447SAlex Deucher rdev->pm.dpm_enabled = false; 1818c940b447SAlex Deucher DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); 1819c940b447SAlex Deucher } 1820c940b447SAlex Deucher } else { 1821c940b447SAlex Deucher /* resume old pm late */ 182295f59509SAlex Deucher radeon_pm_resume(rdev); 1823c940b447SAlex Deucher } 1824c940b447SAlex Deucher 182573ef0e0dSAlex Deucher /* init dig PHYs, disp eng pll */ 182673ef0e0dSAlex Deucher if (rdev->is_atom_bios) { 182773ef0e0dSAlex Deucher radeon_atom_encoder_init(rdev); 182873ef0e0dSAlex Deucher radeon_atom_disp_eng_pll_init(rdev); 182973ef0e0dSAlex Deucher /* turn on the BL */ 183073ef0e0dSAlex Deucher if (rdev->mode_info.bl_encoder) 
{ 183173ef0e0dSAlex Deucher u8 bl_level = radeon_get_backlight_level(rdev, 183273ef0e0dSAlex Deucher rdev->mode_info.bl_encoder); 183373ef0e0dSAlex Deucher radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, 183473ef0e0dSAlex Deucher bl_level); 183573ef0e0dSAlex Deucher } 183673ef0e0dSAlex Deucher } 183773ef0e0dSAlex Deucher /* reset hpd state */ 183873ef0e0dSAlex Deucher radeon_hpd_init(rdev); 183973ef0e0dSAlex Deucher 18409bb39ff4SMaarten Lankhorst ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 18413c036389SChristian König 18423c036389SChristian König rdev->in_reset = true; 18433c036389SChristian König rdev->needs_reset = false; 18443c036389SChristian König 18459bb39ff4SMaarten Lankhorst downgrade_write(&rdev->exclusive_lock); 18469bb39ff4SMaarten Lankhorst 1847d3493574SJerome Glisse drm_helper_resume_force_mode(rdev->ddev); 1848d3493574SJerome Glisse 1849c940b447SAlex Deucher /* set the power state here in case we are a PX system or headless */ 1850c940b447SAlex Deucher if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) 1851c940b447SAlex Deucher radeon_pm_compute_clocks(rdev); 1852c940b447SAlex Deucher 18539bb39ff4SMaarten Lankhorst if (!r) { 18549bb39ff4SMaarten Lankhorst r = radeon_ib_ring_tests(rdev); 18559bb39ff4SMaarten Lankhorst if (r && saved) 18569bb39ff4SMaarten Lankhorst r = -EAGAIN; 18579bb39ff4SMaarten Lankhorst } else { 185890aca4d2SJerome Glisse /* bad news, how to tell it to userspace ? */ 185990aca4d2SJerome Glisse dev_info(rdev->dev, "GPU reset failed\n"); 18607a1619b9SMichel Dänzer } 18617a1619b9SMichel Dänzer 18629bb39ff4SMaarten Lankhorst rdev->needs_reset = r == -EAGAIN; 18639bb39ff4SMaarten Lankhorst rdev->in_reset = false; 18649bb39ff4SMaarten Lankhorst 18659bb39ff4SMaarten Lankhorst up_read(&rdev->exclusive_lock); 186690aca4d2SJerome Glisse return r; 186790aca4d2SJerome Glisse } 1868