/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
	}
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, the worst case is limiting VRAM.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882, along with lots of ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and the ubuntu
 * ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = 0xFFFFFFFF - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = mc->vram_start - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = mc->vram_end + 1;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
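/*
 * radeon_card_posted - check if the GPU has already been initialized
 *
 * Returns true if the VBIOS (or a previous driver) has already posted the
 * card, i.e. at least one CRTC is enabled or the memory controller reports
 * a non-zero memory size.
 */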
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk, mclk;

	if (rdev->flags & RADEON_IS_IGP) {
		sclk = radeon_get_engine_clock(rdev);
		mclk = rdev->clock.default_mclk;

		a.full = rfixed_const(100);
		rdev->pm.sclk.full = rfixed_const(sclk);
		rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
		rdev->pm.mclk.full = rfixed_const(mclk);
		rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);

		a.full = rfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
	} else {
		sclk = radeon_get_engine_clock(rdev);
		mclk = radeon_get_memory_clock(rdev);

		a.full = rfixed_const(100);
		rdev->pm.sclk.full = rfixed_const(sclk);
		rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
		rdev->pm.mclk.full = rfixed_const(mclk);
		rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
	}
}

bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (!rdev->dummy_page.addr) {
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

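/*
 * Validate the module parameters: the vram limit and gart size must be
 * powers of two (gart size at least 32M) and agpmode must be one of
 * -1, 0, 1, 2, 4 or 8; invalid values are reset to safe defaults.
 */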
void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be a power of two and greater than or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

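/*
 * vga_switcheroo callback: bring the device back up when the switch selects
 * this GPU and suspend it when switching away, bypassing the normal
 * suspend/resume paths via the powered_down flag.
 */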
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct radeon_device *rdev = dev->dev_private;
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		rdev->powered_down = false;
		radeon_resume_kms(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		radeon_suspend_kms(dev, pmm);
		/* don't suspend or resume card normally */
		rdev->powered_down = true;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}


int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device);

	/* mutex initializations are all done here so we
	 * can recall functions without having locking issues */
	mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	rwlock_init(&rdev->fence_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);

	/* setup workqueue */
	rdev->wq = create_workqueue("radeon");
	if (rdev->wq == NULL)
		return -ENOMEM;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart.
	 * However, some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
	rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* if we have more than one VGA card, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card, try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_gpu_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if (radeon_testing) {
		radeon_test_moves(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev);
	}
	return 0;
}

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	radeon_fini(rdev);
	destroy_workqueue(rdev->wq);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
}


/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (rdev->powered_down)
		return 0;
	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = rfb->obj->driver_private;
		if (robj != rdev->fbdev_rbo) {
			r = radeon_bo_reserve(robj, false);
			if (unlikely(r == 0)) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	radeon_fence_wait_last(rdev);

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	acquire_console_sem();
	fb_set_suspend(rdev->fbdev_info, 1);
	release_console_sem();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->powered_down)
		return 0;

	acquire_console_sem();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		release_console_sem();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);
	fb_set_suspend(rdev->fbdev_info, 0);
	release_console_sem();

	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	return 0;
}


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		if (_radeon_debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}
	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
		DRM_ERROR("Reached maximum number of debugfs files.\n");
		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
		return -EINVAL;
	}
	_radeon_debugfs[_radeon_debugfs_count].files = files;
	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
	_radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		drm_debugfs_remove_files(_radeon_debugfs[i].files,
					 _radeon_debugfs[i].num_files, minor);
	}
}
#endif