/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
Glisse "RV620", 721b5331d9SJerome Glisse "RV635", 731b5331d9SJerome Glisse "RS780", 741b5331d9SJerome Glisse "RS880", 751b5331d9SJerome Glisse "RV770", 761b5331d9SJerome Glisse "RV730", 771b5331d9SJerome Glisse "RV710", 781b5331d9SJerome Glisse "RV740", 791b5331d9SJerome Glisse "CEDAR", 801b5331d9SJerome Glisse "REDWOOD", 811b5331d9SJerome Glisse "JUNIPER", 821b5331d9SJerome Glisse "CYPRESS", 831b5331d9SJerome Glisse "HEMLOCK", 841b5331d9SJerome Glisse "LAST", 851b5331d9SJerome Glisse }; 861b5331d9SJerome Glisse 87771fe6b9SJerome Glisse /* 88b1e3a6d1SMichel Dänzer * Clear GPU surface registers. 89b1e3a6d1SMichel Dänzer */ 903ce0a23dSJerome Glisse void radeon_surface_init(struct radeon_device *rdev) 91b1e3a6d1SMichel Dänzer { 92b1e3a6d1SMichel Dänzer /* FIXME: check this out */ 93b1e3a6d1SMichel Dänzer if (rdev->family < CHIP_R600) { 94b1e3a6d1SMichel Dänzer int i; 95b1e3a6d1SMichel Dänzer 96550e2d92SDave Airlie for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 97550e2d92SDave Airlie if (rdev->surface_regs[i].bo) 98550e2d92SDave Airlie radeon_bo_get_surface_reg(rdev->surface_regs[i].bo); 99550e2d92SDave Airlie else 100550e2d92SDave Airlie radeon_clear_surface_reg(rdev, i); 101b1e3a6d1SMichel Dänzer } 102e024e110SDave Airlie /* enable surfaces */ 103e024e110SDave Airlie WREG32(RADEON_SURFACE_CNTL, 0); 104b1e3a6d1SMichel Dänzer } 105b1e3a6d1SMichel Dänzer } 106b1e3a6d1SMichel Dänzer 107b1e3a6d1SMichel Dänzer /* 108771fe6b9SJerome Glisse * GPU scratch registers helpers function. 109771fe6b9SJerome Glisse */ 1103ce0a23dSJerome Glisse void radeon_scratch_init(struct radeon_device *rdev) 111771fe6b9SJerome Glisse { 112771fe6b9SJerome Glisse int i; 113771fe6b9SJerome Glisse 114771fe6b9SJerome Glisse /* FIXME: check this out */ 115771fe6b9SJerome Glisse if (rdev->family < CHIP_R300) { 116771fe6b9SJerome Glisse rdev->scratch.num_reg = 5; 117771fe6b9SJerome Glisse } else { 118771fe6b9SJerome Glisse rdev->scratch.num_reg = 7; 119771fe6b9SJerome Glisse } 120771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 121771fe6b9SJerome Glisse rdev->scratch.free[i] = true; 122771fe6b9SJerome Glisse rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4); 123771fe6b9SJerome Glisse } 124771fe6b9SJerome Glisse } 125771fe6b9SJerome Glisse 126771fe6b9SJerome Glisse int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) 127771fe6b9SJerome Glisse { 128771fe6b9SJerome Glisse int i; 129771fe6b9SJerome Glisse 130771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 131771fe6b9SJerome Glisse if (rdev->scratch.free[i]) { 132771fe6b9SJerome Glisse rdev->scratch.free[i] = false; 133771fe6b9SJerome Glisse *reg = rdev->scratch.reg[i]; 134771fe6b9SJerome Glisse return 0; 135771fe6b9SJerome Glisse } 136771fe6b9SJerome Glisse } 137771fe6b9SJerome Glisse return -EINVAL; 138771fe6b9SJerome Glisse } 139771fe6b9SJerome Glisse 140771fe6b9SJerome Glisse void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) 141771fe6b9SJerome Glisse { 142771fe6b9SJerome Glisse int i; 143771fe6b9SJerome Glisse 144771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 145771fe6b9SJerome Glisse if (rdev->scratch.reg[i] == reg) { 146771fe6b9SJerome Glisse rdev->scratch.free[i] = true; 147771fe6b9SJerome Glisse return; 148771fe6b9SJerome Glisse } 149771fe6b9SJerome Glisse } 150771fe6b9SJerome Glisse } 151771fe6b9SJerome Glisse 152d594e46aSJerome Glisse /** 153d594e46aSJerome Glisse * radeon_vram_location - try to find VRAM location 154d594e46aSJerome Glisse * @rdev: radeon device structure 
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided as
 * parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail; the worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (ie not affected by the bogus hw of Novell bug 204882 along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be power of two and greater or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct radeon_device *rdev = dev->dev_private;
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		rdev->powered_down = false;
		radeon_resume_kms(dev);
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		radeon_suspend_kms(dev, pmm);
		/* don't suspend or resume card normally */
		rdev->powered_down = true;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}


int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device);

	/* mutex initialization is all done here so we
	 * can recall functions without having locking issues */
	mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);

	/* setup workqueue */
	rdev->wq = create_workqueue("radeon");
	if (rdev->wq == NULL)
		return -ENOMEM;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart.
	 * However, some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have more than one VGA card, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card, try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if (radeon_testing) {
		radeon_test_moves(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev);
	}
	return 0;
}

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	destroy_workqueue(rdev->wq);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
}


/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (rdev->powered_down)
		return 0;

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = rfb->obj->driver_private;
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	radeon_fence_wait_last(rdev);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	acquire_console_sem();
	radeon_fbdev_set_suspend(rdev, 1);
	release_console_sem();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->powered_down)
		return 0;

	acquire_console_sem();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		release_console_sem();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	radeon_fbdev_set_suspend(rdev, 0);
	release_console_sem();

	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	return 0;
}

int radeon_gpu_reset(struct radeon_device *rdev)
{
	int r;

	radeon_save_bios_scratch_regs(rdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded\n");
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		return 0;
	}
	/* bad news, how do we tell it to userspace? */
	dev_info(rdev->dev, "GPU reset failed\n");
	return r;
}


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		if (_radeon_debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}
	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
		DRM_ERROR("Reached maximum number of debugfs files.\n");
		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
		return -EINVAL;
	}
	_radeon_debugfs[_radeon_debugfs_count].files = files;
	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
	_radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		drm_debugfs_remove_files(_radeon_debugfs[i].files,
					 _radeon_debugfs[i].num_files, minor);
	}
}
#endif