/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch registers helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}
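/*
 * Illustrative usage sketch for the scratch register helpers above (the
 * values and packet emission are hypothetical, not from this file): a caller
 * grabs a free scratch register, writes a known value, has the GPU overwrite
 * it from the command stream, then returns the register to the pool.
 *
 *	uint32_t scratch;
 *
 *	if (radeon_scratch_get(rdev, &scratch) == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		... emit packets that make the GPU write to 'scratch' ...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */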

void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		/* often unreliable on AGP */
		if (rdev->flags & RADEON_IS_AGP) {
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
		}
	}
	/* always use writeback/events on NI */
	if (ASIC_IS_DCE5(rdev)) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as a parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail; the worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882 along with lots of ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
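
/*
 * Worked example for the placement helpers above (hypothetical values, for
 * illustration only): with mc->vram_start = 0, mc->mc_vram_size = 256M and
 * mc->gtt_base_align = 0, radeon_vram_location() sets vram_end = 0x0FFFFFFF;
 * radeon_gtt_location() then computes size_bf = 0 and size_af = 0xF0000000,
 * so the GTT is placed after VRAM at gtt_start = 0x10000000.
 */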

/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in MHz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(MHz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
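
/*
 * Note on the fixed-point math above (assuming, as elsewhere in this driver,
 * that current_sclk/current_mclk are reported in units of 10 kHz): e.g. a
 * current_sclk of 60000 divided by 100 yields a 20.12 fixed-point
 * rdev->pm.sclk value representing 600, i.e. a 600 MHz engine clock.
 */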
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be a power of two and greater or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
			 radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_resume_kms(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}


int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device);

	/* mutex initializations are all done here so we
	 * can recall functions without having locking issues */
	mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       NULL,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if (radeon_testing) {
		radeon_test_moves(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev);
	}
	return 0;
}

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
}


/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	radeon_fence_wait_last(rdev);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}
	return 0;
}

int radeon_gpu_reset(struct radeon_device *rdev)
{
	int r;

	radeon_save_bios_scratch_regs(rdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded\n");
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		return 0;
	}
	/* bad news, how do we tell it to userspace? */
	dev_info(rdev->dev, "GPU reset failed\n");
	return r;
}


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list *files;
	unsigned num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		if (_radeon_debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}
	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
		DRM_ERROR("Reached maximum number of debugfs files.\n");
		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
		return -EINVAL;
	}
	_radeon_debugfs[_radeon_debugfs_count].files = files;
	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
	_radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		drm_debugfs_remove_files(_radeon_debugfs[i].files,
					 _radeon_debugfs[i].num_files, minor);
	}
}
#endif