/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
Glisse "RV620", 721b5331d9SJerome Glisse "RV635", 731b5331d9SJerome Glisse "RS780", 741b5331d9SJerome Glisse "RS880", 751b5331d9SJerome Glisse "RV770", 761b5331d9SJerome Glisse "RV730", 771b5331d9SJerome Glisse "RV710", 781b5331d9SJerome Glisse "RV740", 791b5331d9SJerome Glisse "CEDAR", 801b5331d9SJerome Glisse "REDWOOD", 811b5331d9SJerome Glisse "JUNIPER", 821b5331d9SJerome Glisse "CYPRESS", 831b5331d9SJerome Glisse "HEMLOCK", 84b08ebe7eSAlex Deucher "PALM", 854df64e65SAlex Deucher "SUMO", 864df64e65SAlex Deucher "SUMO2", 871fe18305SAlex Deucher "BARTS", 881fe18305SAlex Deucher "TURKS", 891fe18305SAlex Deucher "CAICOS", 90b7cfc9feSAlex Deucher "CAYMAN", 911b5331d9SJerome Glisse "LAST", 921b5331d9SJerome Glisse }; 931b5331d9SJerome Glisse 94771fe6b9SJerome Glisse /* 95b1e3a6d1SMichel Dänzer * Clear GPU surface registers. 96b1e3a6d1SMichel Dänzer */ 973ce0a23dSJerome Glisse void radeon_surface_init(struct radeon_device *rdev) 98b1e3a6d1SMichel Dänzer { 99b1e3a6d1SMichel Dänzer /* FIXME: check this out */ 100b1e3a6d1SMichel Dänzer if (rdev->family < CHIP_R600) { 101b1e3a6d1SMichel Dänzer int i; 102b1e3a6d1SMichel Dänzer 103550e2d92SDave Airlie for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 104550e2d92SDave Airlie if (rdev->surface_regs[i].bo) 105550e2d92SDave Airlie radeon_bo_get_surface_reg(rdev->surface_regs[i].bo); 106550e2d92SDave Airlie else 107550e2d92SDave Airlie radeon_clear_surface_reg(rdev, i); 108b1e3a6d1SMichel Dänzer } 109e024e110SDave Airlie /* enable surfaces */ 110e024e110SDave Airlie WREG32(RADEON_SURFACE_CNTL, 0); 111b1e3a6d1SMichel Dänzer } 112b1e3a6d1SMichel Dänzer } 113b1e3a6d1SMichel Dänzer 114b1e3a6d1SMichel Dänzer /* 115771fe6b9SJerome Glisse * GPU scratch registers helpers function. 116771fe6b9SJerome Glisse */ 1173ce0a23dSJerome Glisse void radeon_scratch_init(struct radeon_device *rdev) 118771fe6b9SJerome Glisse { 119771fe6b9SJerome Glisse int i; 120771fe6b9SJerome Glisse 121771fe6b9SJerome Glisse /* FIXME: check this out */ 122771fe6b9SJerome Glisse if (rdev->family < CHIP_R300) { 123771fe6b9SJerome Glisse rdev->scratch.num_reg = 5; 124771fe6b9SJerome Glisse } else { 125771fe6b9SJerome Glisse rdev->scratch.num_reg = 7; 126771fe6b9SJerome Glisse } 127724c80e1SAlex Deucher rdev->scratch.reg_base = RADEON_SCRATCH_REG0; 128771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 129771fe6b9SJerome Glisse rdev->scratch.free[i] = true; 130724c80e1SAlex Deucher rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 131771fe6b9SJerome Glisse } 132771fe6b9SJerome Glisse } 133771fe6b9SJerome Glisse 134771fe6b9SJerome Glisse int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) 135771fe6b9SJerome Glisse { 136771fe6b9SJerome Glisse int i; 137771fe6b9SJerome Glisse 138771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 139771fe6b9SJerome Glisse if (rdev->scratch.free[i]) { 140771fe6b9SJerome Glisse rdev->scratch.free[i] = false; 141771fe6b9SJerome Glisse *reg = rdev->scratch.reg[i]; 142771fe6b9SJerome Glisse return 0; 143771fe6b9SJerome Glisse } 144771fe6b9SJerome Glisse } 145771fe6b9SJerome Glisse return -EINVAL; 146771fe6b9SJerome Glisse } 147771fe6b9SJerome Glisse 148771fe6b9SJerome Glisse void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) 149771fe6b9SJerome Glisse { 150771fe6b9SJerome Glisse int i; 151771fe6b9SJerome Glisse 152771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 153771fe6b9SJerome Glisse if (rdev->scratch.reg[i] == reg) { 154771fe6b9SJerome Glisse rdev->scratch.free[i] = true; 
void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			&rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		/* often unreliable on AGP */
		if (rdev->flags & RADEON_IS_AGP) {
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
		}
	}
	/* always use writeback/events on NI */
	if (ASIC_IS_DCE5(rdev)) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

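/*
 * MC (memory controller) address-space placement helpers.  The two
 * functions below only decide where VRAM and GTT live in the GPU's
 * internal address space; programming the MC registers with these ranges
 * is left to the asic-specific code.
 */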

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Tries to place VRAM at the base address provided as parameter (which is
 * so far either the PCI aperture address or, for IGPs, the TOM base
 * address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and the AGP aperture doesn't allow us to have room
 * for all the VRAM, then we restrict the VRAM to the PCI aperture size and
 * print a warning.
 *
 * This function never fails; in the worst case it limits VRAM.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a
 * reference.  Otherwise this would be needed for rv280, all r3xx, and all
 * r4xx, but not IGP.
 *
 * Note: we use mc_vram_size because on some boards we need to program the
 * mc to cover the whole aperture even if the VRAM size is smaller than the
 * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and the Ubuntu
 * ones).
 *
 * Note: the IGP TOM addr should be the same as the aperture addr, but we
 * don't explicitly check for that.
 *
 * FIXME: when reducing VRAM size, align the new size on a power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Tries to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, then we adjust the GTT
 * size.  Thus this function never fails.
 *
 * FIXME: when reducing GTT size, align the new size on a power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
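
/*
 * Note on the gtt_base_align arithmetic above: gtt_base_align acts as an
 * "alignment minus one" style mask (0 when no extra alignment is needed),
 * so "(x + align) & ~align" rounds x up to the boundary and "x & ~align"
 * rounds it down.  Illustrative example (values chosen for clarity, not
 * taken from real hardware): with vram_start = 0, vram_end = 0x0FFFFFFF
 * and gtt_base_align = 0, size_bf = 0 and size_af = 0xF0000000, so the
 * GTT is placed right after VRAM at gtt_start = 0x10000000.
 */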

/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in MHz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(MHz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

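/*
 * Check whether the vBIOS has already POSTed the card; if it hasn't, run
 * the ASIC init tables (ATOM or combios) ourselves.
 */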
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

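/*
 * Hook the register accessors above into the AtomBIOS interpreter and
 * parse the BIOS image so that ATOM command and data tables can be
 * executed later.
 */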
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be a power of two and greater than or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

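/*
 * vga_switcheroo callbacks for hybrid graphics laptops: the discrete GPU
 * is suspended when the mux switches away from it and resumed when it is
 * switched back.
 */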
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_resume_kms(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}


int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization is all done here so we
	 * can recall functions without having locking issues */
	mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart.
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have more than one VGA card, disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       NULL,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card, try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if (radeon_testing) {
		radeon_test_moves(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev);
	}
	return 0;
}

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
}


/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	radeon_fence_wait_last(rdev);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs */
	if (rdev->is_atom_bios)
		radeon_atom_encoder_init(rdev);
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}
	return 0;
}

int radeon_gpu_reset(struct radeon_device *rdev)
{
	int r;
	int resched;

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded\n");
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
		return 0;
	}
	/* bad news, how to tell it to userspace ? */
	dev_info(rdev->dev, "GPU reset failed\n");
	return r;
}


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list *files;
	unsigned num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		if (_radeon_debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}
	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
		DRM_ERROR("Reached maximum number of debugfs files.\n");
		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
		return -EINVAL;
	}
	_radeon_debugfs[_radeon_debugfs_count].files = files;
	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
	_radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		drm_debugfs_remove_files(_radeon_debugfs[i].files,
					 _radeon_debugfs[i].num_files, minor);
	}
}
#endif