/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch register helper functions.
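 *
 * Typical usage, as a rough sketch (mirrors what the ring/IB tests elsewhere
 * in the driver do; the "scratch" variable name and the test pattern are
 * illustrative, not taken from this file):
 *
 *	uint32_t scratch;
 *	int r = radeon_scratch_get(rdev, &scratch);
 *	if (r == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		... have the CP or the caller update/poll the register ...
 *		radeon_scratch_free(rdev, scratch);
 *	}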
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
		}
	}
	/* always use writeback/events on NI */
	if (ASIC_IS_DCE5(rdev)) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail; the worst case is limiting VRAM.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hardware of Novell bug 204882, along with
 * lots of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align the new size on a power of 2.
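 *
 * Illustrative example (hypothetical numbers, not taken from real hardware):
 * with base = 0xD0000000 and mc_vram_size = 512M, vram_end works out to
 * 0xEFFFFFFF, which still fits below 4G, so nothing is clamped; only if
 * vram_end would cross 0xFFFFFFFF, or overlap the AGP GTT range, is
 * mc_vram_size pulled back to aper_size.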
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
		mc->real_vram_size = radeon_vram_limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align the new size on a power of 2.
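 *
 * Illustrative example (hypothetical numbers): with vram_start = 0,
 * vram_end = 0x0FFFFFFF, gtt_base_align = 0 (no extra alignment mask) and
 * gtt_size = 512M, the space after VRAM is larger than the space before it,
 * so the GTT is placed right after VRAM: gtt_start = 0x10000000 and
 * gtt_end = 0x2FFFFFFF.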
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
		return false;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

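/*
 * Note (descriptive summary): the cail_* helpers above are the register
 * access callbacks handed to the ATOM BIOS interpreter through the
 * card_info structure filled in by radeon_atombios_init(); the interpreter
 * calls them whenever an AtomBIOS command table touches MMIO, IO, MC or
 * PLL registers.  radeon_atombios_fini() below releases that state.
 */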
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be a power of two and greater than or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_resume_kms(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}


int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initializations are all done here so we
	 * can recall functions without having locking issues */
	radeon_mutex_init(&rdev->cs_mutex);
	radeon_mutex_init(&rdev->ib_pool.mutex);
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		mutex_init(&rdev->ring[i].mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_lock);
	rwlock_init(&rdev->semaphore_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);
	INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
	/* initialize vm here */
	rdev->vm_manager.use_bitmap = 1;
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart.
	 * However, some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family < CHIP_RS400))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA card, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       NULL,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card, try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if ((radeon_testing & 1)) {
		radeon_test_moves(rdev);
	}
	if ((radeon_testing & 2)) {
		radeon_test_syncing(rdev);
	}
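	/* optional benchmark pass, selected by the radeon_benchmarking
	 * module parameter (0, the default, means disabled) */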
	if (radeon_benchmarking) {
		radeon_benchmark(rdev, radeon_benchmarking);
	}
	return 0;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev);

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	radeon_debugfs_remove_files(rdev);
}


/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		radeon_fence_wait_last(rdev, i);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}

int radeon_gpu_reset(struct radeon_device *rdev)
{
	int r;
	int resched;

	/* Prevent CS ioctl from interfering */
	radeon_mutex_lock(&rdev->cs_mutex);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded\n");
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	}

	radeon_mutex_unlock(&rdev->cs_mutex);

	if (r) {
		/* bad news, how to tell it to userspace? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	return r;
}


/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif