/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
Glisse "RV620", 721b5331d9SJerome Glisse "RV635", 731b5331d9SJerome Glisse "RS780", 741b5331d9SJerome Glisse "RS880", 751b5331d9SJerome Glisse "RV770", 761b5331d9SJerome Glisse "RV730", 771b5331d9SJerome Glisse "RV710", 781b5331d9SJerome Glisse "RV740", 791b5331d9SJerome Glisse "CEDAR", 801b5331d9SJerome Glisse "REDWOOD", 811b5331d9SJerome Glisse "JUNIPER", 821b5331d9SJerome Glisse "CYPRESS", 831b5331d9SJerome Glisse "HEMLOCK", 841b5331d9SJerome Glisse "LAST", 851b5331d9SJerome Glisse }; 861b5331d9SJerome Glisse 87771fe6b9SJerome Glisse /* 88b1e3a6d1SMichel Dänzer * Clear GPU surface registers. 89b1e3a6d1SMichel Dänzer */ 903ce0a23dSJerome Glisse void radeon_surface_init(struct radeon_device *rdev) 91b1e3a6d1SMichel Dänzer { 92b1e3a6d1SMichel Dänzer /* FIXME: check this out */ 93b1e3a6d1SMichel Dänzer if (rdev->family < CHIP_R600) { 94b1e3a6d1SMichel Dänzer int i; 95b1e3a6d1SMichel Dänzer 96550e2d92SDave Airlie for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 97550e2d92SDave Airlie if (rdev->surface_regs[i].bo) 98550e2d92SDave Airlie radeon_bo_get_surface_reg(rdev->surface_regs[i].bo); 99550e2d92SDave Airlie else 100550e2d92SDave Airlie radeon_clear_surface_reg(rdev, i); 101b1e3a6d1SMichel Dänzer } 102e024e110SDave Airlie /* enable surfaces */ 103e024e110SDave Airlie WREG32(RADEON_SURFACE_CNTL, 0); 104b1e3a6d1SMichel Dänzer } 105b1e3a6d1SMichel Dänzer } 106b1e3a6d1SMichel Dänzer 107b1e3a6d1SMichel Dänzer /* 108771fe6b9SJerome Glisse * GPU scratch registers helpers function. 109771fe6b9SJerome Glisse */ 1103ce0a23dSJerome Glisse void radeon_scratch_init(struct radeon_device *rdev) 111771fe6b9SJerome Glisse { 112771fe6b9SJerome Glisse int i; 113771fe6b9SJerome Glisse 114771fe6b9SJerome Glisse /* FIXME: check this out */ 115771fe6b9SJerome Glisse if (rdev->family < CHIP_R300) { 116771fe6b9SJerome Glisse rdev->scratch.num_reg = 5; 117771fe6b9SJerome Glisse } else { 118771fe6b9SJerome Glisse rdev->scratch.num_reg = 7; 119771fe6b9SJerome Glisse } 120724c80e1SAlex Deucher rdev->scratch.reg_base = RADEON_SCRATCH_REG0; 121771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 122771fe6b9SJerome Glisse rdev->scratch.free[i] = true; 123724c80e1SAlex Deucher rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 124771fe6b9SJerome Glisse } 125771fe6b9SJerome Glisse } 126771fe6b9SJerome Glisse 127771fe6b9SJerome Glisse int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg) 128771fe6b9SJerome Glisse { 129771fe6b9SJerome Glisse int i; 130771fe6b9SJerome Glisse 131771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 132771fe6b9SJerome Glisse if (rdev->scratch.free[i]) { 133771fe6b9SJerome Glisse rdev->scratch.free[i] = false; 134771fe6b9SJerome Glisse *reg = rdev->scratch.reg[i]; 135771fe6b9SJerome Glisse return 0; 136771fe6b9SJerome Glisse } 137771fe6b9SJerome Glisse } 138771fe6b9SJerome Glisse return -EINVAL; 139771fe6b9SJerome Glisse } 140771fe6b9SJerome Glisse 141771fe6b9SJerome Glisse void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) 142771fe6b9SJerome Glisse { 143771fe6b9SJerome Glisse int i; 144771fe6b9SJerome Glisse 145771fe6b9SJerome Glisse for (i = 0; i < rdev->scratch.num_reg; i++) { 146771fe6b9SJerome Glisse if (rdev->scratch.reg[i] == reg) { 147771fe6b9SJerome Glisse rdev->scratch.free[i] = true; 148771fe6b9SJerome Glisse return; 149771fe6b9SJerome Glisse } 150771fe6b9SJerome Glisse } 151771fe6b9SJerome Glisse } 152771fe6b9SJerome Glisse 153724c80e1SAlex Deucher void radeon_wb_disable(struct radeon_device *rdev) 154724c80e1SAlex 
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			&rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		/* often unreliable on AGP */
		if (rdev->flags & RADEON_IS_AGP) {
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
		}
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
"en" : "dis"); 229724c80e1SAlex Deucher 230724c80e1SAlex Deucher return 0; 231724c80e1SAlex Deucher } 232724c80e1SAlex Deucher 233d594e46aSJerome Glisse /** 234d594e46aSJerome Glisse * radeon_vram_location - try to find VRAM location 235d594e46aSJerome Glisse * @rdev: radeon device structure holding all necessary informations 236d594e46aSJerome Glisse * @mc: memory controller structure holding memory informations 237d594e46aSJerome Glisse * @base: base address at which to put VRAM 238d594e46aSJerome Glisse * 239d594e46aSJerome Glisse * Function will place try to place VRAM at base address provided 240d594e46aSJerome Glisse * as parameter (which is so far either PCI aperture address or 241d594e46aSJerome Glisse * for IGP TOM base address). 242d594e46aSJerome Glisse * 243d594e46aSJerome Glisse * If there is not enough space to fit the unvisible VRAM in the 32bits 244d594e46aSJerome Glisse * address space then we limit the VRAM size to the aperture. 245d594e46aSJerome Glisse * 246d594e46aSJerome Glisse * If we are using AGP and if the AGP aperture doesn't allow us to have 247d594e46aSJerome Glisse * room for all the VRAM than we restrict the VRAM to the PCI aperture 248d594e46aSJerome Glisse * size and print a warning. 249d594e46aSJerome Glisse * 250d594e46aSJerome Glisse * This function will never fails, worst case are limiting VRAM. 251d594e46aSJerome Glisse * 252d594e46aSJerome Glisse * Note: GTT start, end, size should be initialized before calling this 253d594e46aSJerome Glisse * function on AGP platform. 254d594e46aSJerome Glisse * 255d594e46aSJerome Glisse * Note: We don't explictly enforce VRAM start to be aligned on VRAM size, 256d594e46aSJerome Glisse * this shouldn't be a problem as we are using the PCI aperture as a reference. 257d594e46aSJerome Glisse * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but 258d594e46aSJerome Glisse * not IGP. 259d594e46aSJerome Glisse * 260d594e46aSJerome Glisse * Note: we use mc_vram_size as on some board we need to program the mc to 261d594e46aSJerome Glisse * cover the whole aperture even if VRAM size is inferior to aperture size 262d594e46aSJerome Glisse * Novell bug 204882 + along with lots of ubuntu ones 263d594e46aSJerome Glisse * 264d594e46aSJerome Glisse * Note: when limiting vram it's safe to overwritte real_vram_size because 265d594e46aSJerome Glisse * we are not in case where real_vram_size is inferior to mc_vram_size (ie 266d594e46aSJerome Glisse * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu 267d594e46aSJerome Glisse * ones) 268d594e46aSJerome Glisse * 269d594e46aSJerome Glisse * Note: IGP TOM addr should be the same as the aperture addr, we don't 270d594e46aSJerome Glisse * explicitly check for that thought. 271d594e46aSJerome Glisse * 272d594e46aSJerome Glisse * FIXME: when reducing VRAM size align new size on power of 2. 
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust GTT size;
 * thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be a power of two and greater than or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct radeon_device *rdev = dev->dev_private;
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		rdev->powered_down = false;
		radeon_resume_kms(dev);
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		radeon_suspend_kms(dev, pmm);
		/* don't suspend or resume card normally */
		rdev->powered_down = true;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}


int radeon_device_init(struct radeon_device *rdev,
		struct drm_device *ddev,
		struct pci_dev *pdev,
		uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device);

	/* mutex initializations are all done here so we
	 * can recall functions without having locking issues */
	mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);

	/* setup workqueue */
	rdev->wq = create_workqueue("radeon");
	if (rdev->wq == NULL)
		return -ENOMEM;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart.
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card, try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if (radeon_testing) {
		radeon_test_moves(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev);
	}
	return 0;
}

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	destroy_workqueue(rdev->wq);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
}


/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (rdev->powered_down)
		return 0;

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = rfb->obj->driver_private;
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	radeon_fence_wait_last(rdev);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	acquire_console_sem();
	radeon_fbdev_set_suspend(rdev, 1);
	release_console_sem();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->powered_down)
		return 0;

	acquire_console_sem();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		release_console_sem();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	radeon_fbdev_set_suspend(rdev, 0);
	release_console_sem();

	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	return 0;
}

int radeon_gpu_reset(struct radeon_device *rdev)
{
	int r;

	radeon_save_bios_scratch_regs(rdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded\n");
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		return 0;
	}
	/* bad news, how do we tell userspace? */
	dev_info(rdev->dev, "GPU reset failed\n");
	return r;
}


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		if (_radeon_debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}
	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
		DRM_ERROR("Reached maximum number of debugfs files.\n");
		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
		return -EINVAL;
	}
	_radeon_debugfs[_radeon_debugfs_count].files = files;
	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
	_radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		drm_debugfs_remove_files(_radeon_debugfs[i].files,
					 _radeon_debugfs[i].num_files, minor);
	}
}
#endif