/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);

/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

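/*
 * Pick the power state / clock mode that dynamic power management should
 * switch to next, based on rdev->pm.dynpm_planned_action.  On IGPs and the
 * original R600 the driver moves between whole power states; on other asics
 * it keeps one power state and steps through its clock modes.  The result
 * is written to rdev->pm.requested_power_state_index and
 * rdev->pm.requested_clock_mode_index.
 */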
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

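/*
 * Build the PM profile table for rs780-class IGPs: map each profile
 * (default, low/mid/high single-head, low/mid/high multi-head) to a power
 * state index and clock mode index.  The mapping depends on how many power
 * states the vbios exposes.
 */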
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

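/*
 * r600_pm_init_profile() does the same mapping for the rest of the
 * r6xx/r7xx family.  On CHIP_R600 every profile simply points at the
 * default power state; other asics either use fixed power state indices
 * or look them up with radeon_pm_get_type_index(), depending on how many
 * power states are available.
 */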
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
			 * aux dp channel on imacs; this helps (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	return r;
}

void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
		S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
}

static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place VRAM at the same place as in the CPU (PCI) address space,
 * as some GPUs seem to have issues when we reprogram it to a different
 * address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, place VRAM adjacent to the AGP aperture, as we need
 * both to be contiguous from the GPU's point of view so that we can program
 * the GPU to catch accesses outside of them (weird GPU policy, see ??).
 *
 * This function never fails; worst case is limiting VRAM or GTT.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			{
				/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
				 * memory is present.
				 */
				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
						(unsigned long long)rdev->mc.aper_base, k8_addr);
					rdev->mc.aper_base = (resource_size_t)k8_addr;
					rdev->fastfb_working = true;
				}
			}
		}
	}

	radeon_update_bandwidth_info(rdev);
	return 0;
}

int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
			   (void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}

void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}

void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(R600_BIOS_3_SCRATCH, tmp);
}

static void r600_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
		 RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
		 RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
		 RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
		 RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
		 RREG32(CP_STAT));
	dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
		 RREG32(DMA_STATUS_REG));
}

static bool r600_is_display_hung(struct radeon_device *rdev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[2];
	u32 i, j, tmp;

	for (i = 0; i < rdev->num_crtc; i++) {
		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(R_008010_GRBM_STATUS);
	if (rdev->family >= CHIP_RV770) {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	} else {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	}

	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
		reset_mask |= RADEON_RESET_CP;

	if (G_008010_GRBM_EE_BUSY(tmp))
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS */
	tmp = RREG32(R_000E50_SRBM_STATUS);
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
		reset_mask |= RADEON_RESET_RLC;

	if (G_000E50_IH_BUSY(tmp))
		reset_mask |= RADEON_RESET_IH;

	if (G_000E50_SEM_BUSY(tmp))
		reset_mask |= RADEON_RESET_SEM;

	if (G_000E50_GRBM_RQ_PENDING(tmp))
		reset_mask |= RADEON_RESET_GRBM;

	if (G_000E50_VMC_BUSY(tmp))
		reset_mask |= RADEON_RESET_VMC;

	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
	    G_000E50_MCDW_BUSY(tmp))
		reset_mask |= RADEON_RESET_MC;

	if (r600_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct rv515_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	r600_print_gpu_status_regs(rdev);

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	if (reset_mask & RADEON_RESET_DMA) {
		/* Disable DMA */
		tmp = RREG32(DMA_RB_CNTL);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL, tmp);
	}

	mdelay(50);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		if (rdev->family >= CHIP_RV770)
			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
		else
			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
				S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SMX(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
			S_008020_SOFT_RESET_VGT(1);

		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
	}

	if (reset_mask & RADEON_RESET_DMA) {
		if (rdev->family >= CHIP_RV770)
			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
		else
			srbm_soft_reset |= SOFT_RESET_DMA;
	}

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
	}

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);

	if (grbm_soft_reset) {
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	mdelay(1);

	rv515_mc_resume(rdev, &save);
	udelay(50);

	r600_print_gpu_status_regs(rdev);
}

int r600_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	r600_gpu_soft_reset(rdev, reset_mask);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}

/**
 * r600_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
1612 */ 1613 bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1614 { 1615 u32 reset_mask = r600_gpu_check_soft_reset(rdev); 1616 1617 if (!(reset_mask & (RADEON_RESET_GFX | 1618 RADEON_RESET_COMPUTE | 1619 RADEON_RESET_CP))) { 1620 radeon_ring_lockup_update(ring); 1621 return false; 1622 } 1623 /* force CP activities */ 1624 radeon_ring_force_activity(rdev, ring); 1625 return radeon_ring_test_lockup(rdev, ring); 1626 } 1627 1628 /** 1629 * r600_dma_is_lockup - Check if the DMA engine is locked up 1630 * 1631 * @rdev: radeon_device pointer 1632 * @ring: radeon_ring structure holding ring information 1633 * 1634 * Check if the async DMA engine is locked up. 1635 * Returns true if the engine appears to be locked up, false if not. 1636 */ 1637 bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1638 { 1639 u32 reset_mask = r600_gpu_check_soft_reset(rdev); 1640 1641 if (!(reset_mask & RADEON_RESET_DMA)) { 1642 radeon_ring_lockup_update(ring); 1643 return false; 1644 } 1645 /* force ring activities */ 1646 radeon_ring_force_activity(rdev, ring); 1647 return radeon_ring_test_lockup(rdev, ring); 1648 } 1649 1650 u32 r6xx_remap_render_backend(struct radeon_device *rdev, 1651 u32 tiling_pipe_num, 1652 u32 max_rb_num, 1653 u32 total_max_rb_num, 1654 u32 disabled_rb_mask) 1655 { 1656 u32 rendering_pipe_num, rb_num_width, req_rb_num; 1657 u32 pipe_rb_ratio, pipe_rb_remain, tmp; 1658 u32 data = 0, mask = 1 << (max_rb_num - 1); 1659 unsigned i, j; 1660 1661 /* mask out the RBs that don't exist on that asic */ 1662 tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff); 1663 /* make sure at least one RB is available */ 1664 if ((tmp & 0xff) != 0xff) 1665 disabled_rb_mask = tmp; 1666 1667 rendering_pipe_num = 1 << tiling_pipe_num; 1668 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); 1669 BUG_ON(rendering_pipe_num < req_rb_num); 1670 1671 pipe_rb_ratio = rendering_pipe_num / req_rb_num; 1672 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num; 1673 1674 if (rdev->family <= CHIP_RV740) { 1675 /* r6xx/r7xx */ 1676 rb_num_width = 2; 1677 } else { 1678 /* eg+ */ 1679 rb_num_width = 4; 1680 } 1681 1682 for (i = 0; i < max_rb_num; i++) { 1683 if (!(mask & disabled_rb_mask)) { 1684 for (j = 0; j < pipe_rb_ratio; j++) { 1685 data <<= rb_num_width; 1686 data |= max_rb_num - i - 1; 1687 } 1688 if (pipe_rb_remain) { 1689 data <<= rb_num_width; 1690 data |= max_rb_num - i - 1; 1691 pipe_rb_remain--; 1692 } 1693 } 1694 mask >>= 1; 1695 } 1696 1697 return data; 1698 } 1699 1700 int r600_count_pipe_bits(uint32_t val) 1701 { 1702 return hweight32(val); 1703 } 1704 1705 static void r600_gpu_init(struct radeon_device *rdev) 1706 { 1707 u32 tiling_config; 1708 u32 ramcfg; 1709 u32 cc_rb_backend_disable; 1710 u32 cc_gc_shader_pipe_config; 1711 u32 tmp; 1712 int i, j; 1713 u32 sq_config; 1714 u32 sq_gpr_resource_mgmt_1 = 0; 1715 u32 sq_gpr_resource_mgmt_2 = 0; 1716 u32 sq_thread_resource_mgmt = 0; 1717 u32 sq_stack_resource_mgmt_1 = 0; 1718 u32 sq_stack_resource_mgmt_2 = 0; 1719 u32 disabled_rb_mask; 1720 1721 rdev->config.r600.tiling_group_size = 256; 1722 switch (rdev->family) { 1723 case CHIP_R600: 1724 rdev->config.r600.max_pipes = 4; 1725 rdev->config.r600.max_tile_pipes = 8; 1726 rdev->config.r600.max_simds = 4; 1727 rdev->config.r600.max_backends = 4; 1728 rdev->config.r600.max_gprs = 256; 1729 rdev->config.r600.max_threads = 192; 1730 rdev->config.r600.max_stack_entries = 256; 1731 rdev->config.r600.max_hw_contexts = 8; 1732 
rdev->config.r600.max_gs_threads = 16; 1733 rdev->config.r600.sx_max_export_size = 128; 1734 rdev->config.r600.sx_max_export_pos_size = 16; 1735 rdev->config.r600.sx_max_export_smx_size = 128; 1736 rdev->config.r600.sq_num_cf_insts = 2; 1737 break; 1738 case CHIP_RV630: 1739 case CHIP_RV635: 1740 rdev->config.r600.max_pipes = 2; 1741 rdev->config.r600.max_tile_pipes = 2; 1742 rdev->config.r600.max_simds = 3; 1743 rdev->config.r600.max_backends = 1; 1744 rdev->config.r600.max_gprs = 128; 1745 rdev->config.r600.max_threads = 192; 1746 rdev->config.r600.max_stack_entries = 128; 1747 rdev->config.r600.max_hw_contexts = 8; 1748 rdev->config.r600.max_gs_threads = 4; 1749 rdev->config.r600.sx_max_export_size = 128; 1750 rdev->config.r600.sx_max_export_pos_size = 16; 1751 rdev->config.r600.sx_max_export_smx_size = 128; 1752 rdev->config.r600.sq_num_cf_insts = 2; 1753 break; 1754 case CHIP_RV610: 1755 case CHIP_RV620: 1756 case CHIP_RS780: 1757 case CHIP_RS880: 1758 rdev->config.r600.max_pipes = 1; 1759 rdev->config.r600.max_tile_pipes = 1; 1760 rdev->config.r600.max_simds = 2; 1761 rdev->config.r600.max_backends = 1; 1762 rdev->config.r600.max_gprs = 128; 1763 rdev->config.r600.max_threads = 192; 1764 rdev->config.r600.max_stack_entries = 128; 1765 rdev->config.r600.max_hw_contexts = 4; 1766 rdev->config.r600.max_gs_threads = 4; 1767 rdev->config.r600.sx_max_export_size = 128; 1768 rdev->config.r600.sx_max_export_pos_size = 16; 1769 rdev->config.r600.sx_max_export_smx_size = 128; 1770 rdev->config.r600.sq_num_cf_insts = 1; 1771 break; 1772 case CHIP_RV670: 1773 rdev->config.r600.max_pipes = 4; 1774 rdev->config.r600.max_tile_pipes = 4; 1775 rdev->config.r600.max_simds = 4; 1776 rdev->config.r600.max_backends = 4; 1777 rdev->config.r600.max_gprs = 192; 1778 rdev->config.r600.max_threads = 192; 1779 rdev->config.r600.max_stack_entries = 256; 1780 rdev->config.r600.max_hw_contexts = 8; 1781 rdev->config.r600.max_gs_threads = 16; 1782 rdev->config.r600.sx_max_export_size = 128; 1783 rdev->config.r600.sx_max_export_pos_size = 16; 1784 rdev->config.r600.sx_max_export_smx_size = 128; 1785 rdev->config.r600.sq_num_cf_insts = 2; 1786 break; 1787 default: 1788 break; 1789 } 1790 1791 /* Initialize HDP */ 1792 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 1793 WREG32((0x2c14 + j), 0x00000000); 1794 WREG32((0x2c18 + j), 0x00000000); 1795 WREG32((0x2c1c + j), 0x00000000); 1796 WREG32((0x2c20 + j), 0x00000000); 1797 WREG32((0x2c24 + j), 0x00000000); 1798 } 1799 1800 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 1801 1802 /* Setup tiling */ 1803 tiling_config = 0; 1804 ramcfg = RREG32(RAMCFG); 1805 switch (rdev->config.r600.max_tile_pipes) { 1806 case 1: 1807 tiling_config |= PIPE_TILING(0); 1808 break; 1809 case 2: 1810 tiling_config |= PIPE_TILING(1); 1811 break; 1812 case 4: 1813 tiling_config |= PIPE_TILING(2); 1814 break; 1815 case 8: 1816 tiling_config |= PIPE_TILING(3); 1817 break; 1818 default: 1819 break; 1820 } 1821 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; 1822 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1823 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1824 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 1825 1826 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 1827 if (tmp > 3) { 1828 tiling_config |= ROW_TILING(3); 1829 tiling_config |= SAMPLE_SPLIT(3); 1830 } else { 1831 tiling_config |= ROW_TILING(tmp); 1832 tiling_config |= SAMPLE_SPLIT(tmp); 1833 } 1834 tiling_config |= 
BANK_SWAPS(1); 1835 1836 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; 1837 tmp = R6XX_MAX_BACKENDS - 1838 r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK); 1839 if (tmp < rdev->config.r600.max_backends) { 1840 rdev->config.r600.max_backends = tmp; 1841 } 1842 1843 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; 1844 tmp = R6XX_MAX_PIPES - 1845 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK); 1846 if (tmp < rdev->config.r600.max_pipes) { 1847 rdev->config.r600.max_pipes = tmp; 1848 } 1849 tmp = R6XX_MAX_SIMDS - 1850 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); 1851 if (tmp < rdev->config.r600.max_simds) { 1852 rdev->config.r600.max_simds = tmp; 1853 } 1854 1855 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; 1856 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; 1857 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends, 1858 R6XX_MAX_BACKENDS, disabled_rb_mask); 1859 tiling_config |= tmp << 16; 1860 rdev->config.r600.backend_map = tmp; 1861 1862 rdev->config.r600.tile_config = tiling_config; 1863 WREG32(GB_TILING_CONFIG, tiling_config); 1864 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1865 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 1866 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff); 1867 1868 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 1869 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1870 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); 1871 1872 /* Setup some CP states */ 1873 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b))); 1874 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40))); 1875 1876 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT | 1877 SYNC_WALKER | SYNC_ALIGNER)); 1878 /* Setup various GPU states */ 1879 if (rdev->family == CHIP_RV670) 1880 WREG32(ARB_GDEC_RD_CNTL, 0x00000021); 1881 1882 tmp = RREG32(SX_DEBUG_1); 1883 tmp |= SMX_EVENT_RELEASE; 1884 if ((rdev->family > CHIP_R600)) 1885 tmp |= ENABLE_NEW_SMX_ADDRESS; 1886 WREG32(SX_DEBUG_1, tmp); 1887 1888 if (((rdev->family) == CHIP_R600) || 1889 ((rdev->family) == CHIP_RV630) || 1890 ((rdev->family) == CHIP_RV610) || 1891 ((rdev->family) == CHIP_RV620) || 1892 ((rdev->family) == CHIP_RS780) || 1893 ((rdev->family) == CHIP_RS880)) { 1894 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE); 1895 } else { 1896 WREG32(DB_DEBUG, 0); 1897 } 1898 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) | 1899 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4))); 1900 1901 WREG32(PA_SC_MULTI_CHIP_CNTL, 0); 1902 WREG32(VGT_NUM_INSTANCES, 0); 1903 1904 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0)); 1905 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0)); 1906 1907 tmp = RREG32(SQ_MS_FIFO_SIZES); 1908 if (((rdev->family) == CHIP_RV610) || 1909 ((rdev->family) == CHIP_RV620) || 1910 ((rdev->family) == CHIP_RS780) || 1911 ((rdev->family) == CHIP_RS880)) { 1912 tmp = (CACHE_FIFO_SIZE(0xa) | 1913 FETCH_FIFO_HIWATER(0xa) | 1914 DONE_FIFO_HIWATER(0xe0) | 1915 ALU_UPDATE_FIFO_HIWATER(0x8)); 1916 } else if (((rdev->family) == CHIP_R600) || 1917 ((rdev->family) == CHIP_RV630)) { 1918 tmp &= ~DONE_FIFO_HIWATER(0xff); 1919 tmp |= DONE_FIFO_HIWATER(0x4); 1920 } 1921 WREG32(SQ_MS_FIFO_SIZES, tmp); 1922 1923 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT 1924 * 
should be adjusted as needed by the 2D/3D drivers. This just sets default values 1925 */ 1926 sq_config = RREG32(SQ_CONFIG); 1927 sq_config &= ~(PS_PRIO(3) | 1928 VS_PRIO(3) | 1929 GS_PRIO(3) | 1930 ES_PRIO(3)); 1931 sq_config |= (DX9_CONSTS | 1932 VC_ENABLE | 1933 PS_PRIO(0) | 1934 VS_PRIO(1) | 1935 GS_PRIO(2) | 1936 ES_PRIO(3)); 1937 1938 if ((rdev->family) == CHIP_R600) { 1939 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) | 1940 NUM_VS_GPRS(124) | 1941 NUM_CLAUSE_TEMP_GPRS(4)); 1942 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) | 1943 NUM_ES_GPRS(0)); 1944 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) | 1945 NUM_VS_THREADS(48) | 1946 NUM_GS_THREADS(4) | 1947 NUM_ES_THREADS(4)); 1948 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) | 1949 NUM_VS_STACK_ENTRIES(128)); 1950 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) | 1951 NUM_ES_STACK_ENTRIES(0)); 1952 } else if (((rdev->family) == CHIP_RV610) || 1953 ((rdev->family) == CHIP_RV620) || 1954 ((rdev->family) == CHIP_RS780) || 1955 ((rdev->family) == CHIP_RS880)) { 1956 /* no vertex cache */ 1957 sq_config &= ~VC_ENABLE; 1958 1959 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 1960 NUM_VS_GPRS(44) | 1961 NUM_CLAUSE_TEMP_GPRS(2)); 1962 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | 1963 NUM_ES_GPRS(17)); 1964 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 1965 NUM_VS_THREADS(78) | 1966 NUM_GS_THREADS(4) | 1967 NUM_ES_THREADS(31)); 1968 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | 1969 NUM_VS_STACK_ENTRIES(40)); 1970 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | 1971 NUM_ES_STACK_ENTRIES(16)); 1972 } else if (((rdev->family) == CHIP_RV630) || 1973 ((rdev->family) == CHIP_RV635)) { 1974 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 1975 NUM_VS_GPRS(44) | 1976 NUM_CLAUSE_TEMP_GPRS(2)); 1977 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) | 1978 NUM_ES_GPRS(18)); 1979 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 1980 NUM_VS_THREADS(78) | 1981 NUM_GS_THREADS(4) | 1982 NUM_ES_THREADS(31)); 1983 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | 1984 NUM_VS_STACK_ENTRIES(40)); 1985 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | 1986 NUM_ES_STACK_ENTRIES(16)); 1987 } else if ((rdev->family) == CHIP_RV670) { 1988 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 1989 NUM_VS_GPRS(44) | 1990 NUM_CLAUSE_TEMP_GPRS(2)); 1991 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | 1992 NUM_ES_GPRS(17)); 1993 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 1994 NUM_VS_THREADS(78) | 1995 NUM_GS_THREADS(4) | 1996 NUM_ES_THREADS(31)); 1997 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) | 1998 NUM_VS_STACK_ENTRIES(64)); 1999 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) | 2000 NUM_ES_STACK_ENTRIES(64)); 2001 } 2002 2003 WREG32(SQ_CONFIG, sq_config); 2004 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); 2005 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); 2006 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); 2007 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); 2008 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); 2009 2010 if (((rdev->family) == CHIP_RV610) || 2011 ((rdev->family) == CHIP_RV620) || 2012 ((rdev->family) == CHIP_RS780) || 2013 ((rdev->family) == CHIP_RS880)) { 2014 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY)); 2015 } else { 2016 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC)); 2017 } 2018 2019 /* More default values. 
2D/3D driver should adjust as needed */ 2020 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) | 2021 S1_X(0x4) | S1_Y(0xc))); 2022 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) | 2023 S1_X(0x2) | S1_Y(0x2) | 2024 S2_X(0xa) | S2_Y(0x6) | 2025 S3_X(0x6) | S3_Y(0xa))); 2026 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) | 2027 S1_X(0x4) | S1_Y(0xc) | 2028 S2_X(0x1) | S2_Y(0x6) | 2029 S3_X(0xa) | S3_Y(0xe))); 2030 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) | 2031 S5_X(0x0) | S5_Y(0x0) | 2032 S6_X(0xb) | S6_Y(0x4) | 2033 S7_X(0x7) | S7_Y(0x8))); 2034 2035 WREG32(VGT_STRMOUT_EN, 0); 2036 tmp = rdev->config.r600.max_pipes * 16; 2037 switch (rdev->family) { 2038 case CHIP_RV610: 2039 case CHIP_RV620: 2040 case CHIP_RS780: 2041 case CHIP_RS880: 2042 tmp += 32; 2043 break; 2044 case CHIP_RV670: 2045 tmp += 128; 2046 break; 2047 default: 2048 break; 2049 } 2050 if (tmp > 256) { 2051 tmp = 256; 2052 } 2053 WREG32(VGT_ES_PER_GS, 128); 2054 WREG32(VGT_GS_PER_ES, tmp); 2055 WREG32(VGT_GS_PER_VS, 2); 2056 WREG32(VGT_GS_VERTEX_REUSE, 16); 2057 2058 /* more default values. 2D/3D driver should adjust as needed */ 2059 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 2060 WREG32(VGT_STRMOUT_EN, 0); 2061 WREG32(SX_MISC, 0); 2062 WREG32(PA_SC_MODE_CNTL, 0); 2063 WREG32(PA_SC_AA_CONFIG, 0); 2064 WREG32(PA_SC_LINE_STIPPLE, 0); 2065 WREG32(SPI_INPUT_Z, 0); 2066 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2)); 2067 WREG32(CB_COLOR7_FRAG, 0); 2068 2069 /* Clear render buffer base addresses */ 2070 WREG32(CB_COLOR0_BASE, 0); 2071 WREG32(CB_COLOR1_BASE, 0); 2072 WREG32(CB_COLOR2_BASE, 0); 2073 WREG32(CB_COLOR3_BASE, 0); 2074 WREG32(CB_COLOR4_BASE, 0); 2075 WREG32(CB_COLOR5_BASE, 0); 2076 WREG32(CB_COLOR6_BASE, 0); 2077 WREG32(CB_COLOR7_BASE, 0); 2078 WREG32(CB_COLOR7_FRAG, 0); 2079 2080 switch (rdev->family) { 2081 case CHIP_RV610: 2082 case CHIP_RV620: 2083 case CHIP_RS780: 2084 case CHIP_RS880: 2085 tmp = TC_L2_SIZE(8); 2086 break; 2087 case CHIP_RV630: 2088 case CHIP_RV635: 2089 tmp = TC_L2_SIZE(4); 2090 break; 2091 case CHIP_R600: 2092 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT; 2093 break; 2094 default: 2095 tmp = TC_L2_SIZE(0); 2096 break; 2097 } 2098 WREG32(TC_CNTL, tmp); 2099 2100 tmp = RREG32(HDP_HOST_PATH_CNTL); 2101 WREG32(HDP_HOST_PATH_CNTL, tmp); 2102 2103 tmp = RREG32(ARB_POP); 2104 tmp |= ENABLE_TC128; 2105 WREG32(ARB_POP, tmp); 2106 2107 WREG32(PA_SC_MULTI_CHIP_CNTL, 0); 2108 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | 2109 NUM_CLIP_SEQ(3))); 2110 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095)); 2111 WREG32(VC_ENHANCE, 0); 2112 } 2113 2114 2115 /* 2116 * Indirect registers accessor 2117 */ 2118 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) 2119 { 2120 u32 r; 2121 2122 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2123 (void)RREG32(PCIE_PORT_INDEX); 2124 r = RREG32(PCIE_PORT_DATA); 2125 return r; 2126 } 2127 2128 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2129 { 2130 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2131 (void)RREG32(PCIE_PORT_INDEX); 2132 WREG32(PCIE_PORT_DATA, (v)); 2133 (void)RREG32(PCIE_PORT_DATA); 2134 } 2135 2136 /* 2137 * CP & Ring 2138 */ 2139 void r600_cp_stop(struct radeon_device *rdev) 2140 { 2141 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 2142 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 2143 WREG32(SCRATCH_UMSK, 0); 2144 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 2145 } 2146 2147 int r600_init_microcode(struct radeon_device *rdev) 2148 { 2149 struct platform_device *pdev; 2150 const 
char *chip_name; 2151 const char *rlc_chip_name; 2152 size_t pfp_req_size, me_req_size, rlc_req_size; 2153 char fw_name[30]; 2154 int err; 2155 2156 DRM_DEBUG("\n"); 2157 2158 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); 2159 err = IS_ERR(pdev); 2160 if (err) { 2161 printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); 2162 return -EINVAL; 2163 } 2164 2165 switch (rdev->family) { 2166 case CHIP_R600: 2167 chip_name = "R600"; 2168 rlc_chip_name = "R600"; 2169 break; 2170 case CHIP_RV610: 2171 chip_name = "RV610"; 2172 rlc_chip_name = "R600"; 2173 break; 2174 case CHIP_RV630: 2175 chip_name = "RV630"; 2176 rlc_chip_name = "R600"; 2177 break; 2178 case CHIP_RV620: 2179 chip_name = "RV620"; 2180 rlc_chip_name = "R600"; 2181 break; 2182 case CHIP_RV635: 2183 chip_name = "RV635"; 2184 rlc_chip_name = "R600"; 2185 break; 2186 case CHIP_RV670: 2187 chip_name = "RV670"; 2188 rlc_chip_name = "R600"; 2189 break; 2190 case CHIP_RS780: 2191 case CHIP_RS880: 2192 chip_name = "RS780"; 2193 rlc_chip_name = "R600"; 2194 break; 2195 case CHIP_RV770: 2196 chip_name = "RV770"; 2197 rlc_chip_name = "R700"; 2198 break; 2199 case CHIP_RV730: 2200 case CHIP_RV740: 2201 chip_name = "RV730"; 2202 rlc_chip_name = "R700"; 2203 break; 2204 case CHIP_RV710: 2205 chip_name = "RV710"; 2206 rlc_chip_name = "R700"; 2207 break; 2208 case CHIP_CEDAR: 2209 chip_name = "CEDAR"; 2210 rlc_chip_name = "CEDAR"; 2211 break; 2212 case CHIP_REDWOOD: 2213 chip_name = "REDWOOD"; 2214 rlc_chip_name = "REDWOOD"; 2215 break; 2216 case CHIP_JUNIPER: 2217 chip_name = "JUNIPER"; 2218 rlc_chip_name = "JUNIPER"; 2219 break; 2220 case CHIP_CYPRESS: 2221 case CHIP_HEMLOCK: 2222 chip_name = "CYPRESS"; 2223 rlc_chip_name = "CYPRESS"; 2224 break; 2225 case CHIP_PALM: 2226 chip_name = "PALM"; 2227 rlc_chip_name = "SUMO"; 2228 break; 2229 case CHIP_SUMO: 2230 chip_name = "SUMO"; 2231 rlc_chip_name = "SUMO"; 2232 break; 2233 case CHIP_SUMO2: 2234 chip_name = "SUMO2"; 2235 rlc_chip_name = "SUMO"; 2236 break; 2237 default: BUG(); 2238 } 2239 2240 if (rdev->family >= CHIP_CEDAR) { 2241 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; 2242 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 2243 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 2244 } else if (rdev->family >= CHIP_RV770) { 2245 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 2246 me_req_size = R700_PM4_UCODE_SIZE * 4; 2247 rlc_req_size = R700_RLC_UCODE_SIZE * 4; 2248 } else { 2249 pfp_req_size = PFP_UCODE_SIZE * 4; 2250 me_req_size = PM4_UCODE_SIZE * 12; 2251 rlc_req_size = RLC_UCODE_SIZE * 4; 2252 } 2253 2254 DRM_INFO("Loading %s Microcode\n", chip_name); 2255 2256 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 2257 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 2258 if (err) 2259 goto out; 2260 if (rdev->pfp_fw->size != pfp_req_size) { 2261 printk(KERN_ERR 2262 "r600_cp: Bogus length %zu in firmware \"%s\"\n", 2263 rdev->pfp_fw->size, fw_name); 2264 err = -EINVAL; 2265 goto out; 2266 } 2267 2268 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 2269 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); 2270 if (err) 2271 goto out; 2272 if (rdev->me_fw->size != me_req_size) { 2273 printk(KERN_ERR 2274 "r600_cp: Bogus length %zu in firmware \"%s\"\n", 2275 rdev->me_fw->size, fw_name); 2276 err = -EINVAL; 2277 } 2278 2279 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); 2280 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); 2281 if (err) 2282 goto out; 2283 if (rdev->rlc_fw->size != rlc_req_size) { 
2284 printk(KERN_ERR 2285 "r600_rlc: Bogus length %zu in firmware \"%s\"\n", 2286 rdev->rlc_fw->size, fw_name); 2287 err = -EINVAL; 2288 } 2289 2290 out: 2291 platform_device_unregister(pdev); 2292 2293 if (err) { 2294 if (err != -EINVAL) 2295 printk(KERN_ERR 2296 "r600_cp: Failed to load firmware \"%s\"\n", 2297 fw_name); 2298 release_firmware(rdev->pfp_fw); 2299 rdev->pfp_fw = NULL; 2300 release_firmware(rdev->me_fw); 2301 rdev->me_fw = NULL; 2302 release_firmware(rdev->rlc_fw); 2303 rdev->rlc_fw = NULL; 2304 } 2305 return err; 2306 } 2307 2308 static int r600_cp_load_microcode(struct radeon_device *rdev) 2309 { 2310 const __be32 *fw_data; 2311 int i; 2312 2313 if (!rdev->me_fw || !rdev->pfp_fw) 2314 return -EINVAL; 2315 2316 r600_cp_stop(rdev); 2317 2318 WREG32(CP_RB_CNTL, 2319 #ifdef __BIG_ENDIAN 2320 BUF_SWAP_32BIT | 2321 #endif 2322 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); 2323 2324 /* Reset cp */ 2325 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2326 RREG32(GRBM_SOFT_RESET); 2327 mdelay(15); 2328 WREG32(GRBM_SOFT_RESET, 0); 2329 2330 WREG32(CP_ME_RAM_WADDR, 0); 2331 2332 fw_data = (const __be32 *)rdev->me_fw->data; 2333 WREG32(CP_ME_RAM_WADDR, 0); 2334 for (i = 0; i < PM4_UCODE_SIZE * 3; i++) 2335 WREG32(CP_ME_RAM_DATA, 2336 be32_to_cpup(fw_data++)); 2337 2338 fw_data = (const __be32 *)rdev->pfp_fw->data; 2339 WREG32(CP_PFP_UCODE_ADDR, 0); 2340 for (i = 0; i < PFP_UCODE_SIZE; i++) 2341 WREG32(CP_PFP_UCODE_DATA, 2342 be32_to_cpup(fw_data++)); 2343 2344 WREG32(CP_PFP_UCODE_ADDR, 0); 2345 WREG32(CP_ME_RAM_WADDR, 0); 2346 WREG32(CP_ME_RAM_RADDR, 0); 2347 return 0; 2348 } 2349 2350 int r600_cp_start(struct radeon_device *rdev) 2351 { 2352 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2353 int r; 2354 uint32_t cp_me; 2355 2356 r = radeon_ring_lock(rdev, ring, 7); 2357 if (r) { 2358 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 2359 return r; 2360 } 2361 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); 2362 radeon_ring_write(ring, 0x1); 2363 if (rdev->family >= CHIP_RV770) { 2364 radeon_ring_write(ring, 0x0); 2365 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1); 2366 } else { 2367 radeon_ring_write(ring, 0x3); 2368 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1); 2369 } 2370 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2371 radeon_ring_write(ring, 0); 2372 radeon_ring_write(ring, 0); 2373 radeon_ring_unlock_commit(rdev, ring); 2374 2375 cp_me = 0xff; 2376 WREG32(R_0086D8_CP_ME_CNTL, cp_me); 2377 return 0; 2378 } 2379 2380 int r600_cp_resume(struct radeon_device *rdev) 2381 { 2382 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2383 u32 tmp; 2384 u32 rb_bufsz; 2385 int r; 2386 2387 /* Reset cp */ 2388 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2389 RREG32(GRBM_SOFT_RESET); 2390 mdelay(15); 2391 WREG32(GRBM_SOFT_RESET, 0); 2392 2393 /* Set ring buffer size */ 2394 rb_bufsz = drm_order(ring->ring_size / 8); 2395 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2396 #ifdef __BIG_ENDIAN 2397 tmp |= BUF_SWAP_32BIT; 2398 #endif 2399 WREG32(CP_RB_CNTL, tmp); 2400 WREG32(CP_SEM_WAIT_TIMER, 0x0); 2401 2402 /* Set the write pointer delay */ 2403 WREG32(CP_RB_WPTR_DELAY, 0); 2404 2405 /* Initialize the ring buffer's read and write pointers */ 2406 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 2407 WREG32(CP_RB_RPTR_WR, 0); 2408 ring->wptr = 0; 2409 WREG32(CP_RB_WPTR, ring->wptr); 2410 2411 /* set the wb address whether it's enabled or not */ 2412 WREG32(CP_RB_RPTR_ADDR, 2413 
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); 2414 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 2415 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 2416 2417 if (rdev->wb.enabled) 2418 WREG32(SCRATCH_UMSK, 0xff); 2419 else { 2420 tmp |= RB_NO_UPDATE; 2421 WREG32(SCRATCH_UMSK, 0); 2422 } 2423 2424 mdelay(1); 2425 WREG32(CP_RB_CNTL, tmp); 2426 2427 WREG32(CP_RB_BASE, ring->gpu_addr >> 8); 2428 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 2429 2430 ring->rptr = RREG32(CP_RB_RPTR); 2431 2432 r600_cp_start(rdev); 2433 ring->ready = true; 2434 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); 2435 if (r) { 2436 ring->ready = false; 2437 return r; 2438 } 2439 return 0; 2440 } 2441 2442 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size) 2443 { 2444 u32 rb_bufsz; 2445 int r; 2446 2447 /* Align ring size */ 2448 rb_bufsz = drm_order(ring_size / 8); 2449 ring_size = (1 << (rb_bufsz + 1)) * 4; 2450 ring->ring_size = ring_size; 2451 ring->align_mask = 16 - 1; 2452 2453 if (radeon_ring_supports_scratch_reg(rdev, ring)) { 2454 r = radeon_scratch_get(rdev, &ring->rptr_save_reg); 2455 if (r) { 2456 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); 2457 ring->rptr_save_reg = 0; 2458 } 2459 } 2460 } 2461 2462 void r600_cp_fini(struct radeon_device *rdev) 2463 { 2464 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2465 r600_cp_stop(rdev); 2466 radeon_ring_fini(rdev, ring); 2467 radeon_scratch_free(rdev, ring->rptr_save_reg); 2468 } 2469 2470 /* 2471 * DMA 2472 * Starting with R600, the GPU has an asynchronous 2473 * DMA engine. The programming model is very similar 2474 * to the 3D engine (ring buffer, IBs, etc.), but the 2475 * DMA controller has it's own packet format that is 2476 * different form the PM4 format used by the 3D engine. 2477 * It supports copying data, writing embedded data, 2478 * solid fills, and a number of other things. It also 2479 * has support for tiling/detiling of buffers. 2480 */ 2481 /** 2482 * r600_dma_stop - stop the async dma engine 2483 * 2484 * @rdev: radeon_device pointer 2485 * 2486 * Stop the async dma engine (r6xx-evergreen). 2487 */ 2488 void r600_dma_stop(struct radeon_device *rdev) 2489 { 2490 u32 rb_cntl = RREG32(DMA_RB_CNTL); 2491 2492 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 2493 2494 rb_cntl &= ~DMA_RB_ENABLE; 2495 WREG32(DMA_RB_CNTL, rb_cntl); 2496 2497 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; 2498 } 2499 2500 /** 2501 * r600_dma_resume - setup and start the async dma engine 2502 * 2503 * @rdev: radeon_device pointer 2504 * 2505 * Set up the DMA ring buffer and enable it. (r6xx-evergreen). 2506 * Returns 0 for success, error for failure. 
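 *
 * The engine is soft reset through SRBM, the ring size (log2 of the
 * size in dwords) and base address are programmed, the read pointer
 * write-back address is set up and DMA IBs are enabled; the ring is
 * only left marked ready if the subsequent ring test succeeds.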
2507 */ 2508 int r600_dma_resume(struct radeon_device *rdev) 2509 { 2510 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 2511 u32 rb_cntl, dma_cntl, ib_cntl; 2512 u32 rb_bufsz; 2513 int r; 2514 2515 /* Reset dma */ 2516 if (rdev->family >= CHIP_RV770) 2517 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); 2518 else 2519 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); 2520 RREG32(SRBM_SOFT_RESET); 2521 udelay(50); 2522 WREG32(SRBM_SOFT_RESET, 0); 2523 2524 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0); 2525 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); 2526 2527 /* Set ring buffer size in dwords */ 2528 rb_bufsz = drm_order(ring->ring_size / 4); 2529 rb_cntl = rb_bufsz << 1; 2530 #ifdef __BIG_ENDIAN 2531 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; 2532 #endif 2533 WREG32(DMA_RB_CNTL, rb_cntl); 2534 2535 /* Initialize the ring buffer's read and write pointers */ 2536 WREG32(DMA_RB_RPTR, 0); 2537 WREG32(DMA_RB_WPTR, 0); 2538 2539 /* set the wb address whether it's enabled or not */ 2540 WREG32(DMA_RB_RPTR_ADDR_HI, 2541 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF); 2542 WREG32(DMA_RB_RPTR_ADDR_LO, 2543 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC)); 2544 2545 if (rdev->wb.enabled) 2546 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; 2547 2548 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); 2549 2550 /* enable DMA IBs */ 2551 ib_cntl = DMA_IB_ENABLE; 2552 #ifdef __BIG_ENDIAN 2553 ib_cntl |= DMA_IB_SWAP_ENABLE; 2554 #endif 2555 WREG32(DMA_IB_CNTL, ib_cntl); 2556 2557 dma_cntl = RREG32(DMA_CNTL); 2558 dma_cntl &= ~CTXEMPTY_INT_ENABLE; 2559 WREG32(DMA_CNTL, dma_cntl); 2560 2561 if (rdev->family >= CHIP_RV770) 2562 WREG32(DMA_MODE, 1); 2563 2564 ring->wptr = 0; 2565 WREG32(DMA_RB_WPTR, ring->wptr << 2); 2566 2567 ring->rptr = RREG32(DMA_RB_RPTR) >> 2; 2568 2569 WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE); 2570 2571 ring->ready = true; 2572 2573 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring); 2574 if (r) { 2575 ring->ready = false; 2576 return r; 2577 } 2578 2579 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 2580 2581 return 0; 2582 } 2583 2584 /** 2585 * r600_dma_fini - tear down the async dma engine 2586 * 2587 * @rdev: radeon_device pointer 2588 * 2589 * Stop the async dma engine and free the ring (r6xx-evergreen). 
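 *
 * Suspend paths only need r600_dma_stop(); full teardown additionally
 * frees the ring object through radeon_ring_fini().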
2590 */ 2591 void r600_dma_fini(struct radeon_device *rdev) 2592 { 2593 r600_dma_stop(rdev); 2594 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); 2595 } 2596 2597 /* 2598 * UVD 2599 */ 2600 int r600_uvd_rbc_start(struct radeon_device *rdev) 2601 { 2602 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 2603 uint64_t rptr_addr; 2604 uint32_t rb_bufsz, tmp; 2605 int r; 2606 2607 rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET; 2608 2609 if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) { 2610 DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n"); 2611 return -EINVAL; 2612 } 2613 2614 /* force RBC into idle state */ 2615 WREG32(UVD_RBC_RB_CNTL, 0x11010101); 2616 2617 /* Set the write pointer delay */ 2618 WREG32(UVD_RBC_RB_WPTR_CNTL, 0); 2619 2620 /* set the wb address */ 2621 WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2); 2622 2623 /* programm the 4GB memory segment for rptr and ring buffer */ 2624 WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) | 2625 (0x7 << 16) | (0x1 << 31)); 2626 2627 /* Initialize the ring buffer's read and write pointers */ 2628 WREG32(UVD_RBC_RB_RPTR, 0x0); 2629 2630 ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR); 2631 WREG32(UVD_RBC_RB_WPTR, ring->wptr); 2632 2633 /* set the ring address */ 2634 WREG32(UVD_RBC_RB_BASE, ring->gpu_addr); 2635 2636 /* Set ring buffer size */ 2637 rb_bufsz = drm_order(ring->ring_size); 2638 rb_bufsz = (0x1 << 8) | rb_bufsz; 2639 WREG32(UVD_RBC_RB_CNTL, rb_bufsz); 2640 2641 ring->ready = true; 2642 r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring); 2643 if (r) { 2644 ring->ready = false; 2645 return r; 2646 } 2647 2648 r = radeon_ring_lock(rdev, ring, 10); 2649 if (r) { 2650 DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r); 2651 return r; 2652 } 2653 2654 tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); 2655 radeon_ring_write(ring, tmp); 2656 radeon_ring_write(ring, 0xFFFFF); 2657 2658 tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); 2659 radeon_ring_write(ring, tmp); 2660 radeon_ring_write(ring, 0xFFFFF); 2661 2662 tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); 2663 radeon_ring_write(ring, tmp); 2664 radeon_ring_write(ring, 0xFFFFF); 2665 2666 /* Clear timeout status bits */ 2667 radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0)); 2668 radeon_ring_write(ring, 0x8); 2669 2670 radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); 2671 radeon_ring_write(ring, 3); 2672 2673 radeon_ring_unlock_commit(rdev, ring); 2674 2675 return 0; 2676 } 2677 2678 void r600_uvd_rbc_stop(struct radeon_device *rdev) 2679 { 2680 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 2681 2682 /* force RBC into idle state */ 2683 WREG32(UVD_RBC_RB_CNTL, 0x11010101); 2684 ring->ready = false; 2685 } 2686 2687 int r600_uvd_init(struct radeon_device *rdev) 2688 { 2689 int i, j, r; 2690 2691 /* raise clocks while booting up the VCPU */ 2692 radeon_set_uvd_clocks(rdev, 53300, 40000); 2693 2694 /* disable clock gating */ 2695 WREG32(UVD_CGC_GATE, 0); 2696 2697 /* disable interupt */ 2698 WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); 2699 2700 /* put LMI, VCPU, RBC etc... 
into reset */ 2701 WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | 2702 LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | 2703 CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET); 2704 mdelay(5); 2705 2706 /* take UVD block out of reset */ 2707 WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD); 2708 mdelay(5); 2709 2710 /* initialize UVD memory controller */ 2711 WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | 2712 (1 << 21) | (1 << 9) | (1 << 20)); 2713 2714 /* disable byte swapping */ 2715 WREG32(UVD_LMI_SWAP_CNTL, 0); 2716 WREG32(UVD_MP_SWAP_CNTL, 0); 2717 2718 WREG32(UVD_MPC_SET_MUXA0, 0x40c2040); 2719 WREG32(UVD_MPC_SET_MUXA1, 0x0); 2720 WREG32(UVD_MPC_SET_MUXB0, 0x40c2040); 2721 WREG32(UVD_MPC_SET_MUXB1, 0x0); 2722 WREG32(UVD_MPC_SET_ALU, 0); 2723 WREG32(UVD_MPC_SET_MUX, 0x88); 2724 2725 /* Stall UMC */ 2726 WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); 2727 WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); 2728 2729 /* take all subblocks out of reset, except VCPU */ 2730 WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); 2731 mdelay(5); 2732 2733 /* enable VCPU clock */ 2734 WREG32(UVD_VCPU_CNTL, 1 << 9); 2735 2736 /* enable UMC */ 2737 WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); 2738 2739 /* boot up the VCPU */ 2740 WREG32(UVD_SOFT_RESET, 0); 2741 mdelay(10); 2742 2743 WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); 2744 2745 for (i = 0; i < 10; ++i) { 2746 uint32_t status; 2747 for (j = 0; j < 100; ++j) { 2748 status = RREG32(UVD_STATUS); 2749 if (status & 2) 2750 break; 2751 mdelay(10); 2752 } 2753 r = 0; 2754 if (status & 2) 2755 break; 2756 2757 DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); 2758 WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET); 2759 mdelay(10); 2760 WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET); 2761 mdelay(10); 2762 r = -1; 2763 } 2764 2765 if (r) { 2766 DRM_ERROR("UVD not responding, giving up!!!\n"); 2767 radeon_set_uvd_clocks(rdev, 0, 0); 2768 return r; 2769 } 2770 2771 /* enable interupt */ 2772 WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1)); 2773 2774 r = r600_uvd_rbc_start(rdev); 2775 if (!r) 2776 DRM_INFO("UVD initialized successfully.\n"); 2777 2778 /* lower clocks again */ 2779 radeon_set_uvd_clocks(rdev, 0, 0); 2780 2781 return r; 2782 } 2783 2784 /* 2785 * GPU scratch registers helpers function. 
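 *
 * Seven registers starting at SCRATCH_REG0 are managed as a small free
 * list; radeon_scratch_get()/radeon_scratch_free() hand them out to
 * users such as the CP ring and IB tests below, which have the CP write
 * a magic value into one and poll it from the CPU.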
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

/**
 * r600_dma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to
 * memory (r6xx-SI).
 * Returns 0 for success, error for failure.
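 *
 * The CPU seeds the vram scratch page with 0xCAFEDEAD, a single
 * DMA_PACKET_WRITE of 0xDEADBEEF to that page is queued on the ring,
 * and the page is then polled until the value flips or
 * rdev->usec_timeout expires.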
2848 */ 2849 int r600_dma_ring_test(struct radeon_device *rdev, 2850 struct radeon_ring *ring) 2851 { 2852 unsigned i; 2853 int r; 2854 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 2855 u32 tmp; 2856 2857 if (!ptr) { 2858 DRM_ERROR("invalid vram scratch pointer\n"); 2859 return -EINVAL; 2860 } 2861 2862 tmp = 0xCAFEDEAD; 2863 writel(tmp, ptr); 2864 2865 r = radeon_ring_lock(rdev, ring, 4); 2866 if (r) { 2867 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); 2868 return r; 2869 } 2870 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); 2871 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); 2872 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); 2873 radeon_ring_write(ring, 0xDEADBEEF); 2874 radeon_ring_unlock_commit(rdev, ring); 2875 2876 for (i = 0; i < rdev->usec_timeout; i++) { 2877 tmp = readl(ptr); 2878 if (tmp == 0xDEADBEEF) 2879 break; 2880 DRM_UDELAY(1); 2881 } 2882 2883 if (i < rdev->usec_timeout) { 2884 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); 2885 } else { 2886 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", 2887 ring->idx, tmp); 2888 r = -EINVAL; 2889 } 2890 return r; 2891 } 2892 2893 int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) 2894 { 2895 uint32_t tmp = 0; 2896 unsigned i; 2897 int r; 2898 2899 WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD); 2900 r = radeon_ring_lock(rdev, ring, 3); 2901 if (r) { 2902 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", 2903 ring->idx, r); 2904 return r; 2905 } 2906 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); 2907 radeon_ring_write(ring, 0xDEADBEEF); 2908 radeon_ring_unlock_commit(rdev, ring); 2909 for (i = 0; i < rdev->usec_timeout; i++) { 2910 tmp = RREG32(UVD_CONTEXT_ID); 2911 if (tmp == 0xDEADBEEF) 2912 break; 2913 DRM_UDELAY(1); 2914 } 2915 2916 if (i < rdev->usec_timeout) { 2917 DRM_INFO("ring test on %d succeeded in %d usecs\n", 2918 ring->idx, i); 2919 } else { 2920 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", 2921 ring->idx, tmp); 2922 r = -EINVAL; 2923 } 2924 return r; 2925 } 2926 2927 /* 2928 * CP fences/semaphores 2929 */ 2930 2931 void r600_fence_ring_emit(struct radeon_device *rdev, 2932 struct radeon_fence *fence) 2933 { 2934 struct radeon_ring *ring = &rdev->ring[fence->ring]; 2935 2936 if (rdev->wb.use_event) { 2937 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 2938 /* flush read cache over gart */ 2939 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 2940 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | 2941 PACKET3_VC_ACTION_ENA | 2942 PACKET3_SH_ACTION_ENA); 2943 radeon_ring_write(ring, 0xFFFFFFFF); 2944 radeon_ring_write(ring, 0); 2945 radeon_ring_write(ring, 10); /* poll interval */ 2946 /* EVENT_WRITE_EOP - flush caches, send int */ 2947 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 2948 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); 2949 radeon_ring_write(ring, addr & 0xffffffff); 2950 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 2951 radeon_ring_write(ring, fence->seq); 2952 radeon_ring_write(ring, 0); 2953 } else { 2954 /* flush read cache over gart */ 2955 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 2956 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | 2957 PACKET3_VC_ACTION_ENA | 2958 PACKET3_SH_ACTION_ENA); 2959 radeon_ring_write(ring, 0xFFFFFFFF); 2960 radeon_ring_write(ring, 0); 2961 radeon_ring_write(ring, 10); /* poll interval */ 2962 
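		/*
		 * Legacy path for parts not using write-back events: emit a
		 * cache flush/invalidate event, wait for the 3D engine to go
		 * idle and clean, then store the fence sequence number in the
		 * driver's scratch register before raising the interrupt.
		 */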
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0)); 2963 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); 2964 /* wait for 3D idle clean */ 2965 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2966 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2967 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); 2968 /* Emit fence sequence & fire IRQ */ 2969 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2970 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2971 radeon_ring_write(ring, fence->seq); 2972 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ 2973 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0)); 2974 radeon_ring_write(ring, RB_INT_STAT); 2975 } 2976 } 2977 2978 void r600_uvd_fence_emit(struct radeon_device *rdev, 2979 struct radeon_fence *fence) 2980 { 2981 struct radeon_ring *ring = &rdev->ring[fence->ring]; 2982 uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr; 2983 2984 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); 2985 radeon_ring_write(ring, fence->seq); 2986 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); 2987 radeon_ring_write(ring, addr & 0xffffffff); 2988 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); 2989 radeon_ring_write(ring, upper_32_bits(addr) & 0xff); 2990 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); 2991 radeon_ring_write(ring, 0); 2992 2993 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); 2994 radeon_ring_write(ring, 0); 2995 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); 2996 radeon_ring_write(ring, 0); 2997 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); 2998 radeon_ring_write(ring, 2); 2999 return; 3000 } 3001 3002 void r600_semaphore_ring_emit(struct radeon_device *rdev, 3003 struct radeon_ring *ring, 3004 struct radeon_semaphore *semaphore, 3005 bool emit_wait) 3006 { 3007 uint64_t addr = semaphore->gpu_addr; 3008 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 3009 3010 if (rdev->family < CHIP_CAYMAN) 3011 sel |= PACKET3_SEM_WAIT_ON_SIGNAL; 3012 3013 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 3014 radeon_ring_write(ring, addr & 0xffffffff); 3015 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 3016 } 3017 3018 /* 3019 * DMA fences/semaphores 3020 */ 3021 3022 /** 3023 * r600_dma_fence_ring_emit - emit a fence on the DMA ring 3024 * 3025 * @rdev: radeon_device pointer 3026 * @fence: radeon fence object 3027 * 3028 * Add a DMA fence packet to the ring to write 3029 * the fence seq number and DMA trap packet to generate 3030 * an interrupt if needed (r6xx-r7xx). 
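 *
 * The packet stream looks like:
 *   DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0), addr_lo, addr_hi, seq_lo,
 *   DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)
 * where the trap packet is what actually raises the interrupt.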
3031 */ 3032 void r600_dma_fence_ring_emit(struct radeon_device *rdev, 3033 struct radeon_fence *fence) 3034 { 3035 struct radeon_ring *ring = &rdev->ring[fence->ring]; 3036 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 3037 3038 /* write the fence */ 3039 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0)); 3040 radeon_ring_write(ring, addr & 0xfffffffc); 3041 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); 3042 radeon_ring_write(ring, lower_32_bits(fence->seq)); 3043 /* generate an interrupt */ 3044 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); 3045 } 3046 3047 /** 3048 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring 3049 * 3050 * @rdev: radeon_device pointer 3051 * @ring: radeon_ring structure holding ring information 3052 * @semaphore: radeon semaphore object 3053 * @emit_wait: wait or signal semaphore 3054 * 3055 * Add a DMA semaphore packet to the ring wait on or signal 3056 * other rings (r6xx-SI). 3057 */ 3058 void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, 3059 struct radeon_ring *ring, 3060 struct radeon_semaphore *semaphore, 3061 bool emit_wait) 3062 { 3063 u64 addr = semaphore->gpu_addr; 3064 u32 s = emit_wait ? 0 : 1; 3065 3066 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); 3067 radeon_ring_write(ring, addr & 0xfffffffc); 3068 radeon_ring_write(ring, upper_32_bits(addr) & 0xff); 3069 } 3070 3071 void r600_uvd_semaphore_emit(struct radeon_device *rdev, 3072 struct radeon_ring *ring, 3073 struct radeon_semaphore *semaphore, 3074 bool emit_wait) 3075 { 3076 uint64_t addr = semaphore->gpu_addr; 3077 3078 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); 3079 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); 3080 3081 radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); 3082 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); 3083 3084 radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); 3085 radeon_ring_write(ring, emit_wait ? 1 : 0); 3086 } 3087 3088 int r600_copy_blit(struct radeon_device *rdev, 3089 uint64_t src_offset, 3090 uint64_t dst_offset, 3091 unsigned num_gpu_pages, 3092 struct radeon_fence **fence) 3093 { 3094 struct radeon_semaphore *sem = NULL; 3095 struct radeon_sa_bo *vb = NULL; 3096 int r; 3097 3098 r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem); 3099 if (r) { 3100 return r; 3101 } 3102 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb); 3103 r600_blit_done_copy(rdev, fence, vb, sem); 3104 return 0; 3105 } 3106 3107 /** 3108 * r600_copy_dma - copy pages using the DMA engine 3109 * 3110 * @rdev: radeon_device pointer 3111 * @src_offset: src GPU address 3112 * @dst_offset: dst GPU address 3113 * @num_gpu_pages: number of GPU pages to xfer 3114 * @fence: radeon fence object 3115 * 3116 * Copy GPU paging using the DMA engine (r6xx). 3117 * Used by the radeon ttm implementation to move pages if 3118 * registered as the asic copy callback. 
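 *
 * Each DMA_PACKET_COPY moves at most 0xFFFE dwords, so the transfer is
 * split into DIV_ROUND_UP(size_in_dw, 0xFFFE) packets of 4 dwords each;
 * that is where the num_loops * 4 + 8 ring allocation below comes from.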
3119 */ 3120 int r600_copy_dma(struct radeon_device *rdev, 3121 uint64_t src_offset, uint64_t dst_offset, 3122 unsigned num_gpu_pages, 3123 struct radeon_fence **fence) 3124 { 3125 struct radeon_semaphore *sem = NULL; 3126 int ring_index = rdev->asic->copy.dma_ring_index; 3127 struct radeon_ring *ring = &rdev->ring[ring_index]; 3128 u32 size_in_dw, cur_size_in_dw; 3129 int i, num_loops; 3130 int r = 0; 3131 3132 r = radeon_semaphore_create(rdev, &sem); 3133 if (r) { 3134 DRM_ERROR("radeon: moving bo (%d).\n", r); 3135 return r; 3136 } 3137 3138 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; 3139 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE); 3140 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8); 3141 if (r) { 3142 DRM_ERROR("radeon: moving bo (%d).\n", r); 3143 radeon_semaphore_free(rdev, &sem, NULL); 3144 return r; 3145 } 3146 3147 if (radeon_fence_need_sync(*fence, ring->idx)) { 3148 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 3149 ring->idx); 3150 radeon_fence_note_sync(*fence, ring->idx); 3151 } else { 3152 radeon_semaphore_free(rdev, &sem, NULL); 3153 } 3154 3155 for (i = 0; i < num_loops; i++) { 3156 cur_size_in_dw = size_in_dw; 3157 if (cur_size_in_dw > 0xFFFE) 3158 cur_size_in_dw = 0xFFFE; 3159 size_in_dw -= cur_size_in_dw; 3160 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); 3161 radeon_ring_write(ring, dst_offset & 0xfffffffc); 3162 radeon_ring_write(ring, src_offset & 0xfffffffc); 3163 radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) | 3164 (upper_32_bits(src_offset) & 0xff))); 3165 src_offset += cur_size_in_dw * 4; 3166 dst_offset += cur_size_in_dw * 4; 3167 } 3168 3169 r = radeon_fence_emit(rdev, fence, ring->idx); 3170 if (r) { 3171 radeon_ring_unlock_undo(rdev, ring); 3172 return r; 3173 } 3174 3175 radeon_ring_unlock_commit(rdev, ring); 3176 radeon_semaphore_free(rdev, &sem, *fence); 3177 3178 return r; 3179 } 3180 3181 int r600_set_surface_reg(struct radeon_device *rdev, int reg, 3182 uint32_t tiling_flags, uint32_t pitch, 3183 uint32_t offset, uint32_t obj_size) 3184 { 3185 /* FIXME: implement */ 3186 return 0; 3187 } 3188 3189 void r600_clear_surface_reg(struct radeon_device *rdev, int reg) 3190 { 3191 /* FIXME: implement */ 3192 } 3193 3194 static int r600_startup(struct radeon_device *rdev) 3195 { 3196 struct radeon_ring *ring; 3197 int r; 3198 3199 /* enable pcie gen2 link */ 3200 r600_pcie_gen2_enable(rdev); 3201 3202 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 3203 r = r600_init_microcode(rdev); 3204 if (r) { 3205 DRM_ERROR("Failed to load firmware!\n"); 3206 return r; 3207 } 3208 } 3209 3210 r = r600_vram_scratch_init(rdev); 3211 if (r) 3212 return r; 3213 3214 r600_mc_program(rdev); 3215 if (rdev->flags & RADEON_IS_AGP) { 3216 r600_agp_enable(rdev); 3217 } else { 3218 r = r600_pcie_gart_enable(rdev); 3219 if (r) 3220 return r; 3221 } 3222 r600_gpu_init(rdev); 3223 r = r600_blit_init(rdev); 3224 if (r) { 3225 r600_blit_fini(rdev); 3226 rdev->asic->copy.copy = NULL; 3227 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 3228 } 3229 3230 /* allocate wb buffer */ 3231 r = radeon_wb_init(rdev); 3232 if (r) 3233 return r; 3234 3235 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); 3236 if (r) { 3237 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 3238 return r; 3239 } 3240 3241 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); 3242 if (r) { 3243 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); 3244 
return r; 3245 } 3246 3247 /* Enable IRQ */ 3248 if (!rdev->irq.installed) { 3249 r = radeon_irq_kms_init(rdev); 3250 if (r) 3251 return r; 3252 } 3253 3254 r = r600_irq_init(rdev); 3255 if (r) { 3256 DRM_ERROR("radeon: IH init failed (%d).\n", r); 3257 radeon_irq_kms_fini(rdev); 3258 return r; 3259 } 3260 r600_irq_set(rdev); 3261 3262 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3263 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 3264 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 3265 0, 0xfffff, RADEON_CP_PACKET2); 3266 if (r) 3267 return r; 3268 3269 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 3270 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 3271 DMA_RB_RPTR, DMA_RB_WPTR, 3272 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 3273 if (r) 3274 return r; 3275 3276 r = r600_cp_load_microcode(rdev); 3277 if (r) 3278 return r; 3279 r = r600_cp_resume(rdev); 3280 if (r) 3281 return r; 3282 3283 r = r600_dma_resume(rdev); 3284 if (r) 3285 return r; 3286 3287 r = radeon_ib_pool_init(rdev); 3288 if (r) { 3289 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 3290 return r; 3291 } 3292 3293 r = r600_audio_init(rdev); 3294 if (r) { 3295 DRM_ERROR("radeon: audio init failed\n"); 3296 return r; 3297 } 3298 3299 return 0; 3300 } 3301 3302 void r600_vga_set_state(struct radeon_device *rdev, bool state) 3303 { 3304 uint32_t temp; 3305 3306 temp = RREG32(CONFIG_CNTL); 3307 if (state == false) { 3308 temp &= ~(1<<0); 3309 temp |= (1<<1); 3310 } else { 3311 temp &= ~(1<<1); 3312 } 3313 WREG32(CONFIG_CNTL, temp); 3314 } 3315 3316 int r600_resume(struct radeon_device *rdev) 3317 { 3318 int r; 3319 3320 /* Do not reset GPU before posting, on r600 hw unlike on r500 hw, 3321 * posting will perform necessary task to bring back GPU into good 3322 * shape. 3323 */ 3324 /* post card */ 3325 atom_asic_init(rdev->mode_info.atom_context); 3326 3327 rdev->accel_working = true; 3328 r = r600_startup(rdev); 3329 if (r) { 3330 DRM_ERROR("r600 startup failed on resume\n"); 3331 rdev->accel_working = false; 3332 return r; 3333 } 3334 3335 return r; 3336 } 3337 3338 int r600_suspend(struct radeon_device *rdev) 3339 { 3340 r600_audio_fini(rdev); 3341 r600_cp_stop(rdev); 3342 r600_dma_stop(rdev); 3343 r600_irq_suspend(rdev); 3344 radeon_wb_disable(rdev); 3345 r600_pcie_gart_disable(rdev); 3346 3347 return 0; 3348 } 3349 3350 /* Plan is to move initialization in that function and use 3351 * helper function so that radeon_device_init pretty much 3352 * do nothing more than calling asic specific function. This 3353 * should also allow to remove a bunch of callback function 3354 * like vram_info. 3355 */ 3356 int r600_init(struct radeon_device *rdev) 3357 { 3358 int r; 3359 3360 if (r600_debugfs_mc_info_init(rdev)) { 3361 DRM_ERROR("Failed to register debugfs file for mc !\n"); 3362 } 3363 /* Read BIOS */ 3364 if (!radeon_get_bios(rdev)) { 3365 if (ASIC_IS_AVIVO(rdev)) 3366 return -EINVAL; 3367 } 3368 /* Must be an ATOMBIOS */ 3369 if (!rdev->is_atom_bios) { 3370 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); 3371 return -EINVAL; 3372 } 3373 r = radeon_atombios_init(rdev); 3374 if (r) 3375 return r; 3376 /* Post card if necessary */ 3377 if (!radeon_card_posted(rdev)) { 3378 if (!rdev->bios) { 3379 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 3380 return -EINVAL; 3381 } 3382 DRM_INFO("GPU not posted. 
posting now...\n"); 3383 atom_asic_init(rdev->mode_info.atom_context); 3384 } 3385 /* Initialize scratch registers */ 3386 r600_scratch_init(rdev); 3387 /* Initialize surface registers */ 3388 radeon_surface_init(rdev); 3389 /* Initialize clocks */ 3390 radeon_get_clock_info(rdev->ddev); 3391 /* Fence driver */ 3392 r = radeon_fence_driver_init(rdev); 3393 if (r) 3394 return r; 3395 if (rdev->flags & RADEON_IS_AGP) { 3396 r = radeon_agp_init(rdev); 3397 if (r) 3398 radeon_agp_disable(rdev); 3399 } 3400 r = r600_mc_init(rdev); 3401 if (r) 3402 return r; 3403 /* Memory manager */ 3404 r = radeon_bo_init(rdev); 3405 if (r) 3406 return r; 3407 3408 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 3409 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 3410 3411 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; 3412 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); 3413 3414 rdev->ih.ring_obj = NULL; 3415 r600_ih_ring_init(rdev, 64 * 1024); 3416 3417 r = r600_pcie_gart_init(rdev); 3418 if (r) 3419 return r; 3420 3421 rdev->accel_working = true; 3422 r = r600_startup(rdev); 3423 if (r) { 3424 dev_err(rdev->dev, "disabling GPU acceleration\n"); 3425 r600_cp_fini(rdev); 3426 r600_dma_fini(rdev); 3427 r600_irq_fini(rdev); 3428 radeon_wb_fini(rdev); 3429 radeon_ib_pool_fini(rdev); 3430 radeon_irq_kms_fini(rdev); 3431 r600_pcie_gart_fini(rdev); 3432 rdev->accel_working = false; 3433 } 3434 3435 return 0; 3436 } 3437 3438 void r600_fini(struct radeon_device *rdev) 3439 { 3440 r600_audio_fini(rdev); 3441 r600_blit_fini(rdev); 3442 r600_cp_fini(rdev); 3443 r600_dma_fini(rdev); 3444 r600_irq_fini(rdev); 3445 radeon_wb_fini(rdev); 3446 radeon_ib_pool_fini(rdev); 3447 radeon_irq_kms_fini(rdev); 3448 r600_pcie_gart_fini(rdev); 3449 r600_vram_scratch_fini(rdev); 3450 radeon_agp_fini(rdev); 3451 radeon_gem_fini(rdev); 3452 radeon_fence_driver_fini(rdev); 3453 radeon_bo_fini(rdev); 3454 radeon_atombios_fini(rdev); 3455 kfree(rdev->bios); 3456 rdev->bios = NULL; 3457 } 3458 3459 3460 /* 3461 * CS stuff 3462 */ 3463 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3464 { 3465 struct radeon_ring *ring = &rdev->ring[ib->ring]; 3466 u32 next_rptr; 3467 3468 if (ring->rptr_save_reg) { 3469 next_rptr = ring->wptr + 3 + 4; 3470 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 3471 radeon_ring_write(ring, ((ring->rptr_save_reg - 3472 PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 3473 radeon_ring_write(ring, next_rptr); 3474 } else if (rdev->wb.enabled) { 3475 next_rptr = ring->wptr + 5 + 4; 3476 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3)); 3477 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 3478 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18)); 3479 radeon_ring_write(ring, next_rptr); 3480 radeon_ring_write(ring, 0); 3481 } 3482 3483 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 3484 radeon_ring_write(ring, 3485 #ifdef __BIG_ENDIAN 3486 (2 << 0) | 3487 #endif 3488 (ib->gpu_addr & 0xFFFFFFFC)); 3489 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); 3490 radeon_ring_write(ring, ib->length_dw); 3491 } 3492 3493 void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3494 { 3495 struct radeon_ring *ring = &rdev->ring[ib->ring]; 3496 3497 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0)); 3498 radeon_ring_write(ring, ib->gpu_addr); 3499 radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0)); 3500 radeon_ring_write(ring, 
ib->length_dw); 3501 } 3502 3503 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3504 { 3505 struct radeon_ib ib; 3506 uint32_t scratch; 3507 uint32_t tmp = 0; 3508 unsigned i; 3509 int r; 3510 3511 r = radeon_scratch_get(rdev, &scratch); 3512 if (r) { 3513 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); 3514 return r; 3515 } 3516 WREG32(scratch, 0xCAFEDEAD); 3517 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 3518 if (r) { 3519 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3520 goto free_scratch; 3521 } 3522 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); 3523 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 3524 ib.ptr[2] = 0xDEADBEEF; 3525 ib.length_dw = 3; 3526 r = radeon_ib_schedule(rdev, &ib, NULL); 3527 if (r) { 3528 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3529 goto free_ib; 3530 } 3531 r = radeon_fence_wait(ib.fence, false); 3532 if (r) { 3533 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3534 goto free_ib; 3535 } 3536 for (i = 0; i < rdev->usec_timeout; i++) { 3537 tmp = RREG32(scratch); 3538 if (tmp == 0xDEADBEEF) 3539 break; 3540 DRM_UDELAY(1); 3541 } 3542 if (i < rdev->usec_timeout) { 3543 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); 3544 } else { 3545 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", 3546 scratch, tmp); 3547 r = -EINVAL; 3548 } 3549 free_ib: 3550 radeon_ib_free(rdev, &ib); 3551 free_scratch: 3552 radeon_scratch_free(rdev, scratch); 3553 return r; 3554 } 3555 3556 /** 3557 * r600_dma_ib_test - test an IB on the DMA engine 3558 * 3559 * @rdev: radeon_device pointer 3560 * @ring: radeon_ring structure holding ring information 3561 * 3562 * Test a simple IB in the DMA ring (r6xx-SI). 3563 * Returns 0 on success, error on failure. 
3564 */ 3565 int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3566 { 3567 struct radeon_ib ib; 3568 unsigned i; 3569 int r; 3570 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 3571 u32 tmp = 0; 3572 3573 if (!ptr) { 3574 DRM_ERROR("invalid vram scratch pointer\n"); 3575 return -EINVAL; 3576 } 3577 3578 tmp = 0xCAFEDEAD; 3579 writel(tmp, ptr); 3580 3581 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 3582 if (r) { 3583 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3584 return r; 3585 } 3586 3587 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); 3588 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; 3589 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; 3590 ib.ptr[3] = 0xDEADBEEF; 3591 ib.length_dw = 4; 3592 3593 r = radeon_ib_schedule(rdev, &ib, NULL); 3594 if (r) { 3595 radeon_ib_free(rdev, &ib); 3596 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3597 return r; 3598 } 3599 r = radeon_fence_wait(ib.fence, false); 3600 if (r) { 3601 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3602 return r; 3603 } 3604 for (i = 0; i < rdev->usec_timeout; i++) { 3605 tmp = readl(ptr); 3606 if (tmp == 0xDEADBEEF) 3607 break; 3608 DRM_UDELAY(1); 3609 } 3610 if (i < rdev->usec_timeout) { 3611 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); 3612 } else { 3613 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp); 3614 r = -EINVAL; 3615 } 3616 radeon_ib_free(rdev, &ib); 3617 return r; 3618 } 3619 3620 int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3621 { 3622 struct radeon_fence *fence = NULL; 3623 int r; 3624 3625 r = radeon_set_uvd_clocks(rdev, 53300, 40000); 3626 if (r) { 3627 DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); 3628 return r; 3629 } 3630 3631 r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL); 3632 if (r) { 3633 DRM_ERROR("radeon: failed to get create msg (%d).\n", r); 3634 goto error; 3635 } 3636 3637 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence); 3638 if (r) { 3639 DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r); 3640 goto error; 3641 } 3642 3643 r = radeon_fence_wait(fence, false); 3644 if (r) { 3645 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3646 goto error; 3647 } 3648 DRM_INFO("ib test on ring %d succeeded\n", ring->idx); 3649 error: 3650 radeon_fence_unref(&fence); 3651 radeon_set_uvd_clocks(rdev, 0, 0); 3652 return r; 3653 } 3654 3655 /** 3656 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine 3657 * 3658 * @rdev: radeon_device pointer 3659 * @ib: IB object to schedule 3660 * 3661 * Schedule an IB in the DMA ring (r6xx-r7xx). 3662 */ 3663 void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3664 { 3665 struct radeon_ring *ring = &rdev->ring[ib->ring]; 3666 3667 if (rdev->wb.enabled) { 3668 u32 next_rptr = ring->wptr + 4; 3669 while ((next_rptr & 7) != 5) 3670 next_rptr++; 3671 next_rptr += 3; 3672 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); 3673 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 3674 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); 3675 radeon_ring_write(ring, next_rptr); 3676 } 3677 3678 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. 3679 * Pad as necessary with NOPs. 
3680 */ 3681 while ((ring->wptr & 7) != 5) 3682 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 3683 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0)); 3684 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); 3685 radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF)); 3686 3687 } 3688 3689 /* 3690 * Interrupts 3691 * 3692 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty 3693 * the same as the CP ring buffer, but in reverse. Rather than the CPU 3694 * writing to the ring and the GPU consuming, the GPU writes to the ring 3695 * and host consumes. As the host irq handler processes interrupts, it 3696 * increments the rptr. When the rptr catches up with the wptr, all the 3697 * current interrupts have been processed. 3698 */ 3699 3700 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) 3701 { 3702 u32 rb_bufsz; 3703 3704 /* Align ring size */ 3705 rb_bufsz = drm_order(ring_size / 4); 3706 ring_size = (1 << rb_bufsz) * 4; 3707 rdev->ih.ring_size = ring_size; 3708 rdev->ih.ptr_mask = rdev->ih.ring_size - 1; 3709 rdev->ih.rptr = 0; 3710 } 3711 3712 int r600_ih_ring_alloc(struct radeon_device *rdev) 3713 { 3714 int r; 3715 3716 /* Allocate ring buffer */ 3717 if (rdev->ih.ring_obj == NULL) { 3718 r = radeon_bo_create(rdev, rdev->ih.ring_size, 3719 PAGE_SIZE, true, 3720 RADEON_GEM_DOMAIN_GTT, 3721 NULL, &rdev->ih.ring_obj); 3722 if (r) { 3723 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); 3724 return r; 3725 } 3726 r = radeon_bo_reserve(rdev->ih.ring_obj, false); 3727 if (unlikely(r != 0)) 3728 return r; 3729 r = radeon_bo_pin(rdev->ih.ring_obj, 3730 RADEON_GEM_DOMAIN_GTT, 3731 &rdev->ih.gpu_addr); 3732 if (r) { 3733 radeon_bo_unreserve(rdev->ih.ring_obj); 3734 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r); 3735 return r; 3736 } 3737 r = radeon_bo_kmap(rdev->ih.ring_obj, 3738 (void **)&rdev->ih.ring); 3739 radeon_bo_unreserve(rdev->ih.ring_obj); 3740 if (r) { 3741 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r); 3742 return r; 3743 } 3744 } 3745 return 0; 3746 } 3747 3748 void r600_ih_ring_fini(struct radeon_device *rdev) 3749 { 3750 int r; 3751 if (rdev->ih.ring_obj) { 3752 r = radeon_bo_reserve(rdev->ih.ring_obj, false); 3753 if (likely(r == 0)) { 3754 radeon_bo_kunmap(rdev->ih.ring_obj); 3755 radeon_bo_unpin(rdev->ih.ring_obj); 3756 radeon_bo_unreserve(rdev->ih.ring_obj); 3757 } 3758 radeon_bo_unref(&rdev->ih.ring_obj); 3759 rdev->ih.ring = NULL; 3760 rdev->ih.ring_obj = NULL; 3761 } 3762 } 3763 3764 void r600_rlc_stop(struct radeon_device *rdev) 3765 { 3766 3767 if ((rdev->family >= CHIP_RV770) && 3768 (rdev->family <= CHIP_RV740)) { 3769 /* r7xx asics need to soft reset RLC before halting */ 3770 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); 3771 RREG32(SRBM_SOFT_RESET); 3772 mdelay(15); 3773 WREG32(SRBM_SOFT_RESET, 0); 3774 RREG32(SRBM_SOFT_RESET); 3775 } 3776 3777 WREG32(RLC_CNTL, 0); 3778 } 3779 3780 static void r600_rlc_start(struct radeon_device *rdev) 3781 { 3782 WREG32(RLC_CNTL, RLC_ENABLE); 3783 } 3784 3785 static int r600_rlc_init(struct radeon_device *rdev) 3786 { 3787 u32 i; 3788 const __be32 *fw_data; 3789 3790 if (!rdev->rlc_fw) 3791 return -EINVAL; 3792 3793 r600_rlc_stop(rdev); 3794 3795 WREG32(RLC_HB_CNTL, 0); 3796 3797 if (rdev->family == CHIP_ARUBA) { 3798 WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); 3799 WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); 3800 } 3801 if 
(rdev->family <= CHIP_CAYMAN) { 3802 WREG32(RLC_HB_BASE, 0); 3803 WREG32(RLC_HB_RPTR, 0); 3804 WREG32(RLC_HB_WPTR, 0); 3805 } 3806 if (rdev->family <= CHIP_CAICOS) { 3807 WREG32(RLC_HB_WPTR_LSB_ADDR, 0); 3808 WREG32(RLC_HB_WPTR_MSB_ADDR, 0); 3809 } 3810 WREG32(RLC_MC_CNTL, 0); 3811 WREG32(RLC_UCODE_CNTL, 0); 3812 3813 fw_data = (const __be32 *)rdev->rlc_fw->data; 3814 if (rdev->family >= CHIP_ARUBA) { 3815 for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) { 3816 WREG32(RLC_UCODE_ADDR, i); 3817 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3818 } 3819 } else if (rdev->family >= CHIP_CAYMAN) { 3820 for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) { 3821 WREG32(RLC_UCODE_ADDR, i); 3822 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3823 } 3824 } else if (rdev->family >= CHIP_CEDAR) { 3825 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) { 3826 WREG32(RLC_UCODE_ADDR, i); 3827 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3828 } 3829 } else if (rdev->family >= CHIP_RV770) { 3830 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { 3831 WREG32(RLC_UCODE_ADDR, i); 3832 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3833 } 3834 } else { 3835 for (i = 0; i < RLC_UCODE_SIZE; i++) { 3836 WREG32(RLC_UCODE_ADDR, i); 3837 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3838 } 3839 } 3840 WREG32(RLC_UCODE_ADDR, 0); 3841 3842 r600_rlc_start(rdev); 3843 3844 return 0; 3845 } 3846 3847 static void r600_enable_interrupts(struct radeon_device *rdev) 3848 { 3849 u32 ih_cntl = RREG32(IH_CNTL); 3850 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3851 3852 ih_cntl |= ENABLE_INTR; 3853 ih_rb_cntl |= IH_RB_ENABLE; 3854 WREG32(IH_CNTL, ih_cntl); 3855 WREG32(IH_RB_CNTL, ih_rb_cntl); 3856 rdev->ih.enabled = true; 3857 } 3858 3859 void r600_disable_interrupts(struct radeon_device *rdev) 3860 { 3861 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3862 u32 ih_cntl = RREG32(IH_CNTL); 3863 3864 ih_rb_cntl &= ~IH_RB_ENABLE; 3865 ih_cntl &= ~ENABLE_INTR; 3866 WREG32(IH_RB_CNTL, ih_rb_cntl); 3867 WREG32(IH_CNTL, ih_cntl); 3868 /* set rptr, wptr to 0 */ 3869 WREG32(IH_RB_RPTR, 0); 3870 WREG32(IH_RB_WPTR, 0); 3871 rdev->ih.enabled = false; 3872 rdev->ih.rptr = 0; 3873 } 3874 3875 static void r600_disable_interrupt_state(struct radeon_device *rdev) 3876 { 3877 u32 tmp; 3878 3879 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 3880 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 3881 WREG32(DMA_CNTL, tmp); 3882 WREG32(GRBM_INT_CNTL, 0); 3883 WREG32(DxMODE_INT_MASK, 0); 3884 WREG32(D1GRPH_INTERRUPT_CONTROL, 0); 3885 WREG32(D2GRPH_INTERRUPT_CONTROL, 0); 3886 if (ASIC_IS_DCE3(rdev)) { 3887 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); 3888 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); 3889 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3890 WREG32(DC_HPD1_INT_CONTROL, tmp); 3891 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3892 WREG32(DC_HPD2_INT_CONTROL, tmp); 3893 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3894 WREG32(DC_HPD3_INT_CONTROL, tmp); 3895 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3896 WREG32(DC_HPD4_INT_CONTROL, tmp); 3897 if (ASIC_IS_DCE32(rdev)) { 3898 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3899 WREG32(DC_HPD5_INT_CONTROL, tmp); 3900 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3901 WREG32(DC_HPD6_INT_CONTROL, tmp); 3902 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3903 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp); 3904 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & 
~HDMI0_AZ_FORMAT_WTRIG_MASK; 3905 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp); 3906 } else { 3907 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3908 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 3909 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3910 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp); 3911 } 3912 } else { 3913 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 3914 WREG32(DACB_AUTODETECT_INT_CONTROL, 0); 3915 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3916 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 3917 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3918 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 3919 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3920 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 3921 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3922 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 3923 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3924 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp); 3925 } 3926 } 3927 3928 int r600_irq_init(struct radeon_device *rdev) 3929 { 3930 int ret = 0; 3931 int rb_bufsz; 3932 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 3933 3934 /* allocate ring */ 3935 ret = r600_ih_ring_alloc(rdev); 3936 if (ret) 3937 return ret; 3938 3939 /* disable irqs */ 3940 r600_disable_interrupts(rdev); 3941 3942 /* init rlc */ 3943 ret = r600_rlc_init(rdev); 3944 if (ret) { 3945 r600_ih_ring_fini(rdev); 3946 return ret; 3947 } 3948 3949 /* setup interrupt control */ 3950 /* set dummy read address to ring address */ 3951 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); 3952 interrupt_cntl = RREG32(INTERRUPT_CNTL); 3953 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi 3954 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN 3955 */ 3956 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; 3957 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ 3958 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; 3959 WREG32(INTERRUPT_CNTL, interrupt_cntl); 3960 3961 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 3962 rb_bufsz = drm_order(rdev->ih.ring_size / 4); 3963 3964 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 3965 IH_WPTR_OVERFLOW_CLEAR | 3966 (rb_bufsz << 1)); 3967 3968 if (rdev->wb.enabled) 3969 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; 3970 3971 /* set the writeback address whether it's enabled or not */ 3972 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); 3973 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); 3974 3975 WREG32(IH_RB_CNTL, ih_rb_cntl); 3976 3977 /* set rptr, wptr to 0 */ 3978 WREG32(IH_RB_RPTR, 0); 3979 WREG32(IH_RB_WPTR, 0); 3980 3981 /* Default settings for IH_CNTL (disabled at first) */ 3982 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10); 3983 /* RPTR_REARM only works if msi's are enabled */ 3984 if (rdev->msi_enabled) 3985 ih_cntl |= RPTR_REARM; 3986 WREG32(IH_CNTL, ih_cntl); 3987 3988 /* force the active interrupt state to all disabled */ 3989 if (rdev->family >= CHIP_CEDAR) 3990 evergreen_disable_interrupt_state(rdev); 3991 else 3992 r600_disable_interrupt_state(rdev); 3993 3994 /* at this point everything should be setup correctly to enable master */ 3995 pci_set_master(rdev->pdev); 3996 3997 /* enable irqs */ 3998 r600_enable_interrupts(rdev); 3999 4000 return ret; 4001 } 4002 4003 void r600_irq_suspend(struct 
radeon_device *rdev) 4004 { 4005 r600_irq_disable(rdev); 4006 r600_rlc_stop(rdev); 4007 } 4008 4009 void r600_irq_fini(struct radeon_device *rdev) 4010 { 4011 r600_irq_suspend(rdev); 4012 r600_ih_ring_fini(rdev); 4013 } 4014 4015 int r600_irq_set(struct radeon_device *rdev) 4016 { 4017 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 4018 u32 mode_int = 0; 4019 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; 4020 u32 grbm_int_cntl = 0; 4021 u32 hdmi0, hdmi1; 4022 u32 d1grph = 0, d2grph = 0; 4023 u32 dma_cntl; 4024 4025 if (!rdev->irq.installed) { 4026 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 4027 return -EINVAL; 4028 } 4029 /* don't enable anything if the ih is disabled */ 4030 if (!rdev->ih.enabled) { 4031 r600_disable_interrupts(rdev); 4032 /* force the active interrupt state to all disabled */ 4033 r600_disable_interrupt_state(rdev); 4034 return 0; 4035 } 4036 4037 if (ASIC_IS_DCE3(rdev)) { 4038 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 4039 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 4040 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 4041 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; 4042 if (ASIC_IS_DCE32(rdev)) { 4043 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 4044 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 4045 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 4046 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 4047 } else { 4048 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 4049 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 4050 } 4051 } else { 4052 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; 4053 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; 4054 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; 4055 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 4056 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 4057 } 4058 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 4059 4060 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 4061 DRM_DEBUG("r600_irq_set: sw int\n"); 4062 cp_int_cntl |= RB_INT_ENABLE; 4063 cp_int_cntl |= TIME_STAMP_INT_ENABLE; 4064 } 4065 4066 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { 4067 DRM_DEBUG("r600_irq_set: sw int dma\n"); 4068 dma_cntl |= TRAP_ENABLE; 4069 } 4070 4071 if (rdev->irq.crtc_vblank_int[0] || 4072 atomic_read(&rdev->irq.pflip[0])) { 4073 DRM_DEBUG("r600_irq_set: vblank 0\n"); 4074 mode_int |= D1MODE_VBLANK_INT_MASK; 4075 } 4076 if (rdev->irq.crtc_vblank_int[1] || 4077 atomic_read(&rdev->irq.pflip[1])) { 4078 DRM_DEBUG("r600_irq_set: vblank 1\n"); 4079 mode_int |= D2MODE_VBLANK_INT_MASK; 4080 } 4081 if (rdev->irq.hpd[0]) { 4082 DRM_DEBUG("r600_irq_set: hpd 1\n"); 4083 hpd1 |= DC_HPDx_INT_EN; 4084 } 4085 if (rdev->irq.hpd[1]) { 4086 DRM_DEBUG("r600_irq_set: hpd 2\n"); 4087 hpd2 |= DC_HPDx_INT_EN; 4088 } 4089 if (rdev->irq.hpd[2]) { 4090 DRM_DEBUG("r600_irq_set: hpd 3\n"); 4091 hpd3 |= DC_HPDx_INT_EN; 4092 } 4093 if (rdev->irq.hpd[3]) { 4094 DRM_DEBUG("r600_irq_set: hpd 4\n"); 4095 hpd4 |= DC_HPDx_INT_EN; 4096 } 4097 if (rdev->irq.hpd[4]) { 4098 DRM_DEBUG("r600_irq_set: hpd 5\n"); 4099 hpd5 |= DC_HPDx_INT_EN; 4100 } 4101 if (rdev->irq.hpd[5]) { 4102 DRM_DEBUG("r600_irq_set: hpd 6\n"); 4103 hpd6 |= DC_HPDx_INT_EN; 4104 } 4105 if (rdev->irq.afmt[0]) { 4106 
DRM_DEBUG("r600_irq_set: hdmi 0\n"); 4107 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK; 4108 } 4109 if (rdev->irq.afmt[1]) { 4110 DRM_DEBUG("r600_irq_set: hdmi 0\n"); 4111 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK; 4112 } 4113 4114 WREG32(CP_INT_CNTL, cp_int_cntl); 4115 WREG32(DMA_CNTL, dma_cntl); 4116 WREG32(DxMODE_INT_MASK, mode_int); 4117 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); 4118 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); 4119 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 4120 if (ASIC_IS_DCE3(rdev)) { 4121 WREG32(DC_HPD1_INT_CONTROL, hpd1); 4122 WREG32(DC_HPD2_INT_CONTROL, hpd2); 4123 WREG32(DC_HPD3_INT_CONTROL, hpd3); 4124 WREG32(DC_HPD4_INT_CONTROL, hpd4); 4125 if (ASIC_IS_DCE32(rdev)) { 4126 WREG32(DC_HPD5_INT_CONTROL, hpd5); 4127 WREG32(DC_HPD6_INT_CONTROL, hpd6); 4128 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0); 4129 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1); 4130 } else { 4131 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 4132 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1); 4133 } 4134 } else { 4135 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); 4136 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); 4137 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); 4138 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 4139 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1); 4140 } 4141 4142 return 0; 4143 } 4144 4145 static void r600_irq_ack(struct radeon_device *rdev) 4146 { 4147 u32 tmp; 4148 4149 if (ASIC_IS_DCE3(rdev)) { 4150 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); 4151 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); 4152 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); 4153 if (ASIC_IS_DCE32(rdev)) { 4154 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0); 4155 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1); 4156 } else { 4157 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS); 4158 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS); 4159 } 4160 } else { 4161 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS); 4162 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); 4163 rdev->irq.stat_regs.r600.disp_int_cont2 = 0; 4164 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS); 4165 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS); 4166 } 4167 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS); 4168 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS); 4169 4170 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED) 4171 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); 4172 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED) 4173 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); 4174 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) 4175 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); 4176 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) 4177 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); 4178 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) 4179 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); 4180 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) 4181 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); 4182 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { 4183 if (ASIC_IS_DCE3(rdev)) { 4184 tmp = RREG32(DC_HPD1_INT_CONTROL); 4185 tmp |= DC_HPDx_INT_ACK; 4186 WREG32(DC_HPD1_INT_CONTROL, tmp); 4187 } 
else {
4188 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
4189 tmp |= DC_HPDx_INT_ACK;
4190 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
4191 }
4192 }
4193 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
4194 if (ASIC_IS_DCE3(rdev)) {
4195 tmp = RREG32(DC_HPD2_INT_CONTROL);
4196 tmp |= DC_HPDx_INT_ACK;
4197 WREG32(DC_HPD2_INT_CONTROL, tmp);
4198 } else {
4199 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
4200 tmp |= DC_HPDx_INT_ACK;
4201 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
4202 }
4203 }
4204 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
4205 if (ASIC_IS_DCE3(rdev)) {
4206 tmp = RREG32(DC_HPD3_INT_CONTROL);
4207 tmp |= DC_HPDx_INT_ACK;
4208 WREG32(DC_HPD3_INT_CONTROL, tmp);
4209 } else {
4210 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
4211 tmp |= DC_HPDx_INT_ACK;
4212 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
4213 }
4214 }
4215 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
4216 tmp = RREG32(DC_HPD4_INT_CONTROL);
4217 tmp |= DC_HPDx_INT_ACK;
4218 WREG32(DC_HPD4_INT_CONTROL, tmp);
4219 }
4220 if (ASIC_IS_DCE32(rdev)) {
4221 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
4222 tmp = RREG32(DC_HPD5_INT_CONTROL);
4223 tmp |= DC_HPDx_INT_ACK;
4224 WREG32(DC_HPD5_INT_CONTROL, tmp);
4225 }
4226 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
4227 tmp = RREG32(DC_HPD6_INT_CONTROL);
4228 tmp |= DC_HPDx_INT_ACK;
4229 WREG32(DC_HPD6_INT_CONTROL, tmp);
4230 }
4231 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
4232 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
4233 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
4234 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
4235 }
4236 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
4237 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
4238 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
4239 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
4240 }
4241 } else {
4242 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
4243 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
4244 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
4245 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
4246 }
4247 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
4248 if (ASIC_IS_DCE3(rdev)) {
4249 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
4250 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
4251 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
4252 } else {
4253 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
4254 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
4255 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
4256 }
4257 }
4258 }
4259 }
4260
4261 void r600_irq_disable(struct radeon_device *rdev)
4262 {
4263 r600_disable_interrupts(rdev);
4264 /* Wait and acknowledge irq */
4265 mdelay(1);
4266 r600_irq_ack(rdev);
4267 r600_disable_interrupt_state(rdev);
4268 }
4269
4270 static u32 r600_get_ih_wptr(struct radeon_device *rdev)
4271 {
4272 u32 wptr, tmp;
4273
4274 if (rdev->wb.enabled)
4275 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
4276 else
4277 wptr = RREG32(IH_RB_WPTR);
4278
4279 if (wptr & RB_OVERFLOW) {
4280 /* When a ring buffer overflow happens, start parsing interrupts
4281 * from the last vector that was not overwritten (wptr + 16).
4282 * Hopefully this allows us to catch up.
4283 */
4284 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
4285 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
4286 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
4287 tmp = RREG32(IH_RB_CNTL);
4288 tmp |= IH_WPTR_OVERFLOW_CLEAR;
4289 WREG32(IH_RB_CNTL, tmp);
4290 }
4291 return (wptr & rdev->ih.ptr_mask);
4292 }
4293
4294 /* r600 IV Ring
4295 * Each IV ring entry is 128 bits:
4296 * [7:0] - interrupt source id
4297 * [31:8] - reserved
4298 * [59:32] - interrupt source data
4299 * [127:60] - reserved
4300 *
4301 * The basic interrupt vector entries
4302 * are decoded as follows:
4303 * src_id src_data description
4304 * 1 0 D1 Vblank
4305 * 1 1 D1 Vline
4306 * 5 0 D2 Vblank
4307 * 5 1 D2 Vline
4308 * 19 0 FP Hot plug detection A
4309 * 19 1 FP Hot plug detection B
4310 * 19 2 DAC A auto-detection
4311 * 19 3 DAC B auto-detection
4312 * 21 4 HDMI block A
4313 * 21 5 HDMI block B
4314 * 176 - CP_INT RB
4315 * 177 - CP_INT IB1
4316 * 178 - CP_INT IB2
4317 * 181 - EOP Interrupt
4318 * 233 - GUI Idle
4319 *
4320 * Note, these are based on r600 and may need to be
4321 * adjusted or added to on newer asics
4322 */
4323
4324 int r600_irq_process(struct radeon_device *rdev)
4325 {
4326 u32 wptr;
4327 u32 rptr;
4328 u32 src_id, src_data;
4329 u32 ring_index;
4330 bool queue_hotplug = false;
4331 bool queue_hdmi = false;
4332
4333 if (!rdev->ih.enabled || rdev->shutdown)
4334 return IRQ_NONE;
4335
4336 /* No MSIs, need a dummy read to flush PCI DMAs */
4337 if (!rdev->msi_enabled)
4338 RREG32(IH_RB_WPTR);
4339
4340 wptr = r600_get_ih_wptr(rdev);
4341
4342 restart_ih:
4343 /* is somebody else already processing irqs? */
4344 if (atomic_xchg(&rdev->ih.lock, 1))
4345 return IRQ_NONE;
4346
4347 rptr = rdev->ih.rptr;
4348 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
4349
4350 /* Order reading of wptr vs. reading of IH ring data */
4351 rmb();
4352
4353 /* display interrupts */
4354 r600_irq_ack(rdev);
4355
4356 while (rptr != wptr) {
4357 /* wptr/rptr are in bytes!
*/
4358 ring_index = rptr / 4;
4359 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
4360 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
4361
4362 switch (src_id) {
4363 case 1: /* D1 vblank/vline */
4364 switch (src_data) {
4365 case 0: /* D1 vblank */
4366 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
4367 if (rdev->irq.crtc_vblank_int[0]) {
4368 drm_handle_vblank(rdev->ddev, 0);
4369 rdev->pm.vblank_sync = true;
4370 wake_up(&rdev->irq.vblank_queue);
4371 }
4372 if (atomic_read(&rdev->irq.pflip[0]))
4373 radeon_crtc_handle_flip(rdev, 0);
4374 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4375 DRM_DEBUG("IH: D1 vblank\n");
4376 }
4377 break;
4378 case 1: /* D1 vline */
4379 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
4380 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
4381 DRM_DEBUG("IH: D1 vline\n");
4382 }
4383 break;
4384 default:
4385 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4386 break;
4387 }
4388 break;
4389 case 5: /* D2 vblank/vline */
4390 switch (src_data) {
4391 case 0: /* D2 vblank */
4392 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
4393 if (rdev->irq.crtc_vblank_int[1]) {
4394 drm_handle_vblank(rdev->ddev, 1);
4395 rdev->pm.vblank_sync = true;
4396 wake_up(&rdev->irq.vblank_queue);
4397 }
4398 if (atomic_read(&rdev->irq.pflip[1]))
4399 radeon_crtc_handle_flip(rdev, 1);
4400 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
4401 DRM_DEBUG("IH: D2 vblank\n");
4402 }
4403 break;
4404 case 1: /* D2 vline */
4405 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
4406 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
4407 DRM_DEBUG("IH: D2 vline\n");
4408 }
4409 break;
4410 default:
4411 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4412 break;
4413 }
4414 break;
4415 case 19: /* HPD/DAC hotplug */
4416 switch (src_data) {
4417 case 0:
4418 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
4419 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
4420 queue_hotplug = true;
4421 DRM_DEBUG("IH: HPD1\n");
4422 }
4423 break;
4424 case 1:
4425 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
4426 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
4427 queue_hotplug = true;
4428 DRM_DEBUG("IH: HPD2\n");
4429 }
4430 break;
4431 case 4:
4432 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
4433 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
4434 queue_hotplug = true;
4435 DRM_DEBUG("IH: HPD3\n");
4436 }
4437 break;
4438 case 5:
4439 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
4440 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
4441 queue_hotplug = true;
4442 DRM_DEBUG("IH: HPD4\n");
4443 }
4444 break;
4445 case 10:
4446 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
4447 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
4448 queue_hotplug = true;
4449 DRM_DEBUG("IH: HPD5\n");
4450 }
4451 break;
4452 case 12:
4453 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
4454 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
4455 queue_hotplug = true;
4456 DRM_DEBUG("IH: HPD6\n");
4457 }
4458 break;
4459 default:
4460 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4461 break;
4462 }
4463 break;
4464 case 21: /* hdmi */
4465 switch (src_data) {
4466 case 4:
4467 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
4468 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4469 queue_hdmi = true;
4470 DRM_DEBUG("IH: HDMI0\n");
4471 }
4472 break;
4473 case 5:
4474 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
4475 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4476 queue_hdmi = true;
4477 DRM_DEBUG("IH: HDMI1\n");
4478 }
4479 break;
4480 default:
4481 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
4482 break;
4483 }
4484 break;
4485 case 176: /* CP_INT in ring buffer */
4486 case 177: /* CP_INT in IB1 */
4487 case 178: /* CP_INT in IB2 */
4488 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
4489 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
4490 break;
4491 case 181: /* CP EOP event */
4492 DRM_DEBUG("IH: CP EOP\n");
4493 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
4494 break;
4495 case 224: /* DMA trap event */
4496 DRM_DEBUG("IH: DMA trap\n");
4497 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
4498 break;
4499 case 233: /* GUI IDLE */
4500 DRM_DEBUG("IH: GUI idle\n");
4501 break;
4502 default:
4503 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4504 break;
4505 }
4506
4507 /* wptr/rptr are in bytes! */
4508 rptr += 16;
4509 rptr &= rdev->ih.ptr_mask;
4510 }
4511 if (queue_hotplug)
4512 schedule_work(&rdev->hotplug_work);
4513 if (queue_hdmi)
4514 schedule_work(&rdev->audio_work);
4515 rdev->ih.rptr = rptr;
4516 WREG32(IH_RB_RPTR, rdev->ih.rptr);
4517 atomic_set(&rdev->ih.lock, 0);
4518
4519 /* make sure wptr hasn't changed while processing */
4520 wptr = r600_get_ih_wptr(rdev);
4521 if (wptr != rptr)
4522 goto restart_ih;
4523
4524 return IRQ_HANDLED;
4525 }
4526
4527 /*
4528 * Debugfs info
4529 */
4530 #if defined(CONFIG_DEBUG_FS)
4531
4532 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
4533 {
4534 struct drm_info_node *node = (struct drm_info_node *) m->private;
4535 struct drm_device *dev = node->minor->dev;
4536 struct radeon_device *rdev = dev->dev_private;
4537
4538 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
4539 DREG32_SYS(m, rdev, VM_L2_STATUS);
4540 return 0;
4541 }
4542
4543 static struct drm_info_list r600_mc_info_list[] = {
4544 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
4545 };
4546 #endif
4547
4548 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
4549 {
4550 #if defined(CONFIG_DEBUG_FS)
4551 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
4552 #else
4553 return 0;
4554 #endif
4555 }
4556
4557 /**
4558 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
4559 * @rdev: radeon device structure
4560 * @bo: buffer object struct which userspace is waiting for idle
4561 *
4562 * Some R6XX/R7XX chips don't seem to take into account an HDP flush
4563 * performed through the ring buffer, which leads to corruption in
4564 * rendering; see http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid
4565 * this we perform the HDP flush directly by writing the register through MMIO.
4566 */
4567 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
4568 {
4569 /* r7xx hw bug: use a write to HDP_DEBUG1 followed by an fb read
4570 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
4571 * This seems to cause problems on some AGP cards. Just use the old
4572 * method for them.
4573 */ 4574 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 4575 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) { 4576 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 4577 u32 tmp; 4578 4579 WREG32(HDP_DEBUG1, 0); 4580 tmp = readl((void __iomem *)ptr); 4581 } else 4582 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 4583 } 4584 4585 void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes) 4586 { 4587 u32 link_width_cntl, mask; 4588 4589 if (rdev->flags & RADEON_IS_IGP) 4590 return; 4591 4592 if (!(rdev->flags & RADEON_IS_PCIE)) 4593 return; 4594 4595 /* x2 cards have a special sequence */ 4596 if (ASIC_IS_X2(rdev)) 4597 return; 4598 4599 radeon_gui_idle(rdev); 4600 4601 switch (lanes) { 4602 case 0: 4603 mask = RADEON_PCIE_LC_LINK_WIDTH_X0; 4604 break; 4605 case 1: 4606 mask = RADEON_PCIE_LC_LINK_WIDTH_X1; 4607 break; 4608 case 2: 4609 mask = RADEON_PCIE_LC_LINK_WIDTH_X2; 4610 break; 4611 case 4: 4612 mask = RADEON_PCIE_LC_LINK_WIDTH_X4; 4613 break; 4614 case 8: 4615 mask = RADEON_PCIE_LC_LINK_WIDTH_X8; 4616 break; 4617 case 12: 4618 /* not actually supported */ 4619 mask = RADEON_PCIE_LC_LINK_WIDTH_X12; 4620 break; 4621 case 16: 4622 mask = RADEON_PCIE_LC_LINK_WIDTH_X16; 4623 break; 4624 default: 4625 DRM_ERROR("invalid pcie lane request: %d\n", lanes); 4626 return; 4627 } 4628 4629 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL); 4630 link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK; 4631 link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT; 4632 link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW | 4633 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE); 4634 4635 WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4636 } 4637 4638 int r600_get_pcie_lanes(struct radeon_device *rdev) 4639 { 4640 u32 link_width_cntl; 4641 4642 if (rdev->flags & RADEON_IS_IGP) 4643 return 0; 4644 4645 if (!(rdev->flags & RADEON_IS_PCIE)) 4646 return 0; 4647 4648 /* x2 cards have a special sequence */ 4649 if (ASIC_IS_X2(rdev)) 4650 return 0; 4651 4652 radeon_gui_idle(rdev); 4653 4654 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL); 4655 4656 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { 4657 case RADEON_PCIE_LC_LINK_WIDTH_X1: 4658 return 1; 4659 case RADEON_PCIE_LC_LINK_WIDTH_X2: 4660 return 2; 4661 case RADEON_PCIE_LC_LINK_WIDTH_X4: 4662 return 4; 4663 case RADEON_PCIE_LC_LINK_WIDTH_X8: 4664 return 8; 4665 case RADEON_PCIE_LC_LINK_WIDTH_X12: 4666 /* not actually supported */ 4667 return 12; 4668 case RADEON_PCIE_LC_LINK_WIDTH_X0: 4669 case RADEON_PCIE_LC_LINK_WIDTH_X16: 4670 default: 4671 return 16; 4672 } 4673 } 4674 4675 static void r600_pcie_gen2_enable(struct radeon_device *rdev) 4676 { 4677 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; 4678 u16 link_cntl2; 4679 4680 if (radeon_pcie_gen2 == 0) 4681 return; 4682 4683 if (rdev->flags & RADEON_IS_IGP) 4684 return; 4685 4686 if (!(rdev->flags & RADEON_IS_PCIE)) 4687 return; 4688 4689 /* x2 cards have a special sequence */ 4690 if (ASIC_IS_X2(rdev)) 4691 return; 4692 4693 /* only RV6xx+ chips are supported */ 4694 if (rdev->family <= CHIP_R600) 4695 return; 4696 4697 if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) && 4698 (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT)) 4699 return; 4700 4701 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4702 if (speed_cntl & LC_CURRENT_DATA_RATE) { 4703 DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 4704 return; 4705 } 4706 4707 
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); 4708 4709 /* 55 nm r6xx asics */ 4710 if ((rdev->family == CHIP_RV670) || 4711 (rdev->family == CHIP_RV620) || 4712 (rdev->family == CHIP_RV635)) { 4713 /* advertise upconfig capability */ 4714 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); 4715 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4716 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4717 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); 4718 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { 4719 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; 4720 link_width_cntl &= ~(LC_LINK_WIDTH_MASK | 4721 LC_RECONFIG_ARC_MISSING_ESCAPE); 4722 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN; 4723 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4724 } else { 4725 link_width_cntl |= LC_UPCONFIGURE_DIS; 4726 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4727 } 4728 } 4729 4730 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4731 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && 4732 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 4733 4734 /* 55 nm r6xx asics */ 4735 if ((rdev->family == CHIP_RV670) || 4736 (rdev->family == CHIP_RV620) || 4737 (rdev->family == CHIP_RV635)) { 4738 WREG32(MM_CFGREGS_CNTL, 0x8); 4739 link_cntl2 = RREG32(0x4088); 4740 WREG32(MM_CFGREGS_CNTL, 0); 4741 /* not supported yet */ 4742 if (link_cntl2 & SELECTABLE_DEEMPHASIS) 4743 return; 4744 } 4745 4746 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK; 4747 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT); 4748 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK; 4749 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE; 4750 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE; 4751 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); 4752 4753 tmp = RREG32(0x541c); 4754 WREG32(0x541c, tmp | 0x8); 4755 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); 4756 link_cntl2 = RREG16(0x4088); 4757 link_cntl2 &= ~TARGET_LINK_SPEED_MASK; 4758 link_cntl2 |= 0x2; 4759 WREG16(0x4088, link_cntl2); 4760 WREG32(MM_CFGREGS_CNTL, 0); 4761 4762 if ((rdev->family == CHIP_RV670) || 4763 (rdev->family == CHIP_RV620) || 4764 (rdev->family == CHIP_RV635)) { 4765 training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL); 4766 training_cntl &= ~LC_POINT_7_PLUS_EN; 4767 WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl); 4768 } else { 4769 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4770 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; 4771 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); 4772 } 4773 4774 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4775 speed_cntl |= LC_GEN2_EN_STRAP; 4776 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); 4777 4778 } else { 4779 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); 4780 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ 4781 if (1) 4782 link_width_cntl |= LC_UPCONFIGURE_DIS; 4783 else 4784 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4785 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4786 } 4787 } 4788 4789 /** 4790 * r600_get_gpu_clock_counter - return GPU clock counter snapshot 4791 * 4792 * @rdev: radeon_device pointer 4793 * 4794 * Fetches a GPU clock counter snapshot (R6xx-cayman). 4795 * Returns the 64 bit clock counter snapshot. 
4796 */ 4797 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev) 4798 { 4799 uint64_t clock; 4800 4801 mutex_lock(&rdev->gpu_clock_mutex); 4802 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); 4803 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | 4804 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); 4805 mutex_unlock(&rdev->gpu_clock_mutex); 4806 return clock; 4807 } 4808
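
/*
 * Illustrative sketch only, not driver code (kept under #if 0): how a single
 * 128-bit IV ring entry is decoded, following the layout documented above
 * r600_irq_process() ([7:0] = source id in dword 0, [59:32] = source data in
 * dword 1, 16 bytes per entry, rptr/wptr counted in bytes). The struct and
 * function names here are hypothetical and exist only to make the layout
 * description concrete.
 */
#if 0
struct r600_iv_entry_example {
	u32 src_id;	/* e.g. 1 = D1 vblank/vline, 19 = HPD, 181 = CP EOP */
	u32 src_data;	/* meaning depends on src_id, e.g. 0 = vblank, 1 = vline */
};

static struct r600_iv_entry_example
r600_decode_iv_entry_example(const volatile u32 *ih_ring, u32 rptr, u32 ptr_mask)
{
	struct r600_iv_entry_example e;
	u32 ring_index = (rptr & ptr_mask) / 4;	/* rptr is in bytes */

	e.src_id   = le32_to_cpu(ih_ring[ring_index]) & 0xff;
	e.src_data = le32_to_cpu(ih_ring[ring_index + 1]) & 0xfffffff;
	/* the consumer then advances rptr by 16 bytes, modulo the ring size */
	return e;
}
#endif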
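
/*
 * Illustrative sketch only, not driver code (kept under #if 0): measuring an
 * elapsed GPU clock interval with r600_get_gpu_clock_counter(), whose 64-bit
 * snapshot is assembled from RLC_GPU_CLOCK_COUNT_LSB/MSB above. The helper
 * name is hypothetical.
 */
#if 0
static uint64_t r600_gpu_clocks_elapsed_example(struct radeon_device *rdev)
{
	uint64_t start, end;

	start = r600_get_gpu_clock_counter(rdev);
	/* ... work whose duration is being measured ... */
	end = r600_get_gpu_clock_counter(rdev);

	return end - start;	/* elapsed GPU clock ticks */
}
#endif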