/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "rv770d.h"
#include "atom.h"
#include "avivod.h"

#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360

static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
static void rv770_pcie_gen2_enable(struct radeon_device *rdev);

#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)

/**
 * rv770_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r7xx-cayman).
 */
u32 rv770_get_xclk(struct radeon_device *rdev)
{
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 tmp = RREG32(CG_CLKPIN_CNTL);

	if (tmp & MUX_TCLK_TO_XCLK)
		return TCLK;

	if (tmp & XTALIN_DIVIDE)
		return reference_clock / 4;

	return reference_clock;
}

u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	if (radeon_crtc->crtc_id) {
		WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
		WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
	} else {
		WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
		WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
	}
	WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

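	/* the GRPH registers are double buffered: the new base takes
	 * effect at the next vblank, and SURFACE_UPDATE_PENDING going
	 * high confirms the hardware has latched the write
	 */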
	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}

/* get temperature in millidegrees */
int rv770_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp;

	if (temp & 0x400)
		actual_temp = -256;
	else if (temp & 0x200)
		actual_temp = 255;
	else if (temp & 0x100) {
		actual_temp = temp & 0x1ff;
		actual_temp |= ~0x1ff;
	} else
		actual_temp = temp & 0xff;

	return (actual_temp * 1000) / 2;
}

void rv770_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
		}
	}
}

/*
 * GART
 */
static int rv770_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	if (rdev->family == CHIP_RV740)
		WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

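	/* flush the TLBs so the GPU picks up the freshly written page table */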
	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rv770_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void rv770_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

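/**
 * rv770_mc_program - program the memory controller address map
 *
 * @rdev: radeon_device pointer
 *
 * Stop MC client access, program the system aperture, VRAM and
 * AGP address ranges, then restore access with the VGA renderer
 * disabled so it cannot scribble over our objects.
 */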
static void rv770_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	/* r7xx hw bug.  Read from HDP_DEBUG1 rather
	 * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
	 */
	tmp = RREG32(HDP_DEBUG1);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/*
 * CP.
 */
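/**
 * r700_cp_stop - stop the gfx command processor
 *
 * @rdev: radeon_device pointer
 *
 * Halt the PFP and ME, disable scratch register writeback and
 * mark the gfx ring as not ready.
 */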
void r700_cp_stop(struct radeon_device *rdev)
{
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
	WREG32(SCRATCH_UMSK, 0);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}

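/**
 * rv770_cp_load_microcode - load the CP microcode
 *
 * @rdev: radeon_device pointer
 *
 * Halt and soft-reset the CP, then stream the PFP and ME firmware
 * images into their respective ucode RAMs. Returns -EINVAL if the
 * firmware has not been loaded.
 */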
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

void r700_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r700_cp_stop(rdev);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

/*
 * Core functions
 */
static void rv770_gpu_init(struct radeon_device *rdev)
{
	int i, j, num_qd_pipes;
	u32 ta_aux_cntl;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 db_debug3;
	u32 num_gs_verts_per_thread;
	u32 vgt_gs_per_es;
	u32 gs_prim_buffer_depth = 0;
	u32 sq_ms_fifo_sizes;
	u32 sq_config;
	u32 sq_thread_resource_mgmt;
	u32 hdp_host_path_cntl;
	u32 sq_dyn_gpr_size_simd_ab_0;
	u32 gb_tiling_config = 0;
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config = 0;
	u32 mc_arb_ramcfg;
	u32 db_debug4, tmp;
	u32 inactive_pipes, shader_pipe_config;
	u32 disabled_rb_mask;
	unsigned active_number;

	/* setup chip specs */
	rdev->config.rv770.tiling_group_size = 256;
	switch (rdev->family) {
	case CHIP_RV770:
		rdev->config.rv770.max_pipes = 4;
		rdev->config.rv770.max_tile_pipes = 8;
		rdev->config.rv770.max_simds = 10;
		rdev->config.rv770.max_backends = 4;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 512;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 128;
		rdev->config.rv770.sx_max_export_pos_size = 16;
		rdev->config.rv770.sx_max_export_smx_size = 112;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0xF9;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		break;
	case CHIP_RV730:
		rdev->config.rv770.max_pipes = 2;
		rdev->config.rv770.max_tile_pipes = 4;
		rdev->config.rv770.max_simds = 8;
		rdev->config.rv770.max_backends = 2;
		rdev->config.rv770.max_gprs = 128;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 256;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 256;
		rdev->config.rv770.sx_max_export_pos_size = 32;
		rdev->config.rv770.sx_max_export_smx_size = 224;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0xf9;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
			rdev->config.rv770.sx_max_export_pos_size -= 16;
			rdev->config.rv770.sx_max_export_smx_size += 16;
		}
		break;
	case CHIP_RV710:
		rdev->config.rv770.max_pipes = 2;
		rdev->config.rv770.max_tile_pipes = 2;
		rdev->config.rv770.max_simds = 2;
		rdev->config.rv770.max_backends = 1;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 192;
		rdev->config.rv770.max_stack_entries = 256;
		rdev->config.rv770.max_hw_contexts = 4;
		rdev->config.rv770.max_gs_threads = 8 * 2;
		rdev->config.rv770.sx_max_export_size = 128;
		rdev->config.rv770.sx_max_export_pos_size = 16;
		rdev->config.rv770.sx_max_export_smx_size = 112;
		rdev->config.rv770.sq_num_cf_insts = 1;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0x40;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
		break;
	case CHIP_RV740:
		rdev->config.rv770.max_pipes = 4;
		rdev->config.rv770.max_tile_pipes = 4;
		rdev->config.rv770.max_simds = 8;
		rdev->config.rv770.max_backends = 4;
		rdev->config.rv770.max_gprs = 256;
		rdev->config.rv770.max_threads = 248;
		rdev->config.rv770.max_stack_entries = 512;
		rdev->config.rv770.max_hw_contexts = 8;
		rdev->config.rv770.max_gs_threads = 16 * 2;
		rdev->config.rv770.sx_max_export_size = 256;
		rdev->config.rv770.sx_max_export_pos_size = 32;
		rdev->config.rv770.sx_max_export_smx_size = 224;
		rdev->config.rv770.sq_num_cf_insts = 2;

		rdev->config.rv770.sx_num_of_sets = 7;
		rdev->config.rv770.sc_prim_fifo_size = 0x100;
		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;

		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
			rdev->config.rv770.sx_max_export_pos_size -= 16;
			rdev->config.rv770.sx_max_export_smx_size += 16;
		}
		break;
	default:
		break;
	}

	/* Initialize HDP */
	j = 0;
	for (i = 0; i < 32; i++) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
		j += 0x18;
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* setup tiling, simd, pipe config */
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
	inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
	for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
		if (!(inactive_pipes & tmp)) {
			active_number++;
		}
		tmp <<= 1;
	}
	if (active_number == 1) {
		WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
	} else {
		WREG32(SPI_CONFIG_CNTL, 0);
	}

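	/* trim the spec maximums above down to the units the harvest
	 * fuses actually leave enabled on this board
	 */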
	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
	if (tmp < rdev->config.rv770.max_backends) {
		rdev->config.rv770.max_backends = tmp;
	}

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
	if (tmp < rdev->config.rv770.max_pipes) {
		rdev->config.rv770.max_pipes = tmp;
	}
	tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
	if (tmp < rdev->config.rv770.max_simds) {
		rdev->config.rv770.max_simds = tmp;
	}

	switch (rdev->config.rv770.max_tile_pipes) {
	case 1:
	default:
		gb_tiling_config = PIPE_TILING(0);
		break;
	case 2:
		gb_tiling_config = PIPE_TILING(1);
		break;
	case 4:
		gb_tiling_config = PIPE_TILING(2);
		break;
	case 8:
		gb_tiling_config = PIPE_TILING(3);
		break;
	}
	rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;

	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
	tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
					R7XX_MAX_BACKENDS, disabled_rb_mask);
	gb_tiling_config |= tmp << 16;
	rdev->config.rv770.backend_map = tmp;

	if (rdev->family == CHIP_RV770)
		gb_tiling_config |= BANK_TILING(1);
	else {
		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
			gb_tiling_config |= BANK_TILING(1);
		else
			gb_tiling_config |= BANK_TILING(0);
	}
	rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
	gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
		gb_tiling_config |= ROW_TILING(3);
		gb_tiling_config |= SAMPLE_SPLIT(3);
	} else {
		gb_tiling_config |=
			ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
		gb_tiling_config |=
			SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
	}

	gb_tiling_config |= BANK_SWAPS(1);
	rdev->config.rv770.tile_config = gb_tiling_config;

	WREG32(GB_TILING_CONFIG, gb_tiling_config);
	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
	WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
	WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);

	num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	ta_aux_cntl = RREG32(TA_CNTL_AUX);
	WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
	smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	if (rdev->family != CHIP_RV740)
		WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
				       GS_FLUSH_CTL(4) |
				       ACK_FLUSH_CTL(3) |
				       SYNC_FLUSH_CTL));

	if (rdev->family != CHIP_RV770)
		WREG32(SMX_SAR_CTL0, 0x00003f3f);

	db_debug3 = RREG32(DB_DEBUG3);
	db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV740:
		db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
		break;
	case CHIP_RV710:
	case CHIP_RV730:
	default:
		db_debug3 |= DB_CLK_OFF_DELAY(2);
		break;
	}
	WREG32(DB_DEBUG3, db_debug3);

	if (rdev->family != CHIP_RV770) {
		db_debug4 = RREG32(DB_DEBUG4);
		db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
		WREG32(DB_DEBUG4, db_debug4);
	}

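	/* the export buffer size fields are encoded as (size / 4) - 1 */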
	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));

	WREG32(CP_PERFMON_CNTL, 0);

	sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
			    DONE_FIFO_HIWATER(0xe0) |
			    ALU_UPDATE_FIFO_HIWATER(0x8));
	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
		break;
	case CHIP_RV740:
	default:
		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
		break;
	}
	WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));
	if (rdev->family == CHIP_RV710)
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

	WREG32(SQ_CONFIG, sq_config);

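	/* split the GPR file between the shader stages: PS and VS get
	 * 24/64 each, GS and ES 7/64 each, and clause temporaries take
	 * half of one stage's share
	 */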
	WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
					NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
					NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));

	WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
					NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));

	sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
				   NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
				   NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
	if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
		sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
	else
		sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);

	WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
					  NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));

	WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
					  NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));

	sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
				     SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));

	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	if (rdev->family == CHIP_RV710)
		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
						AUTO_INVLD_EN(ES_AND_GS_AUTO)));
	else
		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
						AUTO_INVLD_EN(ES_AND_GS_AUTO)));

	switch (rdev->family) {
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV740:
		gs_prim_buffer_depth = 384;
		break;
	case CHIP_RV710:
		gs_prim_buffer_depth = 128;
		break;
	default:
		break;
	}

	num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
	/* Max value for this is 256 */
	if (vgt_gs_per_es > 256)
		vgt_gs_per_es = 256;

	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
	WREG32(VGT_GS_PER_VS, 2);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);

	WREG32(TCP_CNTL, 0);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);

	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(VC_ENHANCE, 0);
}

void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		radeon_vram_location(rdev, &rdev->mc, 0);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

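/**
 * rv770_mc_init - initialize the memory controller driver state
 *
 * @rdev: radeon_device pointer
 *
 * Derive the VRAM bus width from the channel size and count, read
 * back the VRAM and aperture sizes, then pick the VRAM/GTT layout.
 * Always returns 0.
 */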
static int rv770_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r700_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}

/**
 * rv770_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (r7xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int rv770_copy_dma(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	if (radeon_fence_need_sync(*fence, ring->idx)) {
		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
					    ring->idx);
		radeon_fence_note_sync(*fence, ring->idx);
	} else {
		radeon_semaphore_free(rdev, &sem, NULL);
	}

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFF)
			cur_size_in_dw = 0xFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}

static int rv770_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2 link */
	rv770_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	rv770_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		rv770_agp_enable(rdev);
	} else {
		r = rv770_pcie_gart_enable(rdev);
		if (r)
			return r;
	}

	rv770_gpu_init(rdev);
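	/* the blitter is optional; if it fails to initialize, clear the
	 * copy callback so ttm falls back to CPU copies
	 */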
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR, DMA_RB_WPTR,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = rv770_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	r = r600_dma_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

int rv770_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = rv770_startup(rdev);
	if (r) {
		DRM_ERROR("rv770 startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

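/* suspend tears the rings, IRQs and GART down; rv770_resume re-posts
 * the card via the ATOM tables and brings everything back up through
 * rv770_startup()
 */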
int rv770_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r700_cp_stop(rdev);
	r600_dma_stop(rdev);
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	rv770_pcie_gart_disable(rdev);

	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic-specific functions. This should
 * also allow us to remove a bunch of callback functions like
 * vram_info.
 */
int rv770_init(struct radeon_device *rdev)
{
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = rv770_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = rv770_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_dma_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rv770_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	return 0;
}

void rv770_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_dma_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	rv770_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

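/**
 * rv770_pcie_gen2_enable - try to switch the PCIE link to gen2 speed
 *
 * @rdev: radeon_device pointer
 *
 * No-op on IGP, non-PCIE and dual-GPU (X2) boards, when the PCIE
 * port does not support 5.0 GT/s, or when the user disabled it
 * with radeon.pcie_gen2=0.
 */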
static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, tmp;
	u16 link_cntl2;
	u32 mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* advertise upconfig capability */
	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~LC_UPCONFIGURE_DIS;
	WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
	if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
		lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
		link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
				     LC_RECONFIG_ARC_MISSING_ESCAPE);
		link_width_cntl |= lanes | LC_RECONFIG_NOW |
			LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	} else {
		link_width_cntl |= LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}