/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024

#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");

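/*
 * MC sequencer init tables: each entry is an {MC_SEQ_IO_DEBUG_INDEX,
 * MC_SEQ_IO_DEBUG_DATA} pair written out by ni_mc_load_microcode()
 * before the MC ucode itself; the four tables differ only in their
 * final (0x9f) entry.
 */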
#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};

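/*
 * ni_mc_load_microcode - load MC ucode into the hw
 *
 * On GDDR5 boards: stop the MC sequencer, program the per-asic IO
 * register table above, stream in the MC ucode, restart the sequencer
 * and wait for memory training to complete.
 * Returns 0 on success, -EINVAL if no MC firmware is loaded.
 */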
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
			udelay(10);

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

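/*
 * ni_init_microcode - fetch the PFP, ME, RLC and MC firmware
 *
 * Requests the per-chip firmware images via request_firmware() and
 * validates their sizes; on failure every firmware reference is dropped
 * so the caller can bail out cleanly.
 */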
int ni_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
	err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->mc_fw->size != mc_req_size) {
		printk(KERN_ERR
		       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->mc_fw->size, fw_name);
		err = -EINVAL;
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/*
 * Core functions
 */
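/*
 * Build the tile-pipe to render-backend map: clamp the requested pipe,
 * shader-engine and backend counts to the asic limits, compute the mask
 * of enabled backends, then pack one backend index per pipe (4 bits
 * each) in the swizzled order selected by the pipe count.
 */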
static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
					       u32 num_tile_pipes,
					       u32 num_backends_per_asic,
					       u32 *backend_disable_mask_per_asic,
					       u32 num_shader_engines)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 num_backends_per_se;
	u32 cur_pipe;
	u32 swizzle_pipe[CAYMAN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	/* force legal values */
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
		num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	if (num_shader_engines < 1)
		num_shader_engines = 1;
	if (num_shader_engines > rdev->config.cayman.max_shader_engines)
		num_shader_engines = rdev->config.cayman.max_shader_engines;
	if (num_backends_per_asic < num_shader_engines)
		num_backends_per_asic = num_shader_engines;
	if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
		num_backends_per_asic = rdev->config.cayman.max_backends_per_se *
			num_shader_engines;

	/* make sure we have the same number of backends per se */
	num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
	/* set up the number of backends per se */
	num_backends_per_se = num_backends_per_asic / num_shader_engines;
	if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
		num_backends_per_se = rdev->config.cayman.max_backends_per_se;
		num_backends_per_asic = num_backends_per_se * num_shader_engines;
	}

	/* create enable mask and count for enabled backends */
	for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
		if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends_per_asic)
			break;
	}

	/* force the backends mask to match the current number of backends */
	if (enabled_backends_count != num_backends_per_asic) {
		u32 this_backend_enabled;
		u32 shader_engine;
		u32 backend_per_se;

		enabled_backends_mask = 0;
		enabled_backends_count = 0;
		*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			/* calc the current se */
			shader_engine = i / rdev->config.cayman.max_backends_per_se;
			/* calc the backend per se */
			backend_per_se = i % rdev->config.cayman.max_backends_per_se;
			/* default to not enabled */
			this_backend_enabled = 0;
			if ((shader_engine < num_shader_engines) &&
			    (backend_per_se < num_backends_per_se))
				this_backend_enabled = 1;
			if (this_backend_enabled) {
				enabled_backends_mask |= (1 << i);
				*backend_disable_mask_per_asic &= ~(1 << i);
				++enabled_backends_count;
			}
		}
	}

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CAYMAN:
		force_no_swizzle = true;
		break;
	default:
		force_no_swizzle = false;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
	}

	return backend_map;
}

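/*
 * Expand a per-shader-engine backend disable mask to a per-asic mask:
 * a single SE uses the clamped mask as-is, two SEs get a shifted copy
 * ORed in, and any other SE count disables everything.
 */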
static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
					    u32 disable_mask_per_se,
					    u32 max_disable_mask_per_se,
					    u32 num_shader_engines)
{
	u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
	u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;

	if (num_shader_engines == 1)
		return disable_mask_per_asic;
	else if (num_shader_engines == 2)
		return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
	else
		return 0xffffffff;
}

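/*
 * cayman_gpu_init - set up the 3D engine
 *
 * Derives the pipe/backend/memory configuration from the hw config
 * registers, programs the tiling and backend-map registers, then
 * applies the hw defaults for the shader complex and 3D engine.
 */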
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 gc_user_shader_pipe_config;
	u32 gc_user_rb_backend_disable;
	u32 cgts_user_tcc_disable;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
	default:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
	cgts_tcc_disable = 0xff000000;
	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
	gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);

	rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
	rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
	rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
	rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
	tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
	tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.backend_disable_mask_per_asic =
		cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
						 rdev->config.cayman.num_shader_engines);
	rdev->config.cayman.backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
	tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
	rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
	tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
		rdev->config.cayman.mem_max_burst_length_bytes = 512;
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	//gb_addr_config = 0x02011003
#if 0
	gb_addr_config = RREG32(GB_ADDR_CONFIG);
#else
	gb_addr_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
	gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
	tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
	switch (rdev->config.cayman.num_gpus) {
	case 1:
	default:
		gb_addr_config |= NUM_GPUS(0);
		break;
	case 2:
		gb_addr_config |= NUM_GPUS(1);
		break;
	case 4:
		gb_addr_config |= NUM_GPUS(2);
		break;
	}
	switch (rdev->config.cayman.multi_gpu_tile_size) {
	case 16:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
		break;
	case 32:
	default:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
		break;
	case 64:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
		break;
	case 128:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
		break;
	}
	switch (rdev->config.cayman.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}
#endif

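	/* read the configuration back out of gb_addr_config so the cached
	 * driver state matches the fields that were just programmed
	 */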
	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;

	//gb_backend_map = 0x76541032;
#if 0
	gb_backend_map = RREG32(GB_BACKEND_MAP);
#else
	gb_backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
#endif
	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}
	rdev->config.cayman.tile_config |=
		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	rdev->config.cayman.backend_map = gb_backend_map;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	/* primary versions */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);

	/* user versions */
	WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zeroed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

/*
 * GART
 */
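/*
 * cayman_pcie_gart_tlb_flush - flush the HDP cache and invalidate the
 * L1 TLBs of VM context 0 (bits 0-7 of VM_INVALIDATE_REQUEST select
 * which contexts to invalidate).
 */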
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts 0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	/* disable context1-7 */
	WREG32(VM_CONTEXT1_CNTL2, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	int r;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

/*
 * CP.
 */
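/*
 * cayman_cp_enable - enable or halt the CP micro engines
 *
 * The halt path also masks scratch register writeback and restores the
 * visible VRAM size for TTM while acceleration is down.
 */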
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
	}
}

static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

static int cayman_cp_start(struct radeon_device *rdev)
{
	int r, i;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(rdev, cayman_default_state[i]);

	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);

	/* Clear consts */
	radeon_ring_write(rdev, 0xc0036f00);
	radeon_ring_write(rdev, 0x00000bc4);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);

	radeon_ring_write(rdev, 0xc0026900);
	radeon_ring_write(rdev, 0x00000316);
	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(rdev, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev);
}

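/*
 * cayman_cp_resume - reset the CP and (re)start the rings
 *
 * Soft-resets the gfx blocks feeding the CP, programs size, read/write
 * pointers and writeback addresses for rings 0-2, then starts the CP
 * and ring-tests ring 0.
 */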
int cayman_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	rdev->cp.wptr = 0;
	WREG32(CP_RB0_WPTR, rdev->cp.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);

	rdev->cp.rptr = RREG32(CP_RB0_RPTR);

	/* ring1 - compute only */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	rdev->cp1.wptr = 0;
	WREG32(CP_RB1_WPTR, rdev->cp1.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);

	rdev->cp1.rptr = RREG32(CP_RB1_RPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	rdev->cp2.wptr = 0;
	WREG32(CP_RB2_WPTR, rdev->cp2.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);

	rdev->cp2.rptr = RREG32(CP_RB2_RPTR);

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->cp.ready = true;
	rdev->cp1.ready = true;
	rdev->cp2.ready = true;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		rdev->cp1.ready = false;
		rdev->cp2.ready = false;
		return r;
	}

	return 0;
}

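/*
 * cayman_gpu_is_lockup - check whether the CP has locked up
 *
 * While the graphics engine is idle the lockup tracker is just
 * refreshed; otherwise a couple of NOPs are submitted to force CP
 * activity and the ring read pointer is compared with the last sample.
 */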
bool cayman_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	/* XXX deal with CP0,1,2 */
	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}

static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int cayman_asic_reset(struct radeon_device *rdev)
{
	return cayman_gpu_soft_reset(rdev);
}

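/*
 * cayman_startup - bring the asic to a working state
 *
 * Loads the firmware, programs the MC, enables the GART and the 3D
 * engine, sets up the blitter, writeback and interrupts, then loads
 * the CP microcode and resumes the rings.  Used by both init and resume.
 */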
static int cayman_startup(struct radeon_device *rdev)
{
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
		r = ni_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}
	r = ni_mc_load_microcode(rdev);
	if (r) {
		DRM_ERROR("Failed to load MC firmware!\n");
		return r;
	}

	evergreen_mc_program(rdev);
	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}

int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	return r;
}

int cayman_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	cayman_cp_enable(rdev, false);
	rdev->cp.ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	r600_blit_suspend(rdev);

	return 0;
}

/* Plan is to move initialization into that function and use
 * helper functions so that radeon_device_init pretty much
 * does nothing more than call asic-specific functions. This
 * should also allow us to remove a bunch of callback functions
 * like vram_info.
 */
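/*
 * cayman_init - one-time asic init entry point
 *
 * Reads and posts the (ATOM)BIOS, initializes clocks, fences, the
 * memory controller and memory manager, sets up IRQs and rings, then
 * runs cayman_startup(); acceleration is disabled if bringup or the
 * IB tests fail.
 */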
int cayman_init(struct radeon_device *rdev)
{
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (!rdev->mc_fw) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

void cayman_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	cayman_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}