/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024

#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
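/*
 * Per-ASIC MC io register tables: {io debug index, value} pairs that
 * ni_mc_load_microcode() writes through the indirect
 * MC_SEQ_IO_DEBUG_INDEX/MC_SEQ_IO_DEBUG_DATA window before streaming
 * in the MC ucode.  The four tables below differ only in their final
 * (0x9f) entry.
 */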
#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};
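/*
 * ni_mc_load_microcode - upload the memory controller (MC) ucode
 *
 * Writes the per-family io register table through the MC_SEQ_IO_DEBUG
 * window, streams the MC firmware words into MC_SEQ_SUP_PGM, and then
 * waits for memory training to complete.  Only acts on GDDR5 boards
 * whose MC sequencer is not already running.  The expected call order
 * (see cayman_startup() below) is:
 *
 *	ni_init_microcode(rdev);	- fetch pfp/me/rlc/mc blobs
 *	ni_mc_load_microcode(rdev);	- program the MC with mc_fw
 *
 * Returns 0 on success, -EINVAL if rdev->mc_fw was never fetched.
 */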
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
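/*
 * ni_init_microcode - fetch PFP, ME, RLC and MC firmware from userspace
 *
 * Requests the radeon/<chip>_{pfp,me,mc}.bin and radeon/<rlc>_rlc.bin
 * blobs through request_firmware() (using a transient "radeon_cp"
 * platform device as the firmware consumer) and validates each blob
 * against the expected per-family size.  On failure every firmware
 * pointer is released and cleared so a later retry starts clean.
 */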
int ni_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
	err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->mc_fw->size != mc_req_size) {
		printk(KERN_ERR
		       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->mc_fw->size, fw_name);
		err = -EINVAL;
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/*
 * Core functions
 */
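/*
 * cayman_get_tile_pipe_to_backend_map - build a GB_BACKEND_MAP value
 *
 * Clamps the requested pipe/backend/shader-engine counts to the limits
 * in rdev->config.cayman, derives the set of enabled render backends
 * from the disable mask (rebuilding the mask if it disagrees with the
 * requested backend count), and assigns one 4-bit backend id per tile
 * pipe, optionally swizzling the pipe order.
 */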
static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
					       u32 num_tile_pipes,
					       u32 num_backends_per_asic,
					       u32 *backend_disable_mask_per_asic,
					       u32 num_shader_engines)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 num_backends_per_se;
	u32 cur_pipe;
	u32 swizzle_pipe[CAYMAN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	/* force legal values */
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
		num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	if (num_shader_engines < 1)
		num_shader_engines = 1;
	if (num_shader_engines > rdev->config.cayman.max_shader_engines)
		num_shader_engines = rdev->config.cayman.max_shader_engines;
	if (num_backends_per_asic < num_shader_engines)
		num_backends_per_asic = num_shader_engines;
	if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
		num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;

	/* make sure we have the same number of backends per se */
	num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
	/* set up the number of backends per se */
	num_backends_per_se = num_backends_per_asic / num_shader_engines;
	if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
		num_backends_per_se = rdev->config.cayman.max_backends_per_se;
		num_backends_per_asic = num_backends_per_se * num_shader_engines;
	}

	/* create enable mask and count for enabled backends */
	for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
		if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends_per_asic)
			break;
	}

	/* force the backends mask to match the current number of backends */
	if (enabled_backends_count != num_backends_per_asic) {
		u32 this_backend_enabled;
		u32 shader_engine;
		u32 backend_per_se;

		enabled_backends_mask = 0;
		enabled_backends_count = 0;
		*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			/* calc the current se */
			shader_engine = i / rdev->config.cayman.max_backends_per_se;
			/* calc the backend per se */
			backend_per_se = i % rdev->config.cayman.max_backends_per_se;
			/* default to not enabled */
			this_backend_enabled = 0;
			if ((shader_engine < num_shader_engines) &&
			    (backend_per_se < num_backends_per_se))
				this_backend_enabled = 1;
			if (this_backend_enabled) {
				enabled_backends_mask |= (1 << i);
				*backend_disable_mask_per_asic &= ~(1 << i);
				++enabled_backends_count;
			}
		}
	}


	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CAYMAN:
		force_no_swizzle = true;
		break;
	default:
		force_no_swizzle = false;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
	}

	return backend_map;
}
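/*
 * Widen a per-shader-engine backend disable mask to an ASIC-wide one
 * by replicating the field once per shader engine (Cayman has at most
 * two); anything beyond two engines collapses to all-disabled.
 */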
static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
					    u32 disable_mask_per_se,
					    u32 max_disable_mask_per_se,
					    u32 num_shader_engines)
{
	u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
	u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;

	if (num_shader_engines == 1)
		return disable_mask_per_asic;
	else if (num_shader_engines == 2)
		return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
	else
		return 0xffffffff;
}
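/*
 * cayman_gpu_init - golden register setup and derived configuration
 *
 * Fills rdev->config.cayman with the Cayman limits, derives the active
 * pipe/SIMD/backend counts from the GC_USER_* config registers (by
 * bit-counting the inverted INACTIVE/DISABLE fields), packs
 * GB_ADDR_CONFIG and the custom tiling config dword, and programs the
 * 3D engine defaults.
 */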
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 gc_user_shader_pipe_config;
	u32 gc_user_rb_backend_disable;
	u32 cgts_user_tcc_disable;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
	default:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
	cgts_tcc_disable = 0xff000000;
	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
	gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);

	rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
	rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
	rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
	rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
	tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
	tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.backend_disable_mask_per_asic =
		cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
						 rdev->config.cayman.num_shader_engines);
	rdev->config.cayman.backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
	tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
	rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
	tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
		rdev->config.cayman.mem_max_burst_length_bytes = 512;
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	//gb_addr_config = 0x02011003
#if 0
	gb_addr_config = RREG32(GB_ADDR_CONFIG);
#else
	gb_addr_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
	gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
	tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
	switch (rdev->config.cayman.num_gpus) {
	case 1:
	default:
		gb_addr_config |= NUM_GPUS(0);
		break;
	case 2:
		gb_addr_config |= NUM_GPUS(1);
		break;
	case 4:
		gb_addr_config |= NUM_GPUS(2);
		break;
	}
	switch (rdev->config.cayman.multi_gpu_tile_size) {
	case 16:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
		break;
	case 32:
	default:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
		break;
	case 64:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
		break;
	case 128:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
		break;
	}
	switch (rdev->config.cayman.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}
#endif

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
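	/*
	 * Like GB_ADDR_CONFIG above, the backend map is computed from
	 * scratch rather than read back from GB_BACKEND_MAP; the
	 * read-back variant is kept under "#if 0" for reference.
	 */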
	//gb_backend_map = 0x76541032;
#if 0
	gb_backend_map = RREG32(GB_BACKEND_MAP);
#else
	gb_backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
#endif
	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}
	rdev->config.cayman.tile_config |=
		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	rdev->config.cayman.backend_map = gb_backend_map;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	/* primary versions */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);

	/* user versions */
	WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));


	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
				       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

/*
 * GART
 */
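/*
 * Only VM context 0 is used here: it is pointed at the GART table and
 * covers the GTT aperture, with unmapped accesses redirected to the
 * dummy page.  Contexts 1-7 stay disabled.
 */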
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	/* disable context1-7 */
	WREG32(VM_CONTEXT1_CNTL2, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

/*
 * CP.
 */
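/*
 * cayman_cp_enable() is asymmetric: halting the CP also clears the
 * scratch writeback mask and resets TTM's active VRAM size, since
 * accelerated buffer moves are unavailable while the CP is down.
 */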
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
	}
}

static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

static int cayman_cp_start(struct radeon_device *rdev)
{
	int r, i;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(rdev, cayman_default_state[i]);

	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);

	/* Clear consts */
	radeon_ring_write(rdev, 0xc0036f00);
	radeon_ring_write(rdev, 0x00000bc4);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);

	radeon_ring_write(rdev, 0xc0026900);
	radeon_ring_write(rdev, 0x00000316);
	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(rdev, 0x00000010); /* */

	radeon_ring_unlock_commit(rdev);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev);
}
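/*
 * cayman_cp_resume - reset the CP block and bring up all three rings
 *
 * RB0 carries gfx+compute, RB1/RB2 are compute only; all three are
 * programmed the same way (size, rptr writeback address, base), after
 * which cayman_cp_start() primes the clear-state preamble and
 * radeon_ring_test() sanity-checks ring 0 only.
 */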
int cayman_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	rdev->cp.wptr = 0;
	WREG32(CP_RB0_WPTR, rdev->cp.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);

	rdev->cp.rptr = RREG32(CP_RB0_RPTR);

	/* ring1 - compute only */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	rdev->cp1.wptr = 0;
	WREG32(CP_RB1_WPTR, rdev->cp1.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);

	rdev->cp1.rptr = RREG32(CP_RB1_RPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	rdev->cp2.wptr = 0;
	WREG32(CP_RB2_WPTR, rdev->cp2.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);

	rdev->cp2.rptr = RREG32(CP_RB2_RPTR);

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->cp.ready = true;
	rdev->cp1.ready = true;
	rdev->cp2.ready = true;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		rdev->cp1.ready = false;
		rdev->cp2.ready = false;
		return r;
	}

	return 0;
}
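/*
 * Lockup detection: if the GUI is idle the CP is not stuck, so the
 * lockup tracker is simply refreshed.  Otherwise a couple of NOPs are
 * committed to force CP activity and the shared r100 helper decides,
 * from rptr progress over time, whether this is a real lockup.
 */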
bool cayman_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	/* XXX deal with CP0,1,2 */
	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
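/*
 * Soft reset: with the MC stopped and the CP halted, every gfx block
 * is pulsed through GRBM_SOFT_RESET, with the status registers dumped
 * before and after for debugging.  The MC is resumed afterwards.
 */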
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int cayman_asic_reset(struct radeon_device *rdev)
{
	return cayman_gpu_soft_reset(rdev);
}
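/*
 * cayman_startup - common hardware bring-up, shared by init and resume
 *
 * Ordering matters here: the microcode must be present before the MC
 * is programmed, the GART must be enabled before the blitter and
 * writeback buffers, and IRQs are installed before the CP rings are
 * loaded and resumed.
 */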
static int cayman_startup(struct radeon_device *rdev)
{
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
		r = ni_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}
	r = ni_mc_load_microcode(rdev);
	if (r) {
		DRM_ERROR("Failed to load MC firmware!\n");
		return r;
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}

int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on rv770 hw, unlike on
	 * r500 hw, posting will perform the necessary tasks to bring
	 * the GPU back into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	return r;
}

int cayman_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	cayman_cp_enable(rdev, false);
	rdev->cp.ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	r600_blit_suspend(rdev);

	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call the asic specific functions. This
 * should also allow us to remove a bunch of callback functions
 * like vram_info.
 */
int cayman_init(struct radeon_device *rdev)
{
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (!rdev->mc_fw) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

void cayman_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	cayman_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}