/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024

#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");

#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};
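
/*
 * ni_mc_load_microcode - load the MC (memory controller) microcode
 *
 * Picks the per-asic IO register list and ucode size, then programs the
 * MC_SEQ engine with the IO register pairs and the ucode image from
 * rdev->mc_fw.  The load is only performed on GDDR5 boards when the MC
 * sequencer is not already running.  Returns 0 on success, -EINVAL if
 * no MC firmware has been fetched.
 */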
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
			udelay(10);

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
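
/*
 * ni_init_microcode - fetch the PFP, ME, RLC and MC firmware images
 *
 * Requests the per-asic firmware files through the firmware loader and
 * validates their sizes against the expected ucode sizes.  On failure
 * all firmware references are released so the caller can retry later.
 */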
int ni_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
	err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->mc_fw->size != mc_req_size) {
		printk(KERN_ERR
		       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->mc_fw->size, fw_name);
		err = -EINVAL;
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/*
 * Core functions
 */
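
/*
 * cayman_get_tile_pipe_to_backend_map - build the pipe to render backend map
 *
 * Clamps the requested pipe/backend/shader engine counts to legal values,
 * computes the mask of enabled render backends, optionally swizzles the
 * pipe order, and packs one 4-bit backend index per tile pipe into the
 * returned dword.
 */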
static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
					       u32 num_tile_pipes,
					       u32 num_backends_per_asic,
					       u32 *backend_disable_mask_per_asic,
					       u32 num_shader_engines)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 num_backends_per_se;
	u32 cur_pipe;
	u32 swizzle_pipe[CAYMAN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	/* force legal values */
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
		num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	if (num_shader_engines < 1)
		num_shader_engines = 1;
	if (num_shader_engines > rdev->config.cayman.max_shader_engines)
		num_shader_engines = rdev->config.cayman.max_shader_engines;
	/* at least one backend per shader engine */
	if (num_backends_per_asic < num_shader_engines)
		num_backends_per_asic = num_shader_engines;
	if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
		num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;

	/* make sure we have the same number of backends per se */
	num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
	/* set up the number of backends per se */
	num_backends_per_se = num_backends_per_asic / num_shader_engines;
	if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
		num_backends_per_se = rdev->config.cayman.max_backends_per_se;
		num_backends_per_asic = num_backends_per_se * num_shader_engines;
	}

	/* create enable mask and count for enabled backends */
	for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
		if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends_per_asic)
			break;
	}

	/* force the backends mask to match the current number of backends */
	if (enabled_backends_count != num_backends_per_asic) {
		u32 this_backend_enabled;
		u32 shader_engine;
		u32 backend_per_se;

		enabled_backends_mask = 0;
		enabled_backends_count = 0;
		*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			/* calc the current se */
			shader_engine = i / rdev->config.cayman.max_backends_per_se;
			/* calc the backend per se */
			backend_per_se = i % rdev->config.cayman.max_backends_per_se;
			/* default to not enabled */
			this_backend_enabled = 0;
			if ((shader_engine < num_shader_engines) &&
			    (backend_per_se < num_backends_per_se))
				this_backend_enabled = 1;
			if (this_backend_enabled) {
				enabled_backends_mask |= (1 << i);
				*backend_disable_mask_per_asic &= ~(1 << i);
				++enabled_backends_count;
			}
		}
	}

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CAYMAN:
		force_no_swizzle = true;
		break;
	default:
		force_no_swizzle = false;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
	}

	return backend_map;
}
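
/*
 * cayman_program_channel_remap - program memory channel steering
 *
 * Writes the default MC channel remap and TCP channel steering values
 * for the asic.
 */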
static void cayman_program_channel_remap(struct radeon_device *rdev)
{
	u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;

	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	case 1:
	case 2:
	case 3:
	default:
		/* default mapping */
		mc_shared_chremap = 0x00fac688;
		break;
	}

	switch (rdev->family) {
	case CHIP_CAYMAN:
	default:
		/* tcp_chan_steer_lo = 0x54763210 */
		tcp_chan_steer_lo = 0x76543210;
		tcp_chan_steer_hi = 0x0000ba98;
		break;
	}

	WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
	WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}

static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
					    u32 disable_mask_per_se,
					    u32 max_disable_mask_per_se,
					    u32 num_shader_engines)
{
	u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
	u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;

	if (num_shader_engines == 1)
		return disable_mask_per_asic;
	else if (num_shader_engines == 2)
		return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
	else
		return 0xffffffff;
}
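
/*
 * cayman_gpu_init - set up the 3D engine
 *
 * Fills rdev->config.cayman with the asic limits, derives the tiling and
 * render backend configuration from the harvest registers, and programs
 * the hw defaults for the graphics block.
 */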
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 gc_user_shader_pipe_config;
	u32 gc_user_rb_backend_disable;
	u32 cgts_user_tcc_disable;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
	default:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
	cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE);
	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
	gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);

	rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
	rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
	rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
	tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
	rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
	tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
	tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
	rdev->config.cayman.backend_disable_mask_per_asic =
		cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
						 rdev->config.cayman.num_shader_engines);
	rdev->config.cayman.backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
	tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
	rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
	tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
		rdev->config.cayman.mem_max_burst_length_bytes = 512;
	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	/* gb_addr_config = 0x02011003 */
#if 0
	gb_addr_config = RREG32(GB_ADDR_CONFIG);
#else
	gb_addr_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
	gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
	tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
	switch (rdev->config.cayman.num_gpus) {
	case 1:
	default:
		gb_addr_config |= NUM_GPUS(0);
		break;
	case 2:
		gb_addr_config |= NUM_GPUS(1);
		break;
	case 4:
		gb_addr_config |= NUM_GPUS(2);
		break;
	}
	switch (rdev->config.cayman.multi_gpu_tile_size) {
	case 16:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
		break;
	case 32:
	default:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
		break;
	case 64:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
		break;
	case 128:
		gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
		break;
	}
	switch (rdev->config.cayman.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= ROW_SIZE(0);
		break;
	case 2:
		gb_addr_config |= ROW_SIZE(1);
		break;
	case 4:
		gb_addr_config |= ROW_SIZE(2);
		break;
	}
#endif
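
	/* decode gb_addr_config back into the config struct so the stored
	 * values match the encoding that gets programmed below
	 */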
	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;

	/* gb_backend_map = 0x76541032; */
#if 0
	gb_backend_map = RREG32(GB_BACKEND_MAP);
#else
	gb_backend_map =
		cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
						    rdev->config.cayman.num_backends_per_se *
						    rdev->config.cayman.num_shader_engines,
						    &rdev->config.cayman.backend_disable_mask_per_asic,
						    rdev->config.cayman.num_shader_engines);
#endif
	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}
	rdev->config.cayman.tile_config |=
		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	cayman_program_channel_remap(rdev);

	/* primary versions */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);

	/* user versions */
	WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

/*
 * GART
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
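
/*
 * cayman_pcie_gart_enable - set up the PCIE GART
 *
 * Pins the GART table in VRAM, programs the TLB and L2 cache controls,
 * points VM context0 at the GTT range and flushes the TLB.  Contexts 1-7
 * are left disabled.
 */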
int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	/* disable context1-7 */
	WREG32(VM_CONTEXT1_CNTL2, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	cayman_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	int r;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

/*
 * CP.
 */
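
/*
 * cayman_cp_enable - start/halt the CP micro engine
 *
 * When halting, the reported active VRAM size is bumped back to the full
 * visible VRAM size, since without the CP the blitter can no longer be
 * used to move buffers around.
 */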
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
	}
}

static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

static int cayman_cp_start(struct radeon_device *rdev)
{
	int r, i;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(rdev, cayman_default_state[i]);

	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);

	/* Clear consts */
	radeon_ring_write(rdev, 0xc0036f00);
	radeon_ring_write(rdev, 0x00000bc4);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);

	radeon_ring_write(rdev, 0xc0026900);
	radeon_ring_write(rdev, 0x00000316);
	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(rdev, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev);
}
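
/*
 * cayman_cp_resume - reset and (re)start the CP ring buffers
 *
 * Soft resets the CP block, then programs the size, rptr/wptr writeback
 * address and base of ring 0 (gfx + compute) and rings 1/2 (compute only),
 * starts the rings and runs a ring test on ring 0.
 */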
int cayman_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB0_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);

	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
	rdev->cp.wptr = RREG32(CP_RB0_WPTR);

	/* ring1 - compute only */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB1_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);

	rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
	rdev->cp1.wptr = RREG32(CP_RB1_WPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB2_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);

	rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
	rdev->cp2.wptr = RREG32(CP_RB2_WPTR);

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->cp.ready = true;
	rdev->cp1.ready = true;
	rdev->cp2.ready = true;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		rdev->cp1.ready = false;
		rdev->cp2.ready = false;
		return r;
	}

	return 0;
}
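
/*
 * cayman_gpu_is_lockup - check whether the GPU is locked up
 *
 * If the graphics block is idle, the lockup tracking state is simply
 * refreshed; otherwise a couple of NOPs are pushed through the ring and
 * the r100 lockup helper decides whether the CP has stopped making
 * progress.
 */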
bool cayman_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	/* XXX deal with CP0,1,2 */
	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}

static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int cayman_asic_reset(struct radeon_device *rdev)
{
	return cayman_gpu_soft_reset(rdev);
}
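
/*
 * cayman_startup - bring the hw into a working state
 *
 * Loads firmware if needed, programs the MC, enables the GART,
 * initializes the 3D engine, writeback and interrupts, then loads the
 * CP microcode and starts the rings.  Used by both init and resume.
 */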
static int cayman_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
		r = ni_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}
	r = ni_mc_load_microcode(rdev);
	if (r) {
		DRM_ERROR("Failed to load MC firmware!\n");
		return r;
	}

	evergreen_mc_program(rdev);
	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

#if 0
	r = cayman_blit_init(rdev);
	if (r) {
		cayman_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}
#endif

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}

int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on rv770 hw, unlike on
	 * r500 hw, posting performs the tasks needed to bring the GPU
	 * back into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	return r;
}

int cayman_suspend(struct radeon_device *rdev)
{
	/* int r; */

	/* FIXME: we should wait for ring to be empty */
	cayman_cp_enable(rdev, false);
	rdev->cp.ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);

#if 0
	/* unpin shaders bo */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}
#endif
	return 0;
}

/* Plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic specific functions. This should
 * also allow removal of a bunch of callback functions like
 * vram_info.
 */
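/*
 * cayman_init - asic specific driver and hw init
 *
 * One-time setup: BIOS fetch and post, atombios, fences, MC, memory
 * manager, IRQs and rings, followed by a first cayman_startup().  If
 * that startup fails, acceleration is disabled instead of failing the
 * whole load.
 */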
int cayman_init(struct radeon_device *rdev)
{
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (!rdev->mc_fw) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

void cayman_fini(struct radeon_device *rdev)
{
	/* cayman_blit_fini(rdev); */
	cayman_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_irq_kms_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}