/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void si_rlc_fini(struct radeon_device *rdev);
extern int si_rlc_init(struct radeon_device *rdev);

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024

#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037

#define ARUBA_RLC_UCODE_SIZE 1536

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");

#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};
static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};

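/*
 * ni_mc_load_microcode - load MC ucode into the hw (BTC/cayman)
 *
 * Programs the per-chip MC IO debug registers, streams the big-endian
 * MC ucode words into the sequencer, then restarts the sequencer and
 * waits for memory training to complete.  Only applies to discrete
 * GDDR5 boards; returns -EINVAL if no MC firmware has been loaded.
 */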
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

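/*
 * ni_init_microcode - fetch the PFP, ME, RLC and (on dGPUs) MC firmware
 *
 * Requests the per-family firmware images via request_firmware() and
 * validates their sizes.  IGP parts (ARUBA/TN) carry no MC ucode.  On
 * failure, all firmware references are released again.
 */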
int ni_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		break;
	case CHIP_ARUBA:
		chip_name = "ARUBA";
		rlc_chip_name = "ARUBA";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			printk(KERN_ERR
			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/*
 * Core functions
 */
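/*
 * cayman_gpu_init - set up the 3D engine
 *
 * Fills rdev->config.cayman from the per-family limits and the golden
 * GB_ADDR_CONFIG value, builds the custom tiling config dword, remaps
 * the render backends around disabled RBs and programs the hw
 * defaults for the shader complex.
 */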
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	u32 disabled_rb_mask;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
		} else if ((rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
			rdev->config.cayman.tile_config |= 1 << 4;
		else
			rdev->config.cayman.tile_config |= 0 << 4;
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

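	/* For example, an 8-pipe Cayman with 8 banks encodes 0x3 in
	 * bits 3:0 and 0x1 in bits 7:4, so the low byte of tile_config
	 * reads 0x13; the interleave and row-size fields taken from
	 * gb_addr_config land in bits 11:8 and 15:12 respectively.
	 */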
	tmp = 0;
	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
		u32 rb_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
		tmp <<= 4;
		tmp |= rb_disable_bitmap;
	}
	/* enabled rbs are just the ones not disabled :) */
	disabled_rb_mask = tmp;

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	tmp = gb_addr_config & NUM_PIPES_MASK;
	tmp = r6xx_remap_render_backend(rdev, tmp,
					rdev->config.cayman.max_backends_per_se *
					rdev->config.cayman.max_shader_engines,
					CAYMAN_MAX_BACKENDS, disabled_rb_mask);
	WREG32(GB_BACKEND_MAP, tmp);

	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

/*
 * GART
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

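/*
 * cayman_pcie_gart_enable - program the GART
 *
 * Pins the page table in VRAM, sets up the L1 TLB and L2 cache, and
 * points VM context 0 at the GTT aperture.  Contexts 1-7 share the
 * same page table base and are enabled with faults redirected to the
 * dummy page.
 */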
int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
		       rdev->gart.table_addr >> 12);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

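/*
 * CP_INT_CNTL is banked by ring: the low bits of SRBM_GFX_CNTL select
 * which ring's CP_INT_CNTL instance the following write lands in.
 */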
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}

/*
 * CP.
 */
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}

void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, ib->vm_id);
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
}

static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
	}
}

static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

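/*
 * cayman_cp_start - ME_INITIALIZE plus initial context state
 *
 * Emits the ME initialization packet, then the clear-state preamble
 * built from cayman_default_state and a few golden register writes on
 * the gfx ring.
 */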
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev, ring);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}

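/*
 * cayman_cp_resume - soft-reset the gfx block and restart the CP
 *
 * Programs all three ring buffers (gfx ring 0 plus the two compute
 * rings) with their read/write pointers and writeback addresses, then
 * starts the CP.  Only ring 0 is ring-tested here; the compute rings
 * stay marked not ready.
 */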
int cayman_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB0_CNTL, tmp);

	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB0_RPTR);

	/* ring1 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB1_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB1_CNTL, tmp);

	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB1_RPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	ring->wptr = 0;
	WREG32(CP_RB2_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	mdelay(1);
	WREG32(CP_RB2_CNTL, tmp);

	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB2_RPTR);

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	return 0;
}

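/*
 * cayman_gpu_soft_reset - reset the gfx blocks
 *
 * Dumps the GRBM/SRBM status and VM fault registers, halts the CP and
 * pulses GRBM_SOFT_RESET across all gfx blocks while the MC is
 * stopped.  A no-op if the GUI is already idle.
 */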
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(0x14F8));
	dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14D8));
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(0x14FC));
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14DC));

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);

	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}

int cayman_asic_reset(struct radeon_device *rdev)
{
	return cayman_gpu_soft_reset(rdev);
}

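/*
 * cayman_startup - bring the hw back up
 *
 * Loads microcode (including the MC ucode on dGPUs), programs the MC
 * and GART, initializes the blitter, writeback, fences, interrupts
 * and the gfx ring, then starts the IB pool and VM manager.  Shared
 * by init and resume.
 */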
static int cayman_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}

		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		r = si_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;
	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_start(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		return r;

	r = radeon_vm_manager_start(rdev);
	if (r)
		return r;

	return 0;
}

int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting; on rv770 hw, unlike on r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}
	return r;
}

int cayman_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	radeon_ib_pool_suspend(rdev);
	radeon_vm_manager_suspend(rdev);
	r600_blit_suspend(rdev);
	cayman_cp_enable(rdev, false);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	return 0;
}

/* Plan is to move initialization into that function and use
 * helper functions so that radeon_device_init pretty much
 * does nothing more than calling asic specific functions. This
 * should also allow removing a bunch of callback functions
 * like vram_info.
 */
int cayman_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	rdev->accel_working = true;
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		rdev->accel_working = false;
	}
	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
	}

	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			si_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 *
	 * We can skip this check for TN, because there is no MC
	 * ucode.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

void cayman_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	cayman_cp_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->flags & RADEON_IS_IGP)
		si_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	r100_ib_fini(rdev);
	radeon_irq_kms_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * vm
 */
int cayman_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 8;
	/* base offset of vram pages */
	if (rdev->flags & RADEON_IS_IGP) {
		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else
		rdev->vm_manager.vram_base_offset = 0;
	return 0;
}

void cayman_vm_fini(struct radeon_device *rdev)
{
}

int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
{
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
	return 0;
}

void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
}

void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
{
	if (vm->id == -1)
		return;

	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
}

#define R600_PTE_VALID     (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

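/*
 * cayman_vm_page_flags - translate RADEON_VM_PAGE_* flags to hw PTE bits
 *
 * For example, a valid, snooped system page that is both readable and
 * writeable yields 0x1 | 0x2 | 0x4 | 0x20 | 0x40 = 0x67.
 */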
uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
			      struct radeon_vm *vm,
			      uint32_t flags)
{
	uint32_t r600_flags = 0;

	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		r600_flags |= R600_PTE_SYSTEM;
		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return r600_flags;
}

void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
			unsigned pfn, uint64_t addr, uint32_t flags)
{
	void __iomem *ptr = (void *)vm->pt;

	addr = addr & 0xFFFFFFFFFFFFF000ULL;
	addr |= flags;
	writeq(addr, ptr + (pfn * 8));
}