/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_H__
#define __RADEON_H__

/* TODO: Here are things that still need to be done:
 * - surface allocator & initializer: (a bit like the scratch regs) should
 *   initialize the HDP_ registers on RS600, R600, R700 hw, and anything
 *   related to surfaces
 * - WB: write back support (handle it a bit like the scratch regs)
 * - Vblank: look at Jesse's rework and decide what we should do
 * - r600/r700: gart & cp
 * - cs: clean up the cs ioctl to use a bitmap and the like
 * - power management
 * - Barriers in the gart code
 * - Unmappable vram?
 * - TESTING, TESTING, TESTING
 */

/* Initialization path:
 * We expect that acceleration initialization might fail for various
 * reasons even though we work hard to make it work on most
 * configurations. In order to still have a working userspace in such
 * a situation, the init path must succeed up to the memory controller
 * initialization point. Failures before this point are considered
 * fatal. Here is the init callchain:
 *   radeon_device_init  performs common structure and mutex initialization
 *   asic_init           sets up the GPU memory layout and performs all
 *                       one-time initialization (failures in this
 *                       function are considered fatal)
 *   asic_startup        sets up GPU acceleration; to follow the guideline,
 *                       the first thing this function should do is set up
 *                       the GPU memory controller (only MC setup failures
 *                       are considered fatal)
 */

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"
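
/*
 * Illustrative sketch only (not part of the driver, hypothetical function
 * names): how the failure policy described in the "Initialization path"
 * comment above is meant to play out. MC setup failure is fatal, while a
 * failed acceleration bring-up still leaves a usable, unaccelerated device.
 */
#if 0
static int example_asic_startup(struct radeon_device *rdev)
{
	int r;

	r = example_mc_setup(rdev);		/* hypothetical MC bring-up */
	if (r)
		return r;			/* fatal: no working userspace */

	r = example_accel_startup(rdev);	/* hypothetical acceleration init */
	if (r)
		dev_warn(rdev->dev, "acceleration init failed, running unaccelerated\n");
	return 0;
}
#endif
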
/*
 * Modules parameters.
 */
extern int radeon_no_wb;
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_r4xx_atom;
extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
extern int radeon_lockup_timeout;
extern int radeon_fastfb;
extern int radeon_dpm;
extern int radeon_aspm;

/*
 * Copied from radeon_drv.h so we don't have to include both and have
 * conflicting symbols.
 */
#define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define RADEON_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE		16
#define RADEON_DEBUGFS_MAX_COMPONENTS	32
#define RADEONFB_CONN_LIMIT		4
#define RADEON_BIOS_NUM_SCRATCH		8

/* max number of rings */
#define RADEON_NUM_RINGS		6

/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ	0LL

/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX	0

/* cayman has 2 compute CP rings */
#define CAYMAN_RING_TYPE_CP1_INDEX	1
#define CAYMAN_RING_TYPE_CP2_INDEX	2

/* R600+ has an async dma ring */
#define R600_RING_TYPE_DMA_INDEX	3
/* cayman adds a second async dma ring */
#define CAYMAN_RING_TYPE_DMA1_INDEX	4

/* R600+ */
#define R600_RING_TYPE_UVD_INDEX	5

/* hardcode these limits for now */
#define RADEON_VA_IB_OFFSET		(1 << 20)
#define RADEON_VA_RESERVED_SIZE		(8 << 20)
#define RADEON_IB_VM_MAX_SIZE		(64 << 10)

/* reset flags */
#define RADEON_RESET_GFX		(1 << 0)
#define RADEON_RESET_COMPUTE		(1 << 1)
#define RADEON_RESET_DMA		(1 << 2)
#define RADEON_RESET_CP			(1 << 3)
#define RADEON_RESET_GRBM		(1 << 4)
#define RADEON_RESET_DMA1		(1 << 5)
#define RADEON_RESET_RLC		(1 << 6)
#define RADEON_RESET_SEM		(1 << 7)
#define RADEON_RESET_IH			(1 << 8)
#define RADEON_RESET_VMC		(1 << 9)
#define RADEON_RESET_MC			(1 << 10)
#define RADEON_RESET_DISPLAY		(1 << 11)

/* CG block flags */
#define RADEON_CG_BLOCK_GFX		(1 << 0)
#define RADEON_CG_BLOCK_MC		(1 << 1)
#define RADEON_CG_BLOCK_SDMA		(1 << 2)
#define RADEON_CG_BLOCK_UVD		(1 << 3)
#define RADEON_CG_BLOCK_VCE		(1 << 4)
#define RADEON_CG_BLOCK_HDP		(1 << 5)
#define RADEON_CG_BLOCK_BIF		(1 << 6)

/* CG flags */
#define RADEON_CG_SUPPORT_GFX_MGCG	(1 << 0)
#define RADEON_CG_SUPPORT_GFX_MGLS	(1 << 1)
#define RADEON_CG_SUPPORT_GFX_CGCG	(1 << 2)
#define RADEON_CG_SUPPORT_GFX_CGLS	(1 << 3)
#define RADEON_CG_SUPPORT_GFX_CGTS	(1 << 4)
#define RADEON_CG_SUPPORT_GFX_CGTS_LS	(1 << 5)
#define RADEON_CG_SUPPORT_GFX_CP_LS	(1 << 6)
#define RADEON_CG_SUPPORT_GFX_RLC_LS	(1 << 7)
#define RADEON_CG_SUPPORT_MC_LS		(1 << 8)
#define RADEON_CG_SUPPORT_MC_MGCG	(1 << 9)
#define RADEON_CG_SUPPORT_SDMA_LS	(1 << 10)
#define RADEON_CG_SUPPORT_SDMA_MGCG	(1 << 11)
#define RADEON_CG_SUPPORT_BIF_LS	(1 << 12)
#define RADEON_CG_SUPPORT_UVD_MGCG	(1 << 13)
#define RADEON_CG_SUPPORT_VCE_MGCG	(1 << 14)
#define RADEON_CG_SUPPORT_HDP_LS	(1 << 15)
#define RADEON_CG_SUPPORT_HDP_MGCG	(1 << 16)

/* PG flags */
#define RADEON_PG_SUPPORT_GFX_CG	(1 << 0)
#define RADEON_PG_SUPPORT_GFX_SMG	(1 << 1)
#define RADEON_PG_SUPPORT_GFX_DMG	(1 << 2)
#define RADEON_PG_SUPPORT_UVD		(1 << 3)
#define RADEON_PG_SUPPORT_VCE		(1 << 4)
#define RADEON_PG_SUPPORT_CP		(1 << 5)
#define RADEON_PG_SUPPORT_GDS		(1 << 6)
#define RADEON_PG_SUPPORT_RLC_SMU_HS	(1 << 7)
#define RADEON_PG_SUPPORT_SDMA		(1 << 8)
#define RADEON_PG_SUPPORT_ACP		(1 << 9)
#define RADEON_PG_SUPPORT_SAMU		(1 << 10)

/* max cursor sizes (in pixels) */
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

/*
 * Errata workarounds.
 */
enum radeon_pll_errata {
	CHIP_ERRATA_R300_CG = 0x00000001,
	CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
	CHIP_ERRATA_PLL_DELAY = 0x00000004
};


struct radeon_device;


/*
 * BIOS.
 */
bool radeon_get_bios(struct radeon_device *rdev);

/*
 * Dummy page
 */
struct radeon_dummy_page {
	struct page *page;
	dma_addr_t addr;
};
int radeon_dummy_page_init(struct radeon_device *rdev);
void radeon_dummy_page_fini(struct radeon_device *rdev);


/*
 * Clocks
 */
struct radeon_clock {
	struct radeon_pll p1pll;
	struct radeon_pll p2pll;
	struct radeon_pll dcpll;
	struct radeon_pll spll;
	struct radeon_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Power management
 */
int radeon_pm_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
				   u8 clock_type,
				   u32 clock,
				   bool strobe_mode,
				   struct atom_clock_dividers *dividers);
int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
					u32 clock,
					bool strobe_mode,
					struct atom_mpll_param *mpll_param);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
					  u16 voltage_level, u8 voltage_type,
					  u32 *gpio_value, u32 *gpio_mask);
void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
					 u32 eng_clock, u32 mem_clock);
int radeon_atom_get_voltage_step(struct radeon_device *rdev,
				 u8 voltage_type, u16 *voltage_step);
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
			     u16 voltage_id, u16 *voltage);
int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
						      u16 *voltage,
						      u16 leakage_idx);
int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
					  u16 *leakage_id);
int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
							 u16 *vddc, u16 *vddci,
							 u16 virtual_voltage_id,
							 u16 vbios_voltage_id);
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
				      u8 voltage_type,
				      u16 nominal_voltage,
				      u16 *true_voltage);
int radeon_atom_get_min_voltage(struct radeon_device *rdev,
				u8 voltage_type, u16 *min_voltage);
int radeon_atom_get_max_voltage(struct radeon_device *rdev,
				u8 voltage_type, u16 *max_voltage);
int radeon_atom_get_voltage_table(struct radeon_device *rdev,
				  u8 voltage_type, u8 voltage_mode,
				  struct atom_voltage_table *voltage_table);
bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
				 u8 voltage_type, u8 voltage_mode);
void radeon_atom_update_memory_dll(struct radeon_device *rdev,
				   u32 mem_clock);
void radeon_atom_set_ac_timing(struct radeon_device *rdev,
			       u32 mem_clock);
int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
				  u8 module_index,
				  struct atom_mc_reg_table *reg_table);
int radeon_atom_get_memory_info(struct radeon_device *rdev,
				u8 module_index, struct atom_memory_info *mem_info);
int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
				     bool gddr5, u8 module_index,
				     struct atom_memory_clock_range_table *mclk_range_table);
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
			     u16 voltage_id, u16 *voltage);
void rs690_pm_info(struct radeon_device *rdev);
extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
				    unsigned *bankh, unsigned *mtaspect,
				    unsigned *tile_split);

/*
 * Fences.
 */
struct radeon_fence_driver {
	uint32_t scratch_reg;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint64_t sync_seq[RADEON_NUM_RINGS];
	atomic64_t last_seq;
	unsigned long last_activity;
	bool initialized;
};

struct radeon_fence {
	struct radeon_device *rdev;
	struct kref kref;
	/* protected by radeon_fence.lock */
	uint64_t seq;
	/* RB, DMA, etc. */
	unsigned ring;
};

int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
void radeon_fence_driver_force_completion(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
						      struct radeon_fence *b)
{
	if (!a) {
		return b;
	}

	if (!b) {
		return a;
	}

	BUG_ON(a->ring != b->ring);

	if (a->seq > b->seq) {
		return a;
	} else {
		return b;
	}
}

static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
					   struct radeon_fence *b)
{
	if (!a) {
		return false;
	}

	if (!b) {
		return true;
	}

	BUG_ON(a->ring != b->ring);

	return a->seq < b->seq;
}

/*
 * Tiling registers
 */
struct radeon_surface_reg {
	struct radeon_bo *bo;
};

#define RADEON_GEM_MAX_SURFACES 8

/*
 * TTM.
 */
struct radeon_mman {
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;
	struct ttm_bo_device bdev;
	bool mem_global_referenced;
	bool initialized;
};

/* bo virtual address in a specific vm */
struct radeon_bo_va {
	/* protected by bo being reserved */
	struct list_head bo_list;
	uint64_t soffset;
	uint64_t eoffset;
	uint32_t flags;
	bool valid;
	unsigned ref_count;

	/* protected by vm mutex */
	struct list_head vm_list;

	/* constant after initialization */
	struct radeon_vm *vm;
	struct radeon_bo *bo;
};

struct radeon_bo {
	/* Protected by gem.mutex */
	struct list_head list;
	/* Protected by tbo.reserved */
	u32 placements[3];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	unsigned pin_count;
	void *kptr;
	u32 tiling_flags;
	u32 pitch;
	int surface_reg;
	/* list of all virtual addresses to which this bo is associated */
	struct list_head va;
	/* Constant after initialization */
	struct radeon_device *rdev;
	struct drm_gem_object gem_base;

	struct ttm_bo_kmap_obj dma_buf_vmap;
	pid_t pid;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)

struct radeon_bo_list {
	struct ttm_validate_buffer tv;
	struct radeon_bo *bo;
	uint64_t gpu_offset;
	bool written;
	unsigned domain;
	unsigned alt_domain;
	u32 tiling_flags;
};

int radeon_gem_debugfs_init(struct radeon_device *rdev);

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, such as
 * the indirect buffers or semaphores, which all have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we wait on each sub
 * object in turn until we reach object_offset + object_size >=
 * alloc_size; that object then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
struct radeon_sa_manager {
	wait_queue_head_t wq;
	struct radeon_bo *bo;
	struct list_head *hole;
	struct list_head flist[RADEON_NUM_RINGS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
	uint32_t align;
};

struct radeon_sa_bo;

/* sub-allocation buffer */
struct radeon_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct radeon_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct radeon_fence *fence;
};
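
/*
 * Illustrative sketch (not part of the driver): the "room at the end" test
 * described in the sub-allocator comment above. The last-object offset and
 * size would normally come from the tail of the olist; here they are passed
 * in directly for the sake of the example.
 */
static inline bool radeon_sa_example_fits_at_end(struct radeon_sa_manager *sa_manager,
						 unsigned last_offset,
						 unsigned last_size,
						 unsigned alloc_size)
{
	/* total_size - (last_object_offset + last_object_size) >= alloc_size */
	return (sa_manager->size - (last_offset + last_size)) >= alloc_size;
}
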
/*
 * GEM objects.
 */
struct radeon_gem {
	struct mutex mutex;
	struct list_head objects;
};

int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     struct drm_gem_object **obj);

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);

/*
 * Semaphores.
 */
/* everything here is constant */
struct radeon_semaphore {
	struct radeon_sa_bo *sa_bo;
	signed waiters;
	uint64_t gpu_addr;
};

int radeon_semaphore_create(struct radeon_device *rdev,
			    struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
				  struct radeon_semaphore *semaphore);
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
				struct radeon_semaphore *semaphore);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
				struct radeon_semaphore *semaphore,
				int signaler, int waiter);
void radeon_semaphore_free(struct radeon_device *rdev,
			   struct radeon_semaphore **semaphore,
			   struct radeon_fence *fence);

/*
 * GART structures, functions & helpers
 */
struct radeon_mc;

#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)

struct radeon_gart {
	dma_addr_t table_addr;
	struct radeon_bo *robj;
	void *ptr;
	unsigned num_gpu_pages;
	unsigned num_cpu_pages;
	unsigned table_size;
	struct page **pages;
	dma_addr_t *pages_addr;
	bool ready;
};

int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_pin(struct radeon_device *rdev);
void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr);
void radeon_gart_restore(struct radeon_device *rdev);


/*
 * GPU MC structures, functions & helpers
 */
struct radeon_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64 mc_vram_size;
	u64 visible_vram_size;
	u64 gtt_size;
	u64 gtt_start;
	u64 gtt_end;
	u64 vram_start;
	u64 vram_end;
	unsigned vram_width;
	u64 real_vram_size;
	int vram_mtrr;
	bool vram_is_ddr;
	bool igp_sideport_enabled;
	u64 gtt_base_align;
	u64 mc_mask;
};

bool radeon_combios_sideport_present(struct radeon_device *rdev);
bool radeon_atombios_sideport_present(struct radeon_device *rdev);

/*
 * GPU scratch registers structures, functions & helpers
 */
struct radeon_scratch {
	unsigned num_reg;
	uint32_t reg_base;
	bool free[32];
	uint32_t reg[32];
};

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);

/*
 * GPU doorbell structures, functions & helpers
 */
struct radeon_doorbell {
	u32 num_pages;
	bool free[1024];
	/* doorbell mmio */
	resource_size_t base;
	resource_size_t size;
	void __iomem *ptr;
};

int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);

/*
 * IRQS.
 */

struct radeon_unpin_work {
	struct work_struct work;
	struct radeon_device *rdev;
	int crtc_id;
	struct radeon_fence *fence;
	struct drm_pending_vblank_event *event;
	struct radeon_bo *old_rbo;
	u64 new_crtc_base;
};

struct r500_irq_stat_regs {
	u32 disp_int;
	u32 hdmi0_status;
};

struct r600_irq_stat_regs {
	u32 disp_int;
	u32 disp_int_cont;
	u32 disp_int_cont2;
	u32 d1grph_int;
	u32 d2grph_int;
	u32 hdmi0_status;
	u32 hdmi1_status;
};

struct evergreen_irq_stat_regs {
	u32 disp_int;
	u32 disp_int_cont;
	u32 disp_int_cont2;
	u32 disp_int_cont3;
	u32 disp_int_cont4;
	u32 disp_int_cont5;
	u32 d1grph_int;
	u32 d2grph_int;
	u32 d3grph_int;
	u32 d4grph_int;
	u32 d5grph_int;
	u32 d6grph_int;
	u32 afmt_status1;
	u32 afmt_status2;
	u32 afmt_status3;
	u32 afmt_status4;
	u32 afmt_status5;
	u32 afmt_status6;
};

struct cik_irq_stat_regs {
	u32 disp_int;
	u32 disp_int_cont;
	u32 disp_int_cont2;
	u32 disp_int_cont3;
	u32 disp_int_cont4;
	u32 disp_int_cont5;
	u32 disp_int_cont6;
};

union radeon_irq_stat_regs {
	struct r500_irq_stat_regs r500;
	struct r600_irq_stat_regs r600;
	struct evergreen_irq_stat_regs evergreen;
	struct cik_irq_stat_regs cik;
};

#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
#define RADEON_MAX_AFMT_BLOCKS 7

struct radeon_irq {
	bool installed;
	spinlock_t lock;
	atomic_t ring_int[RADEON_NUM_RINGS];
	bool crtc_vblank_int[RADEON_MAX_CRTCS];
	atomic_t pflip[RADEON_MAX_CRTCS];
	wait_queue_head_t vblank_queue;
	bool hpd[RADEON_MAX_HPD_PINS];
	bool afmt[RADEON_MAX_AFMT_BLOCKS];
	union radeon_irq_stat_regs stat_regs;
	bool dpm_thermal;
};

int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
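
/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * hpd_mask is a bitmask of hotplug pins, so enabling detection for a single
 * connector's pin would look roughly like this.
 */
#if 0
	radeon_irq_kms_enable_hpd(rdev, 1 << radeon_connector->hpd.hpd);
#endif
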
/*
 * CP & rings.
 */

struct radeon_ib {
	struct radeon_sa_bo *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	int ring;
	struct radeon_fence *fence;
	struct radeon_vm *vm;
	bool is_const_ib;
	struct radeon_fence *sync_to[RADEON_NUM_RINGS];
	struct radeon_semaphore *semaphore;
};

struct radeon_ring {
	struct radeon_bo *ring_obj;
	volatile uint32_t *ring;
	unsigned rptr;
	unsigned rptr_offs;
	unsigned rptr_reg;
	unsigned rptr_save_reg;
	u64 next_rptr_gpu_addr;
	volatile u32 *next_rptr_cpu_addr;
	unsigned wptr;
	unsigned wptr_old;
	unsigned wptr_reg;
	unsigned ring_size;
	unsigned ring_free_dw;
	int count_dw;
	unsigned long last_activity;
	unsigned last_rptr;
	uint64_t gpu_addr;
	uint32_t align_mask;
	uint32_t ptr_mask;
	bool ready;
	u32 nop;
	u32 idx;
	u64 last_semaphore_signal_addr;
	u64 last_semaphore_wait_addr;
	/* for CIK queues */
	u32 me;
	u32 pipe;
	u32 queue;
	struct radeon_bo *mqd_obj;
	u32 doorbell_page_num;
	u32 doorbell_offset;
	unsigned wptr_offs;
};

struct radeon_mec {
	struct radeon_bo *hpd_eop_obj;
	u64 hpd_eop_gpu_addr;
	u32 num_pipe;
	u32 num_mec;
	u32 num_queue;
};

/*
 * VM
 */

/* maximum number of VMIDs */
#define RADEON_NUM_VM 16

/* defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, 9 bits in the page
 * table and the remaining 19 bits are in the page directory */
#define RADEON_VM_BLOCK_SIZE 9

/* number of entries in page table */
#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define RADEON_VM_PTB_ALIGN_SIZE 32768
#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)

struct radeon_vm {
	struct list_head list;
	struct list_head va;
	unsigned id;

	/* contains the page directory */
	struct radeon_sa_bo *page_directory;
	uint64_t pd_gpu_addr;

	/* array of page tables, one for each page directory entry */
	struct radeon_sa_bo **page_tables;

	struct mutex mutex;
	/* last fence for cs using this vm */
	struct radeon_fence *fence;
	/* last flush or NULL if we still need to flush */
	struct radeon_fence *last_flush;
};

struct radeon_vm_manager {
	struct mutex lock;
	struct list_head lru_vm;
	struct radeon_fence *active[RADEON_NUM_VM];
	struct radeon_sa_manager sa_manager;
	uint32_t max_pfn;
	/* number of VMIDs */
	unsigned nvm;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* is vm enabled? */
	bool enabled;
};
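
/*
 * Illustrative helpers (not part of the driver): how a GPU virtual address
 * splits into page-directory and page-table indices given the
 * RADEON_VM_BLOCK_SIZE layout described above (12-bit page offset, 9-bit
 * page table index, remaining bits indexing the page directory).
 */
static inline unsigned radeon_vm_example_pd_index(uint64_t gpu_va)
{
	return gpu_va >> (RADEON_GPU_PAGE_SHIFT + RADEON_VM_BLOCK_SIZE);
}

static inline unsigned radeon_vm_example_pt_index(uint64_t gpu_va)
{
	return (gpu_va >> RADEON_GPU_PAGE_SHIFT) & (RADEON_VM_PTE_COUNT - 1);
}
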
/*
 * file private structure
 */
struct radeon_fpriv {
	struct radeon_vm vm;
};

/*
 * R6xx+ IH ring
 */
struct r600_ih {
	struct radeon_bo *ring_obj;
	volatile uint32_t *ring;
	unsigned rptr;
	unsigned ring_size;
	uint64_t gpu_addr;
	uint32_t ptr_mask;
	atomic_t lock;
	bool enabled;
};

/*
 * RLC stuff
 */
#include "clearstate_defs.h"

struct radeon_rlc {
	/* for power gating */
	struct radeon_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	volatile uint32_t *sr_ptr;
	const u32 *reg_list;
	u32 reg_list_size;
	/* for clear state */
	struct radeon_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
	volatile uint32_t *cs_ptr;
	const struct cs_section_def *cs_data;
	u32 clear_state_size;
	/* for cp tables */
	struct radeon_bo *cp_table_obj;
	uint64_t cp_table_gpu_addr;
	volatile uint32_t *cp_table_ptr;
	u32 cp_table_size;
};

int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
				      struct radeon_ring *ring);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
void radeon_ring_lockup_update(struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
			    uint32_t **data);
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
			unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);


/* r600 async dma */
void r600_dma_stop(struct radeon_device *rdev);
int r600_dma_resume(struct radeon_device *rdev);
void r600_dma_fini(struct radeon_device *rdev);

void cayman_dma_stop(struct radeon_device *rdev);
int cayman_dma_resume(struct radeon_device *rdev);
void cayman_dma_fini(struct radeon_device *rdev);
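
/*
 * Illustrative usage sketch (hypothetical caller): the lock / write /
 * unlock_commit pattern behind the ring API above. radeon_ring_write() is
 * assumed to be the dword-write helper provided elsewhere in the driver,
 * and the packet contents here are placeholders.
 */
#if 0
	r = radeon_ring_lock(rdev, ring, 2);		/* reserve 2 dwords */
	if (r == 0) {
		radeon_ring_write(ring, header);	/* hypothetical packet header */
		radeon_ring_write(ring, payload);	/* hypothetical payload */
		radeon_ring_unlock_commit(rdev, ring);
	}
#endif
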
/*
 * CS.
 */
struct radeon_cs_reloc {
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	struct radeon_bo_list lobj;
	uint32_t handle;
	uint32_t flags;
};

struct radeon_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	int kpage_idx[2];
	uint32_t *kpage[2];
	uint32_t *kdata;
	void __user *user_ptr;
	int last_copied_page;
	int last_page_index;
};

struct radeon_cs_parser {
	struct device *dev;
	struct radeon_device *rdev;
	struct drm_file *filp;
	/* chunks */
	unsigned nchunks;
	struct radeon_cs_chunk *chunks;
	uint64_t *chunks_array;
	/* IB */
	unsigned idx;
	/* relocations */
	unsigned nrelocs;
	struct radeon_cs_reloc *relocs;
	struct radeon_cs_reloc **relocs_ptr;
	struct list_head validated;
	unsigned dma_reloc_idx;
	/* indices of various chunks */
	int chunk_ib_idx;
	int chunk_relocs_idx;
	int chunk_flags_idx;
	int chunk_const_ib_idx;
	struct radeon_ib ib;
	struct radeon_ib const_ib;
	void *track;
	unsigned family;
	int parser_error;
	u32 cs_flags;
	u32 ring;
	s32 priority;
	struct ww_acquire_ctx ticket;
};

extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);

struct radeon_cs_packet {
	unsigned idx;
	unsigned type;
	unsigned reg;
	unsigned opcode;
	int count;
	unsigned one_reg_wr;
};

typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt,
				      unsigned idx, unsigned reg);
typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt);


/*
 * AGP
 */
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
void radeon_agp_suspend(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);


/*
 * Writeback
 */
struct radeon_wb {
	struct radeon_bo *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
	bool enabled;
	bool use_event;
};

#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_RING0_NEXT_RPTR 256
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
#define R600_WB_DMA_RPTR_OFFSET 1792
#define R600_WB_IH_WPTR_OFFSET 2048
#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
#define R600_WB_EVENT_OFFSET 3072
#define CIK_WB_CP1_WPTR_OFFSET 3328
#define CIK_WB_CP2_WPTR_OFFSET 3584
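
/*
 * Illustrative sketch (hypothetical context): the offsets above are byte
 * offsets into the writeback buffer, while radeon_wb.wb is an array of
 * 32-bit words, so reading e.g. the CP read pointer looks roughly like this.
 */
#if 0
	u32 rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET / 4]);
#endif
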
/**
 * struct radeon_pm - power management data
 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
 * @igp_sideport_mclk: sideport memory clock MHz (rs690,rs740,rs780,rs880)
 * @igp_system_mclk: system clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_clk: ht link clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
 * @sclk: GPU clock MHz (core bandwidth depends on this clock)
 * @needed_bandwidth: current bandwidth needs
 *
 * It keeps track of various data needed to make power management decisions.
 * Bandwidth needs are used to determine the minimum clock of the GPU and memory.
 * The equation between gpu/memory clock and available bandwidth is hw dependent
 * (type of memory, bus size, efficiency, ...)
 */

enum radeon_pm_method {
	PM_METHOD_PROFILE,
	PM_METHOD_DYNPM,
	PM_METHOD_DPM,
};

enum radeon_dynpm_state {
	DYNPM_STATE_DISABLED,
	DYNPM_STATE_MINIMUM,
	DYNPM_STATE_PAUSED,
	DYNPM_STATE_ACTIVE,
	DYNPM_STATE_SUSPENDED,
};
enum radeon_dynpm_action {
	DYNPM_ACTION_NONE,
	DYNPM_ACTION_MINIMUM,
	DYNPM_ACTION_DOWNCLOCK,
	DYNPM_ACTION_UPCLOCK,
	DYNPM_ACTION_DEFAULT
};

enum radeon_voltage_type {
	VOLTAGE_NONE = 0,
	VOLTAGE_GPIO,
	VOLTAGE_VDDC,
	VOLTAGE_SW
};

enum radeon_pm_state_type {
	/* not used for dpm */
	POWER_STATE_TYPE_DEFAULT,
	POWER_STATE_TYPE_POWERSAVE,
	/* user selectable states */
	POWER_STATE_TYPE_BATTERY,
	POWER_STATE_TYPE_BALANCED,
	POWER_STATE_TYPE_PERFORMANCE,
	/* internal states */
	POWER_STATE_TYPE_INTERNAL_UVD,
	POWER_STATE_TYPE_INTERNAL_UVD_SD,
	POWER_STATE_TYPE_INTERNAL_UVD_HD,
	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
	POWER_STATE_TYPE_INTERNAL_BOOT,
	POWER_STATE_TYPE_INTERNAL_THERMAL,
	POWER_STATE_TYPE_INTERNAL_ACPI,
	POWER_STATE_TYPE_INTERNAL_ULV,
	POWER_STATE_TYPE_INTERNAL_3DPERF,
};

enum radeon_pm_profile_type {
	PM_PROFILE_DEFAULT,
	PM_PROFILE_AUTO,
	PM_PROFILE_LOW,
	PM_PROFILE_MID,
	PM_PROFILE_HIGH,
};

#define PM_PROFILE_DEFAULT_IDX 0
#define PM_PROFILE_LOW_SH_IDX 1
#define PM_PROFILE_MID_SH_IDX 2
#define PM_PROFILE_HIGH_SH_IDX 3
#define PM_PROFILE_LOW_MH_IDX 4
#define PM_PROFILE_MID_MH_IDX 5
#define PM_PROFILE_HIGH_MH_IDX 6
#define PM_PROFILE_MAX 7

struct radeon_pm_profile {
	int dpms_off_ps_idx;
	int dpms_on_ps_idx;
	int dpms_off_cm_idx;
	int dpms_on_cm_idx;
};

enum radeon_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_EXTERNAL,
	THERMAL_TYPE_EXTERNAL_GPIO,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
	THERMAL_TYPE_CI,
	THERMAL_TYPE_KV,
};

struct radeon_voltage {
	enum radeon_voltage_type type;
	/* gpio voltage */
	struct radeon_gpio_rec gpio;
	u32 delay; /* delay in usec from voltage drop to sclk change */
	bool active_high; /* voltage drop is active when bit is high */
	/* VDDC voltage */
	u8 vddc_id; /* index into vddc voltage table */
	u8 vddci_id; /* index into vddci voltage table */
	bool vddci_enabled;
	/* r6xx+ sw */
	u16 voltage;
	/* evergreen+ vddci */
	u16 vddci;
};

/* clock mode flags */
#define RADEON_PM_MODE_NO_DISPLAY (1 << 0)

struct radeon_pm_clock_info {
	/* memory clock */
	u32 mclk;
	/* engine clock */
	u32 sclk;
	/* voltage info */
	struct radeon_voltage voltage;
	/* standardized clock flags */
	u32 flags;
};

/* state flags */
#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)

struct radeon_power_state {
	enum radeon_pm_state_type type;
	struct radeon_pm_clock_info *clock_info;
	/* number of valid clock modes in this power state */
	int num_clock_modes;
	struct radeon_pm_clock_info *default_clock_mode;
	/* standardized state flags */
	u32 flags;
	u32 misc; /* vbios specific flags */
	u32 misc2; /* vbios specific flags */
	int pcie_lanes; /* pcie lanes */
};

/*
 * Some modes are overclocked by a very low value; accept them
 */
#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */

enum radeon_dpm_auto_throttle_src {
	RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL,
	RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};

enum radeon_dpm_event_src {
	RADEON_DPM_EVENT_SRC_ANALOG = 0,
	RADEON_DPM_EVENT_SRC_EXTERNAL = 1,
	RADEON_DPM_EVENT_SRC_DIGITAL = 2,
	RADEON_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};

struct radeon_ps {
	u32 caps; /* vbios flags */
	u32 class; /* vbios flags */
	u32 class2; /* vbios flags */
	/* UVD clocks */
	u32 vclk;
	u32 dclk;
	/* VCE clocks */
	u32 evclk;
	u32 ecclk;
	/* asic priv */
	void *ps_priv;
};

struct radeon_dpm_thermal {
	/* thermal interrupt work */
	struct work_struct work;
	/* low temperature threshold */
	int min_temp;
	/* high temperature threshold */
	int max_temp;
	/* was interrupt low to high or high to low */
	bool high_to_low;
};

enum radeon_clk_action
{
	RADEON_SCLK_UP = 1,
	RADEON_SCLK_DOWN
};

struct radeon_blacklist_clocks
{
	u32 sclk;
	u32 mclk;
	enum radeon_clk_action action;
};

struct radeon_clock_and_voltage_limits {
	u32 sclk;
	u32 mclk;
	u32 vddc;
	u32 vddci;
};

struct radeon_clock_array {
	u32 count;
	u32 *values;
};

struct radeon_clock_voltage_dependency_entry {
	u32 clk;
	u16 v;
};

struct radeon_clock_voltage_dependency_table {
	u32 count;
	struct radeon_clock_voltage_dependency_entry *entries;
};

union radeon_cac_leakage_entry {
	struct {
		u16 vddc;
		u32 leakage;
	};
	struct {
		u16 vddc1;
		u16 vddc2;
		u16 vddc3;
	};
};

struct radeon_cac_leakage_table {
	u32 count;
	union radeon_cac_leakage_entry *entries;
};

struct radeon_phase_shedding_limits_entry {
	u16 voltage;
	u32 sclk;
	u32 mclk;
};

struct radeon_phase_shedding_limits_table {
	u32 count;
	struct radeon_phase_shedding_limits_entry *entries;
};

struct radeon_uvd_clock_voltage_dependency_entry {
	u32 vclk;
	u32 dclk;
	u16 v;
};

struct radeon_uvd_clock_voltage_dependency_table {
	u8 count;
	struct radeon_uvd_clock_voltage_dependency_entry *entries;
};

struct radeon_vce_clock_voltage_dependency_entry {
	u32 ecclk;
	u32 evclk;
	u16 v;
};

struct radeon_vce_clock_voltage_dependency_table {
	u8 count;
	struct radeon_vce_clock_voltage_dependency_entry *entries;
};

struct radeon_ppm_table {
	u8 ppm_design;
	u16 cpu_core_number;
	u32 platform_tdp;
	u32 small_ac_platform_tdp;
	u32 platform_tdc;
	u32 small_ac_platform_tdc;
	u32 apu_tdp;
	u32 dgpu_tdp;
	u32 dgpu_ulv_power;
	u32 tj_max;
};

struct radeon_cac_tdp_table {
	u16 tdp;
	u16 configurable_tdp;
	u16 tdc;
	u16 battery_power_limit;
	u16 small_power_limit;
	u16 low_cac_leakage;
	u16 high_cac_leakage;
	u16 maximum_power_delivery_limit;
};
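
/*
 * Illustrative helper (not part of the driver): looking up the voltage a
 * given engine clock requires in a radeon_clock_voltage_dependency_table.
 * Entries are assumed to be sorted by clk in ascending order; if no entry
 * covers the requested clock, the highest entry's voltage is returned.
 */
static inline u16 radeon_example_required_voltage(
		const struct radeon_clock_voltage_dependency_table *table,
		u32 clk)
{
	u32 i;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= clk)
			return table->entries[i].v;
	}
	return table->count ? table->entries[table->count - 1].v : 0;
}
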
struct radeon_dpm_dynamic_state {
	struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
	struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
	struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
	struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
	struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
	struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
	struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
	struct radeon_clock_array valid_sclk_values;
	struct radeon_clock_array valid_mclk_values;
	struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
	struct radeon_clock_and_voltage_limits max_clock_voltage_on_ac;
	u32 mclk_sclk_ratio;
	u32 sclk_mclk_delta;
	u16 vddc_vddci_delta;
	u16 min_vddc_for_pcie_gen2;
	struct radeon_cac_leakage_table cac_leakage_table;
	struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
	struct radeon_ppm_table *ppm_table;
	struct radeon_cac_tdp_table *cac_tdp_table;
};

struct radeon_dpm_fan {
	u16 t_min;
	u16 t_med;
	u16 t_high;
	u16 pwm_min;
	u16 pwm_med;
	u16 pwm_high;
	u8 t_hyst;
	u32 cycle_delay;
	u16 t_max;
	bool ucode_fan_control;
};

enum radeon_pcie_gen {
	RADEON_PCIE_GEN1 = 0,
	RADEON_PCIE_GEN2 = 1,
	RADEON_PCIE_GEN3 = 2,
	RADEON_PCIE_GEN_INVALID = 0xffff
};

enum radeon_dpm_forced_level {
	RADEON_DPM_FORCED_LEVEL_AUTO = 0,
	RADEON_DPM_FORCED_LEVEL_LOW = 1,
	RADEON_DPM_FORCED_LEVEL_HIGH = 2,
};

struct radeon_dpm {
	struct radeon_ps *ps;
	/* number of valid power states */
	int num_ps;
	/* current power state that is active */
	struct radeon_ps *current_ps;
	/* requested power state */
	struct radeon_ps *requested_ps;
	/* boot up power state */
	struct radeon_ps *boot_ps;
	/* default uvd power state */
	struct radeon_ps *uvd_ps;
	enum radeon_pm_state_type state;
	enum radeon_pm_state_type user_state;
	u32 platform_caps;
	u32 voltage_response_time;
	u32 backbias_response_time;
	void *priv;
	u32 new_active_crtcs;
	int new_active_crtc_count;
	u32 current_active_crtcs;
	int current_active_crtc_count;
	struct radeon_dpm_dynamic_state dyn_state;
	struct radeon_dpm_fan fan;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 near_tdp_limit_adjusted;
	u32 sq_ramping_threshold;
	u32 cac_leakage;
	u16 tdp_od_limit;
	u32 tdp_adjustment;
	u16 load_line_slope;
	bool power_control;
	bool ac_power;
	/* special states active */
	bool thermal_active;
	bool uvd_active;
	/* thermal handling */
	struct radeon_dpm_thermal thermal;
	/* forced levels */
	enum radeon_dpm_forced_level forced_level;
	/* track UVD streams */
	unsigned sd;
	unsigned hd;
};

void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);

struct radeon_pm {
	struct mutex mutex;
	/* write locked while reprogramming mclk */
	struct rw_semaphore mclk_lock;
	u32 active_crtcs;
	int active_crtc_count;
	int req_vblank;
	bool vblank_sync;
	fixed20_12 max_bandwidth;
	fixed20_12 igp_sideport_mclk;
	fixed20_12 igp_system_mclk;
	fixed20_12 igp_ht_link_clk;
	fixed20_12 igp_ht_link_width;
	fixed20_12 k8_bandwidth;
	fixed20_12 sideport_bandwidth;
	fixed20_12 ht_bandwidth;
	fixed20_12 core_bandwidth;
	fixed20_12 sclk;
	fixed20_12 mclk;
	fixed20_12 needed_bandwidth;
	struct radeon_power_state *power_state;
	/* number of valid power states */
	int num_power_states;
	int current_power_state_index;
	int current_clock_mode_index;
	int requested_power_state_index;
	int requested_clock_mode_index;
	int default_power_state_index;
	u32 current_sclk;
	u32 current_mclk;
	u16 current_vddc;
	u16 current_vddci;
	u32 default_sclk;
	u32 default_mclk;
	u16 default_vddc;
	u16 default_vddci;
	struct radeon_i2c_chan *i2c_bus;
	/* selected pm method */
	enum radeon_pm_method pm_method;
	/* dynpm power management */
	struct delayed_work dynpm_idle_work;
	enum radeon_dynpm_state dynpm_state;
	enum radeon_dynpm_action dynpm_planned_action;
	unsigned long dynpm_action_timeout;
	bool dynpm_can_upclock;
	bool dynpm_can_downclock;
	/* profile-based power management */
	enum radeon_pm_profile_type profile;
	int profile_index;
	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
	/* internal thermal controller on rv6xx+ */
	enum radeon_int_thermal_type int_thermal_type;
	struct device *int_hwmon_dev;
	/* dpm */
	bool dpm_enabled;
	struct radeon_dpm dpm;
};

int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance);
/*
 * UVD
 */
#define RADEON_MAX_UVD_HANDLES 10
#define RADEON_UVD_STACK_SIZE (1024*1024)
#define RADEON_UVD_HEAP_SIZE (1024*1024)

struct radeon_uvd {
	struct radeon_bo *vcpu_bo;
	void *cpu_addr;
	uint64_t gpu_addr;
	void *saved_bo;
	atomic_t handles[RADEON_MAX_UVD_HANDLES];
	struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
	unsigned img_size[RADEON_MAX_UVD_HANDLES];
	struct delayed_work idle_work;
};

int radeon_uvd_init(struct radeon_device *rdev);
void radeon_uvd_fini(struct radeon_device *rdev);
int radeon_uvd_suspend(struct radeon_device *rdev);
int radeon_uvd_resume(struct radeon_device *rdev);
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence);
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence);
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
void radeon_uvd_free_handles(struct radeon_device *rdev,
			     struct drm_file *filp);
int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
void radeon_uvd_note_usage(struct radeon_device *rdev);
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div);
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl);

struct r600_audio_pin {
	int channels;
	int rate;
	int bits_per_sample;
	u8 status_bits;
	u8 category_code;
	u32 offset;
	bool connected;
	u32 id;
};

struct r600_audio {
	bool enabled;
	struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
	int num_pins;
};

/*
 * Benchmarking
 */
void radeon_benchmark(struct radeon_device *rdev, int test_number);


/*
 * Testing
 */
void radeon_test_moves(struct radeon_device *rdev);
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *cpA,
			   struct radeon_ring *cpB);
void radeon_test_syncing(struct radeon_device *rdev);


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list *files;
	unsigned num_files;
};

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);

/*
 * ASIC ring specific functions.
 */
struct radeon_asic_ring {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
	u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
	void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);

	/* validating and patching of IBs */
	int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
	int (*cs_parse)(struct radeon_cs_parser *p);

	/* command emit functions */
	void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
	void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
	void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
			       struct radeon_semaphore *semaphore, bool emit_wait);
	void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);

	/* testing functions */
	int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
	int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
	bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);

	/* deprecated */
	void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
};
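
/*
 * Illustrative dispatch sketch (hypothetical caller): the per-ring hooks
 * above are normally reached through the asic pointer kept in struct
 * radeon_device, indexed by the ring number, e.g.:
 */
#if 0
	rdev->asic->ring[ring->idx]->emit_fence(rdev, fence);
#endif
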
/*
 * ASIC specific functions.
 */
struct radeon_asic {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	int (*resume)(struct radeon_device *rdev);
	int (*suspend)(struct radeon_device *rdev);
	void (*vga_set_state)(struct radeon_device *rdev, bool state);
	int (*asic_reset)(struct radeon_device *rdev);
	/* ioctl hw specific callback. Some hw might want to perform a special
	 * operation on a specific ioctl. For instance on wait idle some hw
	 * might want to perform an HDP flush through MMIO as it seems that
	 * some R6XX/R7XX hw doesn't take HDP flushes into account if programmed
	 * through the ring.
	 */
	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
	/* check if 3D engine is idle */
	bool (*gui_idle)(struct radeon_device *rdev);
	/* wait for mc_idle */
	int (*mc_wait_for_idle)(struct radeon_device *rdev);
	/* get the reference clock */
	u32 (*get_xclk)(struct radeon_device *rdev);
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
	/* gart */
	struct {
		void (*tlb_flush)(struct radeon_device *rdev);
		int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
	} gart;
	struct {
		int (*init)(struct radeon_device *rdev);
		void (*fini)(struct radeon_device *rdev);

		u32 pt_ring_index;
		void (*set_page)(struct radeon_device *rdev,
				 struct radeon_ib *ib,
				 uint64_t pe,
				 uint64_t addr, unsigned count,
				 uint32_t incr, uint32_t flags);
	} vm;
	/* ring specific callbacks */
	struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
	/* irqs */
	struct {
		int (*set)(struct radeon_device *rdev);
		int (*process)(struct radeon_device *rdev);
	} irq;
	/* displays */
	struct {
		/* display watermarks */
		void (*bandwidth_update)(struct radeon_device *rdev);
		/* get frame count */
		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
		/* wait for vblank */
		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
		/* set backlight level */
		void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
		/* get backlight level */
		u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
		/* audio callbacks */
		void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
		void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
	} display;
	/* copy functions for bo handling */
	struct {
		int (*blit)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		u32 blit_ring_index;
		int (*dma)(struct radeon_device *rdev,
			   uint64_t src_offset,
			   uint64_t dst_offset,
			   unsigned num_gpu_pages,
			   struct radeon_fence **fence);
		u32 dma_ring_index;
		/* method used for bo copy */
		int (*copy)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		/* ring used for bo copies */
		u32 copy_ring_index;
	} copy;
	/* surfaces */
	struct {
		int (*set_reg)(struct radeon_device *rdev, int reg,
			       uint32_t tiling_flags, uint32_t pitch,
			       uint32_t offset, uint32_t obj_size);
		void (*clear_reg)(struct radeon_device *rdev, int reg);
	} surface;
	/* hotplug detect */
	struct {
		void (*init)(struct radeon_device *rdev);
		void (*fini)(struct radeon_device *rdev);
		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
	} hpd;
	/* static power management */
	struct {
		void (*misc)(struct radeon_device *rdev);
		void (*prepare)(struct radeon_device *rdev);
		void (*finish)(struct radeon_device *rdev);
		void (*init_profile)(struct radeon_device *rdev);
		void (*get_dynpm_state)(struct radeon_device *rdev);
		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
		int (*get_pcie_lanes)(struct radeon_device *rdev);
		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
		int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
		int (*get_temperature)(struct radeon_device *rdev);
	} pm;
	/* dynamic power management */
	struct {
		int (*init)(struct radeon_device *rdev);
		void (*setup_asic)(struct radeon_device *rdev);
		int (*enable)(struct radeon_device *rdev);
		void (*disable)(struct radeon_device *rdev);
		int (*pre_set_power_state)(struct radeon_device *rdev);
		int (*set_power_state)(struct radeon_device *rdev);
		void (*post_set_power_state)(struct radeon_device *rdev);
		void (*display_configuration_changed)(struct radeon_device *rdev);
		void (*fini)(struct radeon_device *rdev);
		u32 (*get_sclk)(struct radeon_device *rdev, bool low);
		u32 (*get_mclk)(struct radeon_device *rdev, bool low);
		void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
		void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
		int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
		bool (*vblank_too_short)(struct radeon_device *rdev);
		void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
	} dpm;
	/* pageflipping */
	struct {
		void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
		u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
		void (*post_page_flip)(struct radeon_device *rdev, int crtc);
	} pflip;
};

/*
 * Asic structures
 */
struct r100_asic {
	const unsigned *reg_safe_bm;
	unsigned reg_safe_bm_size;
	u32 hdp_cntl;
};

struct r300_asic {
	const unsigned *reg_safe_bm;
	unsigned reg_safe_bm_size;
	u32 resync_scratch;
	u32 hdp_cntl;
};

struct r600_asic {
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
	unsigned tiling_nbanks;
	unsigned tiling_npipes;
	unsigned tiling_group_size;
	unsigned tile_config;
	unsigned backend_map;
};

struct rv770_asic {
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
	unsigned sx_num_of_sets;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_fize;
	unsigned tiling_nbanks;
	unsigned tiling_npipes;
	unsigned tiling_group_size;
	unsigned tile_config;
	unsigned backend_map;
};

struct evergreen_asic {
	unsigned num_ses;
	unsigned max_pipes;
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
	unsigned sx_num_of_sets;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;
	unsigned tiling_nbanks;
	unsigned tiling_npipes;
	unsigned tiling_group_size;
	unsigned tile_config;
	unsigned backend_map;
};

struct cayman_asic {
	unsigned max_shader_engines;
	unsigned max_pipes_per_simd;
	unsigned max_tile_pipes;
	unsigned max_simds_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_gs_threads;
	unsigned max_stack_entries;
	unsigned sx_num_of_sets;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned max_hw_contexts;
	unsigned sq_num_cf_insts;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_shader_engines;
	unsigned num_shader_pipes_per_simd;
	unsigned num_tile_pipes;
	unsigned num_simds_per_se;
	unsigned num_backends_per_se;
	unsigned backend_disable_mask_per_asic;
	unsigned backend_map;
	unsigned num_texture_channel_caches;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;

	unsigned tile_config;
};

struct si_asic {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned num_backends_per_se;
	unsigned backend_disable_mask_per_asic;
	unsigned backend_map;
	unsigned num_texture_channel_caches;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;

	unsigned tile_config;
	uint32_t tile_mode_array[32];
};

struct cik_asic {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned num_backends_per_se;
	unsigned backend_disable_mask_per_asic;
	unsigned backend_map;
	unsigned num_texture_channel_caches;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;

	unsigned tile_config;
	uint32_t tile_mode_array[32];
};

union radeon_asic_config {
	struct r300_asic r300;
	struct r100_asic r100;
	struct r600_asic r600;
	struct rv770_asic rv770;
	struct evergreen_asic evergreen;
	struct cayman_asic cayman;
	struct si_asic si;
	struct cik_asic cik;
};
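/*
 * Only the member of radeon_asic_config that matches the detected family is
 * normally meaningful.  Purely illustrative sketch (the field picked here is
 * an example, not a statement about which fields the driver reads where):
 * evergreen-era code would look at its own view of the union, e.g.
 *
 *	unsigned tile_cfg = rdev->config.evergreen.tile_config;
 *
 * while r600-era code would consult rdev->config.r600 instead.
 */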
/*
 * asic initialization from radeon_asic.c
 */
void radeon_agp_disable(struct radeon_device *rdev);
int radeon_asic_init(struct radeon_device *rdev);


/*
 * IOCTL.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct r600_vram_scratch {
	struct radeon_bo *robj;
	volatile uint32_t *ptr;
	u64 gpu_addr;
};

/*
 * ACPI
 */
struct radeon_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct radeon_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct radeon_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct radeon_atif {
	struct radeon_atif_notifications notifications;
	struct radeon_atif_functions functions;
	struct radeon_atif_notification_cfg notification_cfg;
	struct radeon_encoder *encoder_for_bl;
};

struct radeon_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct radeon_atcs {
	struct radeon_atcs_functions functions;
};
/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);

struct radeon_device {
	struct device *dev;
	struct drm_device *ddev;
	struct pci_dev *pdev;
	struct rw_semaphore exclusive_lock;
	/* ASIC */
	union radeon_asic_config config;
	enum radeon_family family;
	unsigned long flags;
	int usec_timeout;
	enum radeon_pll_errata pll_errata;
	int num_gb_pipes;
	int num_z_pipes;
	int disp_priority;
	/* BIOS */
	uint8_t *bios;
	bool is_atom_bios;
	uint16_t bios_header_start;
	struct radeon_bo *stollen_vga_memory;
	/* Register mmio */
	resource_size_t rmmio_base;
	resource_size_t rmmio_size;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t mmio_idx_lock;
	void __iomem *rmmio;
	radeon_rreg_t mc_rreg;
	radeon_wreg_t mc_wreg;
	radeon_rreg_t pll_rreg;
	radeon_wreg_t pll_wreg;
	uint32_t pcie_reg_mask;
	radeon_rreg_t pciep_rreg;
	radeon_wreg_t pciep_wreg;
	/* io port */
	void __iomem *rio_mem;
	resource_size_t rio_mem_size;
	struct radeon_clock clock;
	struct radeon_mc mc;
	struct radeon_gart gart;
	struct radeon_mode_info mode_info;
	struct radeon_scratch scratch;
	struct radeon_doorbell doorbell;
	struct radeon_mman mman;
	struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
	wait_queue_head_t fence_queue;
	struct mutex ring_lock;
	struct radeon_ring ring[RADEON_NUM_RINGS];
	bool ib_pool_ready;
	struct radeon_sa_manager ring_tmp_bo;
	struct radeon_irq irq;
	struct radeon_asic *asic;
	struct radeon_gem gem;
	struct radeon_pm pm;
	struct radeon_uvd uvd;
	uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
	struct radeon_wb wb;
	struct radeon_dummy_page dummy_page;
	bool shutdown;
	bool suspend;
	bool need_dma32;
	bool accel_working;
	bool fastfb_working; /* IGP feature */
	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
	const struct firmware *me_fw;	/* all family ME firmware */
	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
	const struct firmware *mc_fw;	/* NI MC firmware */
	const struct firmware *ce_fw;	/* SI CE firmware */
	const struct firmware *mec_fw;	/* CIK MEC firmware */
	const struct firmware *sdma_fw;	/* CIK SDMA firmware */
	const struct firmware *smc_fw;	/* SMC firmware */
	const struct firmware *uvd_fw;	/* UVD firmware */
	struct r600_vram_scratch vram_scratch;
	int msi_enabled; /* msi enabled */
	struct r600_ih ih; /* r6/700 interrupt ring */
	struct radeon_rlc rlc;
	struct radeon_mec mec;
	struct work_struct hotplug_work;
	struct work_struct audio_work;
	struct work_struct reset_work;
	int num_crtc; /* number of crtcs */
	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
	bool has_uvd;
	struct r600_audio audio; /* audio stuff */
	struct notifier_block acpi_nb;
	/* only one userspace can use Hyperz features or CMASK at a time */
	struct drm_file *hyperz_filp;
	struct drm_file *cmask_filp;
	/* i2c buses */
	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
	/* debugfs */
	struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
	unsigned debugfs_count;
	/* virtual memory */
	struct radeon_vm_manager vm_manager;
	struct mutex gpu_clock_mutex;
	/* ACPI interface */
	struct radeon_atif atif;
	struct radeon_atcs atcs;
	/* srbm instance registers */
	struct mutex srbm_mutex;
	/* clock, powergating flags */
	u32 cg_flags;
	u32 pg_flags;
};

int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);

uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
		      bool always_indirect);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
		  bool always_indirect);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);

u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset);
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);

/*
 * Cast helper
 */
#define to_radeon_fence(p) ((struct radeon_fence *)(p))

/*
 * Registers read & write functions.
 */
#define RREG8(reg) readb((rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
#define RREG16(reg) readw((rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define RREG32_SMC(reg) tn_smc_rreg(rdev, (reg))
#define WREG32_SMC(reg, v) tn_smc_wreg(rdev, (reg), (v))
#define RREG32_RCU(reg) r600_rcu_rreg(rdev, (reg))
#define WREG32_RCU(reg, v) r600_rcu_wreg(rdev, (reg), (v))
#define RREG32_CG(reg) eg_cg_rreg(rdev, (reg))
#define WREG32_CG(reg, v) eg_cg_wreg(rdev, (reg), (v))
#define RREG32_PIF_PHY0(reg) eg_pif_phy0_rreg(rdev, (reg))
#define WREG32_PIF_PHY0(reg, v) eg_pif_phy0_wreg(rdev, (reg), (v))
#define RREG32_PIF_PHY1(reg) eg_pif_phy1_rreg(rdev, (reg))
#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
#define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
#define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask)		\
	do {					\
		uint32_t tmp_ = RREG32(reg);	\
		tmp_ &= (mask);			\
		tmp_ |= ((val) & ~(mask));	\
		WREG32(reg, tmp_);		\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
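/*
 * Usage sketch (illustrative only): the third argument of WREG32_P names the
 * bits to PRESERVE, so replacing a single field of a register without
 * disturbing the rest could look like
 *
 *	WREG32_P(reg, REG_SET(SOME_FIELD, val), ~SOME_FIELD_MASK);
 *
 * where SOME_FIELD/SOME_FIELD_MASK are placeholders for a real field with
 * matching _SHIFT/_MASK definitions, not names from this header.
 * WREG32_OR(reg, bits) sets the given bits, and WREG32_AND(reg, mask) keeps
 * only the bits inside the mask.
 */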
#define WREG32_PLL_P(reg, val, mask)		\
	do {					\
		uint32_t tmp_ = RREG32_PLL(reg);	\
		tmp_ &= (mask);			\
		tmp_ |= ((val) & ~(mask));	\
		WREG32_PLL(reg, tmp_);		\
	} while (0)
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))

#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset))
#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v))

/*
 * Indirect registers accessor
 */
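/*
 * The helpers below all follow the same index/data pattern: write the
 * register offset into an INDEX register, then read or write the paired
 * DATA register.  Illustrative equivalence (no new behaviour):
 *
 *	val = RREG32_SMC(off);
 * behaves like
 *	WREG32(TN_SMC_IND_INDEX_0, off);
 *	val = RREG32(TN_SMC_IND_DATA_0);
 *
 * Each accessor differs only in which index/data pair it targets and in how
 * it masks the offset.
 */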
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	r = RREG32(RADEON_PCIE_DATA);
	return r;
}

static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
}

static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(TN_SMC_IND_INDEX_0, (reg));
	r = RREG32(TN_SMC_IND_DATA_0);
	return r;
}

static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(TN_SMC_IND_INDEX_0, (reg));
	WREG32(TN_SMC_IND_DATA_0, (v));
}

static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	r = RREG32(R600_RCU_DATA);
	return r;
}

static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	WREG32(R600_RCU_DATA, (v));
}

static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
	r = RREG32(EVERGREEN_CG_IND_DATA);
	return r;
}

static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
	WREG32(EVERGREEN_CG_IND_DATA, (v));
}

static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
	r = RREG32(EVERGREEN_PIF_PHY0_DATA);
	return r;
}

static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
}

static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
	r = RREG32(EVERGREEN_PIF_PHY1_DATA);
	return r;
}

static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
}

static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(R600_UVD_CTX_DATA);
	return r;
}

static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(R600_UVD_CTX_DATA, (v));
}

static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(CIK_DIDT_IND_INDEX, (reg));
	r = RREG32(CIK_DIDT_IND_DATA);
	return r;
}

static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(CIK_DIDT_IND_INDEX, (reg));
	WREG32(CIK_DIDT_IND_DATA, (v));
}

void r100_pll_errata_after_index(struct radeon_device *rdev);


/*
 * ASICs helpers.
 */
#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
			    (rdev->pdev->device == 0x5969))
#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
			     (rdev->family == CHIP_RV200) || \
			     (rdev->family == CHIP_RS100) || \
			     (rdev->family == CHIP_RS200) || \
			     (rdev->family == CHIP_RV250) || \
			     (rdev->family == CHIP_RV280) || \
			     (rdev->family == CHIP_RS300))
#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
			    (rdev->family == CHIP_RV350) || \
			    (rdev->family == CHIP_R350) || \
			    (rdev->family == CHIP_RV380) || \
			    (rdev->family == CHIP_R420) || \
			    (rdev->family == CHIP_R423) || \
			    (rdev->family == CHIP_RV410) || \
			    (rdev->family == CHIP_RS400) || \
			    (rdev->family == CHIP_RS480))
#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
			  (rdev->ddev->pdev->device == 0x9443) || \
			  (rdev->ddev->pdev->device == 0x944B) || \
			  (rdev->ddev->pdev->device == 0x9506) || \
			  (rdev->ddev->pdev->device == 0x9509) || \
			  (rdev->ddev->pdev->device == 0x950F) || \
			  (rdev->ddev->pdev->device == 0x689C) || \
			  (rdev->ddev->pdev->device == 0x689D))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
			    (rdev->family == CHIP_RS690) || \
			    (rdev->family == CHIP_RS740) || \
			    (rdev->family >= CHIP_R600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
			     (rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
			     (rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))

#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
			      (rdev->ddev->pdev->device == 0x6850) || \
			      (rdev->ddev->pdev->device == 0x6858) || \
			      (rdev->ddev->pdev->device == 0x6859) || \
			      (rdev->ddev->pdev->device == 0x6840) || \
			      (rdev->ddev->pdev->device == 0x6841) || \
			      (rdev->ddev->pdev->device == 0x6842) || \
			      (rdev->ddev->pdev->device == 0x6843))
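/*
 * These helpers are intended for feature checks in code shared across
 * families.  Illustrative example only (the branch bodies are placeholders):
 *
 *	if (ASIC_IS_DCE4(rdev)) {
 *		(evergreen-style display path)
 *	} else if (ASIC_IS_AVIVO(rdev)) {
 *		(r5xx/r6xx-style display path)
 *	}
 *
 * Note that the DCE checks are cumulative (>= family comparisons), so
 * ASIC_IS_DCE4() is also true on DCE5+ parts and more specific checks
 * should come first.
 */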
/*
 * BIOS helpers.
 */
#define RBIOS8(i) (rdev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

int radeon_combios_init(struct radeon_device *rdev);
void radeon_combios_fini(struct radeon_device *rdev);
int radeon_atombios_init(struct radeon_device *rdev);
void radeon_atombios_fini(struct radeon_device *rdev);


/*
 * RING helpers.
 */
#if DRM_DEBUG_CODE == 0
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
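/*
 * radeon_ring_write() only stores one dword and advances the write pointer;
 * reserving space and committing are done by the ring helpers declared
 * elsewhere in this header.  Minimal, illustrative sketch (SOME_REG, the
 * dword count and PACKET0() are placeholders; PACKET0 comes from the
 * ASIC-specific headers, not from this file):
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(SOME_REG, 0));
 *	radeon_ring_write(ring, some_value);
 *	radeon_ring_unlock_commit(rdev, ring);
 */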
/*
 * ASICs macro.
 */
#define radeon_init(rdev) (rdev)->asic->init((rdev))
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
#define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
#define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
#define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
#define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
#define radeon_dpm_post_set_power_state(rdev) rdev->asic->dpm.post_set_power_state((rdev))
#define radeon_dpm_display_configuration_changed(rdev) rdev->asic->dpm.display_configuration_changed((rdev))
#define radeon_dpm_fini(rdev) rdev->asic->dpm.fini((rdev))
#define radeon_dpm_get_sclk(rdev, l) rdev->asic->dpm.get_sclk((rdev), (l))
#define radeon_dpm_get_mclk(rdev, l) rdev->asic->dpm.get_mclk((rdev), (l))
#define radeon_dpm_print_power_state(rdev, ps) rdev->asic->dpm.print_power_state((rdev), (ps))
#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
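/*
 * The macros above are thin wrappers that dispatch through the per-ASIC
 * function-pointer table set up by radeon_asic_init().  Purely as an
 * illustration of the expansion (no new behaviour):
 *
 *	radeon_asic_reset(rdev)
 * expands to
 *	(rdev)->asic->asic_reset((rdev))
 *
 * so the same call site works on every supported family once rdev->asic
 * points at the right table.
 */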
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_wb_fini(struct radeon_device *rdev);
extern int radeon_wb_init(struct radeon_device *rdev);
extern void radeon_wb_disable(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
extern void radeon_program_register_sequence(struct radeon_device *rdev,
					     const u32 *registers,
					     const u32 array_size);

/*
 * vm
 */
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring);
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_vm *vm,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo);
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t offset,
			  uint32_t flags);
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_bo_va *bo_va);

/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);

/*
 * R600 vram scratch functions
 */
int r600_vram_scratch_init(struct radeon_device *rdev);
void r600_vram_scratch_fini(struct radeon_device *rdev);

/*
 * r600 cs checking helper
 */
unsigned r600_mip_minify(unsigned size, unsigned level);
bool r600_fmt_is_valid_color(u32 format);
bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
int r600_fmt_get_blocksize(u32 format);
int r600_fmt_get_nblocksx(u32 format, u32 w);
int r600_fmt_get_nblocksy(u32 format, u32 h);

/*
 * r600 functions used by radeon_encoder.c
 */
struct radeon_hdmi_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;

};

extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);

extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
				     u32 tiling_pipe_num,
				     u32 max_rb_num,
				     u32 total_max_rb_num,
				     u32 enabled_rb_mask);

/*
 * evergreen functions used by radeon_encoder.c
 */

extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);

/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
extern void radeon_acpi_fini(struct radeon_device *rdev);
extern bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev);
extern int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
						u8 perf_req, bool advertise);
extern int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev);
#else
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif

int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx);
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt);
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_cs_reloc **cs_reloc,
				int nomm);
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
			       uint32_t *vline_start_end,
			       uint32_t *vline_status);

#include "radeon_object.h"

#endif