/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
#include "amdgpu_acp.h"

#include "gpu_scheduler.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
#define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE			16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
#define AMDGPUFB_CONN_LIMIT			4
#define AMDGPU_BIOS_NUM_SCRATCH			8

/* max number of rings */
#define AMDGPU_MAX_RINGS			16
#define AMDGPU_MAX_GFX_RINGS			1
#define AMDGPU_MAX_COMPUTE_RINGS		8
#define AMDGPU_MAX_VCE_RINGS			2

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES		2

/* hard-code this limit for now */
#define AMDGPU_VA_RESERVED_SIZE			(8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA                  0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX			(1 << 0)
#define AMDGPU_RESET_COMPUTE			(1 << 1)
#define AMDGPU_RESET_DMA			(1 << 2)
#define AMDGPU_RESET_CP				(1 << 3)
#define AMDGPU_RESET_GRBM			(1 << 4)
#define AMDGPU_RESET_DMA1			(1 << 5)
#define AMDGPU_RESET_RLC			(1 << 6)
#define AMDGPU_RESET_SEM			(1 << 7)
#define AMDGPU_RESET_IH				(1 << 8)
#define AMDGPU_RESET_VMC			(1 << 9)
#define AMDGPU_RESET_MC				(1 << 10)
#define AMDGPU_RESET_DISPLAY			(1 << 11)
#define AMDGPU_RESET_UVD			(1 << 12)
#define AMDGPU_RESET_VCE			(1 << 13)
#define AMDGPU_RESET_VCE1			(1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE			0x00000000L
#define AMDGPU_GFX_SAFE_MODE			0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE		0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE		0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_powergating_state state);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type);
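
/*
 * Usage sketch (illustrative only): look up an IP block and check that
 * it is at least a given version.  This assumes an initialized device
 * "adev" and that a return of 0 from the _cmp helper means the block
 * meets the requested major/minor:
 *
 *	const struct amdgpu_ip_block_version *ip =
 *		amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *
 *	if (ip && amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					      8, 0) == 0)
 *		handle_gfx_v8();	... hypothetical helper ...
 */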

/* provided by hw blocks that can move/clear data, e.g. gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t	copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t	fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  const dma_addr_t *pages_addr, uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct amdgpu_ring *ring);
	u32 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page	*page;
	dma_addr_t	addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);


/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;
	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	unsigned			num_fences_mask;
	spinlock_t			lock;
	struct fence			**fences;
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)

#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
#define AMDGPU_FENCE_FLAG_INT           (1 << 1)

struct amdgpu_user_fence {
	/* write-back bo */
	struct amdgpu_bo	*bo;
	/* write-back address offset to bo start */
	uint32_t                offset;
};

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
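
/*
 * Typical emission pattern (sketch; error handling trimmed).  The
 * caller is expected to honour the ring emission locking that protects
 * sync_seq, and fence_wait()/fence_put() come from <linux/fence.h>:
 *
 *	struct fence *fence;
 *
 *	r = amdgpu_fence_emit(ring, &fence);
 *	if (!r) {
 *		fence_wait(fence, false);
 *		fence_put(fence);
 *	}
 */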

/*
 * TTM.
 */
struct amdgpu_mman {
	struct ttm_bo_global_ref        bo_global_ref;
	struct drm_global_reference	mem_global_ref;
	struct ttm_bo_device		bdev;
	bool				mem_global_referenced;
	bool				initialized;

#if defined(CONFIG_DEBUG_FS)
	struct dentry			*vram;
	struct dentry			*gtt;
#endif

	/* buffer handling */
	const struct amdgpu_buffer_funcs	*buffer_funcs;
	struct amdgpu_ring			*buffer_funcs_ring;
	/* Scheduler entity for buffer moves */
	struct amd_sched_entity			entity;
};

int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct fence **fence);
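
/*
 * Minimal copy sketch using the ring that owns the buffer funcs.  The
 * GPU addresses and "resv" reservation object are placeholders supplied
 * by the caller:
 *
 *	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 *	struct fence *fence = NULL;
 *
 *	r = amdgpu_copy_buffer(ring, src_gpu_addr, dst_gpu_addr,
 *			       byte_count, resv, &fence);
 *	if (!r)
 *		fence_put(fence);
 */
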
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);

struct amdgpu_bo_list_entry {
	struct amdgpu_bo		*robj;
	struct ttm_validate_buffer	tv;
	struct amdgpu_bo_va		*bo_va;
	uint32_t			priority;
	struct page			**user_pages;
	int				user_invalidated;
};

struct amdgpu_bo_va_mapping {
	struct list_head		list;
	struct interval_tree_node	it;
	uint64_t			offset;
	uint32_t			flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	/* protected by bo being reserved */
	struct list_head		bo_list;
	struct fence		        *last_pt_update;
	unsigned			ref_count;

	/* protected by vm mutex and spinlock */
	struct list_head		vm_status;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;
};

#define AMDGPU_GEM_DOMAIN_MAX		0x3

struct amdgpu_bo {
	/* Protected by gem.mutex */
	struct list_head		list;
	/* Protected by tbo.reserved */
	u32				prefered_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	void				*kptr;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
	struct list_head		va;
	/* Constant after initialization */
	struct amdgpu_device		*adev;
	struct drm_gem_object		gem_base;
	struct amdgpu_bo		*parent;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;
	struct list_head		mn_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
				struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
				struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, such as
 * the indirect buffers and semaphores, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in
 * offset order (the first entry has offset == 0, the last entry has
 * the highest offset).
 *
 * When allocating a new object we first check whether there is room
 * at the end, i.e. total_size - (last_object_offset + last_object_size)
 * >= alloc_size.  If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for each
 * sub-object until object_offset + object_size >= alloc_size; that
 * object then becomes the sub-object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation, to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
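
/*
 * Worked example of the end-of-buffer check above: with total_size =
 * 64KB and the last object at offset 48KB with size 8KB, a 4KB request
 * fits at the end because 64KB - (48KB + 8KB) = 8KB >= 4KB.
 */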

#define AMDGPU_SA_NUM_FENCE_LISTS	32

struct amdgpu_sa_manager {
	wait_queue_head_t	wq;
	struct amdgpu_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
	uint32_t		align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct amdgpu_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct fence		        *fence;
};

/*
 * GEM objects.
 */
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
				int alignment, u32 initial_domain,
				u64 flags, bool kernel,
				struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
/*
 * Synchronization
 */
struct amdgpu_sync {
	DECLARE_HASHTABLE(fences, 4);
	struct fence	        *last_vm_update;
};

void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
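
/*
 * Usual lifecycle (sketch; "owner" is typically one of the
 * AMDGPU_FENCE_OWNER_* values or a per-client pointer):
 *
 *	struct amdgpu_sync sync;
 *
 *	amdgpu_sync_create(&sync);
 *	amdgpu_sync_resv(adev, &sync, resv, AMDGPU_FENCE_OWNER_UNDEFINED);
 *	amdgpu_sync_wait(&sync);
 *	amdgpu_sync_free(&sync);
 */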

/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
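
/*
 * Example: AMDGPU_GPU_PAGE_ALIGN(5000) = (5000 + 4095) & ~4095 = 8192,
 * i.e. sizes are rounded up to the next 4KB GPU page boundary.
 */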

struct amdgpu_gart {
	dma_addr_t			table_addr;
	struct amdgpu_bo		*robj;
	void				*ptr;
	unsigned			num_gpu_pages;
	unsigned			num_cpu_pages;
	unsigned			table_size;
	struct page			**pages;
	dma_addr_t			*pages_addr;
	bool				ready;
	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);
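
/*
 * Binding sketch (an assumption, not a fixed recipe): the pages must
 * already be pinned and DMA-mapped by the caller, and the flags are
 * built from the AMDGPU_PTE_* bits defined further below:
 *
 *	r = amdgpu_gart_bind(adev, offset, npages, pages, dma_addrs,
 *			     AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */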

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	resource_size_t		agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	u64			gtt_size;
	u64			gtt_start;
	u64			gtt_end;
	u64			vram_start;
	u64			vram_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	u64                     gtt_base_align;
	u64                     mc_mask;
	const struct firmware   *fw;	/* MC firmware */
	uint32_t                fw_version;
	struct amdgpu_irq_src	vm_fault;
	uint32_t		vram_type;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ                     = 0x000,
	AMDGPU_DOORBELL_HIQ                     = 0x001,
	AMDGPU_DOORBELL_DIQ                     = 0x002,
	AMDGPU_DOORBELL_MEC_RING0               = 0x010,
	AMDGPU_DOORBELL_MEC_RING1               = 0x011,
	AMDGPU_DOORBELL_MEC_RING2               = 0x012,
	AMDGPU_DOORBELL_MEC_RING3               = 0x013,
	AMDGPU_DOORBELL_MEC_RING4               = 0x014,
	AMDGPU_DOORBELL_MEC_RING5               = 0x015,
	AMDGPU_DOORBELL_MEC_RING6               = 0x016,
	AMDGPU_DOORBELL_MEC_RING7               = 0x017,
	AMDGPU_DOORBELL_GFX_RING0               = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0            = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1            = 0x1E1,
	AMDGPU_DOORBELL_IH                      = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT          = 0x3FF,
	AMDGPU_DOORBELL_INVALID                 = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t		base;
	resource_size_t		size;
	u32 __iomem		*ptr;
	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				phys_addr_t *aperture_base,
				size_t *aperture_size,
				size_t *start_offset);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct work_struct		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	uint64_t			base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo		*old_rbo;
	struct fence			*excl;
	unsigned			shared_count;
	struct fence			**shared;
	struct fence_cb			cb;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo		*sa_bo;
	uint32_t			length_dw;
	uint64_t			gpu_addr;
	uint32_t			*ptr;
	struct amdgpu_user_fence        *user;
	struct amdgpu_vm		*vm;
	unsigned			vm_id;
	uint64_t			vm_pd_addr;
	struct amdgpu_ctx		*ctx;
	uint32_t			gds_base, gds_size;
	uint32_t			gws_base, gws_size;
	uint32_t			oa_base, oa_size;
	uint32_t			flags;
	/* resulting sequence number */
	uint64_t			sequence;
};

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE
};

extern struct amd_sched_backend_ops amdgpu_sched_ops;

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct fence **f);
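
/*
 * Rough submission flow (sketch; the scheduler entity and owner come
 * from the caller's context, and error unwinding is omitted):
 *
 *	struct amdgpu_job *job;
 *	struct fence *fence;
 *
 *	r = amdgpu_job_alloc_with_ib(adev, ib_size, &job);
 *	if (!r) {
 *		... fill job->ibs[0] with packets ...
 *		r = amdgpu_job_submit(job, ring, &entity, owner, &fence);
 *	}
 */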

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct amd_gpu_scheduler	sched;

	spinlock_t              fence_lock;
	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			next_rptr_gpu_addr;
	volatile u32		*next_rptr_cpu_addr;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		ring_size;
	unsigned		max_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint32_t		align_mask;
	uint32_t		ptr_mask;
	bool			ready;
	u32			nop;
	u32			idx;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	u32			doorbell_index;
	bool			use_doorbell;
	unsigned		wptr_offs;
	unsigned		next_rptr_offs;
	unsigned		fence_offs;
	struct amdgpu_ctx	*current_ctx;
	enum amdgpu_ring_type	type;
	char			name[16];
};

/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)

#define AMDGPU_PTE_VALID	(1 << 0)
#define AMDGPU_PTE_SYSTEM	(1 << 1)
#define AMDGPU_PTE_SNOOPED	(1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1 << 4)

#define AMDGPU_PTE_READABLE	(1 << 5)
#define AMDGPU_PTE_WRITEABLE	(1 << 6)

/* PTE (Page Table Entry) fragment field for different page sizes */
#define AMDGPU_PTE_FRAG_4KB	(0 << 7)
#define AMDGPU_PTE_FRAG_64KB	(4 << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
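
/*
 * Illustrative flag combination for a CPU-backed, read/write mapping
 * using 4KB fragments:
 *
 *	uint32_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 *			 AMDGPU_PTE_FRAG_4KB;
 */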

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

struct amdgpu_vm_pt {
	struct amdgpu_bo_list_entry	entry;
	uint64_t			addr;
};

struct amdgpu_vm_id {
	struct amdgpu_vm_manager_id	*mgr_id;
	uint64_t			pd_gpu_addr;
	/* last flushed PD/PT update */
	struct fence			*flushed_updates;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root		va;

	/* protecting invalidated */
	spinlock_t		status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BOs cleared in the PT because of a move */
	struct list_head	cleared;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_bo	*page_directory;
	unsigned		max_pde_used;
	struct fence		*page_directory_fence;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt	*page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];

	/* protecting freed */
	spinlock_t		freed_lock;

	/* Scheduler entity for page table updates */
	struct amd_sched_entity	entity;
};

struct amdgpu_vm_manager_id {
	struct list_head	list;
	struct fence		*active;
	atomic_long_t		owner;

	uint32_t		gds_base;
	uint32_t		gds_size;
	uint32_t		gws_base;
	uint32_t		gws_size;
	uint32_t		oa_base;
	uint32_t		oa_size;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct mutex				lock;
	unsigned				num_ids;
	struct list_head			ids_lru;
	struct amdgpu_vm_manager_id		ids[AMDGPU_NUM_VM];

	uint32_t				max_pfn;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* is vm enabled? */
	bool					enabled;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rings;
	atomic_t				vm_pte_next_ring;
};

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      unsigned *vm_id, uint64_t *vm_pd_addr);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     unsigned vm_id, uint64_t pd_addr,
		     uint32_t gds_base, uint32_t gds_size,
		     uint32_t gws_base, uint32_t gws_size,
		     uint32_t oa_base, uint32_t oa_size);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
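
/*
 * Mapping sketch: a BO is first tracked in the VM with a bo_va, then
 * given a virtual address range (values are placeholders):
 *
 *	struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, va_address, 0, bo_size,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */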

/*
 * context related structures
 */

struct amdgpu_ctx_ring {
	uint64_t		sequence;
	struct fence		**fences;
	struct amd_sched_entity	entity;
};

struct amdgpu_ctx {
	struct kref		refcount;
	struct amdgpu_device    *adev;
	unsigned		reset_counter;
	spinlock_t		ring_lock;
	struct fence            **fences;
	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device	*adev;
	struct mutex		lock;
	/* protected by lock */
	struct idr		ctx_handles;
};

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
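
/*
 * Lookups take a reference on the kref-counted context that must be
 * dropped again (sketch; "id" usually comes from userspace):
 *
 *	struct amdgpu_ctx *ctx = amdgpu_ctx_get(fpriv, id);
 *
 *	if (ctx) {
 *		... use ctx->rings[ring->idx] ...
 *		amdgpu_ctx_put(ctx);
 *	}
 */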

/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm	vm;
	struct mutex		bo_list_lock;
	struct idr		bo_list_handles;
	struct amdgpu_ctx_mgr	ctx_mgr;
};

/*
 * residency list
 */

struct amdgpu_bo_list {
	struct mutex lock;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	unsigned first_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo	*save_restore_obj;
	uint64_t		save_restore_gpu_addr;
	volatile uint32_t	*sr_ptr;
	const u32               *reg_list;
	u32                     reg_list_size;
	/* for clear state */
	struct amdgpu_bo	*clear_state_obj;
	uint64_t		clear_state_gpu_addr;
	volatile uint32_t	*cs_ptr;
	const struct cs_section_def   *cs_data;
	u32                     clear_state_size;
	/* for cp tables */
	struct amdgpu_bo	*cp_table_obj;
	uint64_t		cp_table_gpu_addr;
	volatile uint32_t	*cp_table_ptr;
	u32                     cp_table_size;
};

struct amdgpu_mec {
	struct amdgpu_bo	*hpd_eop_obj;
	u64			hpd_eop_gpu_addr;
	u32 num_pipe;
	u32 num_mec;
	u32 num_queue;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned		num_reg;
	uint32_t                reg_base;
	bool			free[32];
	uint32_t		reg[32];
};

/*
 * GFX configurations
 */
struct amdgpu_gca_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;
	unsigned num_rbs;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];
};

struct amdgpu_gfx {
	struct mutex			gpu_clock_mutex;
	struct amdgpu_gca_config	config;
	struct amdgpu_rlc		rlc;
	struct amdgpu_mec		mec;
	struct amdgpu_scratch		scratch;
	const struct firmware		*me_fw;	/* ME firmware */
	uint32_t			me_fw_version;
	const struct firmware		*pfp_fw; /* PFP firmware */
	uint32_t			pfp_fw_version;
	const struct firmware		*ce_fw;	/* CE firmware */
	uint32_t			ce_fw_version;
	const struct firmware		*rlc_fw; /* RLC firmware */
	uint32_t			rlc_fw_version;
	const struct firmware		*mec_fw; /* MEC firmware */
	uint32_t			mec_fw_version;
	const struct firmware		*mec2_fw; /* MEC2 firmware */
	uint32_t			mec2_fw_version;
	uint32_t			me_feature_version;
	uint32_t			ce_feature_version;
	uint32_t			pfp_feature_version;
	uint32_t			rlc_feature_version;
	uint32_t			mec_feature_version;
	uint32_t			mec2_feature_version;
	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned			num_gfx_rings;
	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned			num_compute_rings;
	struct amdgpu_irq_src		eop_irq;
	struct amdgpu_irq_src		priv_reg_irq;
	struct amdgpu_irq_src		priv_inst_irq;
	/* gfx status */
	uint32_t gfx_current_status;
	/* ce ram size */
	unsigned ce_ram_size;
};

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ib, struct fence *last_vm_update,
		       struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data);
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
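
/*
 * Direct submission follows an alloc/commit (or undo on error) pattern
 * (sketch; "ndw" is the number of dwords the caller will emit):
 *
 *	r = amdgpu_ring_alloc(ring, ndw);
 *	if (!r) {
 *		... emit packets via ring->funcs ...
 *		amdgpu_ring_commit(ring);
 *	}
 */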

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t		chunk_id;
	uint32_t		length_dw;
	uint32_t		*kdata;
};

struct amdgpu_cs_parser {
	struct amdgpu_device	*adev;
	struct drm_file		*filp;
	struct amdgpu_ctx	*ctx;

	/* chunks */
	unsigned		nchunks;
	struct amdgpu_cs_chunk	*chunks;

	/* scheduler job object */
	struct amdgpu_job	*job;

	/* buffer objects */
	struct ww_acquire_ctx		ticket;
	struct amdgpu_bo_list		*bo_list;
	struct amdgpu_bo_list_entry	vm_pd;
	struct list_head		validated;
	struct fence			*fence;
	uint64_t			bytes_moved_threshold;
	uint64_t			bytes_moved;

	/* user fence */
	struct amdgpu_bo_list_entry	uf_entry;
};

struct amdgpu_job {
	struct amd_sched_job    base;
	struct amdgpu_device	*adev;
	struct amdgpu_ring	*ring;
	struct amdgpu_sync	sync;
	struct amdgpu_ib	*ibs;
	struct fence		*fence; /* the hw fence */
	uint32_t		num_ibs;
	void			*owner;
	struct amdgpu_user_fence uf;
};
#define to_amdgpu_job(sched_job)		\
		container_of((sched_job), struct amdgpu_job, base)

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
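
/*
 * Writeback slots are 32-bit words; a sketch of claiming one, assuming
 * the device embeds this structure as adev->wb and slots are 4 bytes
 * apart:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;
 *		volatile u32 *cpu_ptr = &adev->wb.wb[wb];
 *		...
 *		amdgpu_wb_free(adev, wb);
 *	}
 */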


enum amdgpu_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_EXTERNAL,
	THERMAL_TYPE_EXTERNAL_GPIO,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
	THERMAL_TYPE_CI,
	THERMAL_TYPE_KV,
};

enum amdgpu_dpm_auto_throttle_src {
	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};

enum amdgpu_dpm_event_src {
	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};

#define AMDGPU_MAX_VCE_LEVELS 6

enum amdgpu_vce_level {
	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
};

struct amdgpu_ps {
	u32 caps; /* vbios flags */
	u32 class; /* vbios flags */
	u32 class2; /* vbios flags */
	/* UVD clocks */
	u32 vclk;
	u32 dclk;
	/* VCE clocks */
	u32 evclk;
	u32 ecclk;
	bool vce_active;
	enum amdgpu_vce_level vce_level;
	/* asic priv */
	void *ps_priv;
};

struct amdgpu_dpm_thermal {
	/* thermal interrupt work */
	struct work_struct work;
	/* low temperature threshold */
	int                min_temp;
	/* high temperature threshold */
	int                max_temp;
	/* was last interrupt low to high or high to low */
	bool               high_to_low;
	/* interrupt source */
	struct amdgpu_irq_src	irq;
};

enum amdgpu_clk_action
{
	AMDGPU_SCLK_UP = 1,
	AMDGPU_SCLK_DOWN
};

struct amdgpu_blacklist_clocks
{
	u32 sclk;
	u32 mclk;
	enum amdgpu_clk_action action;
};

struct amdgpu_clock_and_voltage_limits {
	u32 sclk;
	u32 mclk;
	u16 vddc;
	u16 vddci;
};

struct amdgpu_clock_array {
	u32 count;
	u32 *values;
};

struct amdgpu_clock_voltage_dependency_entry {
	u32 clk;
	u16 v;
};

struct amdgpu_clock_voltage_dependency_table {
	u32 count;
	struct amdgpu_clock_voltage_dependency_entry *entries;
};

union amdgpu_cac_leakage_entry {
	struct {
		u16 vddc;
		u32 leakage;
	};
	struct {
		u16 vddc1;
		u16 vddc2;
		u16 vddc3;
	};
};

struct amdgpu_cac_leakage_table {
	u32 count;
	union amdgpu_cac_leakage_entry *entries;
};

struct amdgpu_phase_shedding_limits_entry {
	u16 voltage;
	u32 sclk;
	u32 mclk;
};

struct amdgpu_phase_shedding_limits_table {
	u32 count;
	struct amdgpu_phase_shedding_limits_entry *entries;
};

struct amdgpu_uvd_clock_voltage_dependency_entry {
	u32 vclk;
	u32 dclk;
	u16 v;
};

struct amdgpu_uvd_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
};

struct amdgpu_vce_clock_voltage_dependency_entry {
	u32 ecclk;
	u32 evclk;
	u16 v;
};

struct amdgpu_vce_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
};

struct amdgpu_ppm_table {
	u8 ppm_design;
	u16 cpu_core_number;
	u32 platform_tdp;
	u32 small_ac_platform_tdp;
	u32 platform_tdc;
	u32 small_ac_platform_tdc;
	u32 apu_tdp;
	u32 dgpu_tdp;
	u32 dgpu_ulv_power;
	u32 tj_max;
};

struct amdgpu_cac_tdp_table {
	u16 tdp;
	u16 configurable_tdp;
	u16 tdc;
	u16 battery_power_limit;
	u16 small_power_limit;
	u16 low_cac_leakage;
	u16 high_cac_leakage;
	u16 maximum_power_delivery_limit;
};

struct amdgpu_dpm_dynamic_state {
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
	struct amdgpu_clock_array valid_sclk_values;
	struct amdgpu_clock_array valid_mclk_values;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
	u32 mclk_sclk_ratio;
	u32 sclk_mclk_delta;
	u16 vddc_vddci_delta;
	u16 min_vddc_for_pcie_gen2;
	struct amdgpu_cac_leakage_table cac_leakage_table;
	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
	struct amdgpu_ppm_table *ppm_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table;
};

struct amdgpu_dpm_fan {
	u16 t_min;
	u16 t_med;
	u16 t_high;
	u16 pwm_min;
	u16 pwm_med;
	u16 pwm_high;
	u8 t_hyst;
	u32 cycle_delay;
	u16 t_max;
	u8 control_mode;
	u16 default_max_fan_pwm;
	u16 default_fan_output_sensitivity;
	u16 fan_output_sensitivity;
	bool ucode_fan_control;
};

enum amdgpu_pcie_gen {
	AMDGPU_PCIE_GEN1 = 0,
	AMDGPU_PCIE_GEN2 = 1,
	AMDGPU_PCIE_GEN3 = 2,
	AMDGPU_PCIE_GEN_INVALID = 0xffff
};

enum amdgpu_dpm_forced_level {
	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
};

struct amdgpu_vce_state {
	/* vce clocks */
	u32 evclk;
	u32 ecclk;
	/* gpu clocks */
	u32 sclk;
	u32 mclk;
	u8 clk_idx;
	u8 pstate;
};

struct amdgpu_dpm_funcs {
	int (*get_temperature)(struct amdgpu_device *adev);
	int (*pre_set_power_state)(struct amdgpu_device *adev);
	int (*set_power_state)(struct amdgpu_device *adev);
	void (*post_set_power_state)(struct amdgpu_device *adev);
	void (*display_configuration_changed)(struct amdgpu_device *adev);
	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
	bool (*vblank_too_short)(struct amdgpu_device *adev);
	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
};

struct amdgpu_dpm {
	struct amdgpu_ps        *ps;
	/* number of valid power states */
	int                     num_ps;
	/* current power state that is active */
	struct amdgpu_ps        *current_ps;
	/* requested power state */
	struct amdgpu_ps        *requested_ps;
	/* boot up power state */
	struct amdgpu_ps        *boot_ps;
	/* default uvd power state */
	struct amdgpu_ps        *uvd_ps;
	/* vce requirements */
	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
	enum amdgpu_vce_level vce_level;
	enum amd_pm_state_type state;
	enum amd_pm_state_type user_state;
	u32                     platform_caps;
	u32                     voltage_response_time;
	u32                     backbias_response_time;
	void                    *priv;
	u32			new_active_crtcs;
	int			new_active_crtc_count;
	u32			current_active_crtcs;
	int			current_active_crtc_count;
	struct amdgpu_dpm_dynamic_state dyn_state;
	struct amdgpu_dpm_fan fan;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 near_tdp_limit_adjusted;
	u32 sq_ramping_threshold;
	u32 cac_leakage;
	u16 tdp_od_limit;
	u32 tdp_adjustment;
	u16 load_line_slope;
	bool power_control;
	bool ac_power;
	/* special states active */
	bool                    thermal_active;
	bool                    uvd_active;
	bool                    vce_active;
	/* thermal handling */
	struct amdgpu_dpm_thermal thermal;
	/* forced levels */
	enum amdgpu_dpm_forced_level forced_level;
};

struct amdgpu_pm {
	struct mutex		mutex;
	u32                     current_sclk;
	u32                     current_mclk;
	u32                     default_sclk;
	u32                     default_mclk;
	struct amdgpu_i2c_chan *i2c_bus;
	/* internal thermal controller on rv6xx+ */
	enum amdgpu_int_thermal_type int_thermal_type;
	struct device	        *int_hwmon_dev;
	/* fan control parameters */
	bool                    no_fan;
	u8                      fan_pulses_per_revolution;
	u8                      fan_min_rpm;
	u8                      fan_max_rpm;
	/* dpm */
	bool                    dpm_enabled;
	bool                    sysfs_initialized;
	struct amdgpu_dpm       dpm;
	const struct firmware	*fw;	/* SMC firmware */
	uint32_t                fw_version;
	const struct amdgpu_dpm_funcs *funcs;
	uint32_t                pcie_gen_mask;
	uint32_t                pcie_mlw_mask;
	struct amd_pp_display_configuration pm_display_cfg; /* set by DAL */
};

void amdgpu_get_pcie_info(struct amdgpu_device *adev);

/*
 * UVD
 */
#define AMDGPU_MAX_UVD_HANDLES	10
#define AMDGPU_UVD_STACK_SIZE	(1024*1024)
#define AMDGPU_UVD_HEAP_SIZE	(1024*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256

struct amdgpu_uvd {
	struct amdgpu_bo	*vcpu_bo;
	void			*cpu_addr;
	uint64_t		gpu_addr;
	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* UVD firmware */
	struct amdgpu_ring	ring;
	struct amdgpu_irq_src	irq;
	bool			address_64_bit;
	struct amd_sched_entity entity;
};

/*
 * VCE
 */
#define AMDGPU_MAX_VCE_HANDLES	16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256

#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)

struct amdgpu_vce {
	struct amdgpu_bo	*vcpu_bo;
	uint64_t		gpu_addr;
	unsigned		fw_version;
	unsigned		fb_version;
	atomic_t		handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file		*filp[AMDGPU_MAX_VCE_HANDLES];
	uint32_t		img_size[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* VCE firmware */
	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src	irq;
	unsigned		harvest_config;
	struct amd_sched_entity	entity;
};

/*
 * SDMA
 */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware	*fw;
	uint32_t		fw_version;
	uint32_t		feature_version;

	struct amdgpu_ring	ring;
	bool			burst_nop;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
	struct amdgpu_irq_src	trap_irq;
	struct amdgpu_irq_src	illegal_inst_irq;
	int			num_instances;
};

/*
 * Firmware
 */
struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	bool smu_load;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);


/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *cpA,
			   struct amdgpu_ring *cpB);
void amdgpu_test_syncing(struct amdgpu_device *adev);

/*
 * MMU Notifier
 */
#if defined(CONFIG_MMU_NOTIFIER)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif
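
/*
 * Callers can use these unconditionally; without CONFIG_MMU_NOTIFIER
 * registration simply fails with -ENODEV (sketch):
 *
 *	if (amdgpu_mn_register(bo, userptr_addr))
 *		... reject or fall back for the userptr BO ...
 */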
1687 
1688 /*
1689  * Debugfs
1690  */
1691 struct amdgpu_debugfs {
1692 	struct drm_info_list	*files;
1693 	unsigned		num_files;
1694 };
1695 
1696 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
1697 			     struct drm_info_list *files,
1698 			     unsigned nfiles);
1699 int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
1700 
1701 #if defined(CONFIG_DEBUG_FS)
1702 int amdgpu_debugfs_init(struct drm_minor *minor);
1703 void amdgpu_debugfs_cleanup(struct drm_minor *minor);
1704 #endif

/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool untouched;
	bool grbm_indexed;
};

struct amdgpu_cu_info {
	uint32_t number; /* total active CU number */
	uint32_t ao_cu_mask;
	uint32_t bitmap[4][4];
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* wait for mc_idle */
	int (*wait_for_mc_idle)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
};

/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);
int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo		*robj;
	volatile uint32_t		*ptr;
	u64				gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * CGS
 */
void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(void *cgs_device);

/* GPU virtualization */
struct amdgpu_virtualization {
	bool supports_sr_iov;
};

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
};

struct amdgpu_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp		acp;
#endif

	/* ASIC */
	enum amd_asic_type		asic_type;
	uint32_t			family;
	uint32_t			rev_id;
	uint32_t			external_rev_id;
	unsigned long			flags;
	int				usec_timeout;
	const struct amdgpu_asic_funcs	*asic_funcs;
	bool				shutdown;
	bool				suspend;
	bool				need_dma32;
	bool				accel_working;
	struct work_struct		reset_work;
	struct notifier_block		acpi_nb;
	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned			debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry			*debugfs_regs;
#endif
	struct amdgpu_atif		atif;
	struct amdgpu_atcs		atcs;
	struct mutex			srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex			grbm_idx_mutex;
	struct dev_pm_domain		vga_pm_domain;
	bool				have_disp_power_ref;

	/* BIOS */
	uint8_t				*bios;
	bool				is_atom_bios;
	uint16_t			bios_header_start;
	struct amdgpu_bo		*stollen_vga_memory;
	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void __iomem			*rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t smc_idx_lock;
	amdgpu_rreg_t			smc_rreg;
	amdgpu_wreg_t			smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t pcie_idx_lock;
	amdgpu_rreg_t			pcie_rreg;
	amdgpu_wreg_t			pcie_wreg;
	/* protects concurrent UVD register access */
	spinlock_t uvd_ctx_idx_lock;
	amdgpu_rreg_t			uvd_ctx_rreg;
	amdgpu_wreg_t			uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t didt_idx_lock;
	amdgpu_rreg_t			didt_rreg;
	amdgpu_wreg_t			didt_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t audio_endpt_idx_lock;
	amdgpu_block_rreg_t		audio_endpt_rreg;
	amdgpu_block_wreg_t		audio_endpt_wreg;
	void __iomem			*rio_mem;
	resource_size_t			rio_mem_size;
	struct amdgpu_doorbell		doorbell;

	/* clock/pll info */
	struct amdgpu_clock		clock;

	/* MC */
	struct amdgpu_mc		mc;
	struct amdgpu_gart		gart;
	struct amdgpu_dummy_page	dummy_page;
	struct amdgpu_vm_manager	vm_manager;

	/* memory management */
	struct amdgpu_mman		mman;
	struct amdgpu_vram_scratch	vram_scratch;
	struct amdgpu_wb		wb;
	atomic64_t			vram_usage;
	atomic64_t			vram_vis_usage;
	atomic64_t			gtt_usage;
	atomic64_t			num_bytes_moved;
	atomic_t			gpu_reset_counter;

	/* display */
	struct amdgpu_mode_info		mode_info;
	struct work_struct		hotplug_work;
	struct amdgpu_irq_src		crtc_irq;
	struct amdgpu_irq_src		pageflip_irq;
	struct amdgpu_irq_src		hpd_irq;

	/* rings */
	unsigned			fence_context;
	unsigned			num_rings;
	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
	bool				ib_pool_ready;
	struct amdgpu_sa_manager	ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq		irq;

	/* powerplay */
	struct amd_powerplay		powerplay;
	bool				pp_enabled;
	bool				pp_force_state_enabled;

	/* dpm */
	struct amdgpu_pm		pm;
	u32				cg_flags;
	u32				pg_flags;

	/* amdgpu smumgr */
	struct amdgpu_smumgr		smu;

	/* gfx */
	struct amdgpu_gfx		gfx;

	/* sdma */
	struct amdgpu_sdma		sdma;

	/* uvd */
	struct amdgpu_uvd		uvd;

	/* vce */
	struct amdgpu_vce		vce;

	/* firmwares */
	struct amdgpu_firmware		firmware;

	/* GDS */
	struct amdgpu_gds		gds;

	const struct amdgpu_ip_block_version *ip_blocks;
	int				num_ip_blocks;
	struct amdgpu_ip_block_status	*ip_block_status;
	struct mutex			mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	u64				vram_pin_size;
	u64				gart_pin_size;

	/* amdkfd interface */
	struct kfd_dev			*kfd;

	struct amdgpu_virtualization	virtualization;
};

bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);

/*
 * Registers read & write functions.
 */
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
/* REG_GET is the inverse of REG_SET: mask first, then shift back down */
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
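
/*
 * Illustrative sketch: WREG32_P() above is a read-modify-write whose
 * 'mask' names the bits to preserve. For a hypothetical register
 * mmFOO with an enable bit at bit 0:
 *
 *	WREG32_P(mmFOO, 1, ~1);	// keep everything but bit 0, then set it
 *	WREG32_OR(mmFOO, 1);	// equivalent shorthand
 *	WREG32_AND(mmFOO, ~1);	// clear bit 0, keep the rest
 */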

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
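
/*
 * Illustrative sketch (hypothetical register and field): given the
 * generated <reg>__<field>__SHIFT / <reg>__<field>_MASK definitions,
 * a field update stays a pure expression:
 *
 *	u32 tmp = RREG32(mmFOO_CNTL);
 *	tmp = REG_SET_FIELD(tmp, FOO_CNTL, ENABLE, 1);
 *	WREG32(mmFOO_CNTL, tmp);
 *	enabled = REG_GET_FIELD(tmp, FOO_CNTL, ENABLE);
 */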

/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
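
/*
 * Illustrative sketch: the RBIOS* helpers read little-endian values
 * out of the cached VBIOS image, e.g. caching the ATOM header pointer
 * that conventionally lives at offset 0x48:
 *
 *	adev->bios_header_start = RBIOS16(0x48);
 */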

/*
 * RING helpers.
 */
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}
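
/*
 * Illustrative sketch (amdgpu_ring_alloc()/amdgpu_ring_commit() are
 * assumed reservation helpers from the ring code): space is reserved
 * first, then dwords are emitted one at a time:
 *
 *	r = amdgpu_ring_alloc(ring, 1);
 *	if (!r) {
 *		amdgpu_ring_write(ring, ring->nop);
 *		amdgpu_ring_commit(ring);
 *	}
 */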

static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (&adev->sdma.instance[i].ring == ring)
			break;

	/* a ring that belongs to no populated instance must return NULL */
	if (i < adev->sdma.num_instances)
		return &adev->sdma.instance[i];
	else
		return NULL;
}
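
/*
 * Illustrative sketch: callers map a ring back to its engine to reach
 * per-instance data such as the firmware version or NOP behaviour:
 *
 *	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (sdma && sdma->burst_nop)
 *		... pad the IB with burst NOPs ...
 */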

/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))

#define amdgpu_dpm_get_temperature(adev) \
	((adev)->pp_enabled ?						\
	      (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
	      (adev)->pm.funcs->get_temperature((adev)))
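
/*
 * Illustrative note: each wrapper in this group picks a backend at run
 * time; with pp_enabled set the call is routed through the powerplay
 * pp_funcs table and its opaque pp_handle, otherwise through the
 * legacy per-ASIC callbacks in adev->pm.funcs, e.g.:
 *
 *	int temp = amdgpu_dpm_get_temperature(adev);
 */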

#define amdgpu_dpm_set_fan_control_mode(adev, m) \
	((adev)->pp_enabled ?						\
	      (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
	      (adev)->pm.funcs->set_fan_control_mode((adev), (m)))

#define amdgpu_dpm_get_fan_control_mode(adev) \
	((adev)->pp_enabled ?						\
	      (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
	      (adev)->pm.funcs->get_fan_control_mode((adev)))

#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
	((adev)->pp_enabled ?						\
	      (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
	      (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))

#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
	((adev)->pp_enabled ?						\
	      (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
	      (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))

#define amdgpu_dpm_get_sclk(adev, l) \
	((adev)->pp_enabled ?						\
	      (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
	      (adev)->pm.funcs->get_sclk((adev), (l)))

#define amdgpu_dpm_get_mclk(adev, l) \
	((adev)->pp_enabled ?						\
	      (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
	      (adev)->pm.funcs->get_mclk((adev), (l)))

#define amdgpu_dpm_force_performance_level(adev, l) \
	((adev)->pp_enabled ?						\
	      (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
	      (adev)->pm.funcs->force_performance_level((adev), (l)))

#define amdgpu_dpm_powergate_uvd(adev, g) \
	((adev)->pp_enabled ?						\
	      (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
	      (adev)->pm.funcs->powergate_uvd((adev), (g)))

#define amdgpu_dpm_powergate_vce(adev, g) \
	((adev)->pp_enabled ?						\
	      (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
	      (adev)->pm.funcs->powergate_vce((adev), (g)))

#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
	((adev)->pp_enabled ?						\
	      (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
	      (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))

#define amdgpu_dpm_get_current_power_state(adev) \
	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)

#define amdgpu_dpm_get_performance_level(adev) \
	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)

#define amdgpu_dpm_get_pp_num_states(adev, data) \
	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)

#define amdgpu_dpm_get_pp_table(adev, table) \
	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)

#define amdgpu_dpm_set_pp_table(adev, buf, size) \
	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)

#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)

#define amdgpu_dpm_force_clock_level(adev, type, level) \
	(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)

#define amdgpu_dpm_dispatch_task(adev, event_id, input, output)		\
	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))

#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))

/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);

int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv);
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;
};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
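
/*
 * Illustrative sketch (clock assumed to be in kHz, matching the
 * driver's predefined ACR table): fetch HDMI audio clock-regeneration
 * N/CTS values for a 74.25 MHz TMDS clock:
 *
 *	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(74250);
 *	... program acr.n_48khz / acr.cts_48khz ...
 */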

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo);

#include "amdgpu_object.h"

#endif