/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
#include "amdgpu_acp.h"

#include "gpu_scheduler.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
#define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
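/* Note: (HZ / 2) is half a second's worth of jiffies regardless of CONFIG_HZ. */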
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE			16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
#define AMDGPUFB_CONN_LIMIT			4
#define AMDGPU_BIOS_NUM_SCRATCH			8

/* max number of rings */
#define AMDGPU_MAX_RINGS			16
#define AMDGPU_MAX_GFX_RINGS			1
#define AMDGPU_MAX_COMPUTE_RINGS		8
#define AMDGPU_MAX_VCE_RINGS			2

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES		2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE			(8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA                  0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX			(1 << 0)
#define AMDGPU_RESET_COMPUTE			(1 << 1)
#define AMDGPU_RESET_DMA			(1 << 2)
#define AMDGPU_RESET_CP				(1 << 3)
#define AMDGPU_RESET_GRBM			(1 << 4)
#define AMDGPU_RESET_DMA1			(1 << 5)
#define AMDGPU_RESET_RLC			(1 << 6)
#define AMDGPU_RESET_SEM			(1 << 7)
#define AMDGPU_RESET_IH				(1 << 8)
#define AMDGPU_RESET_VMC			(1 << 9)
#define AMDGPU_RESET_MC				(1 << 10)
#define AMDGPU_RESET_DISPLAY			(1 << 11)
#define AMDGPU_RESET_UVD			(1 << 12)
#define AMDGPU_RESET_VCE			(1 << 13)
#define AMDGPU_RESET_VCE1			(1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE			0x00000000L
#define AMDGPU_GFX_SAFE_MODE			0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE		0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE		0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_powergating_state state);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type);

/* provided by hw blocks that can move/clear data.  e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t	copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t	fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  const dma_addr_t *pages_addr, uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct amdgpu_ring *ring);
	u32 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib,
			unsigned vm_id, bool ctx_switch);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page	*page;
	dma_addr_t	addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);


/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;
	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	unsigned			num_fences_mask;
	spinlock_t			lock;
	struct fence			**fences;
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)

#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
#define AMDGPU_FENCE_FLAG_INT           (1 << 1)

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

/*
 * TTM.
 */

#define AMDGPU_TTM_LRU_SIZE	20

struct amdgpu_mman_lru {
	struct list_head		*lru[TTM_NUM_MEM_TYPES];
	struct list_head		*swap_lru;
};

struct amdgpu_mman {
	struct ttm_bo_global_ref        bo_global_ref;
	struct drm_global_reference	mem_global_ref;
	struct ttm_bo_device		bdev;
	bool				mem_global_referenced;
	bool				initialized;

#if defined(CONFIG_DEBUG_FS)
	struct dentry			*vram;
	struct dentry			*gtt;
#endif

	/* buffer handling */
	const struct amdgpu_buffer_funcs	*buffer_funcs;
	struct amdgpu_ring			*buffer_funcs_ring;
	/* Scheduler entity for buffer moves */
	struct amd_sched_entity			entity;

	/* custom LRU management */
	struct amdgpu_mman_lru			log2_size[AMDGPU_TTM_LRU_SIZE];
};

int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);

struct amdgpu_bo_list_entry {
	struct amdgpu_bo		*robj;
	struct ttm_validate_buffer	tv;
	struct amdgpu_bo_va		*bo_va;
	uint32_t			priority;
	struct page			**user_pages;
	int				user_invalidated;
};

struct amdgpu_bo_va_mapping {
	struct list_head		list;
	struct interval_tree_node	it;
	uint64_t			offset;
	uint32_t			flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	/* protected by bo being reserved */
	struct list_head		bo_list;
	struct fence			*last_pt_update;
	unsigned			ref_count;

	/* protected by vm mutex and spinlock */
	struct list_head		vm_status;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;
};

#define AMDGPU_GEM_DOMAIN_MAX		0x3

struct amdgpu_bo {
	/* Protected by gem.mutex */
	struct list_head		list;
	/* Protected by tbo.reserved */
	u32				prefered_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	void				*kptr;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
	struct list_head		va;
	/* Constant after initialization */
	struct amdgpu_device		*adev;
	struct drm_gem_object		gem_base;
	struct amdgpu_bo		*parent;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;
	struct list_head		mn_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
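
/*
 * gem_to_amdgpu_bo() works because gem_base is embedded inside struct
 * amdgpu_bo, so container_of() can recover the containing bo from any
 * GEM object pointer the DRM core hands us, e.g.:
 *
 *	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 */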

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
				struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
				struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, such as
 * the indirect buffer and semaphore code, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check whether there is room
 * at the end, i.e. whether
 * total_size - (last_object_offset + last_object_size) >= alloc_size.
 * If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we wait on each sub-object
 * in turn until object_offset + object_size >= alloc_size; that object
 * then becomes the sub-object we return (see the sketch below).
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation, to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
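
/*
 * Illustrative sketch only, not part of the driver: the "room at the
 * end" test described above.  All names below are hypothetical.
 */
static inline bool amdgpu_sa_fits_at_end(unsigned total_size,
					 unsigned last_object_offset,
					 unsigned last_object_size,
					 unsigned alloc_size)
{
	return total_size - (last_object_offset + last_object_size) >=
	       alloc_size;
}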

#define AMDGPU_SA_NUM_FENCE_LISTS	32

struct amdgpu_sa_manager {
	wait_queue_head_t	wq;
	struct amdgpu_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
	uint32_t		align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct amdgpu_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct fence			*fence;
};

/*
 * GEM objects.
 */
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
				int alignment, u32 initial_domain,
				u64 flags, bool kernel,
				struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
/*
 * Synchronization
 */
struct amdgpu_sync {
	DECLARE_HASHTABLE(fences, 4);
	struct fence		*last_vm_update;
};
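/* Note: DECLARE_HASHTABLE(fences, 4) above sizes the table at 2^4 == 16 buckets. */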

void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
			     struct fence *fence);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
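/*
 * Worked example (illustrative): with 4096-byte GPU pages,
 * AMDGPU_GPU_PAGE_ALIGN(5000) == (5000 + 4095) & ~4095 == 8192,
 * i.e. sizes round up to the next GPU page boundary.
 */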

struct amdgpu_gart {
	dma_addr_t			table_addr;
	struct amdgpu_bo		*robj;
	void				*ptr;
	unsigned			num_gpu_pages;
	unsigned			num_cpu_pages;
	unsigned			table_size;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	struct page			**pages;
#endif
	bool				ready;
	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	resource_size_t		agp_base;
	/* for some chips with <= 32MB of VRAM we need to lie
	 * about the vram size near the mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	u64			gtt_size;
	u64			gtt_start;
	u64			gtt_end;
	u64			vram_start;
	u64			vram_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	u64                     gtt_base_align;
	u64                     mc_mask;
	const struct firmware   *fw;	/* MC firmware */
	uint32_t                fw_version;
	struct amdgpu_irq_src	vm_fault;
	uint32_t		vram_type;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ                     = 0x000,
	AMDGPU_DOORBELL_HIQ                     = 0x001,
	AMDGPU_DOORBELL_DIQ                     = 0x002,
	AMDGPU_DOORBELL_MEC_RING0               = 0x010,
	AMDGPU_DOORBELL_MEC_RING1               = 0x011,
	AMDGPU_DOORBELL_MEC_RING2               = 0x012,
	AMDGPU_DOORBELL_MEC_RING3               = 0x013,
	AMDGPU_DOORBELL_MEC_RING4               = 0x014,
	AMDGPU_DOORBELL_MEC_RING5               = 0x015,
	AMDGPU_DOORBELL_MEC_RING6               = 0x016,
	AMDGPU_DOORBELL_MEC_RING7               = 0x017,
	AMDGPU_DOORBELL_GFX_RING0               = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0            = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1            = 0x1E1,
	AMDGPU_DOORBELL_IH                      = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT          = 0x3FF,
	AMDGPU_DOORBELL_INVALID                 = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t		base;
	resource_size_t		size;
	u32 __iomem		*ptr;
	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};
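
/*
 * Minimal sketch (illustrative only, not the driver's implementation):
 * ringing a doorbell is a 32-bit MMIO write at the doorbell's index
 * into the mapping described above.
 */
static inline void amdgpu_doorbell_sketch_ring(struct amdgpu_doorbell *db,
					       u32 index, u32 v)
{
	if (index < db->num_doorbells)
		writel(v, db->ptr + index);
}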

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				phys_addr_t *aperture_base,
				size_t *aperture_size,
				size_t *start_offset);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct work_struct		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	uint64_t			base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo		*old_rbo;
	struct fence			*excl;
	unsigned			shared_count;
	struct fence			**shared;
	struct fence_cb			cb;
	bool				async;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo		*sa_bo;
	uint32_t			length_dw;
	uint64_t			gpu_addr;
	uint32_t			*ptr;
	uint32_t			flags;
};

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE
};

extern const struct amd_sched_backend_ops amdgpu_sched_ops;

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);

void amdgpu_job_free(struct amdgpu_job *job);
void amdgpu_job_free_func(struct kref *refcount);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct fence **f);
void amdgpu_job_timeout_func(struct work_struct *work);

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct amd_gpu_scheduler	sched;

	spinlock_t              fence_lock;
	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			next_rptr_gpu_addr;
	volatile u32		*next_rptr_cpu_addr;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		ring_size;
	unsigned		max_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint32_t		align_mask;
	uint32_t		ptr_mask;
	bool			ready;
	u32			nop;
	u32			idx;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	u32			doorbell_index;
	bool			use_doorbell;
	unsigned		wptr_offs;
	unsigned		next_rptr_offs;
	unsigned		fence_offs;
	uint64_t		current_ctx;
	enum amdgpu_ring_type	type;
	char			name[16];
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;
};

/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
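/*
 * Worked example (illustrative): with amdgpu_vm_block_size == 9, a
 * typical value, AMDGPU_VM_PTE_COUNT == 1 << 9 == 512 entries, so one
 * page table maps 512 * 4KB == 2MB of address space.
 */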

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)

#define AMDGPU_PTE_VALID	(1 << 0)
#define AMDGPU_PTE_SYSTEM	(1 << 1)
#define AMDGPU_PTE_SNOOPED	(1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1 << 4)

#define AMDGPU_PTE_READABLE	(1 << 5)
#define AMDGPU_PTE_WRITEABLE	(1 << 6)

/* PTE (Page Table Entry) fragment field for different page sizes */
#define AMDGPU_PTE_FRAG_4KB	(0 << 7)
#define AMDGPU_PTE_FRAG_64KB	(4 << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
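/*
 * Worked example (illustrative): the fragment field stores
 * log2(pages per fragment) in bits 7 and up.  A value of 4 means
 * 2^4 == 16 contiguous 4KB pages, i.e. a 64KB fragment, hence
 * AMDGPU_PTE_FRAG_64KB == (4 << 7).
 */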

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

struct amdgpu_vm_pt {
	struct amdgpu_bo_list_entry	entry;
	uint64_t			addr;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root		va;

	/* protecting invalidated */
	spinlock_t		status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BOs cleared in the PT because of a move */
	struct list_head	cleared;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_bo	*page_directory;
	unsigned		max_pde_used;
	struct fence		*page_directory_fence;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt	*page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];

	/* protecting freed */
	spinlock_t		freed_lock;

	/* Scheduler entity for page table updates */
	struct amd_sched_entity	entity;

	/* client id */
	u64                     client_id;
};

struct amdgpu_vm_id {
	struct list_head	list;
	struct fence		*first;
	struct amdgpu_sync	active;
	struct fence		*last_flush;
	struct amdgpu_ring      *last_user;
	atomic64_t		owner;

	uint64_t		pd_gpu_addr;
	/* last flushed PD/PT update */
	struct fence		*flushed_updates;

	uint32_t		gds_base;
	uint32_t		gds_size;
	uint32_t		gws_base;
	uint32_t		gws_size;
	uint32_t		oa_base;
	uint32_t		oa_size;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct mutex				lock;
	unsigned				num_ids;
	struct list_head			ids_lru;
	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];

	uint32_t				max_pfn;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* is vm enabled? */
	bool					enabled;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rings;
	atomic_t				vm_pte_next_ring;
	/* client id counter */
	atomic64_t				client_counter;
};

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      unsigned *vm_id, uint64_t *vm_pd_addr);
int amdgpu_vm_flush(struct amdgpu_ring *ring,
		    unsigned vm_id, uint64_t pd_addr,
		    uint32_t gds_base, uint32_t gds_size,
		    uint32_t gws_base, uint32_t gws_size,
		    uint32_t oa_base, uint32_t oa_size);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);

/*
 * context related structures
 */

struct amdgpu_ctx_ring {
	uint64_t		sequence;
	struct fence		**fences;
	struct amd_sched_entity	entity;
};

struct amdgpu_ctx {
	struct kref		refcount;
	struct amdgpu_device    *adev;
	unsigned		reset_counter;
	spinlock_t		ring_lock;
	struct fence            **fences;
	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device	*adev;
	struct mutex		lock;
	/* protected by lock */
	struct idr		ctx_handles;
};

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);

/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm	vm;
	struct mutex		bo_list_lock;
	struct idr		bo_list_handles;
	struct amdgpu_ctx_mgr	ctx_mgr;
};

/*
 * residency list
 */

struct amdgpu_bo_list {
	struct mutex lock;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	unsigned first_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc_funcs {
	void (*enter_safe_mode)(struct amdgpu_device *adev);
	void (*exit_safe_mode)(struct amdgpu_device *adev);
};

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo	*save_restore_obj;
	uint64_t		save_restore_gpu_addr;
	volatile uint32_t	*sr_ptr;
	const u32               *reg_list;
	u32                     reg_list_size;
	/* for clear state */
	struct amdgpu_bo	*clear_state_obj;
	uint64_t		clear_state_gpu_addr;
	volatile uint32_t	*cs_ptr;
	const struct cs_section_def   *cs_data;
	u32                     clear_state_size;
	/* for cp tables */
	struct amdgpu_bo	*cp_table_obj;
	uint64_t		cp_table_gpu_addr;
	volatile uint32_t	*cp_table_ptr;
	u32                     cp_table_size;

	/* safe mode for updating CG/PG state */
	bool in_safe_mode;
	const struct amdgpu_rlc_funcs *funcs;

	/* for firmware data */
	u32 save_and_restore_offset;
	u32 clear_state_descriptor_offset;
	u32 avail_scratch_ram_locations;
	u32 reg_restore_list_size;
	u32 reg_list_format_start;
	u32 reg_list_format_separate_start;
	u32 starting_offsets_start;
	u32 reg_list_format_size_bytes;
	u32 reg_list_size_bytes;

	u32 *register_list_format;
	u32 *register_restore;
};

struct amdgpu_mec {
	struct amdgpu_bo	*hpd_eop_obj;
	u64			hpd_eop_gpu_addr;
	u32 num_pipe;
	u32 num_mec;
	u32 num_queue;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned		num_reg;
	uint32_t                reg_base;
	bool			free[32];
	uint32_t		reg[32];
};

/*
 * GFX configurations
 */
struct amdgpu_gca_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;
	unsigned num_rbs;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];
};

struct amdgpu_cu_info {
	uint32_t number; /* total active CU number */
	uint32_t ao_cu_mask;
	uint32_t bitmap[4][4];
};

struct amdgpu_gfx {
	struct mutex			gpu_clock_mutex;
	struct amdgpu_gca_config	config;
	struct amdgpu_rlc		rlc;
	struct amdgpu_mec		mec;
	struct amdgpu_scratch		scratch;
	const struct firmware		*me_fw;	/* ME firmware */
	uint32_t			me_fw_version;
	const struct firmware		*pfp_fw; /* PFP firmware */
	uint32_t			pfp_fw_version;
	const struct firmware		*ce_fw;	/* CE firmware */
	uint32_t			ce_fw_version;
	const struct firmware		*rlc_fw; /* RLC firmware */
	uint32_t			rlc_fw_version;
	const struct firmware		*mec_fw; /* MEC firmware */
	uint32_t			mec_fw_version;
	const struct firmware		*mec2_fw; /* MEC2 firmware */
	uint32_t			mec2_fw_version;
	uint32_t			me_feature_version;
	uint32_t			ce_feature_version;
	uint32_t			pfp_feature_version;
	uint32_t			rlc_feature_version;
	uint32_t			mec_feature_version;
	uint32_t			mec2_feature_version;
	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned			num_gfx_rings;
	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned			num_compute_rings;
	struct amdgpu_irq_src		eop_irq;
	struct amdgpu_irq_src		priv_reg_irq;
	struct amdgpu_irq_src		priv_inst_irq;
	/* gfx status */
	uint32_t			gfx_current_status;
	/* ce ram size */
	unsigned			ce_ram_size;
	struct amdgpu_cu_info		cu_info;
};

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ib, struct fence *last_vm_update,
		       struct amdgpu_job *job, struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data);
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t		chunk_id;
	uint32_t		length_dw;
	void			*kdata;
};

struct amdgpu_cs_parser {
	struct amdgpu_device	*adev;
	struct drm_file		*filp;
	struct amdgpu_ctx	*ctx;

	/* chunks */
	unsigned		nchunks;
	struct amdgpu_cs_chunk	*chunks;

	/* scheduler job object */
	struct amdgpu_job	*job;

	/* buffer objects */
	struct ww_acquire_ctx		ticket;
	struct amdgpu_bo_list		*bo_list;
	struct amdgpu_bo_list_entry	vm_pd;
	struct list_head		validated;
	struct fence			*fence;
	uint64_t			bytes_moved_threshold;
	uint64_t			bytes_moved;

	/* user fence */
	struct amdgpu_bo_list_entry	uf_entry;
};

struct amdgpu_job {
	struct amd_sched_job    base;
	struct amdgpu_device	*adev;
	struct amdgpu_vm	*vm;
	struct amdgpu_ring	*ring;
	struct amdgpu_sync	sync;
	struct amdgpu_ib	*ibs;
	struct fence		*fence; /* the hw fence */
	uint32_t		num_ibs;
	void			*owner;
	uint64_t		ctx;
	unsigned		vm_id;
	uint64_t		vm_pd_addr;
	uint32_t		gds_base, gds_size;
	uint32_t		gws_base, gws_size;
	uint32_t		oa_base, oa_size;

	/* user fence handling */
	struct amdgpu_bo	*uf_bo;
	uint32_t		uf_offset;
	uint64_t		uf_sequence;
};
#define to_amdgpu_job(sched_job)		\
		container_of((sched_job), struct amdgpu_job, base)

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
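
/*
 * Minimal sketch (illustrative only, not the driver's implementation):
 * claiming a writeback slot from the 'used' bitmap above with the
 * standard kernel bitmap helpers.  The helper name is hypothetical.
 */
static inline int amdgpu_wb_sketch_get(struct amdgpu_wb *wb, u32 *out)
{
	unsigned long offset = find_first_zero_bit(wb->used, wb->num_wb);

	if (offset >= wb->num_wb)
		return -EINVAL;

	__set_bit(offset, wb->used);
	*out = offset;
	return 0;
}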

enum amdgpu_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_EXTERNAL,
	THERMAL_TYPE_EXTERNAL_GPIO,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
	THERMAL_TYPE_CI,
	THERMAL_TYPE_KV,
};

enum amdgpu_dpm_auto_throttle_src {
	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};

enum amdgpu_dpm_event_src {
	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};

#define AMDGPU_MAX_VCE_LEVELS 6

enum amdgpu_vce_level {
	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
};

struct amdgpu_ps {
	u32 caps; /* vbios flags */
	u32 class; /* vbios flags */
	u32 class2; /* vbios flags */
	/* UVD clocks */
	u32 vclk;
	u32 dclk;
	/* VCE clocks */
	u32 evclk;
	u32 ecclk;
	bool vce_active;
	enum amdgpu_vce_level vce_level;
	/* asic priv */
	void *ps_priv;
};

struct amdgpu_dpm_thermal {
	/* thermal interrupt work */
	struct work_struct work;
	/* low temperature threshold */
	int                min_temp;
	/* high temperature threshold */
	int                max_temp;
	/* was last interrupt low to high or high to low */
	bool               high_to_low;
	/* interrupt source */
	struct amdgpu_irq_src	irq;
};

enum amdgpu_clk_action {
	AMDGPU_SCLK_UP = 1,
	AMDGPU_SCLK_DOWN
};

struct amdgpu_blacklist_clocks {
	u32 sclk;
	u32 mclk;
	enum amdgpu_clk_action action;
};

struct amdgpu_clock_and_voltage_limits {
	u32 sclk;
	u32 mclk;
	u16 vddc;
	u16 vddci;
};

struct amdgpu_clock_array {
	u32 count;
	u32 *values;
};

struct amdgpu_clock_voltage_dependency_entry {
	u32 clk;
	u16 v;
};

struct amdgpu_clock_voltage_dependency_table {
	u32 count;
	struct amdgpu_clock_voltage_dependency_entry *entries;
};

union amdgpu_cac_leakage_entry {
	struct {
		u16 vddc;
		u32 leakage;
	};
	struct {
		u16 vddc1;
		u16 vddc2;
		u16 vddc3;
	};
};

struct amdgpu_cac_leakage_table {
	u32 count;
	union amdgpu_cac_leakage_entry *entries;
};

struct amdgpu_phase_shedding_limits_entry {
	u16 voltage;
	u32 sclk;
	u32 mclk;
};

struct amdgpu_phase_shedding_limits_table {
	u32 count;
	struct amdgpu_phase_shedding_limits_entry *entries;
};

struct amdgpu_uvd_clock_voltage_dependency_entry {
	u32 vclk;
	u32 dclk;
	u16 v;
};

struct amdgpu_uvd_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
};

struct amdgpu_vce_clock_voltage_dependency_entry {
	u32 ecclk;
	u32 evclk;
	u16 v;
};

struct amdgpu_vce_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
};

struct amdgpu_ppm_table {
	u8 ppm_design;
	u16 cpu_core_number;
	u32 platform_tdp;
	u32 small_ac_platform_tdp;
	u32 platform_tdc;
	u32 small_ac_platform_tdc;
	u32 apu_tdp;
	u32 dgpu_tdp;
	u32 dgpu_ulv_power;
	u32 tj_max;
};

struct amdgpu_cac_tdp_table {
	u16 tdp;
	u16 configurable_tdp;
	u16 tdc;
	u16 battery_power_limit;
	u16 small_power_limit;
	u16 low_cac_leakage;
	u16 high_cac_leakage;
	u16 maximum_power_delivery_limit;
};

struct amdgpu_dpm_dynamic_state {
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
	struct amdgpu_clock_array valid_sclk_values;
	struct amdgpu_clock_array valid_mclk_values;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
	u32 mclk_sclk_ratio;
	u32 sclk_mclk_delta;
	u16 vddc_vddci_delta;
	u16 min_vddc_for_pcie_gen2;
	struct amdgpu_cac_leakage_table cac_leakage_table;
	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
	struct amdgpu_ppm_table *ppm_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table;
};

struct amdgpu_dpm_fan {
	u16 t_min;
	u16 t_med;
	u16 t_high;
	u16 pwm_min;
	u16 pwm_med;
	u16 pwm_high;
	u8 t_hyst;
	u32 cycle_delay;
	u16 t_max;
	u8 control_mode;
	u16 default_max_fan_pwm;
	u16 default_fan_output_sensitivity;
	u16 fan_output_sensitivity;
	bool ucode_fan_control;
};

enum amdgpu_pcie_gen {
	AMDGPU_PCIE_GEN1 = 0,
	AMDGPU_PCIE_GEN2 = 1,
	AMDGPU_PCIE_GEN3 = 2,
	AMDGPU_PCIE_GEN_INVALID = 0xffff
};

enum amdgpu_dpm_forced_level {
	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
};

struct amdgpu_vce_state {
	/* vce clocks */
	u32 evclk;
	u32 ecclk;
	/* gpu clocks */
	u32 sclk;
	u32 mclk;
	u8 clk_idx;
	u8 pstate;
};

struct amdgpu_dpm_funcs {
	int (*get_temperature)(struct amdgpu_device *adev);
	int (*pre_set_power_state)(struct amdgpu_device *adev);
	int (*set_power_state)(struct amdgpu_device *adev);
	void (*post_set_power_state)(struct amdgpu_device *adev);
	void (*display_configuration_changed)(struct amdgpu_device *adev);
	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
	bool (*vblank_too_short)(struct amdgpu_device *adev);
	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
};

struct amdgpu_dpm {
	struct amdgpu_ps        *ps;
	/* number of valid power states */
	int                     num_ps;
	/* current power state that is active */
	struct amdgpu_ps        *current_ps;
	/* requested power state */
	struct amdgpu_ps        *requested_ps;
	/* boot up power state */
	struct amdgpu_ps        *boot_ps;
	/* default uvd power state */
	struct amdgpu_ps        *uvd_ps;
	/* vce requirements */
	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
	enum amdgpu_vce_level vce_level;
	enum amd_pm_state_type state;
	enum amd_pm_state_type user_state;
	u32                     platform_caps;
	u32                     voltage_response_time;
	u32                     backbias_response_time;
	void                    *priv;
	u32			new_active_crtcs;
	int			new_active_crtc_count;
	u32			current_active_crtcs;
	int			current_active_crtc_count;
	struct amdgpu_dpm_dynamic_state dyn_state;
	struct amdgpu_dpm_fan fan;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 near_tdp_limit_adjusted;
	u32 sq_ramping_threshold;
	u32 cac_leakage;
	u16 tdp_od_limit;
	u32 tdp_adjustment;
	u16 load_line_slope;
	bool power_control;
	bool ac_power;
	/* special states active */
	bool                    thermal_active;
	bool                    uvd_active;
	bool                    vce_active;
	/* thermal handling */
	struct amdgpu_dpm_thermal thermal;
	/* forced levels */
	enum amdgpu_dpm_forced_level forced_level;
};

struct amdgpu_pm {
	struct mutex		mutex;
	u32                     current_sclk;
	u32                     current_mclk;
	u32                     default_sclk;
	u32                     default_mclk;
	struct amdgpu_i2c_chan *i2c_bus;
	/* internal thermal controller on rv6xx+ */
	enum amdgpu_int_thermal_type int_thermal_type;
	struct device	        *int_hwmon_dev;
	/* fan control parameters */
	bool                    no_fan;
	u8                      fan_pulses_per_revolution;
	u8                      fan_min_rpm;
	u8                      fan_max_rpm;
	/* dpm */
	bool                    dpm_enabled;
	bool                    sysfs_initialized;
	struct amdgpu_dpm       dpm;
	const struct firmware	*fw;	/* SMC firmware */
	uint32_t                fw_version;
	const struct amdgpu_dpm_funcs *funcs;
	uint32_t                pcie_gen_mask;
	uint32_t                pcie_mlw_mask;
	struct amd_pp_display_configuration pm_display_cfg; /* set by DAL */
};

void amdgpu_get_pcie_info(struct amdgpu_device *adev);

/*
 * UVD
 */
#define AMDGPU_DEFAULT_UVD_HANDLES	10
#define AMDGPU_MAX_UVD_HANDLES		40
#define AMDGPU_UVD_STACK_SIZE		(200*1024)
#define AMDGPU_UVD_HEAP_SIZE		(256*1024)
#define AMDGPU_UVD_SESSION_SIZE		(50*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET	256

struct amdgpu_uvd {
	struct amdgpu_bo	*vcpu_bo;
	void			*cpu_addr;
	uint64_t		gpu_addr;
	unsigned		fw_version;
	void			*saved_bo;
	unsigned		max_handles;
	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* UVD firmware */
	struct amdgpu_ring	ring;
	struct amdgpu_irq_src	irq;
	bool			address_64_bit;
	struct amd_sched_entity entity;
};

/*
 * VCE
 */
#define AMDGPU_MAX_VCE_HANDLES	16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256

#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)

struct amdgpu_vce {
	struct amdgpu_bo	*vcpu_bo;
	uint64_t		gpu_addr;
	unsigned		fw_version;
	unsigned		fb_version;
	atomic_t		handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file		*filp[AMDGPU_MAX_VCE_HANDLES];
	uint32_t		img_size[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* VCE firmware */
	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src	irq;
	unsigned		harvest_config;
	struct amd_sched_entity	entity;
};

/*
 * SDMA
 */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware	*fw;
	uint32_t		fw_version;
	uint32_t		feature_version;

	struct amdgpu_ring	ring;
	bool			burst_nop;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
	struct amdgpu_irq_src	trap_irq;
	struct amdgpu_irq_src	illegal_inst_irq;
	int			num_instances;
};

/*
 * Firmware
 */
struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	bool smu_load;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
};

1723 /*
1724  * Benchmarking
1725  */
1726 void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
1727 
1728 
1729 /*
1730  * Testing
1731  */
1732 void amdgpu_test_moves(struct amdgpu_device *adev);
1733 void amdgpu_test_ring_sync(struct amdgpu_device *adev,
1734 			   struct amdgpu_ring *cpA,
1735 			   struct amdgpu_ring *cpB);
1736 void amdgpu_test_syncing(struct amdgpu_device *adev);
1737 
1738 /*
1739  * MMU Notifier
1740  */
1741 #if defined(CONFIG_MMU_NOTIFIER)
1742 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
1743 void amdgpu_mn_unregister(struct amdgpu_bo *bo);
1744 #else
1745 static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
1746 {
1747 	return -ENODEV;
1748 }
1749 static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
1750 #endif
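
/*
 * Usage sketch: userptr BOs register an MMU notifier so that CPU-side
 * unmaps invalidate the GPU mapping. Without CONFIG_MMU_NOTIFIER the stub
 * above returns -ENODEV, so userptr creation fails gracefully
 * (release_object is a hypothetical error-path label):
 *
 *	r = amdgpu_mn_register(bo, args->addr);
 *	if (r)
 *		goto release_object;
 */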
1751 
1752 /*
1753  * Debugfs
1754  */
1755 struct amdgpu_debugfs {
1756 	const struct drm_info_list	*files;
1757 	unsigned		num_files;
1758 };
1759 
1760 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
1761 			     const struct drm_info_list *files,
1762 			     unsigned nfiles);
1763 int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
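
/*
 * Usage sketch (foo_* names are hypothetical): a component exposes
 * read-only dumps by handing a drm_info_list array to
 * amdgpu_debugfs_add_files(), which stores it in adev->debugfs[], up to
 * AMDGPU_DEBUGFS_MAX_COMPONENTS entries:
 *
 *	static const struct drm_info_list foo_debugfs_list[] = {
 *		{ "amdgpu_foo_info", foo_debugfs_show, 0, NULL },
 *	};
 *
 *	r = amdgpu_debugfs_add_files(adev, foo_debugfs_list,
 *				     ARRAY_SIZE(foo_debugfs_list));
 */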
1764 
1765 #if defined(CONFIG_DEBUG_FS)
1766 int amdgpu_debugfs_init(struct drm_minor *minor);
1767 void amdgpu_debugfs_cleanup(struct drm_minor *minor);
1768 #endif
1769 
1770 /*
1771  * amdgpu smumgr functions
1772  */
1773 struct amdgpu_smumgr_funcs {
1774 	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
1775 	int (*request_smu_load_fw)(struct amdgpu_device *adev);
1776 	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
1777 };
1778 
1779 /*
1780  * amdgpu smumgr
1781  */
1782 struct amdgpu_smumgr {
1783 	struct amdgpu_bo *toc_buf;
1784 	struct amdgpu_bo *smu_buf;
1785 	/* asic priv smu data */
1786 	void *priv;
1787 	spinlock_t smu_lock;
1788 	/* smumgr functions */
1789 	const struct amdgpu_smumgr_funcs *smumgr_funcs;
1790 	/* ucode loading complete flag */
1791 	uint32_t fw_flags;
1792 };
1793 
1794 /*
1795  * ASIC specific register table accessible by UMD
1796  */
1797 struct amdgpu_allowed_register_entry {
1798 	uint32_t reg_offset;
1799 	bool untouched;
1800 	bool grbm_indexed;
1801 };
1802 
1803 /*
1804  * ASIC specific functions.
1805  */
1806 struct amdgpu_asic_funcs {
1807 	bool (*read_disabled_bios)(struct amdgpu_device *adev);
1808 	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
1809 				   u8 *bios, u32 length_bytes);
1810 	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
1811 			     u32 sh_num, u32 reg_offset, u32 *value);
1812 	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
1813 	int (*reset)(struct amdgpu_device *adev);
1814 	/* wait for mc_idle */
1815 	int (*wait_for_mc_idle)(struct amdgpu_device *adev);
1816 	/* get the reference clock */
1817 	u32 (*get_xclk)(struct amdgpu_device *adev);
1818 	/* get the gpu clock counter */
1819 	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
1820 	/* MM block clocks */
1821 	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
1822 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
1823 	/* query virtual capabilities */
1824 	u32 (*get_virtual_caps)(struct amdgpu_device *adev);
1825 };
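
/*
 * Sketch of how an ASIC backend might fill this table (the vi_* names are
 * placeholders; each ASIC file supplies its own callbacks and assigns the
 * table during early init):
 *
 *	static const struct amdgpu_asic_funcs vi_asic_funcs = {
 *		.read_disabled_bios	= &vi_read_disabled_bios,
 *		.read_register		= &vi_read_register,
 *		.reset			= &vi_asic_reset,
 *		.get_xclk		= &vi_get_xclk,
 *	};
 *
 *	adev->asic_funcs = &vi_asic_funcs;
 */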
1826 
1827 /*
1828  * IOCTL.
1829  */
1830 int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
1831 			    struct drm_file *filp);
1832 int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
1833 				struct drm_file *filp);
1834 
1835 int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
1836 			  struct drm_file *filp);
1837 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
1838 			struct drm_file *filp);
1839 int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
1840 			  struct drm_file *filp);
1841 int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
1842 			      struct drm_file *filp);
1843 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
1844 			  struct drm_file *filp);
1845 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
1846 			struct drm_file *filp);
1847 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1848 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1849 
1850 int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
1851 				struct drm_file *filp);
1852 
1853 /* VRAM scratch page used to work around the HDP flush bug; also the default VRAM page */
1854 struct amdgpu_vram_scratch {
1855 	struct amdgpu_bo		*robj;
1856 	volatile uint32_t		*ptr;
1857 	u64				gpu_addr;
1858 };
1859 
1860 /*
1861  * ACPI
1862  */
1863 struct amdgpu_atif_notification_cfg {
1864 	bool enabled;
1865 	int command_code;
1866 };
1867 
1868 struct amdgpu_atif_notifications {
1869 	bool display_switch;
1870 	bool expansion_mode_change;
1871 	bool thermal_state;
1872 	bool forced_power_state;
1873 	bool system_power_state;
1874 	bool display_conf_change;
1875 	bool px_gfx_switch;
1876 	bool brightness_change;
1877 	bool dgpu_display_event;
1878 };
1879 
1880 struct amdgpu_atif_functions {
1881 	bool system_params;
1882 	bool sbios_requests;
1883 	bool select_active_disp;
1884 	bool lid_state;
1885 	bool get_tv_standard;
1886 	bool set_tv_standard;
1887 	bool get_panel_expansion_mode;
1888 	bool set_panel_expansion_mode;
1889 	bool temperature_change;
1890 	bool graphics_device_types;
1891 };
1892 
1893 struct amdgpu_atif {
1894 	struct amdgpu_atif_notifications notifications;
1895 	struct amdgpu_atif_functions functions;
1896 	struct amdgpu_atif_notification_cfg notification_cfg;
1897 	struct amdgpu_encoder *encoder_for_bl;
1898 };
1899 
1900 struct amdgpu_atcs_functions {
1901 	bool get_ext_state;
1902 	bool pcie_perf_req;
1903 	bool pcie_dev_rdy;
1904 	bool pcie_bus_width;
1905 };
1906 
1907 struct amdgpu_atcs {
1908 	struct amdgpu_atcs_functions functions;
1909 };
1910 
1911 /*
1912  * CGS
1913  */
1914 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
1915 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
1916 
1918 /* GPU virtualization */
1919 #define AMDGPU_VIRT_CAPS_SRIOV_EN       (1 << 0)
1920 #define AMDGPU_VIRT_CAPS_IS_VF          (1 << 1)
1921 struct amdgpu_virtualization {
1922 	bool supports_sr_iov;
1923 	bool is_virtual;
1924 	u32 caps;
1925 };
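
/*
 * Usage sketch: the caps field caches the AMDGPU_VIRT_CAPS_* bits reported
 * by the ASIC (via the amdgpu_asic_get_virtual_caps() macro below), which
 * callers can then test cheaply:
 *
 *	adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
 *	if (adev->virtualization.caps & AMDGPU_VIRT_CAPS_IS_VF)
 *		...	// running as an SR-IOV virtual function
 */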
1926 
1927 /*
1928  * Core structure, functions and helpers.
1929  */
1930 typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
1931 typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1932 
1933 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1934 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
1935 
1936 struct amdgpu_ip_block_status {
1937 	bool valid;
1938 	bool sw;
1939 	bool hw;
1940 };
1941 
1942 struct amdgpu_device {
1943 	struct device			*dev;
1944 	struct drm_device		*ddev;
1945 	struct pci_dev			*pdev;
1946 
1947 #ifdef CONFIG_DRM_AMD_ACP
1948 	struct amdgpu_acp		acp;
1949 #endif
1950 
1951 	/* ASIC */
1952 	enum amd_asic_type		asic_type;
1953 	uint32_t			family;
1954 	uint32_t			rev_id;
1955 	uint32_t			external_rev_id;
1956 	unsigned long			flags;
1957 	int				usec_timeout;
1958 	const struct amdgpu_asic_funcs	*asic_funcs;
1959 	bool				shutdown;
1960 	bool				need_dma32;
1961 	bool				accel_working;
1962 	struct work_struct		reset_work;
1963 	struct notifier_block		acpi_nb;
1964 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
1965 	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
1966 	unsigned			debugfs_count;
1967 #if defined(CONFIG_DEBUG_FS)
1968 	struct dentry			*debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
1969 #endif
1970 	struct amdgpu_atif		atif;
1971 	struct amdgpu_atcs		atcs;
1972 	struct mutex			srbm_mutex;
1973 	/* GRBM index mutex. Protects concurrent access to GRBM index */
1974 	struct mutex                    grbm_idx_mutex;
1975 	struct dev_pm_domain		vga_pm_domain;
1976 	bool				have_disp_power_ref;
1977 
1978 	/* BIOS */
1979 	uint8_t				*bios;
1980 	bool				is_atom_bios;
1981 	struct amdgpu_bo		*stolen_vga_memory;
1982 	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
1983 
1984 	/* Register/doorbell mmio */
1985 	resource_size_t			rmmio_base;
1986 	resource_size_t			rmmio_size;
1987 	void __iomem			*rmmio;
1988 	/* protects concurrent MM_INDEX/DATA based register access */
1989 	spinlock_t mmio_idx_lock;
1990 	/* protects concurrent SMC based register access */
1991 	spinlock_t smc_idx_lock;
1992 	amdgpu_rreg_t			smc_rreg;
1993 	amdgpu_wreg_t			smc_wreg;
1994 	/* protects concurrent PCIE register access */
1995 	spinlock_t pcie_idx_lock;
1996 	amdgpu_rreg_t			pcie_rreg;
1997 	amdgpu_wreg_t			pcie_wreg;
1998 	/* protects concurrent UVD register access */
1999 	spinlock_t uvd_ctx_idx_lock;
2000 	amdgpu_rreg_t			uvd_ctx_rreg;
2001 	amdgpu_wreg_t			uvd_ctx_wreg;
2002 	/* protects concurrent DIDT register access */
2003 	spinlock_t didt_idx_lock;
2004 	amdgpu_rreg_t			didt_rreg;
2005 	amdgpu_wreg_t			didt_wreg;
2006 	/* protects concurrent ENDPOINT (audio) register access */
2007 	spinlock_t audio_endpt_idx_lock;
2008 	amdgpu_block_rreg_t		audio_endpt_rreg;
2009 	amdgpu_block_wreg_t		audio_endpt_wreg;
2010 	void __iomem                    *rio_mem;
2011 	resource_size_t			rio_mem_size;
2012 	struct amdgpu_doorbell		doorbell;
2013 
2014 	/* clock/pll info */
2015 	struct amdgpu_clock            clock;
2016 
2017 	/* MC */
2018 	struct amdgpu_mc		mc;
2019 	struct amdgpu_gart		gart;
2020 	struct amdgpu_dummy_page	dummy_page;
2021 	struct amdgpu_vm_manager	vm_manager;
2022 
2023 	/* memory management */
2024 	struct amdgpu_mman		mman;
2025 	struct amdgpu_vram_scratch	vram_scratch;
2026 	struct amdgpu_wb		wb;
2027 	atomic64_t			vram_usage;
2028 	atomic64_t			vram_vis_usage;
2029 	atomic64_t			gtt_usage;
2030 	atomic64_t			num_bytes_moved;
2031 	atomic_t			gpu_reset_counter;
2032 
2033 	/* display */
2034 	struct amdgpu_mode_info		mode_info;
2035 	struct work_struct		hotplug_work;
2036 	struct amdgpu_irq_src		crtc_irq;
2037 	struct amdgpu_irq_src		pageflip_irq;
2038 	struct amdgpu_irq_src		hpd_irq;
2039 
2040 	/* rings */
2041 	unsigned			fence_context;
2042 	unsigned			num_rings;
2043 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
2044 	bool				ib_pool_ready;
2045 	struct amdgpu_sa_manager	ring_tmp_bo;
2046 
2047 	/* interrupts */
2048 	struct amdgpu_irq		irq;
2049 
2050 	/* powerplay */
2051 	struct amd_powerplay		powerplay;
2052 	bool				pp_enabled;
2053 	bool				pp_force_state_enabled;
2054 
2055 	/* dpm */
2056 	struct amdgpu_pm		pm;
2057 	u32				cg_flags;
2058 	u32				pg_flags;
2059 
2060 	/* amdgpu smumgr */
2061 	struct amdgpu_smumgr smu;
2062 
2063 	/* gfx */
2064 	struct amdgpu_gfx		gfx;
2065 
2066 	/* sdma */
2067 	struct amdgpu_sdma		sdma;
2068 
2069 	/* uvd */
2070 	struct amdgpu_uvd		uvd;
2071 
2072 	/* vce */
2073 	struct amdgpu_vce		vce;
2074 
2075 	/* firmwares */
2076 	struct amdgpu_firmware		firmware;
2077 
2078 	/* GDS */
2079 	struct amdgpu_gds		gds;
2080 
2081 	const struct amdgpu_ip_block_version *ip_blocks;
2082 	int				num_ip_blocks;
2083 	struct amdgpu_ip_block_status	*ip_block_status;
2084 	struct mutex	mn_lock;
2085 	DECLARE_HASHTABLE(mn_hash, 7);
2086 
2087 	/* tracking pinned memory */
2088 	u64 vram_pin_size;
2089 	u64 invisible_pin_size;
2090 	u64 gart_pin_size;
2091 
2092 	/* amdkfd interface */
2093 	struct kfd_dev          *kfd;
2094 
2095 	struct amdgpu_virtualization virtualization;
2096 };
2097 
2098 bool amdgpu_device_is_px(struct drm_device *dev);
2099 int amdgpu_device_init(struct amdgpu_device *adev,
2100 		       struct drm_device *ddev,
2101 		       struct pci_dev *pdev,
2102 		       uint32_t flags);
2103 void amdgpu_device_fini(struct amdgpu_device *adev);
2104 int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
2105 
2106 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
2107 			bool always_indirect);
2108 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
2109 		    bool always_indirect);
2110 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
2111 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
2112 
2113 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
2114 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
2115 
2116 /*
2117  * Registers read & write functions.
2118  */
2119 #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
2120 #define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
2121 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
2122 #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
2123 #define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
2124 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
2125 #define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
2126 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
2127 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
2128 #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
2129 #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
2130 #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
2131 #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
2132 #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
2133 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
2134 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
2135 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
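
/*
 * Read-modify-write helpers. Note the mask convention: bits set in mask
 * are preserved from the current register value, bits clear in mask are
 * taken from val. E.g. to set only bit 0 of a (hypothetical) register
 * while keeping everything else:
 *
 *	WREG32_P(mmFOO, 1, ~1);	// equivalent to WREG32_OR(mmFOO, 1)
 */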
2136 #define WREG32_P(reg, val, mask)				\
2137 	do {							\
2138 		uint32_t tmp_ = RREG32(reg);			\
2139 		tmp_ &= (mask);					\
2140 		tmp_ |= ((val) & ~(mask));			\
2141 		WREG32(reg, tmp_);				\
2142 	} while (0)
2143 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
2144 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
2145 #define WREG32_PLL_P(reg, val, mask)				\
2146 	do {							\
2147 		uint32_t tmp_ = RREG32_PLL(reg);		\
2148 		tmp_ &= (mask);					\
2149 		tmp_ |= ((val) & ~(mask));			\
2150 		WREG32_PLL(reg, tmp_);				\
2151 	} while (0)
2152 #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
2153 #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
2154 #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
2155 
2156 #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
2157 #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
2158 
2159 #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
2160 #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
2161 
2162 #define REG_SET_FIELD(orig_val, reg, field, field_val)			\
2163 	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
2164 	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))
2165 
2166 #define REG_GET_FIELD(value, reg, field)				\
2167 	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
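
/*
 * Usage sketch: the register headers generate the reg##__##field##__SHIFT
 * and _MASK constants these helpers expand to. GRBM_STATUS/GUI_ACTIVE is a
 * real pair; SOME_REG/SOME_FIELD below is a placeholder:
 *
 *	u32 tmp = RREG32(mmGRBM_STATUS);
 *	if (REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
 *		...	// graphics pipe is busy
 *
 *	tmp = RREG32(mmSOME_REG);
 *	tmp = REG_SET_FIELD(tmp, SOME_REG, SOME_FIELD, 1);
 *	WREG32(mmSOME_REG, tmp);
 */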
2168 
2169 /*
2170  * BIOS helpers.
2171  */
2172 #define RBIOS8(i) (adev->bios[i])
2173 #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
2174 #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
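
/*
 * Example: the multi-byte readers assemble little-endian values from the
 * raw BIOS image, e.g. fetching the pointer to the ATOM ROM header, which
 * is stored at offset 0x48 of the image:
 *
 *	u16 rom_header = RBIOS16(0x48);
 */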
2175 
2176 /*
2177  * RING helpers.
2178  */
2179 static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
2180 {
2181 	if (ring->count_dw <= 0)
2182 		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
2183 	ring->ring[ring->wptr++] = v;
2184 	ring->wptr &= ring->ptr_mask;
2185 	ring->count_dw--;
2186 }
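
/*
 * Note: the count_dw check above is diagnostic only; the write still goes
 * through, so callers must reserve ring space beforehand. A typical flow:
 *
 *	r = amdgpu_ring_alloc(ring, 4);		// reserve 4 dwords
 *	if (!r) {
 *		amdgpu_ring_write(ring, ring->nop);
 *		...
 *		amdgpu_ring_commit(ring);
 *	}
 */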
2187 
2188 static inline struct amdgpu_sdma_instance *
2189 amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2190 {
2191 	struct amdgpu_device *adev = ring->adev;
2192 	int i;
2193 
2194 	for (i = 0; i < adev->sdma.num_instances; i++)
2195 		if (&adev->sdma.instance[i].ring == ring)
2196 			break;
2197 
2198 	if (i < adev->sdma.num_instances)
2199 		return &adev->sdma.instance[i];
2200 	else
2201 		return NULL;
2202 }
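
/*
 * Usage sketch: SDMA ring callbacks map a ring back to its owning instance,
 * e.g. to honour per-instance capabilities such as burst_nop:
 *
 *	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (sdma && sdma->burst_nop)
 *		...	// emit one burst NOP instead of several single NOPs
 */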
2203 
2204 /*
2205  * ASICs macro.
2206  */
2207 #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
2208 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
2209 #define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
2210 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
2211 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
2212 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
2213 #define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
2214 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
2215 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
2216 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
2217 #define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
2218 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
2219 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
2220 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
2221 #define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
2222 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
2223 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
2224 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
2225 #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
2226 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
2227 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
2228 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
2229 #define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
2230 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
2231 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
2232 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
2233 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
2234 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
2235 #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
2236 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
2237 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
2238 #define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
2239 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
2240 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
2241 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
2242 #define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
2243 #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
2244 #define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
2245 #define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
2246 #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
2247 #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
2248 #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
2249 #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
2250 #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
2251 #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
2252 #define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
2253 #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
2254 #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
2255 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
2256 #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
2257 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
2258 #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
2259 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
2260 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
2261 #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
2262 #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
2263 #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
2264 #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
2265 #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
2266 #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
2267 
2268 #define amdgpu_dpm_get_temperature(adev) \
2269 	((adev)->pp_enabled ?						\
2270 	      (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
2271 	      (adev)->pm.funcs->get_temperature((adev)))
2272 
2273 #define amdgpu_dpm_set_fan_control_mode(adev, m) \
2274 	((adev)->pp_enabled ?						\
2275 	      (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
2276 	      (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
2277 
2278 #define amdgpu_dpm_get_fan_control_mode(adev) \
2279 	((adev)->pp_enabled ?						\
2280 	      (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
2281 	      (adev)->pm.funcs->get_fan_control_mode((adev)))
2282 
2283 #define amdgpu_dpm_set_fan_speed_percent(adev, s) \
2284 	((adev)->pp_enabled ?						\
2285 	      (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
2286 	      (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
2287 
2288 #define amdgpu_dpm_get_fan_speed_percent(adev, s) \
2289 	((adev)->pp_enabled ?						\
2290 	      (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
2291 	      (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
2292 
2293 #define amdgpu_dpm_get_sclk(adev, l) \
2294 	((adev)->pp_enabled ?						\
2295 	      (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
2296 		(adev)->pm.funcs->get_sclk((adev), (l)))
2297 
2298 #define amdgpu_dpm_get_mclk(adev, l)  \
2299 	((adev)->pp_enabled ?						\
2300 	      (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
2301 	      (adev)->pm.funcs->get_mclk((adev), (l)))
2302 
2304 #define amdgpu_dpm_force_performance_level(adev, l) \
2305 	((adev)->pp_enabled ?						\
2306 	      (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
2307 	      (adev)->pm.funcs->force_performance_level((adev), (l)))
2308 
2309 #define amdgpu_dpm_powergate_uvd(adev, g) \
2310 	((adev)->pp_enabled ?						\
2311 	      (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
2312 	      (adev)->pm.funcs->powergate_uvd((adev), (g)))
2313 
2314 #define amdgpu_dpm_powergate_vce(adev, g) \
2315 	((adev)->pp_enabled ?						\
2316 	      (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
2317 	      (adev)->pm.funcs->powergate_vce((adev), (g)))
2318 
2319 #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
2320 	((adev)->pp_enabled ?						\
2321 	      (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
2322 	      (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
2323 
2324 #define amdgpu_dpm_get_current_power_state(adev) \
2325 	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
2326 
2327 #define amdgpu_dpm_get_performance_level(adev) \
2328 	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
2329 
2330 #define amdgpu_dpm_get_pp_num_states(adev, data) \
2331 	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
2332 
2333 #define amdgpu_dpm_get_pp_table(adev, table) \
2334 	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
2335 
2336 #define amdgpu_dpm_set_pp_table(adev, buf, size) \
2337 	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
2338 
2339 #define amdgpu_dpm_print_clock_levels(adev, type, buf) \
2340 	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
2341 
2342 #define amdgpu_dpm_force_clock_level(adev, type, level) \
2343 		(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
2344 
2345 #define amdgpu_dpm_dispatch_task(adev, event_id, input, output)		\
2346 	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
2347 
2348 #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
2349 
2350 /* Common functions */
2351 int amdgpu_gpu_reset(struct amdgpu_device *adev);
2352 void amdgpu_pci_config_reset(struct amdgpu_device *adev);
2353 bool amdgpu_card_posted(struct amdgpu_device *adev);
2354 void amdgpu_update_display_priority(struct amdgpu_device *adev);
2355 
2356 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
2357 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
2358 		       u32 ip_instance, u32 ring,
2359 		       struct amdgpu_ring **out_ring);
2360 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
2361 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
2362 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
2363 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2364 				     uint32_t flags);
2365 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
2366 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
2367 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
2368 				  unsigned long end);
2369 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
2370 				       int *last_invalidated);
2371 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
2372 uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2373 				 struct ttm_mem_reg *mem);
2374 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
2375 void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
2376 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
2377 void amdgpu_program_register_sequence(struct amdgpu_device *adev,
2378 					     const u32 *registers,
2379 					     const u32 array_size);
2380 
2382 /* atpx handler */
2383 #if defined(CONFIG_VGA_SWITCHEROO)
2384 void amdgpu_register_atpx_handler(void);
2385 void amdgpu_unregister_atpx_handler(void);
2386 #else
2387 static inline void amdgpu_register_atpx_handler(void) {}
2388 static inline void amdgpu_unregister_atpx_handler(void) {}
2389 #endif
2390 
2391 /*
2392  * KMS
2393  */
2394 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
2395 extern const int amdgpu_max_kms_ioctl;
2396 
2397 int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
2398 int amdgpu_driver_unload_kms(struct drm_device *dev);
2399 void amdgpu_driver_lastclose_kms(struct drm_device *dev);
2400 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
2401 void amdgpu_driver_postclose_kms(struct drm_device *dev,
2402 				 struct drm_file *file_priv);
2403 void amdgpu_driver_preclose_kms(struct drm_device *dev,
2404 				struct drm_file *file_priv);
2405 int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
2406 int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
2407 u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
2408 int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
2409 void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
2410 int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
2411 				    int *max_error,
2412 				    struct timeval *vblank_time,
2413 				    unsigned flags);
2414 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
2415 			     unsigned long arg);
2416 
2417 /*
2418  * functions used by amdgpu_encoder.c
2419  */
2420 struct amdgpu_afmt_acr {
2421 	u32 clock;
2422 
2423 	int n_32khz;
2424 	int cts_32khz;
2425 
2426 	int n_44_1khz;
2427 	int cts_44_1khz;
2428 
2429 	int n_48khz;
2430 	int cts_48khz;
2432 };
2433 
2434 struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
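
/*
 * Usage sketch (the HDMI_ACR_* register names are placeholders): HDMI audio
 * code looks up the N/CTS pair for the current pixel clock and programs one
 * entry per supported sample rate:
 *
 *	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
 *
 *	WREG32(mmHDMI_ACR_48_0, acr.cts_48khz);
 *	WREG32(mmHDMI_ACR_48_1, acr.n_48khz);
 */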
2435 
2436 /* amdgpu_acpi.c */
2437 #if defined(CONFIG_ACPI)
2438 int amdgpu_acpi_init(struct amdgpu_device *adev);
2439 void amdgpu_acpi_fini(struct amdgpu_device *adev);
2440 bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
2441 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
2442 						u8 perf_req, bool advertise);
2443 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
2444 #else
2445 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
2446 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
2447 #endif
2448 
2449 struct amdgpu_bo_va_mapping *
2450 amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
2451 		       uint64_t addr, struct amdgpu_bo **bo);
2452 
2453 #include "amdgpu_object.h"
2454 #endif
2455