/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/rbtree.h>

#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
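/*
 * Illustrative example: with a block_size of 9 (a common configuration,
 * not a fixed value) this evaluates to 1 << 9 = 512 PTEs per page table
 * block, i.e. 2 MB of address space per block with 4 KB pages.
 */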

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE   32768

/* LOG2 number of continuous pages for the fragment field */
#define AMDGPU_LOG2_PAGES_PER_FRAG 4

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)
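/*
 * The fragment field holds the log2 of the number of contiguous 4 KB pages
 * covered by a mapping, so AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG)
 * advertises 1 << 4 = 16 contiguous pages (64 KB).  As a rough sketch of
 * how the flag bits combine (the real selection depends on BO placement and
 * is done by the mapping code):
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 *			 AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
 */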

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a)    ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)
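/*
 * Note that AMDGPU_PTE_MTYPE_MASK is just AMDGPU_PTE_MTYPE(3ULL), i.e. both
 * memory type bits (57 and 58) set, so it can be used to clear the mtype
 * field before inserting a new value.
 */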

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS			2
#define AMDGPU_GFXHUB				0
#define AMDGPU_MMHUB				1

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE			(8 << 20)
/* max number of VMIDs reserved for a process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

struct amdgpu_vm_pt {
	struct amdgpu_bo	*bo;
	uint64_t		addr;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt	*entries;
	unsigned		last_entry_used;
};
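/*
 * Rough shape of the page table hierarchy: the root amdgpu_vm_pt embedded in
 * struct amdgpu_vm is the page directory, entries[] holds one amdgpu_vm_pt
 * per directory entry for the next level (down to num_level levels on ASICs
 * with multi-level page tables), and last_entry_used tracks the highest
 * directory slot populated so far.
 */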

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root		va;

	/* protecting invalidated */
	spinlock_t		status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BOs cleared in the PT because of a move */
	struct list_head	cleared;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_pt     root;
	struct dma_fence	*last_dir_update;
	uint64_t		last_eviction_counter;

	/* protecting freed */
	spinlock_t		freed_lock;

	/* Scheduler entity for page table updates */
	struct amd_sched_entity	entity;

	/* client id */
	u64                     client_id;
	/* dedicated to vm */
	struct amdgpu_vm_id	*reserved_vmid[AMDGPU_MAX_VMHUBS];
	/* each VM will map on CSA */
	struct amdgpu_bo_va *csa_bo_va;
};
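/*
 * Rough lifecycle of the lists above: mappings of moved BOs land on
 * "invalidated" (under status_lock), mappings whose page table entries were
 * cleared because of a move sit on "cleared", and mappings that were
 * unmapped but whose page tables still need updating stay on "freed"
 * (under freed_lock) until amdgpu_vm_clear_freed() processes them.
 */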

struct amdgpu_vm_id {
	struct list_head	list;
	struct amdgpu_sync	active;
	struct dma_fence	*last_flush;
	atomic64_t		owner;

	uint64_t		pd_gpu_addr;
	/* last flushed PD/PT update */
	struct dma_fence	*flushed_updates;

	uint32_t                current_gpu_reset_count;

	uint32_t		gds_base;
	uint32_t		gds_size;
	uint32_t		gws_base;
	uint32_t		gws_size;
	uint32_t		oa_base;
	uint32_t		oa_size;
};

struct amdgpu_vm_id_manager {
	struct mutex		lock;
	unsigned		num_ids;
	struct list_head	ids_lru;
	struct amdgpu_vm_id	ids[AMDGPU_NUM_VM];
	atomic_t		reserved_vmid_num;
};
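/*
 * Each VMHUB has its own ID manager with a fixed pool of AMDGPU_NUM_VM
 * hardware VMIDs.  As a rough sketch of the assignment model (the actual
 * logic lives in amdgpu_vm_grab_id()): idle IDs are kept on ids_lru, a job
 * takes the least recently used one, synchronizes against its "active"
 * fences and flushes it via amdgpu_vm_flush() before running.
 * reserved_vmid_num counts IDs taken out of the pool for VMs that requested
 * a dedicated VMID.
 */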

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vm_id_manager		id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint64_t				vm_size;
	uint32_t				block_size;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* is vm enabled? */
	bool					enabled;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rings;
	atomic_t				vm_pte_next_ring;
	/* client id counter */
	atomic64_t				client_counter;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;
};
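/*
 * Sizing note (approximate): vm_size is the requested VM address space in
 * GB, and with 4 KB pages one GB corresponds to 1 << 18 pages, so a vm_size
 * of N translates into a max_pfn of roughly N << 18.  block_size is the
 * log2 of the PTEs per page table block and num_level the depth of the
 * page table hierarchy on ASICs that support more than one level.
 */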

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct dma_fence *fence,
		      struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
			unsigned vmid);
void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
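/*
 * Typical call flow, as a rough sketch rather than a contract: the driver
 * calls amdgpu_vm_manager_init() once per device, each client gets its own
 * VM through amdgpu_vm_init(), address ranges are set up with
 * amdgpu_vm_bo_add()/amdgpu_vm_bo_map() and backed by page tables from
 * amdgpu_vm_alloc_pts(), and per command submission amdgpu_vm_grab_id() and
 * amdgpu_vm_flush() assign and activate a hardware VMID.  Teardown goes
 * through amdgpu_vm_bo_rmv(), amdgpu_vm_fini() and finally
 * amdgpu_vm_manager_fini().
 */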

#endif