/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/rbtree.h>
#include <linux/idr.h>

#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
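/*
 * Worked example (illustrative only): with a block_size of e.g. 9 this
 * evaluates to 1 << 9 = 512 entries per page table block; with 4 KiB pages
 * one block then covers 512 * 4 KiB = 2 MiB of address space.
 */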

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE   32768

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1fULL) << 7)
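/*
 * Note (editorial): the 5-bit fragment field occupies PTE bits 11:7. It is
 * usually programmed with log2 of the fragment size in 4 KiB pages, so a
 * value of 4 would describe 64 KiB fragments; see the fragment_size handling
 * in the VM manager below.
 */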

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)

/* For Raven */
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM      \
                                | AMDGPU_PTE_SNOOPED    \
                                | AMDGPU_PTE_EXECUTABLE \
                                | AMDGPU_PTE_READABLE   \
                                | AMDGPU_PTE_WRITEABLE  \
                                | AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS			2
#define AMDGPU_GFXHUB				0
#define AMDGPU_MMHUB				1

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE			(8ULL << 20)

/* max number of VMIDs that can be reserved for a process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX 0
#define AMDGPU_VM_CONTEXT_COMPUTE 1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
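/*
 * Illustrative sketch (not the driver's actual helper, the function name is
 * hypothetical): how the two bits in vm_update_mode (see struct
 * amdgpu_vm_manager below) select CPU vs. SDMA page table updates per VM
 * context.
 */
#if 0
static inline bool example_vm_uses_cpu_updates(int vm_update_mode,
					       int vm_context)
{
	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
		return vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE;

	return vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX;
}
#endif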

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct list_head		bo_list;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};

struct amdgpu_vm_pt {
	struct amdgpu_vm_bo_base	base;
	uint64_t			addr;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt		*entries;
	unsigned			last_entry_used;
};

#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
#define AMDGPU_VM_FAULT_ADDR(fault)  ((u64)(fault) & 0xfffffffff000ULL)
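/*
 * Worked example (illustrative): AMDGPU_VM_FAULT(0x12, 0x7fff0000) stores the
 * PASID in bits 63:48 and the page-aligned fault address in the low bits;
 * AMDGPU_VM_FAULT_PASID() and AMDGPU_VM_FAULT_ADDR() then recover 0x12 and
 * 0x7fff0000 respectively.
 */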

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* lock protecting the vm_status lists below */
	spinlock_t		status_lock;

	/* BOs that need a validation */
	struct list_head	evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head	relocated;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_pt     root;
	struct dma_fence	*last_update;

	/* protecting freed */
	spinlock_t		freed_lock;

	/* Scheduler entity for page table updates */
	struct amd_sched_entity	entity;

	/* client id and PASID (TODO: replace client_id with PASID) */
	u64                     client_id;
	unsigned int		pasid;
	/* VMIDs reserved for this VM */
	struct amdgpu_vm_id	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool                    use_cpu_for_update;

	/* Flag to indicate whether ATS is supported via the PTEs (GFX9) */
	bool			pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Limit non-retry fault storms */
	unsigned int		fault_credit;
};
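/*
 * Usage sketch (illustrative, not the driver's actual fault path; the
 * function name is hypothetical): draining the per-VM retry fault FIFO
 * declared above and decoding each entry with the AMDGPU_VM_FAULT_* macros.
 */
#if 0
static void example_drain_retry_faults(struct amdgpu_vm *vm)
{
	u64 fault;

	/* kfifo_get() returns 0 once the FIFO is empty */
	while (kfifo_get(&vm->faults, &fault)) {
		unsigned int pasid = AMDGPU_VM_FAULT_PASID(fault);
		u64 addr = AMDGPU_VM_FAULT_ADDR(fault);

		/* ... handle the retry fault for (pasid, addr) ... */
	}
}
#endif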

struct amdgpu_vm_id {
	struct list_head	list;
	struct amdgpu_sync	active;
	struct dma_fence	*last_flush;
	atomic64_t		owner;

	uint64_t		pd_gpu_addr;
	/* last flushed PD/PT update */
	struct dma_fence	*flushed_updates;

	uint32_t                current_gpu_reset_count;

	uint32_t		gds_base;
	uint32_t		gds_size;
	uint32_t		gws_base;
	uint32_t		gws_size;
	uint32_t		oa_base;
	uint32_t		oa_size;
};

struct amdgpu_vm_id_manager {
	struct mutex		lock;
	unsigned		num_ids;
	struct list_head	ids_lru;
	struct amdgpu_vm_id	ids[AMDGPU_NUM_VM];
	atomic_t		reserved_vmid_num;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vm_id_manager		id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint64_t				vm_size;
	uint32_t				block_size;
	uint32_t				fragment_size;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rings;
	atomic_t				vm_pte_next_ring;
	/* client id counter */
	atomic64_t				client_counter;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0: 0 = Graphics page tables updated by SDMA, 1 = by CPU
	 * BIT1: 0 = Compute page tables updated by SDMA, 1 = by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, used in interrupt context to look up the VM
	 * of a page fault (a lookup sketch follows this struct)
	 */
	struct idr				pasid_idr;
	spinlock_t				pasid_lock;
};
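/*
 * Illustrative sketch (not the driver's actual code; the function name is
 * hypothetical): looking up the VM that owns a faulting PASID from interrupt
 * context, using the pasid_idr/pasid_lock pair above.
 */
#if 0
static struct amdgpu_vm *example_pasid_to_vm(struct amdgpu_vm_manager *mgr,
					     unsigned int pasid)
{
	struct amdgpu_vm *vm;
	unsigned long flags;

	spin_lock_irqsave(&mgr->pasid_lock, flags);
	vm = idr_find(&mgr->pasid_idr, pasid);
	spin_unlock_irqrestore(&mgr->pasid_lock, flags);

	return vm;
}
#endif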

int amdgpu_vm_alloc_pasid(unsigned int bits);
void amdgpu_vm_free_pasid(unsigned int pasid);
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct dma_fence *fence,
		      struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
			unsigned vmid);
void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
				uint32_t fragment_size_default);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
				uint32_t fragment_size_default);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
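
/*
 * Usage sketch (illustrative only, error handling, alignment checks and
 * locking elided; the function name is hypothetical): a typical map flow
 * using the declarations above. Assumes the BO and the VM's page directory
 * are already reserved by the caller.
 */
#if 0
static int example_map_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo *bo, uint64_t gpu_addr,
			  uint64_t size)
{
	struct amdgpu_bo_va *bo_va;
	int r;

	/* Track the BO in this VM */
	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va)
		return -ENOMEM;

	/* Create the VA range mapping with basic R/W permissions */
	r = amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, size,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
	if (r)
		return r;

	/* Write the mapping into the page tables at the BO's current location */
	return amdgpu_vm_bo_update(adev, bo_va, false);
}
#endif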

#endif