/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
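
/*
 * Worked example (illustrative only, assuming the common block_size of 9):
 * AMDGPU_VM_PTE_COUNT() then evaluates to 1 << 9 = 512 PTEs per page
 * table block.
 */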

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE   32768

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)(a) << 59)

/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)

/* For Raven */
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM      \
                                | AMDGPU_PTE_SNOOPED    \
                                | AMDGPU_PTE_EXECUTABLE \
                                | AMDGPU_PTE_READABLE   \
                                | AMDGPU_PTE_WRITEABLE  \
                                | AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))
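
/*
 * Minimal sketch (not part of the driver API): how the flag macros above
 * combine with a page-aligned address into a single 64 bit PTE value.
 * The helper name and the 0x0000FFFFFFFFF000ULL address mask are
 * assumptions made for illustration only.
 */
static inline uint64_t amdgpu_vm_example_pte(uint64_t page_addr)
{
	/* keep only the page frame bits of the address (assumed mask) */
	uint64_t pte = page_addr & 0x0000FFFFFFFFF000ULL;

	/* present, in system memory and coherent with the CPU caches */
	pte |= AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;

	/* allow reads and writes */
	pte |= AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;

	return pte;
}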

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS			2
#define AMDGPU_GFXHUB				0
#define AMDGPU_MMHUB				1

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE			(8ULL << 20)

/* VA hole for 48bit addresses on Vega10 */
#define AMDGPU_VA_HOLE_START			0x0000800000000000ULL
#define AMDGPU_VA_HOLE_END			0xffff800000000000ULL

/*
 * The hardware is programmed with start and end address values as if the
 * hole doesn't exist.
 *
 * This mask is used to remove the upper 16 bits of the VA and so come up
 * with the linear address value.
 */
#define AMDGPU_VA_HOLE_MASK			0x0000ffffffffffffULL
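
/*
 * Minimal sketch (hypothetical helper, not a driver function): strip the
 * sign-extension bits from a 48 bit VA so it can be handed to the
 * hardware as a linear address.
 */
static inline uint64_t amdgpu_vm_example_linear_addr(uint64_t va)
{
	/* addresses above the hole are sign extended; masking off the
	 * upper 16 bits recovers the value the hardware is programmed
	 * with */
	return va & AMDGPU_VA_HOLE_MASK;
}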

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX 0
#define AMDGPU_VM_CONTEXT_COMPUTE 1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct list_head		bo_list;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};

struct amdgpu_vm_pt {
	struct amdgpu_vm_bo_base	base;
	bool				huge;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt		*entries;
};

#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
#define AMDGPU_VM_FAULT_ADDR(fault)  ((u64)(fault) & 0xfffffffff000ULL)
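
/*
 * Illustrative sketch of the fault encoding above (the helper name is
 * hypothetical): both fields round trip through a single u64, assuming
 * the address is page aligned and below 1ULL << 48 so it doesn't clobber
 * the PASID bits.
 */
static inline u64 amdgpu_vm_example_fault(void)
{
	/* pack PASID 0x1234 and a page aligned address into one u64 */
	u64 fault = AMDGPU_VM_FAULT(0x1234, 0x00007f0000001000ULL);

	/* AMDGPU_VM_FAULT_PASID(fault) now yields 0x1234 again and
	 * AMDGPU_VM_FAULT_ADDR(fault) yields 0x00007f0000001000 */
	return fault;
}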

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* protecting invalidated */
	spinlock_t		status_lock;

	/* BOs which need a validation */
	struct list_head	evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head	relocated;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_pt     root;
	struct dma_fence	*last_update;

	/* protecting freed */
	spinlock_t		freed_lock;

	/* Scheduler entity for page table updates */
	struct drm_sched_entity	entity;

	unsigned int		pasid;
	/* dedicated to vm */
	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool                    use_cpu_for_update;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Limit non-retry fault storms */
	unsigned int		fault_credit;
};
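
/*
 * Sketch of assumed usage (helper name hypothetical): queue a retry
 * fault, packed with AMDGPU_VM_FAULT(), into the per-VM fifo above.
 * kfifo_put() returns 0 when all 128 slots are already in use.
 */
static inline bool amdgpu_vm_example_queue_fault(struct amdgpu_vm *vm,
						 unsigned int pasid, u64 addr)
{
	return kfifo_put(&vm->faults, AMDGPU_VM_FAULT(pasid, addr));
}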

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
	struct amdgpu_ring                      *vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rings;
	atomic_t				vm_pte_next_ring;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for graphics and compute;
	 * see the sketch after this struct.
	 * BIT0: [0] graphics updated by SDMA, [1] by CPU
	 * BIT1: [0] compute updated by SDMA, [1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up the VM of a page fault
	 */
	struct idr				pasid_idr;
	spinlock_t				pasid_lock;
};
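
/*
 * Minimal sketch of decoding vm_update_mode (hypothetical helper, not a
 * driver function): test the AMDGPU_VM_USE_CPU_FOR_* bits defined above.
 */
static inline bool amdgpu_vm_example_use_cpu(int vm_update_mode, bool compute)
{
	if (compute)
		return vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE;

	return vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX;
}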

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

#endif