/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct drm_exec;

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;
struct amdgpu_mem_stats;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)

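/*
 * Illustrative only: with the common block_size of 9 a page table holds
 * 1 << 9 == 512 entries, so one PTB covers 512 * 4KiB = 2MiB of address
 * space.  A minimal sketch, assuming AMDGPU_GPU_PAGE_SIZE (4096) from
 * amdgpu.h; the helper name is hypothetical:
 *
 *	static inline uint64_t example_ptb_coverage(struct amdgpu_device *adev)
 *	{
 *		return (uint64_t)AMDGPU_VM_PTE_COUNT(adev) * AMDGPU_GPU_PAGE_SIZE;
 *	}
 */
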
#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ		(1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
#define AMDGPU_PTE_NOALLOC	(1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM      \
				| AMDGPU_PTE_SNOOPED    \
				| AMDGPU_PTE_EXECUTABLE \
				| AMDGPU_PTE_READABLE   \
				| AMDGPU_PTE_WRITEABLE  \
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)

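/*
 * Illustrative only, not part of the driver: a minimal sketch of how the
 * flag bits above combine into one 64-bit PTE value, here for a snooped,
 * read/write system page on GFX9 (the helper name is hypothetical):
 *
 *	static inline uint64_t example_gfx9_system_pte(void)
 *	{
 *		return AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *		       AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
 *		       AMDGPU_PTE_WRITEABLE |
 *		       AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 *	}
 */
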
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* Reserve 8MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(8ULL << 20)

/*
 * max number of VMHUB
 * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1
 */
#define AMDGPU_MAX_VMHUBS			13
#define AMDGPU_GFXHUB(x)			(x)
#define AMDGPU_MMHUB0(x)			(8 + x)
#define AMDGPU_MMHUB1(x)			(8 + 4 + x)

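/*
 * Illustrative only: the macros above flatten the per-hub instances into a
 * single index space, used e.g. for vm_manager.id_mgr[]:
 *
 *	AMDGPU_GFXHUB(0) == 0	(GFX hubs occupy slots 0..7)
 *	AMDGPU_MMHUB0(1) == 9	(MMHUB0 instances occupy slots 8..11)
 *	AMDGPU_MMHUB1(0) == 12	(MMHUB1 occupies slot 12)
 *
 * hence AMDGPU_MAX_VMHUBS == 13.
 */
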
/* Reserve 2MB at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_SIZE			(2ULL << 20)

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};
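
/*
 * Illustrative only: with the common 9-bit block_size and 4KiB GPU pages,
 * a four-level walk splits a virtual address roughly as sketched below.
 * This is a hedged sketch with a hypothetical helper (here "level" counts
 * upward from the PTB, i.e. 0 == PTB, 1 == PDB0, ...); the real walk lives
 * in amdgpu_vm_pt.c:
 *
 *	// bits [11:0]  offset inside the 4KiB page
 *	// bits [20:12] index into the PTB
 *	// bits [29:21] index into PDB0, and so on for PDB1/PDB2
 *	static inline unsigned example_pt_index(uint64_t addr, unsigned level)
 *	{
 *		return (addr >> (12 + 9 * level)) & 0x1ff;
 *	}
 */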

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};
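
/*
 * Illustrative only: a CPU-side sketch of the semantics set_pte_pde() asks
 * the hardware to implement -- "count" entries starting at GPU address "pe",
 * each entry mapping "incr" more bytes than the previous one (names below
 * are hypothetical, not part of the driver):
 *
 *	static void example_set_pte_pde(uint64_t *pt, uint64_t addr,
 *					unsigned count, uint32_t incr,
 *					uint64_t flags)
 *	{
 *		unsigned i;
 *
 *		for (i = 0; i < count; ++i)
 *			pt[i] = (addr + (uint64_t)i * incr) | flags;
 *	}
 */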

struct amdgpu_task_info {
	char	process_name[TASK_COMM_LEN];
	char	task_name[TASK_COMM_LEN];
	pid_t	pid;
	pid_t	tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulates some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @immediate: if changes should be made immediately
	 */
	bool immediate;

	/**
	 * @unlocked: true if the root BO is not locked
	 */
	bool unlocked;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;

	/**
	 * @table_freed: set to true if a page table was freed during the update
	 */
	bool table_freed;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo_vm *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
		       enum amdgpu_sync_mode sync_mode);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
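
/*
 * Illustrative only: a hedged sketch of the order in which the callbacks
 * above are typically driven by the page table update code (error handling
 * and locking omitted; variable names are hypothetical):
 *
 *	struct amdgpu_vm_update_params p = { .adev = adev, .vm = vm };
 *
 *	vm->update_funcs->map_table(bo);                // make PT accessible
 *	vm->update_funcs->prepare(&p, resv, sync_mode); // sync and set up a job
 *	vm->update_funcs->update(&p, bo, pe, addr,      // write the entries
 *				 count, incr, flags);
 *	vm->update_funcs->commit(&p, &fence);           // submit, return fence
 */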

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* Lock to prevent eviction while we are updating page tables,
	 * use amdgpu_vm_eviction_lock/unlock(vm)
	 */
	struct mutex		eviction_lock;
	bool			evicting;
	unsigned int		saved_flags;

	/* Lock to protect vm_bo add/del/move on all lists of vm */
	spinlock_t		status_lock;

	/* BOs which need validation */
	struct list_head	evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* BOs which were invalidated and have been updated in the PTs */
	struct list_head        done;

	/* PT BOs scheduled to be freed and filled with zeros when the VM resv
	 * is not held
	 */
	struct list_head	pt_freed;
	struct work_struct	pt_free_work;

	/* contains the page directory */
	struct amdgpu_vm_bo_base     root;
	struct dma_fence	*last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity	immediate;
	struct drm_sched_entity	delayed;

	/* Last finished delayed update */
	atomic64_t		tlb_seq;
	struct dma_fence	*last_tlb_flush;

	/* How many times we had to re-generate the page tables */
	uint64_t		generation;

	/* Last unlocked submission to the scheduler entities */
	struct dma_fence	*last_unlocked;

	unsigned int		pasid;
	bool			reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool					use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* Flag to indicate if VM is used for compute */
	bool			is_compute_context;

	/* Memory partition number, -1 means any partition */
	int8_t			mem_id;
};
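
/*
 * Illustrative only: BOs move between the state machine lists above under
 * status_lock as their state changes.  A hedged sketch of one such
 * transition (this pattern lives in amdgpu_vm.c; the helper name here is
 * hypothetical):
 *
 *	static void example_mark_moved(struct amdgpu_vm *vm,
 *				       struct amdgpu_vm_bo_base *vm_bo)
 *	{
 *		spin_lock(&vm->status_lock);
 *		list_move(&vm_bo->vm_status, &vm->moved);
 *		spin_unlock(&vm->status_lock);
 *	}
 */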

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
	unsigned int				first_kfd_vmid;
	bool					concurrent_flush;

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_scheds;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up the VM of a page fault
	 */
	struct xarray				pasids;
};

struct amdgpu_bo_va_mapping;

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
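
/*
 * Illustrative only: a hedged sketch of how the wrapper macros above are
 * meant to be used when filling an IB with a linear PTE update (the values
 * are placeholders, not a real mapping):
 *
 *	// write 16 PTEs starting at GPU address pe, mapping a contiguous
 *	// region starting at addr, one 4KiB page per entry
 *	amdgpu_vm_set_pte_pde(adev, ib, pe, addr, 16, AMDGPU_GPU_PAGE_SIZE,
 *			      AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
 *			      AMDGPU_PTE_WRITEABLE);
 */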

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
		      unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb,
			   struct dma_resv *resv, uint64_t start, uint64_t last,
			   uint64_t flags, uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
			     struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    u32 vmid, u32 node_id, uint64_t addr,
			    bool write_fault);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
			  struct amdgpu_mem_stats *stats);

int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct amdgpu_bo_vm *vmbo, bool immediate);
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			int level, bool immediate, struct amdgpu_bo_vm **vmbo);
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
			 struct amdgpu_vm_bo_base *entry);
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
			  uint64_t start, uint64_t end,
			  uint64_t dst, uint64_t flags);
void amdgpu_vm_pt_free_work(struct work_struct *work);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

/**
 * amdgpu_vm_tlb_seq - return tlb flush sequence number
 * @vm: the amdgpu_vm structure to query
 *
 * Returns the TLB flush sequence number, which indicates that the VM TLBs
 * need to be invalidated whenever the sequence number changes.
 */
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
	unsigned long flags;
	spinlock_t *lock;

	/*
	 * Workaround to stop racing between the fence signaling and handling
	 * the cb. The lock is static after initially setting it up, just make
	 * sure that the dma_fence structure isn't freed up.
	 */
	rcu_read_lock();
	lock = vm->last_tlb_flush->lock;
	rcu_read_unlock();

	spin_lock_irqsave(lock, flags);
	spin_unlock_irqrestore(lock, flags);

	return atomic64_read(&vm->tlb_seq);
}
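
/*
 * Illustrative only: a hedged sketch of how a caller can use the sequence
 * number to detect that a TLB flush is required, comparing against a
 * per-VMID snapshot (variable names are hypothetical):
 *
 *	uint64_t seq = amdgpu_vm_tlb_seq(vm);
 *
 *	if (seq != id->flushed_updates) {
 *		id->flushed_updates = seq;
 *		// ... emit a VM flush on the ring ...
 *	}
 */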

/*
 * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
	mutex_lock(&vm->eviction_lock);
	vm->saved_flags = memalloc_noreclaim_save();
}

static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
	if (mutex_trylock(&vm->eviction_lock)) {
		vm->saved_flags = memalloc_noreclaim_save();
		return true;
	}
	return false;
}

static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
	memalloc_noreclaim_restore(vm->saved_flags);
	mutex_unlock(&vm->eviction_lock);
}
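
/*
 * Illustrative only: a hedged sketch of the intended lock/unlock pairing
 * around a page table update (error handling omitted):
 *
 *	amdgpu_vm_eviction_lock(vm);
 *	if (!vm->evicting) {
 *		// ... safe to update page tables here ...
 *	}
 *	amdgpu_vm_eviction_unlock(vm);
 */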

#endif