/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
};
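
/*
 * Example (illustrative sketch only, not part of the original header): a
 * caller would typically zero an amdgpu_bo_param, fill in the fields it
 * cares about and pass it to amdgpu_bo_create(), declared further below.
 * The 'adev' pointer is assumed to come from the caller's context.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 *	if (r)
 *		return r;
 */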

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif

	struct list_head		shadow_list;

	struct kgd_mem			*kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
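
/*
 * Example (illustrative sketch only): most per-BO state above is protected
 * by the BO's reservation, so accesses are normally bracketed by a
 * reserve/unreserve pair; -ERESTARTSYS is propagated so user space can
 * restart the interrupted ioctl.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r))
 *		return r;
 *
 *	... touch fields protected by the reservation ...
 *
 *	amdgpu_bo_unreserve(bo);
 */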

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 * @bo: BO to check
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 * @bo: BO to check
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
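
/*
 * Example (illustrative sketch only): amdgpu_bo_create_kernel() is the usual
 * shortcut for a pinned, kernel-owned buffer with both a GPU address and a
 * CPU mapping; amdgpu_bo_free_kernel() tears all of that down again. The
 * 'adev' pointer is assumed to come from the caller.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM,
 *				    &bo, &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	... use cpu_ptr and gpu_addr ...
 *
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */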

int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
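
/*
 * Example (illustrative sketch only): kmap/kunmap give the kernel a CPU
 * pointer to an already created BO; the BO is assumed to be reserved by the
 * caller while the mapping is set up and torn down.
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memset(ptr, 0, amdgpu_bo_size(bo));
 *		amdgpu_bo_kunmap(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */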

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
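
/*
 * Example (illustrative sketch only): pinning nails a BO into the requested
 * domain so its GPU offset stays valid; pin/unpin are assumed to be called
 * with the BO reserved, matching how amdgpu_bo_reserve() is used above.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (!r)
 *		gpu_addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 */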

int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
				struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
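
/*
 * Example (illustrative sketch only): the sub-allocator hands out small
 * pieces of one backing BO, which is how the driver typically carves up IB
 * space. 'sa_manager' is assumed to have been set up with
 * amdgpu_sa_bo_manager_init() and 'fence' to protect the last use of the
 * sub-allocation before it is recycled.
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 16);
 *	if (r)
 *		return r;
 *
 *	gpu_addr = amdgpu_sa_bo_gpu_addr(sa_bo);
 *	cpu_ptr = amdgpu_sa_bo_cpu_addr(sa_bo);
 *	... fill the sub-allocation ...
 *
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */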

#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif