/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				bo_ptr_size;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
};
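
/*
 * Illustrative usage sketch (not part of this header): a caller typically
 * zero-initializes the parameter block, fills in the fields it cares about
 * and hands it to amdgpu_bo_create().  The concrete values below (size,
 * alignment, domain, type) are example assumptions, not requirements.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */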

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;
};
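
/*
 * Illustrative sketch (not part of this header): the per-VM mappings of a
 * bo_va are linked into the valids/invalids lists through
 * amdgpu_bo_va_mapping.list, so a caller holding the required reservations
 * can walk them like this:
 *
 *	struct amdgpu_bo_va_mapping *mapping;
 *
 *	list_for_each_entry(mapping, &bo_va->valids, list)
 *		handle_range(mapping->start, mapping->last);
 *
 * handle_range() is a hypothetical stand-in for whatever per-range work the
 * caller does.
 */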

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			prime_shared_count;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif

	struct list_head		shadow_list;

	struct kgd_mem			*kfd_bo;
};

struct amdgpu_bo_user {
	struct amdgpu_bo		bo;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}
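
/*
 * Illustrative sketch (not part of this header): converting between the
 * embedded TTM object, the amdgpu BO and, for user space allocations, the
 * amdgpu_bo_user wrapper.  The to_amdgpu_bo_user() step is only valid for
 * BOs created through amdgpu_bo_create_user().
 *
 *	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
 *	struct amdgpu_bo_user *ubo = to_amdgpu_bo_user(abo);
 */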

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns the AMDGPU_GEM_DOMAIN_* value corresponding to @mem_type, or 0 if
 * the memory type is not known.
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
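
/*
 * Illustrative sketch (not part of this header): mapping the current TTM
 * placement of a BO back to a GEM domain, e.g. to check whether it
 * currently lives in VRAM.
 *
 *	unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 *
 *	if (domain == AMDGPU_GEM_DOMAIN_VRAM)
 *		handle_vram_bo(bo);
 *
 * handle_vram_bo() is a hypothetical caller-side helper.
 */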

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
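
/*
 * Illustrative sketch (not part of this header): the usual
 * reserve/modify/unreserve pattern around members that are protected by the
 * BO reservation.  The chosen domain is an example assumption.
 *
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r))
 *		return r;
 *	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 *	amdgpu_bo_unreserve(bo);
 */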

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 * @bo: BO to check
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 * @bo: BO to check
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
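
/*
 * Illustrative sketch (not part of this header): the common pattern for a
 * small, pinned, CPU-mapped kernel BO created with amdgpu_bo_create_kernel()
 * and released again with amdgpu_bo_free_kernel().  Size and domain are
 * example assumptions.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM,
 *				    &bo, &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
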
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
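
/*
 * Illustrative sketch (not part of this header): pinning and CPU-mapping a
 * BO.  amdgpu_bo_pin(), amdgpu_bo_kmap() and amdgpu_bo_unpin() expect the BO
 * to be reserved; the chosen domain is an example assumption.
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r))
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 *	if (!r)
 *		r = amdgpu_bo_kmap(bo, &ptr);
 *	amdgpu_bo_unreserve(bo);
 */
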
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
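
/*
 * Illustrative sketch (not part of this header): carving a small buffer out
 * of an already initialized sub-allocation manager and releasing it once a
 * fence signals.  Size and alignment are example assumptions.
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(&sa_manager, &sa_bo, 256, 16);
 *	if (r)
 *		return r;
 *	memcpy(amdgpu_sa_bo_cpu_addr(sa_bo), data, 256);
 *	submit_with(amdgpu_sa_bo_gpu_addr(sa_bo));
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *
 * submit_with() is a hypothetical stand-in for whatever consumes the GPU
 * address.
 */
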
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif