/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	struct reservation_object	*resv;
};
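
/*
 * Illustrative sketch (not part of the driver): amdgpu_bo_create(), declared
 * further below, consumes this parameter block.  A caller typically
 * zero-initializes it and fills in only what it needs; the concrete values
 * here are assumptions chosen for the example.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.preferred_domain = bp.domain;
 *	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 *	if (r)
 *		return r;
 */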

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};
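
/*
 * Note (assumption, for orientation only): @start and @last describe an
 * inclusive range and, via @rb, hang the mapping into the per-VM interval
 * tree, roughly along the lines of
 *
 *	mapping->start = saddr / AMDGPU_GPU_PAGE_SIZE;
 *	mapping->last  = eaddr / AMDGPU_GPU_PAGE_SIZE - 1;
 *
 * for a mapping covering [saddr, eaddr); see amdgpu_vm.c for the
 * authoritative conversion.
 */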

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* per VM structure for page tables and BOs with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct drm_gem_object		gem_base;
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;

	union {
		struct list_head	mn_list;
		struct list_head	shadow_list;
	};

	struct kgd_mem			*kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}
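
/*
 * Illustrative sketch: TTM callbacks hand in a plain ttm_buffer_object;
 * amdgpu_bo_is_amdgpu_bo(), declared below, is the usual guard before
 * converting back to the containing amdgpu_bo.
 *
 *	if (!amdgpu_bo_is_amdgpu_bo(tbo))
 *		return;
 *	abo = ttm_to_amdgpu_bo(tbo);
 */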

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns the corresponding domain of the ttm mem_type, or 0 if the mem_type
 * is unknown.
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
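
/*
 * Illustrative sketch: a common use is mapping a BO's current backing store
 * back to a GEM domain, e.g. to test whether it currently lives in VRAM.
 *
 *	if (amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) ==
 *	    AMDGPU_GEM_DOMAIN_VRAM)
 *		...
 */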

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * 0 for success.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}
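
/*
 * Illustrative sketch: reservation and unreservation always come in pairs,
 * and most of the per-BO state above may only be touched while the
 * reservation is held.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	... access or modify the BO ...
 *	amdgpu_bo_unreserve(bo);
 */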

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}
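
/*
 * Worked example (assuming 4 KiB CPU pages and a 4 KiB AMDGPU_GPU_PAGE_SIZE):
 * a 64 KiB BO has tbo.num_pages == 16, so amdgpu_bo_size() returns 65536 and
 * amdgpu_bo_ngpu_pages() returns 16.  On hosts with larger CPU pages the two
 * counts differ, since GPU pages stay at AMDGPU_GPU_PAGE_SIZE.
 */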

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
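
/*
 * Usage note (assumption): user space typically obtains this offset through
 * the DRM_AMDGPU_GEM_MMAP ioctl and then mmap()s the DRM file descriptor at
 * that offset to get a CPU mapping of the BO.
 */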

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}
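
/*
 * Note: this walks the drm_mm nodes backing the BO and reports true as soon
 * as any node starts below the CPU-visible VRAM limit.  It is typically used
 * (assumption) on CPU faults and during command submission to decide whether
 * a VRAM BO has to be moved into the visible window first.
 */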

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
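
/*
 * Illustrative sketch: amdgpu_bo_create_kernel() is the usual way to get a
 * small, pinned and CPU-mapped buffer for driver-internal use; it is paired
 * with amdgpu_bo_free_kernel().  Size, alignment and domain below are
 * assumptions for the example.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	... use gpu_addr and cpu_ptr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
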
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
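
/*
 * Illustrative sketch: pinning and CPU mapping both require the BO to be
 * reserved.  A typical sequence pins into a domain, queries the GPU address
 * (amdgpu_bo_gpu_offset(), declared below) and optionally maps the BO for
 * CPU access.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (!r) {
 *		gpu_addr = amdgpu_bo_gpu_offset(bo);
 *		r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */
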
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
				struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
				     struct amdgpu_sa_manager *sa_manager,
				     unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
				      struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
				      struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
			      struct amdgpu_sa_bo **sa_bo,
			      struct dma_fence *fence);
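
/*
 * Illustrative sketch: the sub-allocator hands out small pieces of a single
 * backing BO (e.g. for indirect buffers).  "sa_manager" below stands for any
 * amdgpu_sa_manager previously set up with amdgpu_sa_bo_manager_init(); the
 * sizes are assumptions, and the fence passed to amdgpu_sa_bo_free() guards
 * the last use of the allocation.
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 256);
 *	if (r)
 *		return r;
 *	... fill the allocation via amdgpu_sa_bo_cpu_addr(sa_bo) ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */
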
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
					 struct seq_file *m);
#endif

#endif