/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX

/* BO virtual addresses in a VM */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	/* interval tree node; the mapping covers [start, last] (inclusive) */
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	/* byte offset into the BO and AMDGPU_PTE_* flags of this mapping */
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* whether the mappings are cleared or filled */
	bool				cleared;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* list of all virtual addresses this BO is associated with */
	struct list_head		va;
	/* Constant after initialization */
	struct drm_gem_object		gem_base;
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;

	union {
		struct list_head	mn_list;
		struct list_head	shadow_list;
	};

	struct kgd_mem			*kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns the AMDGPU_GEM_DOMAIN_* value corresponding to the ttm mem_type,
 * or 0 if there is no matching domain.
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
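
/*
 * Usage sketch (illustrative, not part of this header's API): translate a
 * BO's current TTM placement back into a GEM domain, e.g. to check whether
 * the BO is resident in VRAM right now. "bo" is assumed to be a valid
 * struct amdgpu_bo from the surrounding driver context:
 *
 *	u32 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 *	bool in_vram = (domain == AMDGPU_GEM_DOMAIN_VRAM);
 */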

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
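
/*
 * Typical reserve/unreserve pattern (a minimal sketch; "bo" is assumed to
 * be a valid, referenced BO and error handling is abbreviated). Most per-BO
 * state above is only safe to touch while the BO is reserved:
 *
 *	int r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	// ... inspect or modify the BO, e.g. via amdgpu_bo_kmap() ...
 *	amdgpu_bo_unreserve(bo);
 */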

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

/**
 * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that
 * is accessible to the GPU.
 */
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
	case TTM_PL_VRAM: return true;
	default: return false;
	}
}
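
/*
 * Example check (a sketch): bail out before relying on a GPU virtual
 * address when the BO currently has no VRAM or GART backing:
 *
 *	if (!amdgpu_bo_gpu_accessible(bo))
 *		return -EINVAL;
 */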

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
		     int byte_align, u32 domain,
		     u64 flags, enum ttm_bo_type type,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
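
/*
 * Lifecycle sketch for a small, permanently mapped kernel BO (the names
 * "adev", "bo", "gpu_addr" and "cpu_ptr" are illustrative; error handling
 * is trimmed). amdgpu_bo_create_kernel() allocates, pins and kmaps the BO
 * in one call; amdgpu_bo_free_kernel() undoes all three:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	// ... use cpu_ptr for CPU access, gpu_addr in command buffers ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */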
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
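
/*
 * Pinning sketch (a minimal example; "bo" is assumed valid, error handling
 * abbreviated). Both pin and unpin must be called with the BO reserved;
 * pinning fixes the BO in the requested domain and returns its GPU address:
 *
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *	amdgpu_bo_unreserve(bo);
 *
 *	// ... later, again under reservation, drop the pin:
 *	amdgpu_bo_unpin(bo);
 */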
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
				struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct);


/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
				     struct amdgpu_sa_manager *sa_manager,
				     unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
				      struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
				      struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
			      struct amdgpu_sa_bo **sa_bo,
			      struct dma_fence *fence);
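
/*
 * Sub-allocation sketch ("sa_manager" is assumed to point at a manager set
 * up with amdgpu_sa_bo_manager_init(); "fence" is the fence of the last GPU
 * use of the allocation, so the backing range is only recycled once the GPU
 * is done with it; error handling trimmed):
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 16);
 *	if (r)
 *		return r;
 *	// ... fill amdgpu_sa_bo_cpu_addr(sa_bo), point the GPU at
 *	//     amdgpu_sa_bo_gpu_addr(sa_bo) ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */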
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
					 struct seq_file *m);
#endif


#endif /* __AMDGPU_OBJECT_H__ */