/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX

/* BO virtual addresses in a VM */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};
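
/*
 * Note on units (as used by amdgpu_vm_bo_map(); stated here as an
 * assumption, not a guarantee): @start and @last are in GPU pages while
 * @offset is a byte offset into the BO. Under that assumption, a virtual
 * address va inside the mapping resolves to the BO byte
 *
 *	offset + ((va >> AMDGPU_GPU_PAGE_SHIFT) - start) * AMDGPU_GPU_PAGE_SIZE
 */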

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* list of all virtual addresses to which this BO is associated */
	struct list_head		va;
	/* Constant after initialization */
	struct drm_gem_object		gem_base;
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;

	union {
		struct list_head	mn_list;
		struct list_head	shadow_list;
	};
};

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns:
 * The AMDGPU_GEM_DOMAIN_* flag corresponding to @mem_type, or 0 if the
 * memory type has no matching domain.
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
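
/*
 * For example, amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) yields the
 * AMDGPU_GEM_DOMAIN_* flag for the placement a BO currently lives in.
 */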

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * 0 on success.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

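/**
 * amdgpu_bo_unreserve - unreserve bo
 * @bo:	bo structure
 *
 * Drops the reservation taken with amdgpu_bo_reserve().
 */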
static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

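/*
 * Typical reserve/unreserve pairing (an illustrative sketch; real callers
 * do useful work between the two calls):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r))
 *		return r;
 *	... access members protected by the reservation ...
 *	amdgpu_bo_unreserve(bo);
 */

/**
 * amdgpu_bo_size - size of the BO's backing storage, in bytes
 * @bo:	bo structure
 */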
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

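/**
 * amdgpu_bo_ngpu_pages - size of the BO, in GPU pages
 * @bo:	bo structure
 */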
static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

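/**
 * amdgpu_bo_gpu_page_alignment - required placement alignment, in GPU pages
 * @bo:	bo structure
 */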
static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo:	amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

/**
 * amdgpu_bo_gpu_accessible - check whether the BO is in GPU-accessible memory
 * @bo:	amdgpu object to check
 */
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
	switch (bo->tbo.mem.mem_type) {
	case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
	case TTM_PL_VRAM: return true;
	default: return false;
	}
}

int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     uint64_t init_value,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
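
/*
 * Illustrative pairing of amdgpu_bo_create_kernel() with
 * amdgpu_bo_free_kernel() (a sketch only; @size and the chosen domain are
 * placeholders):
 *
 *	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	... use cpu_addr and gpu_addr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */
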
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
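
/*
 * Pinning sketch. amdgpu_bo_pin() is assumed here to require the BO to be
 * reserved by the caller; error handling is abbreviated:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, &gpu_addr);
 *	amdgpu_bo_unreserve(bo);
 *	...
 *	and later, again with the BO reserved: amdgpu_bo_unpin(bo);
 */
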
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct);


/*
 * sub allocation
 */

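/* GPU address of a sub-allocation: manager base address plus start offset */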
static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

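/* CPU address of a sub-allocation: manager CPU mapping plus start offset */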
static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
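
/*
 * Sub-allocator usage sketch (illustrative only; @size, @fence and error
 * handling are placeholders). A small buffer is carved out of an already
 * initialized manager and released together with a fence that guards its
 * reuse:
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, size, 256);
 *	if (r)
 *		return r;
 *	... fill amdgpu_sa_bo_cpu_addr(sa_bo), emit GPU work that reads
 *	    amdgpu_sa_bo_gpu_addr(sa_bo) ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */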
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif


#endif