/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

struct amdgpu_bo_param {
        unsigned long size;
        int byte_align;
        u32 domain;
        u32 preferred_domain;
        u64 flags;
        enum ttm_bo_type type;
        bool no_wait_gpu;
        struct dma_resv *resv;
};
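
/*
 * Usage sketch (illustrative only; the field values and the "adev" pointer
 * come from the calling context): a caller normally clears the param block,
 * fills in the fields it needs and hands it to amdgpu_bo_create(), declared
 * further below.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */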

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
        struct amdgpu_bo_va *bo_va;
        struct list_head list;
        struct rb_node rb;
        uint64_t start;
        uint64_t last;
        uint64_t __subtree_last;
        uint64_t offset;
        uint64_t flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
        struct amdgpu_vm_bo_base base;

        /* protected by bo being reserved */
        unsigned ref_count;

        /* all other members protected by the VM PD being reserved */
        struct dma_fence *last_pt_update;

        /* mappings for this bo_va */
        struct list_head invalids;
        struct list_head valids;

        /* If the mappings are cleared or filled */
        bool cleared;

        bool is_xgmi;
};

struct amdgpu_bo {
        /* Protected by tbo.reserved */
        u32 preferred_domains;
        u32 allowed_domains;
        struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
        struct ttm_placement placement;
        struct ttm_buffer_object tbo;
        struct ttm_bo_kmap_obj kmap;
        u64 flags;
        u64 tiling_flags;
        u64 metadata_flags;
        void *metadata;
        u32 metadata_size;
        unsigned prime_shared_count;
        /* per VM structure for page tables and with virtual addresses */
        struct amdgpu_vm_bo_base *vm_bo;
        /* Constant after initialization */
        struct amdgpu_bo *parent;
        struct amdgpu_bo *shadow;

#ifdef CONFIG_MMU_NOTIFIER
        struct mmu_interval_notifier notifier;
#endif

        struct list_head shadow_list;

        struct kgd_mem *kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
        return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
        switch (mem_type) {
        case TTM_PL_VRAM:
                return AMDGPU_GEM_DOMAIN_VRAM;
        case TTM_PL_TT:
                return AMDGPU_GEM_DOMAIN_GTT;
        case TTM_PL_SYSTEM:
                return AMDGPU_GEM_DOMAIN_CPU;
        case AMDGPU_PL_GDS:
                return AMDGPU_GEM_DOMAIN_GDS;
        case AMDGPU_PL_GWS:
                return AMDGPU_GEM_DOMAIN_GWS;
        case AMDGPU_PL_OA:
                return AMDGPU_GEM_DOMAIN_OA;
        default:
                break;
        }
        return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int r;

        r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(adev->dev, "%p reserve failed\n", bo);
                return r;
        }
        return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
        ttm_bo_unreserve(&bo->tbo);
}
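
/*
 * Usage sketch (illustrative only): operations on a BO such as kmap or pin
 * expect the object to be reserved; a typical caller brackets them with
 * amdgpu_bo_reserve()/amdgpu_bo_unreserve() and passes -ERESTARTSYS back
 * unchanged so the syscall can be restarted.
 *
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	amdgpu_bo_unreserve(bo);
 */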

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
        return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
        return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
        return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
        return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
        struct drm_mm_node *node = bo->tbo.mem.mm_node;
        unsigned long pages_left;

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
                return false;

        for (pages_left = bo->tbo.mem.num_pages; pages_left;
             pages_left -= node->size, node++)
                if (node->start < fpfn)
                        return true;

        return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
        return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
        return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
                     struct amdgpu_bo_param *bp,
                     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              unsigned long size, int align,
                              u32 domain, struct amdgpu_bo **bo_ptr,
                              u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
                               uint64_t offset, uint64_t size, uint32_t domain,
                               struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr);
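
/*
 * Usage sketch (illustrative only; size, alignment and domain are placeholder
 * values): amdgpu_bo_create_kernel() creates, pins and maps a kernel-owned BO
 * in one call, and amdgpu_bo_free_kernel() is its counterpart.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */
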
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                         struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
                           uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
                             enum amdgpu_sync_mode sync_mode, void *owner,
                             bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
                             struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
                                            uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
        return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
        return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
                              struct amdgpu_sa_manager *sa_manager,
                              unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
                     struct amdgpu_sa_bo **sa_bo,
                     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
                       struct amdgpu_sa_bo **sa_bo,
                       struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                                  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif