/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__

#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
#include "amdgpu_vram_mgr.h"
#include "amdgpu.h"

#define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA		(TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT	(TTM_PL_PRIV + 3)

#define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2

#define AMDGPU_POISON	0xd0bed0be

struct hmm_range;

struct amdgpu_gtt_mgr {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};

struct amdgpu_mman {
	struct ttm_device bdev;
	struct ttm_pool *ttm_pools;
	bool initialized;
	void __iomem *aper_base_kaddr;

	/* buffer handling */
	const struct amdgpu_buffer_funcs *buffer_funcs;
	struct amdgpu_ring *buffer_funcs_ring;
	bool buffer_funcs_enabled;

	struct mutex gtt_window_lock;
	/* Scheduler entity for buffer moves */
	struct drm_sched_entity entity;

	struct amdgpu_vram_mgr vram_mgr;
	struct amdgpu_gtt_mgr gtt_mgr;
	struct ttm_resource_manager preempt_mgr;

	uint64_t stolen_vga_size;
	struct amdgpu_bo *stolen_vga_memory;
	uint64_t stolen_extended_size;
	struct amdgpu_bo *stolen_extended_memory;
	bool keep_stolen_vga_memory;

	struct amdgpu_bo *stolen_reserved_memory;
	uint64_t stolen_reserved_offset;
	uint64_t stolen_reserved_size;

	/* discovery */
	uint8_t *discovery_bin;
	uint32_t discovery_tmr_size;
	/* fw reserved memory */
	struct amdgpu_bo *fw_reserved_memory;

	/* firmware VRAM reservation */
	u64 fw_vram_usage_start_offset;
	u64 fw_vram_usage_size;
	struct amdgpu_bo *fw_vram_usage_reserved_bo;
	void *fw_vram_usage_va;

	/* driver VRAM reservation */
	u64 drv_vram_usage_start_offset;
	u64 drv_vram_usage_size;
	struct amdgpu_bo *drv_vram_usage_reserved_bo;
	void *drv_vram_usage_va;

	/* PAGE_SIZE'd BO for process memory r/w over SDMA. */
	struct amdgpu_bo *sdma_access_bo;
	void *sdma_access_ptr;
};
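
/*
 * Illustrative sketch, not part of the original header: amdgpu_copy_mem
 * below bundles a TTM buffer object, its backing resource and a byte
 * offset so that amdgpu_ttm_copy_mem_to_mem() can copy between two
 * arbitrary locations. A caller holding an assumed struct amdgpu_bo
 * named "abo" might set up a source roughly like this:
 *
 *	struct amdgpu_copy_mem src = {
 *		.bo = &abo->tbo,
 *		.mem = abo->tbo.resource,
 *		.offset = 0,
 *	};
 */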

struct amdgpu_copy_mem {
	struct ttm_buffer_object *bo;
	struct ttm_resource *mem;
	unsigned long offset;
};

int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
int amdgpu_preempt_mgr_init(struct amdgpu_device *adev);
void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev);
int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);

bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);

uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);

u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_resource *mem,
			      u64 offset, u64 size,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt);
void amdgpu_vram_mgr_free_sgt(struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table *sgt);
uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
				  uint64_t start, uint64_t size);
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
				      uint64_t start);

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
					bool enable);

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush, bool tmz);
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       const struct amdgpu_copy_mem *src,
			       const struct amdgpu_copy_mem *dst,
			       uint64_t size, bool tmz,
			       struct dma_resv *resv,
			       struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **fence);

int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);

#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
				 struct hmm_range **range);
void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
				      struct hmm_range *range);
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
				       struct hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
					       struct page **pages,
					       struct hmm_range **range)
{
	return -EPERM;
}
static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
						    struct hmm_range *range)
{
}
static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
						     struct hmm_range *range)
{
	return false;
}
#endif
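
/*
 * Illustrative sketch, not part of the original header: userptr pages are
 * not pinned and may be invalidated by the MM at any time, so callers of
 * the helpers above typically run a get/use/done retry loop, roughly:
 *
 *	struct hmm_range *range;
 *	int r;
 *
 *	do {
 *		r = amdgpu_ttm_tt_get_user_pages(bo, pages, &range);
 *		if (r)
 *			break;
 *		... map/use the pages under the reservation lock ...
 *	} while (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range));
 *
 * "bo" and "pages" are assumed caller-owned; the real submission paths
 * (e.g. amdgpu_cs.c) add more locking around this pattern.
 */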

void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
			      uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end, unsigned long *userptr);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated);
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem);
int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type);

void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);

#endif