/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

/* Timeout for KIQ register-access fences, in msec (see msecs_to_jiffies() below) */
#define MAX_KIQ_REG_WAIT        100000

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
        int r;
        void *ptr;

        r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
                                    &adev->virt.csa_vmid0_addr, &ptr);
        if (r)
                return r;

        memset(ptr, 0, AMDGPU_CSA_SIZE);
        return 0;
}
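/*
 * Illustrative sketch (assumption, not part of this file): the one-time
 * CSA allocation above is expected to pair with the per-VM mapping below
 * during SR-IOV bring-up. The wrapper name and call flow are hypothetical
 * and shown only to make the pairing explicit.
 *
 *      static int example_csa_setup(struct amdgpu_device *adev,
 *                                   struct amdgpu_vm *vm)
 *      {
 *              int r;
 *
 *              if (!amdgpu_sriov_vf(adev))
 *                      return 0;
 *
 *              r = amdgpu_allocate_static_csa(adev);   // once per device
 *              if (r)
 *                      return r;
 *
 *              return amdgpu_map_static_csa(adev, vm); // once per VM
 *      }
 */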
/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init().
 * It maps the CSA at the reserved virtual address AMDGPU_CSA_VADDR
 * ("AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE") into this VM, and each
 * GFX command submission must use this virtual address within its
 * META_DATA init package to support SR-IOV GFX preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        int r;
        struct amdgpu_bo_va *bo_va;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        struct amdgpu_bo_list_entry pd;
        struct ttm_validate_buffer csa_tv;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&csa_tv.head);
        csa_tv.bo = &adev->virt.csa_obj->tbo;
        csa_tv.shared = true;

        list_add(&csa_tv.head, &list);
        amdgpu_vm_get_pd_bo(vm, &list, &pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r) {
                DRM_ERROR("failed to reserve CSA, PD BOs: err=%d\n", r);
                return r;
        }

        bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
        if (!bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for static CSA\n");
                return -ENOMEM;
        }

        r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
                                AMDGPU_CSA_SIZE);
        if (r) {
                DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
                             AMDGPU_PTE_EXECUTABLE);
        if (r) {
                DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        vm->csa_bo_va = bo_va;
        ttm_eu_backoff_reservation(&ticket, &list);
        return 0;
}

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        /* enable virtual display */
        adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
        adev->cg_flags = 0;
        adev->pg_flags = 0;

        mutex_init(&adev->virt.lock_reset);
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        signed long r;
        uint32_t val;
        struct dma_fence *f;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_rreg);

        /* emit the register read on the KIQ ring and fence the submission */
        mutex_lock(&kiq->ring_mutex);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_rreg(ring, reg);
        amdgpu_fence_emit(ring, &f);
        amdgpu_ring_commit(ring);
        mutex_unlock(&kiq->ring_mutex);

        r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
        dma_fence_put(f);
        if (r < 1) {
                DRM_ERROR("wait for kiq fence error: %ld.\n", r);
                return ~0;
        }

        /* the CP copied the register value into the reserved writeback slot */
        val = adev->wb.wb[adev->virt.reg_val_offs];

        return val;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        signed long r;
        struct dma_fence *f;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_wreg);

        /* emit the register write on the KIQ ring and fence the submission */
        mutex_lock(&kiq->ring_mutex);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
        amdgpu_fence_emit(ring, &f);
        amdgpu_ring_commit(ring);
        mutex_unlock(&kiq->ring_mutex);

        r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
        if (r < 1)
                DRM_ERROR("wait for kiq fence error: %ld.\n", r);
        dma_fence_put(f);
}
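/*
 * Illustrative sketch (simplified, not part of this file): the MMIO
 * accessors in amdgpu_device.c are expected to route through the KIQ
 * helpers above whenever the VF is in runtime mode and may not touch
 * the register aperture directly. Access-flag handling is omitted.
 *
 *      uint32_t example_rreg(struct amdgpu_device *adev, uint32_t reg)
 *      {
 *              if (amdgpu_sriov_runtime(adev))
 *                      return amdgpu_virt_kiq_rreg(adev, reg);
 *
 *              return readl(((void __iomem *)adev->rmmio) + (reg * 4));
 *      }
 */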
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 *
 * When starting driver init/fini, full gpu access must be requested first.
 * Return: Zero if the request succeeds, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 *
 * When driver init/fini finishes, full gpu access must be released.
 * Return: Zero if the release succeeds, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }
        return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 *
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero if the reset succeeds, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 *
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero if the allocation succeeds, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
                return 0;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->virt.mm_table.bo,
                                    &adev->virt.mm_table.gpu_addr,
                                    (void *)&adev->virt.mm_table.cpu_addr);
        if (r) {
                DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
                return r;
        }

        memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
        DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
                 adev->virt.mm_table.gpu_addr,
                 adev->virt.mm_table.cpu_addr);
        return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 *
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
                return;

        amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
                              &adev->virt.mm_table.gpu_addr,
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
}
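/*
 * Illustrative sketch (assumption, not part of this file): driver
 * init/fini paths such as amdgpu_device_init() are expected to bracket
 * their exclusive-access phase with the request/release pair above.
 * The wrapper below is hypothetical and only shows the pairing.
 *
 *      static int example_exclusive_phase(struct amdgpu_device *adev,
 *                                         bool init)
 *      {
 *              int r;
 *
 *              if (!amdgpu_sriov_vf(adev))
 *                      return 0;
 *
 *              r = amdgpu_virt_request_full_gpu(adev, init);
 *              if (r)
 *                      return r;
 *
 *              // ... program the hardware while access is exclusive ...
 *
 *              return amdgpu_virt_release_full_gpu(adev, init);
 *      }
 */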