/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

/*
 * Allocate the per-device static CSA (Context Save Area) buffer in VRAM,
 * record its VMID0 GPU address, and clear its contents.
 */
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
        int r;
        void *ptr;

        r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
                                    &adev->virt.csa_vmid0_addr, &ptr);
        if (r)
                return r;

        memset(ptr, 0, AMDGPU_CSA_SIZE);
        return 0;
}
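/*
 * Illustrative sketch (not part of the original file): the intended call
 * flow is to allocate the CSA once per device at driver init and then map
 * it into each VM as the VM is created. The wrapper name below is
 * hypothetical; the real call sites live elsewhere in the driver.
 */
#if 0
static int amdgpu_csa_example(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        int r;

        /* once per device, during driver init */
        r = amdgpu_allocate_static_csa(adev);
        if (r)
                return r;

        /* once per VM, during amdgpu_vm_init */
        return amdgpu_map_static_csa(adev, vm);
}
#endif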
/*
 * amdgpu_map_static_csa should be called during amdgpu_vm_init.
 * It maps the CSA at virtual address AMDGPU_CSA_VADDR
 * ("AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE") in this VM, and every GFX
 * command submission must use this virtual address in its META_DATA init
 * package so that SRIOV gfx preemption works.
 */

int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
        int r;
        struct amdgpu_bo_va *bo_va;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        struct amdgpu_bo_list_entry pd;
        struct ttm_validate_buffer csa_tv;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&csa_tv.head);
        csa_tv.bo = &adev->virt.csa_obj->tbo;
        csa_tv.shared = true;

        list_add(&csa_tv.head, &list);
        amdgpu_vm_get_pd_bo(vm, &list, &pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r) {
                DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
                return r;
        }

        bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
        if (!bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for static CSA\n");
                return -ENOMEM;
        }

        r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
                             AMDGPU_PTE_EXECUTABLE);
        if (r) {
                DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        vm->csa_bo_va = bo_va;
        ttm_eu_backoff_reservation(&ticket, &list);
        return 0;
}

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        /* enable virtual display */
        adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;

        mutex_init(&adev->virt.lock_kiq);
        mutex_init(&adev->virt.lock_reset);
}

/*
 * Read a register on behalf of the VF: emit a register-read packet on the
 * KIQ ring, wait on its fence, then fetch the value the CP wrote back to
 * the writeback slot at reg_val_offs.
 */
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        signed long r;
        uint32_t val;
        struct dma_fence *f;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_rreg);

        mutex_lock(&adev->virt.lock_kiq);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_hdp_flush(ring);
        amdgpu_ring_emit_rreg(ring, reg);
        amdgpu_ring_emit_hdp_invalidate(ring);
        amdgpu_fence_emit(ring, &f);
        amdgpu_ring_commit(ring);
        mutex_unlock(&adev->virt.lock_kiq);

        r = dma_fence_wait(f, false);
        if (r)
                DRM_ERROR("wait for kiq fence error: %ld.\n", r);
        dma_fence_put(f);

        val = adev->wb.wb[adev->virt.reg_val_offs];

        return val;
}

/*
 * Write a register on behalf of the VF via the KIQ ring, waiting on the
 * fence so the write has landed before returning.
 */
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        signed long r;
        struct dma_fence *f;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_wreg);

        mutex_lock(&adev->virt.lock_kiq);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_hdp_flush(ring);
        amdgpu_ring_emit_wreg(ring, reg, v);
        amdgpu_ring_emit_hdp_invalidate(ring);
        amdgpu_fence_emit(ring, &f);
        amdgpu_ring_commit(ring);
        mutex_unlock(&adev->virt.lock_kiq);

        r = dma_fence_wait(f, false);
        if (r)
                DRM_ERROR("wait for kiq fence error: %ld.\n", r);
        dma_fence_put(f);
}
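/*
 * Illustrative sketch (not part of the original file): while the VF is in
 * SRIOV runtime mode (AMDGPU_SRIOV_CAPS_RUNTIME set), direct MMIO access
 * is not permitted, so register accesses are expected to detour through
 * the KIQ helpers above. The wrapper below is hypothetical; the real
 * routing lives in the driver's register-access paths.
 */
#if 0
static uint32_t amdgpu_example_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        /* no exclusive access: ask the KIQ to read the register for us */
        if (amdgpu_sriov_runtime(adev))
                return amdgpu_virt_kiq_rreg(adev, reg);

        /* full access: plain MMIO read */
        return readl(((void __iomem *)adev->rmmio) + (reg * 4));
}
#endif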
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: true when called at driver init time.
 * Before driver init/fini begins, full gpu access must be requested
 * from the hypervisor.
 * Return: Zero if the request succeeds, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: true when called at driver init time.
 * When driver init/fini finishes, full gpu access must be released back
 * to the hypervisor.
 * Return: Zero if the release succeeds, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }
        return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU this VM
 * is using.
 * Return: Zero if the reset succeeds, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}
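/*
 * Illustrative sketch (not part of the original file): an SRIOV VF init
 * path is expected to bracket hardware initialization with the
 * request/release helpers above, so the hypervisor grants exclusive GPU
 * access for the duration. amdgpu_example_do_hw_init() is a hypothetical
 * stand-in for the real hardware init sequence.
 */
#if 0
static int amdgpu_example_hw_init(struct amdgpu_device *adev)
{
        int r;

        if (amdgpu_sriov_vf(adev)) {
                r = amdgpu_virt_request_full_gpu(adev, true);
                if (r)
                        return r;
        }

        r = amdgpu_example_do_hw_init(adev);    /* hypothetical */

        if (amdgpu_sriov_vf(adev))
                amdgpu_virt_release_full_gpu(adev, true);

        return r;
}
#endif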