/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#define MAX_KIQ_REG_WAIT	100000000 /* in usecs */

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    &adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}
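
/*
 * Illustrative usage sketch (an assumption, not taken from this file): the
 * static CSA is expected to be allocated once during SR-IOV device bring-up,
 * before any VM is created, roughly as follows.
 *
 *	if (amdgpu_sriov_vf(adev)) {
 *		r = amdgpu_allocate_static_csa(adev);
 *		if (r)
 *			return r;
 *	}
 */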

/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init().
 * It maps the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * into this VM, and each GFX command submission must use this virtual
 * address within its META_DATA init package to support SRIOV gfx preemption.
 */

int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->cg_flags = 0;
	adev->pg_flags = 0;

	mutex_init(&adev->virt.lock_reset);
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	unsigned long flags;
	uint32_t val, seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
		return ~0;
	}
	val = adev->wb.wb[adev->virt.reg_val_offs];

	return val;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1)
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
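
/*
 * Illustrative usage sketch (an assumption, not taken from this file): while
 * the VF is in runtime mode, register accesses are expected to go through the
 * KIQ helpers above rather than direct MMIO, roughly as follows.
 *
 *	if (amdgpu_sriov_runtime(adev))
 *		val = amdgpu_virt_kiq_rreg(adev, reg);
 *	else
 *		val = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 */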

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When starting to init/fini the driver, full gpu access must be requested first.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When driver init/fini finishes, full gpu access must be released.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}
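
/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * exclusive GPU access is requested before hardware init and released once
 * init has completed, with the RUNTIME cap toggled by the helpers above.
 *
 *	if (amdgpu_sriov_vf(adev)) {
 *		r = amdgpu_virt_request_full_gpu(adev, true);
 *		if (r)
 *			return r;
 *	}
 *
 *	... hardware init ...
 *
 *	if (amdgpu_sriov_vf(adev))
 *		amdgpu_virt_release_full_gpu(adev, true);
 */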

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

/**
 * amdgpu_virt_fw_reserve_get_checksum() - checksum the fw reserve exchange data
 * @obj: start address of the data to checksum
 * @obj_size: size of the data in bytes
 * @key: initial checksum seed
 * @chksum: checksum field embedded in @obj, subtracted from the result
 * Return: byte-wise checksum of @obj, seeded with @key and excluding @chksum.
 */
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the chksum itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}

/**
 * amdgpu_virt_init_data_exchange() - set up the PF2VF/VF2PF exchange region
 * @adev: amdgpu device.
 * Locate the PF2VF message in the reserved firmware VRAM, validate its
 * checksum and, if it is valid, initialize the VF2PF message right behind it.
 */
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_ver = 0;
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);

		/* pf2vf message must be in 4K */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}
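
/*
 * Illustrative usage sketch (an assumption, not taken from this file): a
 * consumer of the exchange region can re-validate the PF2VF message at any
 * time by recomputing the checksum with the helper above and comparing it
 * against the stored value.
 *
 *	uint32_t size, chksum, calc;
 *
 *	AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &size);
 *	AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &chksum);
 *	calc = amdgpu_virt_fw_reserve_get_checksum(
 *			adev->virt.fw_reserve.p_pf2vf, size,
 *			adev->virt.fw_reserve.checksum_key, chksum);
 *	if (calc != chksum)
 *		DRM_ERROR("invalid pf2vf message checksum\n");
 */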