/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

#define MAX_KIQ_REG_WAIT	100000000 /* in usecs */

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except the mailbox are blocked if MMIO
	 * blocking is enabled in the hypervisor. Use SCRATCH_REG0 to
	 * test: a blocked register reads back as all ones.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    &adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

void amdgpu_free_static_csa(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
			      &adev->virt.csa_vmid0_addr,
			      NULL);
}
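/*
 * Illustrative sketch (not compiled): how the static CSA helpers above
 * would typically pair up in a VF init path. The caller name and the
 * example_rest_of_init() helper are hypothetical, not part of the actual
 * amdgpu init sequence.
 */
#if 0
static int example_sriov_sw_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	/* Back the CSA with a zeroed, page-aligned VRAM BO. */
	r = amdgpu_allocate_static_csa(adev);
	if (r)
		return r;

	/* If any later init step fails, the CSA must be freed again. */
	r = example_rest_of_init(adev);
	if (r)
		amdgpu_free_static_csa(adev);

	return r;
}
#endif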
/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init().
 * It maps the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
 * into this VM, and each GFX command submission should reference this
 * virtual address in its META_DATA init package to support SR-IOV gfx
 * preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA, PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r;
	unsigned long flags;
	uint32_t val, seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
		return ~0;
	}
	val = adev->wb.wb[adev->virt.reg_val_offs];

	return val;
}

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	if (r < 1)
		DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
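/*
 * Illustrative sketch (not compiled): a read-modify-write of a register
 * through the KIQ helpers above, as a VF would do when direct MMIO access
 * is blocked by the hypervisor. EXAMPLE_REG_OFFSET and EXAMPLE_ENABLE_BIT
 * are hypothetical values, not taken from the hardware headers.
 */
#if 0
#define EXAMPLE_REG_OFFSET	0x1234
#define EXAMPLE_ENABLE_BIT	(1 << 0)

static void example_kiq_set_bit(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Both helpers submit to the KIQ ring and poll on a fence. */
	tmp = amdgpu_virt_kiq_rreg(adev, EXAMPLE_REG_OFFSET);
	tmp |= EXAMPLE_ENABLE_BIT;
	amdgpu_virt_kiq_wreg(adev, EXAMPLE_REG_OFFSET, tmp);
}
#endif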
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 *
 * When starting to init/fini the driver, full GPU access must be
 * requested first.
 * Return: Zero if the request succeeds, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 *
 * When driver init/fini finishes, full GPU access must be released.
 * Return: Zero if the release succeeds, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 *
 * Send a reset command to the GPU hypervisor to reset the GPU that the
 * VM is using.
 * Return: Zero if the reset succeeds, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev: amdgpu device.
 *
 * Wait until the GPU reset has completed.
 * Return: Zero if the reset succeeds, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 *
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero if the allocation succeeds, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}
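/*
 * Illustrative sketch (not compiled): amdgpu_virt_request_full_gpu() and
 * amdgpu_virt_release_full_gpu() are meant to bracket the init/fini path
 * on a VF, clearing AMDGPU_SRIOV_CAPS_RUNTIME while exclusive access is
 * held. The caller name below is hypothetical.
 */
#if 0
static int example_vf_init(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_virt_request_full_gpu(adev, true);
	if (r)
		return r;

	/* ... hardware/software init runs with full GPU access ... */

	return amdgpu_virt_release_full_gpu(adev, true);
}
#endif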
/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 *
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the chksum itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* pf2vf message must be within 4K */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}
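/*
 * Illustrative sketch (not compiled): how the checksum helper above pairs
 * up for producer and consumer. The producer computes the checksum while
 * the checksum field is still zero; the consumer recomputes over the same
 * bytes, with the chksum argument cancelling out the now-filled field,
 * exactly as amdgpu_virt_init_data_exchange() validates the pf2vf message.
 * The struct and key below are hypothetical.
 */
#if 0
struct example_msg {
	unsigned int payload;
	unsigned int checksum;
};

static bool example_msg_valid(struct example_msg *msg, unsigned int key)
{
	/* Passing msg->checksum subtracts the checksum bytes themselves,
	 * so val matches what the producer computed with the field at 0. */
	unsigned int val = amdgpu_virt_fw_reserve_get_checksum(
			msg, sizeof(*msg), key, msg->checksum);

	return val == msg->checksum;
}
#endif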