/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
        struct dma_fence_cb cb;
        unsigned int pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
        int pasid = -EINVAL;

        for (bits = min(bits, 31U); bits > 0; bits--) {
                pasid = ida_simple_get(&amdgpu_pasid_ida,
                                       1U << (bits - 1), 1U << bits,
                                       GFP_KERNEL);
                if (pasid != -ENOSPC)
                        break;
        }

        if (pasid >= 0)
                trace_amdgpu_pasid_allocated(pasid);

        return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(unsigned int pasid)
{
        trace_amdgpu_pasid_freed(pasid);
        ida_simple_remove(&amdgpu_pasid_ida, pasid);
}
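
/*
 * Usage sketch (illustrative only, not part of this file's API): a caller
 * that creates a new VM would typically allocate a PASID narrow enough for
 * the IOMMU and release it again on teardown. The 16-bit width below is an
 * assumption for the example, not a driver constant.
 *
 *      int pasid = amdgpu_pasid_alloc(16);
 *
 *      if (pasid < 0)
 *              return pasid;
 *      ...
 *      amdgpu_pasid_free(pasid);
 */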

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
                                 struct dma_fence_cb *_cb)
{
        struct amdgpu_pasid_cb *cb =
                container_of(_cb, struct amdgpu_pasid_cb, cb);

        amdgpu_pasid_free(cb->pasid);
        dma_fence_put(fence);
        kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct reservation_object *resv,
                               unsigned int pasid)
{
        struct dma_fence *fence, **fences;
        struct amdgpu_pasid_cb *cb;
        unsigned count;
        int r;

        r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
        if (r)
                goto fallback;

        if (count == 0) {
                amdgpu_pasid_free(pasid);
                return;
        }

        if (count == 1) {
                fence = fences[0];
                kfree(fences);
        } else {
                uint64_t context = dma_fence_context_alloc(1);
                struct dma_fence_array *array;

                array = dma_fence_array_create(count, fences, context,
                                               1, false);
                if (!array) {
                        kfree(fences);
                        goto fallback;
                }
                fence = &array->base;
        }

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb) {
                /* Last resort when we are OOM */
                dma_fence_wait(fence, false);
                dma_fence_put(fence);
                amdgpu_pasid_free(pasid);
        } else {
                cb->pasid = pasid;
                if (dma_fence_add_callback(fence, &cb->cb,
                                           amdgpu_pasid_free_cb))
                        amdgpu_pasid_free_cb(fence, &cb->cb);
        }

        return;

fallback:
        /* Not enough memory for the delayed delete, as last resort
         * block for all the fences to complete.
         */
        reservation_object_wait_timeout_rcu(resv, true, false,
                                            MAX_SCHEDULE_TIMEOUT);
        amdgpu_pasid_free(pasid);
}
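
/*
 * Usage sketch: when a VM is torn down while work that still uses the PASID
 * may be in flight, the PASID can only be released once the fences on the
 * root page-directory BO's reservation object have signaled. The "root"
 * variable below is hypothetical and only illustrates the idea:
 *
 *      amdgpu_pasid_free_delayed(root->tbo.resv, vm->pasid);
 */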

/*
 * VMID manager
 *
 * VMIDs are a per VMHUB identifier for page tables handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if GPU reset occurred since last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
                               struct amdgpu_vmid *id)
{
        return id->current_gpu_reset_count !=
                atomic_read(&adev->gpu_reset_counter);
}

/* id_mgr->lock must be held */
static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
                                            struct amdgpu_ring *ring,
                                            struct amdgpu_sync *sync,
                                            struct dma_fence *fence,
                                            struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        uint64_t fence_context = adev->fence_context + ring->idx;
        struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct dma_fence *updates = sync->last_vm_update;
        int r = 0;
        struct dma_fence *flushed, *tmp;
        bool needs_flush = vm->use_cpu_for_update;

        flushed = id->flushed_updates;
        if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
            (atomic64_read(&id->owner) != vm->entity.fence_context) ||
            (job->vm_pd_addr != id->pd_gpu_addr) ||
            (updates && (!flushed || updates->context != flushed->context ||
                         dma_fence_is_later(updates, flushed))) ||
            (!id->last_flush || (id->last_flush->context != fence_context &&
                                 !dma_fence_is_signaled(id->last_flush)))) {
                needs_flush = true;
                /* to prevent one context from being starved by another context */
                id->pd_gpu_addr = 0;
                tmp = amdgpu_sync_peek_fence(&id->active, ring);
                if (tmp) {
                        r = amdgpu_sync_fence(adev, sync, tmp, false);
                        return r;
                }
        }

        /* Good, we can use this VMID. Remember this submission as
         * user of the VMID.
         */
        r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
        if (r)
                goto out;

        if (updates && (!flushed || updates->context != flushed->context ||
                        dma_fence_is_later(updates, flushed))) {
                dma_fence_put(id->flushed_updates);
                id->flushed_updates = dma_fence_get(updates);
        }
        id->pd_gpu_addr = job->vm_pd_addr;
        atomic64_set(&id->owner, vm->entity.fence_context);
        job->vm_needs_flush = needs_flush;
        if (needs_flush) {
                dma_fence_put(id->last_flush);
                id->last_flush = NULL;
        }
        job->vmid = id - id_mgr->ids;
        job->pasid = vm->pasid;
        trace_amdgpu_vm_grab_id(vm, ring, job);
out:
        return r;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                     struct amdgpu_sync *sync, struct dma_fence *fence,
                     struct amdgpu_job *job)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmhub = ring->funcs->vmhub;
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        uint64_t fence_context = adev->fence_context + ring->idx;
        struct dma_fence *updates = sync->last_vm_update;
        struct amdgpu_vmid *id, *idle;
        struct dma_fence **fences;
        unsigned i;
        int r = 0;

        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub]) {
                r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
                mutex_unlock(&id_mgr->lock);
                return r;
        }
        fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
        if (!fences) {
                mutex_unlock(&id_mgr->lock);
                return -ENOMEM;
        }
        /* Check if we have an idle VMID */
        i = 0;
        list_for_each_entry(idle, &id_mgr->ids_lru, list) {
                fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
                if (!fences[i])
                        break;
                ++i;
        }

        /* If we can't find an idle VMID to use, wait till one becomes available */
        if (&idle->list == &id_mgr->ids_lru) {
                u64 fence_context = adev->vm_manager.fence_context + ring->idx;
                unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
                struct dma_fence_array *array;
                unsigned j;

                for (j = 0; j < i; ++j)
                        dma_fence_get(fences[j]);

                array = dma_fence_array_create(i, fences, fence_context,
                                               seqno, true);
                if (!array) {
                        for (j = 0; j < i; ++j)
                                dma_fence_put(fences[j]);
                        kfree(fences);
                        r = -ENOMEM;
                        goto error;
                }

                r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
                dma_fence_put(&array->base);
                if (r)
                        goto error;

                mutex_unlock(&id_mgr->lock);
                return 0;

        }
        kfree(fences);

        job->vm_needs_flush = vm->use_cpu_for_update;
        /* Check if we can use a VMID already assigned to this VM */
        list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
                struct dma_fence *flushed;
                bool needs_flush = vm->use_cpu_for_update;

                /* Check all the prerequisites to using this VMID */
                if (amdgpu_vmid_had_gpu_reset(adev, id))
                        continue;

                if (atomic64_read(&id->owner) != vm->entity.fence_context)
                        continue;

                if (job->vm_pd_addr != id->pd_gpu_addr)
                        continue;

                if (!id->last_flush ||
                    (id->last_flush->context != fence_context &&
                     !dma_fence_is_signaled(id->last_flush)))
                        needs_flush = true;

                flushed = id->flushed_updates;
                if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
                        needs_flush = true;

                /* Concurrent flushes are only possible starting with Vega10 */
                if (adev->asic_type < CHIP_VEGA10 && needs_flush)
                        continue;

                /* Good, we can use this VMID. Remember this submission as
                 * user of the VMID.
                 */
                r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
                if (r)
                        goto error;

                if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
                        dma_fence_put(id->flushed_updates);
                        id->flushed_updates = dma_fence_get(updates);
                }

                if (needs_flush)
                        goto needs_flush;
                else
                        goto no_flush_needed;

        }

        /* Still no ID to use? Then use the idle one found earlier */
        id = idle;

        /* Remember this submission as user of the VMID */
        r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
        if (r)
                goto error;

        id->pd_gpu_addr = job->vm_pd_addr;
        dma_fence_put(id->flushed_updates);
        id->flushed_updates = dma_fence_get(updates);
        atomic64_set(&id->owner, vm->entity.fence_context);

needs_flush:
        job->vm_needs_flush = true;
        dma_fence_put(id->last_flush);
        id->last_flush = NULL;

no_flush_needed:
        list_move_tail(&id->list, &id_mgr->ids_lru);

        job->vmid = id - id_mgr->ids;
        job->pasid = vm->pasid;
        trace_amdgpu_vm_grab_id(vm, ring, job);

error:
        mutex_unlock(&id_mgr->lock);
        return r;
}
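
/*
 * Usage sketch: the job submission path is expected to grab a VMID before a
 * job is pushed to the ring and, if job->vm_needs_flush ends up set, to emit
 * a VM flush for job->vmid when the job runs. The field names below follow
 * the submission path of this era and are shown only for illustration:
 *
 *      r = amdgpu_vmid_grab(vm, ring, &job->sync, fence, job);
 *      if (r)
 *              return r;
 *      // job->vmid and job->vm_needs_flush are now valid for this submit
 */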

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
{
        struct amdgpu_vmid_mgr *id_mgr;
        struct amdgpu_vmid *idle;
        int r = 0;

        id_mgr = &adev->vm_manager.id_mgr[vmhub];
        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub])
                goto unlock;
        if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
            AMDGPU_VM_MAX_RESERVED_VMID) {
                DRM_ERROR("Too many reserved VMIDs\n");
                atomic_dec(&id_mgr->reserved_vmid_num);
                r = -EINVAL;
                goto unlock;
        }
        /* Select the first entry VMID */
        idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
        list_del_init(&idle->list);
        vm->reserved_vmid[vmhub] = idle;
        mutex_unlock(&id_mgr->lock);

        return 0;
unlock:
        mutex_unlock(&id_mgr->lock);
        return r;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               unsigned vmhub)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

        mutex_lock(&id_mgr->lock);
        if (vm->reserved_vmid[vmhub]) {
                list_add(&vm->reserved_vmid[vmhub]->list,
                         &id_mgr->ids_lru);
                vm->reserved_vmid[vmhub] = NULL;
                atomic_dec(&id_mgr->reserved_vmid_num);
        }
        mutex_unlock(&id_mgr->lock);
}
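
/*
 * Usage sketch: reserving a VMID pins one ID of a VMHUB to a single VM, and
 * the reservation must be dropped again when the VM no longer needs it. The
 * vmhub choice below is illustrative only:
 *
 *      r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB);
 *      ...
 *      amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB);
 */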

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub the VMID belongs to
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
                       unsigned vmid)
{
        struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
        struct amdgpu_vmid *id = &id_mgr->ids[vmid];

        atomic64_set(&id->owner, 0);
        id->gds_base = 0;
        id->gds_size = 0;
        id->gws_base = 0;
        id->gws_size = 0;
        id->oa_base = 0;
        id->oa_size = 0;
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                for (j = 1; j < id_mgr->num_ids; ++j)
                        amdgpu_vmid_reset(adev, i, j);
        }
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                mutex_init(&id_mgr->lock);
                INIT_LIST_HEAD(&id_mgr->ids_lru);
                atomic_set(&id_mgr->reserved_vmid_num, 0);

                /* skip over VMID 0, since it is the system VM */
                for (j = 1; j < id_mgr->num_ids; ++j) {
                        amdgpu_vmid_reset(adev, i, j);
                        amdgpu_sync_create(&id_mgr->ids[j].active);
                        list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
                }
        }

        adev->vm_manager.fence_context =
                dma_fence_context_alloc(AMDGPU_MAX_RINGS);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                adev->vm_manager.seqno[i] = 0;
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
        unsigned i, j;

        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmid_mgr *id_mgr =
                        &adev->vm_manager.id_mgr[i];

                mutex_destroy(&id_mgr->lock);
                for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
                        struct amdgpu_vmid *id = &id_mgr->ids[j];

                        amdgpu_sync_free(&id->active);
                        dma_fence_put(id->flushed_updates);
                        dma_fence_put(id->last_flush);
                }
        }
}
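
/*
 * Usage sketch: the VMID manager is brought up once per device from the VM
 * manager init path and torn down again on driver unload, roughly:
 *
 *      amdgpu_vmid_mgr_init(adev);     // during amdgpu_vm_manager_init()
 *      ...
 *      amdgpu_vmid_mgr_fini(adev);     // during amdgpu_vm_manager_fini()
 */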