/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	= 1,
	[AMDGPU_HW_IP_COMPUTE]	= 4,
	[AMDGPU_HW_IP_DMA]	= 2,
	[AMDGPU_HW_IP_UVD]	= 1,
	[AMDGPU_HW_IP_VCE]	= 1,
	[AMDGPU_HW_IP_UVD_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_DEC]	= 1,
	[AMDGPU_HW_IP_VCN_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_JPEG]	= 1,
};

bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
	case AMDGPU_CTX_PRIORITY_VERY_LOW:
	case AMDGPU_CTX_PRIORITY_LOW:
	case AMDGPU_CTX_PRIORITY_NORMAL:
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return true;
	default:
		return false;
	}
}

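/* Map the userspace AMDGPU_CTX_PRIORITY_* levels onto the smaller set of DRM
 * scheduler priorities: VERY_LOW and LOW both collapse to MIN, and HIGH and
 * VERY_HIGH both collapse to HIGH.
 */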
static enum drm_sched_priority
amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
		return DRM_SCHED_PRIORITY_UNSET;

	case AMDGPU_CTX_PRIORITY_VERY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_NORMAL:
		return DRM_SCHED_PRIORITY_NORMAL;

	case AMDGPU_CTX_PRIORITY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	/* This should not happen as we sanitized userspace provided priority
	 * already, WARN if this happens.
	 */
	default:
		WARN(1, "Invalid context priority %d\n", ctx_prio);
		return DRM_SCHED_PRIORITY_NORMAL;
	}
}

static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      int32_t priority)
{
	if (!amdgpu_ctx_priority_is_valid(priority))
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}

static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
		return AMDGPU_RING_PRIO_1;
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
{
	struct amdgpu_device *adev = ctx->adev;
	int32_t ctx_prio;
	unsigned int hw_prio;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	switch (hw_ip) {
	case AMDGPU_HW_IP_COMPUTE:
		hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
		break;
	case AMDGPU_HW_IP_VCE:
	case AMDGPU_HW_IP_VCN_ENC:
		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
		break;
	default:
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
		break;
	}

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;

	return hw_prio;
}

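/* Lazily create the scheduler entity backing a hw_ip/ring slot. The set of
 * schedulers comes from adev->gpu_sched[] based on the context's effective
 * priority; UVD and VCN engines retain state across dependent jobs, so they
 * are pinned to a single scheduler instead of being load balanced.
 */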
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				  const u32 ring)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_ctx_entity *entity;
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	unsigned num_scheds = 0;
	int32_t ctx_prio;
	unsigned int hw_prio;
	enum drm_sched_priority drm_prio;
	int r;

	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	entity->sequence = 1;
	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

	/* disable load balance if the hw engine retains context among dependent jobs */
	if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
	    hw_ip == AMDGPU_HW_IP_VCN_DEC ||
	    hw_ip == AMDGPU_HW_IP_UVD_ENC ||
	    hw_ip == AMDGPU_HW_IP_UVD) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	/* It's not an error if we fail to install the new entity */
	if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
		goto cleanup_entity;

	return 0;

cleanup_entity:
	drm_sched_entity_fini(&entity->entity);

error_free_entity:
	kfree(entity);

	return r;
}

static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   int32_t priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
	ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;

	return 0;
}

static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	int i;

	if (!entity)
		return;

	for (i = 0; i < amdgpu_sched_jobs; ++i)
		dma_fence_put(entity->fences[i]);

	kfree(entity);
}

static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
					u32 *stable_pstate)
{
	struct amdgpu_device *adev = ctx->adev;
	enum amd_dpm_forced_level current_level;

	current_level = amdgpu_dpm_get_performance_level(adev);

	switch (current_level) {
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
		break;
	default:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
		break;
	}
	return 0;
}

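/* Force a stable power profile on behalf of this context. Only one context
 * may own the stable pstate at a time (adev->pm.stable_pstate_ctx, protected
 * by stable_pstate_ctx_lock); requesting PSTATE_NONE drops ownership and
 * returns the device to automatic DPM.
 */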
static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
					u32 stable_pstate)
{
	struct amdgpu_device *adev = ctx->adev;
	enum amd_dpm_forced_level level;
	int r;

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
		r = -EBUSY;
		goto done;
	}

	switch (stable_pstate) {
	case AMDGPU_CTX_STABLE_PSTATE_NONE:
		level = AMD_DPM_FORCED_LEVEL_AUTO;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_PEAK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
		break;
	default:
		r = -EINVAL;
		goto done;
	}

	r = amdgpu_dpm_force_performance_level(adev, level);

	if (level == AMD_DPM_FORCED_LEVEL_AUTO)
		adev->pm.stable_pstate_ctx = NULL;
	else
		adev->pm.stable_pstate_ctx = ctx;
done:
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	return r;
}

static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j, idx;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			ctx->entities[i][j] = NULL;
		}
	}

	if (drm_dev_enter(&adev->ddev, &idx)) {
		amdgpu_ctx_set_stable_pstate(ctx, AMDGPU_CTX_STABLE_PSTATE_NONE);
		drm_dev_exit(idx);
	}

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}

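/* Look up (and create on first use) the drm_sched_entity backing a given
 * hw_ip/instance/ring triple. All IPs currently expose a single instance,
 * so only instance 0 is accepted.
 */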
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    int32_t priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

#define AMDGPU_RAS_COUNTE_DELAY_MS 3000

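/* Extended context state query (AMDGPU_CTX_OP_QUERY_STATE2): reports reset,
 * VRAM-lost and guilty status as flags, plus RAS correctable/uncorrectable
 * error counter changes based on the cached RAS counts.
 */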
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	if (adev->ras_enabled && con) {
		/* Return the cached values in O(1),
		 * and schedule delayed work to cache
		 * new values.
		 */
		int ce_count, ue_count;

		ce_count = atomic_read(&con->ras_ce_count);
		ue_count = atomic_read(&con->ras_ue_count);

		if (ce_count != ctx->ras_counter_ce) {
			ctx->ras_counter_ce = ce_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		}

		if (ue_count != ctx->ras_counter_ue) {
			ctx->ras_counter_ue = ue_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		}

		schedule_delayed_work(&con->ras_counte_delay_work,
				      msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
				    struct amdgpu_fpriv *fpriv, uint32_t id,
				    bool set, u32 *stable_pstate)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	int r;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	if (set)
		r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
	else
		r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);

	mutex_unlock(&mgr->lock);
	return r;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id, stable_pstate;
	int32_t priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	id = args->in.ctx_id;
	priority = args->in.priority;

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (!amdgpu_ctx_priority_is_valid(priority))
		priority = AMDGPU_CTX_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
		if (!r)
			args->out.pstate.flags = stable_pstate;
		break;
	case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
		if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
			return -EINVAL;
		stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
		if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

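/* Publish a new fence for this entity and hand back its sequence number.
 * Fences live in a power-of-two ring of amdgpu_sched_jobs slots, so the
 * fence being overwritten must already have signaled (BUG_ON otherwise).
 */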
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   struct amdgpu_ctx_entity *aentity,
					   int hw_ip,
					   int32_t priority)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity,
				      amdgpu_ctx_to_drm_sched_prio(priority));

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}

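/* Apply a runtime priority override to every entity already created for this
 * context. The override takes precedence over init_priority until it is set
 * back to AMDGPU_CTX_PRIORITY_UNSET.
 */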
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  int32_t priority)
{
	int32_t ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}

int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

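/* Accumulate GPU time for one entity from its fence ring: for each scheduled
 * fence, add the scheduled-to-finished interval (or scheduled-to-now for jobs
 * still in flight) to *total, and record the longest time elapsed since
 * scheduling in *max.
 */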
static void amdgpu_ctx_fence_time(struct amdgpu_ctx *ctx,
		struct amdgpu_ctx_entity *centity, ktime_t *total, ktime_t *max)
{
	ktime_t now, t1;
	uint32_t i;

	*total = *max = 0;

	now = ktime_get();
	for (i = 0; i < amdgpu_sched_jobs; i++) {
		struct dma_fence *fence;
		struct drm_sched_fence *s_fence;

		spin_lock(&ctx->ring_lock);
		fence = dma_fence_get(centity->fences[i]);
		spin_unlock(&ctx->ring_lock);
		if (!fence)
			continue;
		s_fence = to_drm_sched_fence(fence);
		if (!dma_fence_is_signaled(&s_fence->scheduled)) {
			dma_fence_put(fence);
			continue;
		}
		t1 = s_fence->scheduled.timestamp;
		if (!ktime_before(t1, now)) {
			dma_fence_put(fence);
			continue;
		}
		if (dma_fence_is_signaled(&s_fence->finished) &&
		    s_fence->finished.timestamp < now)
			*total += ktime_sub(s_fence->finished.timestamp, t1);
		else
			*total += ktime_sub(now, t1);
		t1 = ktime_sub(now, t1);
		dma_fence_put(fence);
		*max = max(t1, *max);
	}
}

ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip,
		uint32_t idx, uint64_t *elapsed)
{
	struct idr *idp;
	struct amdgpu_ctx *ctx;
	uint32_t id;
	struct amdgpu_ctx_entity *centity;
	ktime_t total = 0, max = 0;

	if (idx >= AMDGPU_MAX_ENTITY_NUM)
		return 0;
	idp = &mgr->ctx_handles;
	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		ktime_t ttotal, tmax;

		if (!ctx->entities[hwip][idx])
			continue;

		centity = ctx->entities[hwip][idx];
		amdgpu_ctx_fence_time(ctx, centity, &ttotal, &tmax);

		/* Harmonic mean approximation diverges for very small
		 * values. If ratio < 0.01% ignore
		 */
		if (AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(tmax, ttotal))
			continue;

		total = ktime_add(total, ttotal);
		max = ktime_after(tmax, max) ? tmax : max;
	}

	mutex_unlock(&mgr->lock);
	if (elapsed)
		*elapsed = max;

	return total;
}