// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_guc_slpc.h"
#include "gt/intel_gt.h"

static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
	return container_of(slpc, struct intel_guc, slpc);
}

static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	return guc_to_gt(slpc_to_guc(slpc));
}

static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
{
	return slpc_to_gt(slpc)->i915;
}

static bool __detect_slpc_supported(struct intel_guc *guc)
{
	/* GuC SLPC is unavailable for pre-Gen12 */
	return guc->submission_supported &&
		GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}

static bool __guc_slpc_selected(struct intel_guc *guc)
{
	if (!intel_guc_slpc_is_supported(guc))
		return false;

	return guc->submission_selected;
}

void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	slpc->supported = __detect_slpc_supported(guc);
	slpc->selected = __guc_slpc_selected(guc);
}

static void slpc_mem_set_param(struct slpc_shared_data *data,
			       u32 id, u32 value)
{
	GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
	/*
	 * When the flag bit is set, corresponding value will be read
	 * and applied by SLPC.
	 */
	data->override_params.bits[id >> 5] |= (1 << (id % 32));
	data->override_params.values[id] = value;
}

static void slpc_mem_set_enabled(struct slpc_shared_data *data,
				 u8 enable_id, u8 disable_id)
{
	/*
	 * Enabling a param involves setting the enable_id
	 * to 1 and disable_id to 0.
	 */
	slpc_mem_set_param(data, enable_id, 1);
	slpc_mem_set_param(data, disable_id, 0);
}

static void slpc_mem_set_disabled(struct slpc_shared_data *data,
				  u8 enable_id, u8 disable_id)
{
	/*
	 * Disabling a param involves setting the enable_id
	 * to 0 and disable_id to 1.
	 */
	slpc_mem_set_param(data, disable_id, 1);
	slpc_mem_set_param(data, enable_id, 0);
}

static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data;

	GEM_BUG_ON(!slpc->vma);

	drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
	data = slpc->vaddr;

	return data->header.global_state;
}

static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};

	return intel_guc_send(guc, request, ARRAY_SIZE(request));
}

static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
}

static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

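/*
 * Ask GuC to dump the current SLPC task state into the shared data
 * buffer, then flush CPU caches so that subsequent reads of the buffer
 * (e.g. by the decode helpers below) see fresh values.
 */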
static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_query(guc, offset);
	if (unlikely(ret))
		drm_err(&i915->drm, "Failed to query task state (%pe)\n",
			ERR_PTR(ret));

	drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);

	return ret;
}

static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	ret = guc_action_slpc_set_param(guc, id, value);
	if (ret)
		drm_err(&i915->drm, "Failed to set param %d to %u (%pe)\n",
			id, value, ERR_PTR(ret));

	return ret;
}

static int slpc_unset_param(struct intel_guc_slpc *slpc,
			    u8 id)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	return guc_action_slpc_unset_param(guc, id);
}

static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	lockdep_assert_held(&slpc->lock);

	if (!intel_guc_is_ready(guc))
		return -ENODEV;

	/*
	 * This function is a little different as compared to
	 * intel_guc_slpc_set_min_freq(). Softlimit will not be updated
	 * here since this is used to temporarily change min freq,
	 * for example, during a waitboost. Caller is responsible for
	 * checking bounds.
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     freq);
		if (ret)
			drm_err(&i915->drm, "Unable to force min freq to %u: %d",
				freq, ret);
	}

	return ret;
}

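/*
 * Waitboost worker: while there are outstanding waiters, apply the
 * boost frequency as the effective min. This runs from a workqueue so
 * that slpc_force_min_freq() can take slpc->lock and a runtime PM
 * wakeref in process context.
 */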
static void slpc_boost_work(struct work_struct *work)
{
	struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);

	/*
	 * Raise min freq to boost. It's possible that
	 * this is greater than current max. But it will
	 * certainly be limited by RP0. An error setting
	 * the min param is not fatal.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_read(&slpc->num_waiters)) {
		slpc_force_min_freq(slpc, slpc->boost_freq);
		slpc->num_boosts++;
	}
	mutex_unlock(&slpc->lock);
}

int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	GEM_BUG_ON(slpc->vma);

	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
	if (unlikely(err)) {
		drm_err(&i915->drm,
			"Failed to allocate SLPC struct (err=%pe)\n",
			ERR_PTR(err));
		return err;
	}

	slpc->max_freq_softlimit = 0;
	slpc->min_freq_softlimit = 0;

	slpc->boost_freq = 0;
	atomic_set(&slpc->num_waiters, 0);
	slpc->num_boosts = 0;

	mutex_init(&slpc->lock);
	INIT_WORK(&slpc->boost_work, slpc_boost_work);

	return err;
}

static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
	switch (state) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
	return slpc_global_state_to_string(slpc_get_state(slpc));
}

static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

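/*
 * Send the SLPC reset event with the GGTT offset of the shared data
 * buffer, then wait for the global state in that buffer to report that
 * SLPC is running.
 */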
static int slpc_reset(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_reset(guc, offset);

	if (unlikely(ret < 0)) {
		drm_err(&i915->drm, "SLPC reset action failed (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	if (!ret) {
		if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
			drm_err(&i915->drm, "SLPC not enabled! State = %s\n",
				slpc_get_state_string(slpc));
			return -EIO;
		}
	}

	return 0;
}

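/*
 * The task state data stores frequencies in hardware units; scale by
 * GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER to convert the min/max
 * fields to MHz.
 */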
static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static void slpc_shared_data_reset(struct slpc_shared_data *data)
{
	memset(data, 0, sizeof(struct slpc_shared_data));

	data->header.size = sizeof(struct slpc_shared_data);

	/* Enable only GTPERF task, disable others */
	slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
			     SLPC_PARAM_TASK_DISABLE_GTPERF);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
			      SLPC_PARAM_TASK_DISABLE_BALANCER);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
			      SLPC_PARAM_TASK_DISABLE_DCC);
}

/**
 * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val < slpc->min_freq_softlimit)
		return -EINVAL;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->max_freq_softlimit = val;

	return ret;
}

/**
 * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold max frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_max_freq(slpc);
	}

	return ret;
}

/**
 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the min unslice
 * frequency.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val > slpc->max_freq_softlimit)
		return -EINVAL;

	/* Need a lock now since waitboost can be modifying min as well */
	mutex_lock(&slpc->lock);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->min_freq_softlimit = val;

	mutex_unlock(&slpc->lock);

	return ret;
}

/**
 * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold min frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the min frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_min_freq(slpc);
	}

	return ret;
}

void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
	u32 pm_intrmsk_mbz = 0;

	/*
	 * Allow GuC to receive ARAT timer expiry event.
	 * This interrupt register is setup by RPS code
	 * when host based Turbo is enabled.
	 */
	pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;

	intel_uncore_rmw(gt->uncore,
			 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
}

static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
	int ret = 0;

	/*
	 * Softlimits are initially equivalent to platform limits
	 * unless they have deviated from defaults, in which case,
	 * we retain the values and set min/max accordingly.
	 */
	if (!slpc->max_freq_softlimit)
		slpc->max_freq_softlimit = slpc->rp0_freq;
	else if (slpc->max_freq_softlimit != slpc->rp0_freq)
		ret = intel_guc_slpc_set_max_freq(slpc,
						  slpc->max_freq_softlimit);

	if (unlikely(ret))
		return ret;

	if (!slpc->min_freq_softlimit)
		slpc->min_freq_softlimit = slpc->min_freq;
	else if (slpc->min_freq_softlimit != slpc->min_freq)
		return intel_guc_slpc_set_min_freq(slpc,
						   slpc->min_freq_softlimit);

	return 0;
}

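/*
 * Toggle the "ignore efficient frequency" override. When ignoring,
 * also pin the min frequency to the platform minimum; when not,
 * clear both overrides so SLPC falls back to its own defaults.
 */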
static int slpc_ignore_eff_freq(struct intel_guc_slpc *slpc, bool ignore)
{
	int ret = 0;

	if (ignore) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
				     ignore);
		if (!ret)
			return slpc_set_param(slpc,
					      SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
					      slpc->min_freq);
	} else {
		ret = slpc_unset_param(slpc,
				       SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY);
		if (!ret)
			return slpc_unset_param(slpc,
						SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ);
	}

	return ret;
}

static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
	/* Force SLPC to use platform rp0 */
	return slpc_set_param(slpc,
			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
			      slpc->rp0_freq);
}

static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
	u32 rp_state_cap;

	rp_state_cap = intel_uncore_read(slpc_to_gt(slpc)->uncore,
					 GEN6_RP_STATE_CAP);

	slpc->rp0_freq = REG_FIELD_GET(RP0_CAP_MASK, rp_state_cap) *
					GT_FREQUENCY_MULTIPLIER;
	slpc->rp1_freq = REG_FIELD_GET(RP1_CAP_MASK, rp_state_cap) *
					GT_FREQUENCY_MULTIPLIER;
	slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) *
					GT_FREQUENCY_MULTIPLIER;

	if (!slpc->boost_freq)
		slpc->boost_freq = slpc->rp0_freq;
}

/**
 * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
 *
 * SLPC is enabled by setting up the shared data structure and
 * sending reset event to GuC SLPC. Initial data is setup in
 * intel_guc_slpc_init. Here we send the reset event. We do
 * not currently need a slpc_disable since this is taken care
 * of automatically when a reset/suspend occurs and the GuC
 * CTB is destroyed.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(!slpc->vma);

	slpc_shared_data_reset(slpc->vaddr);

	ret = slpc_reset(slpc);
	if (unlikely(ret < 0)) {
		drm_err(&i915->drm, "SLPC Reset event returned (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	ret = slpc_query_task_state(slpc);
	if (unlikely(ret < 0))
		return ret;

	intel_guc_pm_intrmsk_enable(to_gt(i915));

	slpc_get_rp_values(slpc);

	/* Ignore efficient freq and set min to platform min */
	ret = slpc_ignore_eff_freq(slpc, true);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC min to RPn (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	/* Set SLPC max limit to RP0 */
	ret = slpc_use_fused_rp0(slpc);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC max to RP0 (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	/* Revert SLPC min/max to softlimits if necessary */
	ret = slpc_set_softlimits(slpc);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC softlimits (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	return 0;
}

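/**
 * intel_guc_slpc_set_boost_freq() - Set the frequency used for waitboosting.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * The new value is cached here; it is applied as the effective min
 * frequency (via slpc_force_min_freq()) only while there are active
 * waiters.
 *
 * Return: 0 on success, non-zero error code on failure.
 */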
int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
{
	int ret = 0;

	if (val < slpc->min_freq || val > slpc->rp0_freq)
		return -EINVAL;

	mutex_lock(&slpc->lock);

	if (slpc->boost_freq != val) {
		/* Apply only if there are active waiters */
		if (atomic_read(&slpc->num_waiters)) {
			ret = slpc_force_min_freq(slpc, val);
			if (ret) {
				ret = -EIO;
				goto done;
			}
		}

		slpc->boost_freq = val;
	}

done:
	mutex_unlock(&slpc->lock);
	return ret;
}

void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
	/*
	 * Return min back to the softlimit.
	 * This is called during request retire,
	 * so we don't need to fail that if the
	 * set_param fails.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_dec_and_test(&slpc->num_waiters))
		slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
	mutex_unlock(&slpc->lock);
}

int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct slpc_shared_data *data = slpc->vaddr;
	struct slpc_task_state_data *slpc_tasks;
	intel_wakeref_t wakeref;
	int ret = 0;

	GEM_BUG_ON(!slpc->vma);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_query_task_state(slpc);

		if (!ret) {
			slpc_tasks = &data->task_state_data;

			drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
			drm_printf(p, "\tGTPERF task active: %s\n",
				   yesno(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
			drm_printf(p, "\tMax freq: %u MHz\n",
				   slpc_decode_max_freq(slpc));
			drm_printf(p, "\tMin freq: %u MHz\n",
				   slpc_decode_min_freq(slpc));
			drm_printf(p, "\twaitboosts: %u\n",
				   slpc->num_boosts);
		}
	}

	return ret;
}

void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
{
	if (!slpc->vma)
		return;

	i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
}