// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/drm_cache.h>
#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
#include "intel_guc_print.h"
#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
	return container_of(slpc, struct intel_guc, slpc);
}

static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	return guc_to_gt(slpc_to_guc(slpc));
}

static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
{
	return slpc_to_gt(slpc)->i915;
}

static bool __detect_slpc_supported(struct intel_guc *guc)
{
	/* GuC SLPC is unavailable for pre-Gen12 */
	return guc->submission_supported &&
		GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}

static bool __guc_slpc_selected(struct intel_guc *guc)
{
	if (!intel_guc_slpc_is_supported(guc))
		return false;

	return guc->submission_selected;
}

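/**
 * intel_guc_slpc_init_early() - Set up early SLPC support/selection flags.
 * @slpc: pointer to intel_guc_slpc.
 *
 * Cache whether SLPC is supported and selected for this platform, based on
 * GuC submission being supported and selected.
 */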
void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	slpc->supported = __detect_slpc_supported(guc);
	slpc->selected = __guc_slpc_selected(guc);
}

static void slpc_mem_set_param(struct slpc_shared_data *data,
			       u32 id, u32 value)
{
	GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
	/*
	 * When the flag bit is set, corresponding value will be read
	 * and applied by SLPC.
	 */
	data->override_params.bits[id >> 5] |= (1 << (id % 32));
	data->override_params.values[id] = value;
}

static void slpc_mem_set_enabled(struct slpc_shared_data *data,
				 u8 enable_id, u8 disable_id)
{
	/*
	 * Enabling a param involves setting the enable_id
	 * to 1 and disable_id to 0.
	 */
	slpc_mem_set_param(data, enable_id, 1);
	slpc_mem_set_param(data, disable_id, 0);
}

static void slpc_mem_set_disabled(struct slpc_shared_data *data,
				  u8 enable_id, u8 disable_id)
{
	/*
	 * Disabling a param involves setting the enable_id
	 * to 0 and disable_id to 1.
	 */
	slpc_mem_set_param(data, disable_id, 1);
	slpc_mem_set_param(data, enable_id, 0);
}

static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data;

	GEM_BUG_ON(!slpc->vma);

	drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
	data = slpc->vaddr;

	return data->header.global_state;
}

static int guc_action_slpc_set_param_nb(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send_nb(guc, request, ARRAY_SIZE(request), 0);

	return ret > 0 ? -EPROTO : ret;
}

static int slpc_set_param_nb(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	return guc_action_slpc_set_param_nb(guc, id, value);
}

static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};

	return intel_guc_send(guc, request, ARRAY_SIZE(request));
}

static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
}

static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_query(guc, offset);
	if (unlikely(ret))
		guc_probe_error(guc, "Failed to query task state: %pe\n", ERR_PTR(ret));

	drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);

	return ret;
}

static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	int ret;

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	ret = guc_action_slpc_set_param(guc, id, value);
	if (ret)
		guc_probe_error(guc, "Failed to set param %d to %u: %pe\n",
				id, value, ERR_PTR(ret));

	return ret;
}

static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	return guc_action_slpc_unset_param(guc, id);
}

static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	lockdep_assert_held(&slpc->lock);

	if (!intel_guc_is_ready(guc))
		return -ENODEV;

	/*
	 * This function is a little different as compared to
	 * intel_guc_slpc_set_min_freq(). Softlimit will not be updated
	 * here since this is used to temporarily change min freq,
	 * for example, during a waitboost. Caller is responsible for
	 * checking bounds.
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Non-blocking request will avoid stalls */
		ret = slpc_set_param_nb(slpc,
					SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
					freq);
		if (ret)
			guc_notice(guc, "Failed to send set_param for min freq(%d): %pe\n",
				   freq, ERR_PTR(ret));
	}

	return ret;
}

static void slpc_boost_work(struct work_struct *work)
{
	struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
	int err;

	/*
	 * Raise min freq to boost. It's possible that
	 * this is greater than current max. But it will
	 * certainly be limited by RP0. An error setting
	 * the min param is not fatal.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_read(&slpc->num_waiters)) {
		err = slpc_force_min_freq(slpc, slpc->boost_freq);
		if (!err)
			slpc->num_boosts++;
	}
	mutex_unlock(&slpc->lock);
}

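/**
 * intel_guc_slpc_init() - Allocate and initialize SLPC state.
 * @slpc: pointer to intel_guc_slpc.
 *
 * Allocate and map the vma backing the SLPC shared data structure and
 * initialize the software state: softlimits, boost bookkeeping, media
 * ratio mode, lock and boost worker.
 *
 * Return: 0 on success, non-zero error code on failure.
 */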
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	GEM_BUG_ON(slpc->vma);

	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
	if (unlikely(err)) {
		guc_probe_error(guc, "Failed to allocate SLPC struct: %pe\n", ERR_PTR(err));
		return err;
	}

	slpc->max_freq_softlimit = 0;
	slpc->min_freq_softlimit = 0;
	slpc->min_is_rpmax = false;

	slpc->boost_freq = 0;
	atomic_set(&slpc->num_waiters, 0);
	slpc->num_boosts = 0;
	slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;

	mutex_init(&slpc->lock);
	INIT_WORK(&slpc->boost_work, slpc_boost_work);

	return err;
}

static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
	switch (state) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
	return slpc_global_state_to_string(slpc_get_state(slpc));
}

static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int slpc_reset(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_reset(guc, offset);

	if (unlikely(ret < 0)) {
		guc_probe_error(guc, "SLPC reset action failed: %pe\n", ERR_PTR(ret));
		return ret;
	}

	if (!ret) {
		if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
			guc_probe_error(guc, "SLPC not enabled! State = %s\n",
					slpc_get_state_string(slpc));
			return -EIO;
		}
	}

	return 0;
}

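/*
 * The task state data reports frequencies in hardware units; scale them
 * to MHz using the same multiplier/scaler as host RPS.
 */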
static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static void slpc_shared_data_reset(struct slpc_shared_data *data)
{
	memset(data, 0, sizeof(struct slpc_shared_data));

	data->header.size = sizeof(struct slpc_shared_data);

	/* Enable only GTPERF task, disable others */
	slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
			     SLPC_PARAM_TASK_DISABLE_GTPERF);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
			      SLPC_PARAM_TASK_DISABLE_BALANCER);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
			      SLPC_PARAM_TASK_DISABLE_DCC);
}

/**
 * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val < slpc->min_freq_softlimit)
		return -EINVAL;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->max_freq_softlimit = val;

	return ret;
}

/**
 * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold max frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_max_freq(slpc);
	}

	return ret;
}

/**
 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the min unslice
 * frequency.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val > slpc->max_freq_softlimit)
		return -EINVAL;

	/* Need a lock now since waitboost can be modifying min as well */
	mutex_lock(&slpc->lock);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* Ignore efficient freq if lower min freq is requested */
	ret = slpc_set_param(slpc,
			     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
			     val < slpc->rp1_freq);
	if (ret) {
		guc_probe_error(slpc_to_guc(slpc), "Failed to toggle efficient freq: %pe\n",
				ERR_PTR(ret));
		goto out;
	}

	ret = slpc_set_param(slpc,
			     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
			     val);

	if (!ret)
		slpc->min_freq_softlimit = val;

out:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&slpc->lock);

	/* Return standardized err code for sysfs calls */
	if (ret)
		ret = -EIO;

	return ret;
}

/**
 * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold min frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the min frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_min_freq(slpc);
	}

	return ret;
}

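/**
 * intel_guc_slpc_set_media_ratio_mode() - Set media frequency ratio mode.
 * @slpc: pointer to intel_guc_slpc.
 * @val: ratio mode (SLPC_MEDIA_RATIO_MODE_*)
 *
 * This function will invoke GuC SLPC action to set the media frequency
 * ratio mode, on platforms that support it.
 *
 * Return: 0 on success, -ENODEV if unsupported, non-zero error code on failure.
 */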
int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	if (!HAS_MEDIA_RATIO_MODE(i915))
		return -ENODEV;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_MEDIA_FF_RATIO_MODE,
				     val);
	return ret;
}

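/**
 * intel_guc_pm_intrmsk_enable() - Enable PM interrupts for GuC.
 * @gt: pointer to intel_gt.
 *
 * Clear the ARAT expired bit in GEN6_PMINTRMSK so that GuC can receive
 * the ARAT timer expiry event.
 */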
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
	u32 pm_intrmsk_mbz = 0;

	/*
	 * Allow GuC to receive ARAT timer expiry event.
	 * This interrupt register is set up by RPS code
	 * when host based Turbo is enabled.
	 */
	pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;

	intel_uncore_rmw(gt->uncore,
			 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
}

static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
	int ret = 0;

	/*
	 * Softlimits are initially equivalent to platform limits
	 * unless they have deviated from defaults, in which case,
	 * we retain the values and set min/max accordingly.
	 */
	if (!slpc->max_freq_softlimit) {
		slpc->max_freq_softlimit = slpc->rp0_freq;
		slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
	} else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
		ret = intel_guc_slpc_set_max_freq(slpc,
						  slpc->max_freq_softlimit);
	}

	if (unlikely(ret))
		return ret;

	if (!slpc->min_freq_softlimit) {
		ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
		if (unlikely(ret))
			return ret;
		slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
	} else if (slpc->min_freq_softlimit != slpc->min_freq) {
		return intel_guc_slpc_set_min_freq(slpc,
						   slpc->min_freq_softlimit);
	}

	return 0;
}

static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
{
	int slpc_min_freq;
	int ret;

	ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
	if (ret) {
		guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret));
		return false;
	}

	if (slpc_min_freq == SLPC_MAX_FREQ_MHZ)
		return true;
	else
		return false;
}

static void update_server_min_softlimit(struct intel_guc_slpc *slpc)
{
	/* For server parts, SLPC min will be at RPMax.
	 * Use min softlimit to clamp it to RP0 instead.
	 */
	if (!slpc->min_freq_softlimit &&
	    is_slpc_min_freq_rpmax(slpc)) {
		slpc->min_is_rpmax = true;
		slpc->min_freq_softlimit = slpc->rp0_freq;
		(slpc_to_gt(slpc))->defaults.min_freq = slpc->min_freq_softlimit;
	}
}

static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
	/* Force SLPC to use platform RP0 */
	return slpc_set_param(slpc,
			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
			      slpc->rp0_freq);
}

static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
	struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
	struct intel_rps_freq_caps caps;

	gen6_rps_get_freq_caps(rps, &caps);
	slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
	slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
	slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);

	if (!slpc->boost_freq)
		slpc->boost_freq = slpc->rp0_freq;
}

/**
 * intel_guc_slpc_override_gucrc_mode() - override GUCRC mode
 * @slpc: pointer to intel_guc_slpc.
 * @mode: new value of the mode.
 *
 * This function will override the GUCRC mode.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
{
	int ret;
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;

	if (mode >= SLPC_GUCRC_MODE_MAX)
		return -EINVAL;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
		if (ret)
			guc_err(slpc_to_guc(slpc), "Override RC mode %d failed: %pe\n",
				mode, ERR_PTR(ret));
	}

	return ret;
}

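/**
 * intel_guc_slpc_unset_gucrc_mode() - Unset any GUCRC mode override.
 * @slpc: pointer to intel_guc_slpc.
 *
 * Unset the SLPC_PARAM_PWRGATE_RC_MODE parameter so SLPC reverts to its
 * default RC behavior.
 *
 * Return: 0 on success, non-zero error code on failure.
 */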
int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
		if (ret)
			guc_err(slpc_to_guc(slpc), "Unsetting RC mode failed: %pe\n", ERR_PTR(ret));
	}

	return ret;
}

/*
 * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
 *
 * SLPC is enabled by setting up the shared data structure and
 * sending reset event to GuC SLPC. Initial data is set up in
 * intel_guc_slpc_init. Here we send the reset event. We do
 * not currently need a slpc_disable since this is taken care
 * of automatically when a reset/suspend occurs and the GuC
 * CTB is destroyed.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	int ret;

	GEM_BUG_ON(!slpc->vma);

	slpc_shared_data_reset(slpc->vaddr);

	ret = slpc_reset(slpc);
	if (unlikely(ret < 0)) {
		guc_probe_error(guc, "SLPC Reset event returned: %pe\n", ERR_PTR(ret));
		return ret;
	}

	ret = slpc_query_task_state(slpc);
	if (unlikely(ret < 0))
		return ret;

	intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));

	slpc_get_rp_values(slpc);

	/* Handle the case where min=max=RPmax */
	update_server_min_softlimit(slpc);

	/* Set SLPC max limit to RP0 */
	ret = slpc_use_fused_rp0(slpc);
	if (unlikely(ret)) {
		guc_probe_error(guc, "Failed to set SLPC max to RP0: %pe\n", ERR_PTR(ret));
		return ret;
	}

	/* Revert SLPC min/max to softlimits if necessary */
	ret = slpc_set_softlimits(slpc);
	if (unlikely(ret)) {
		guc_probe_error(guc, "Failed to set SLPC softlimits: %pe\n", ERR_PTR(ret));
		return ret;
	}

	/* Set cached media freq ratio mode */
	intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);

	return 0;
}

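/**
 * intel_guc_slpc_set_boost_freq() - Set the frequency used for waitboosting.
 * @slpc: pointer to intel_guc_slpc.
 * @val: boost frequency (MHz)
 *
 * Update the cached boost frequency and, if there are active waiters,
 * request it as the new min frequency right away.
 *
 * Return: 0 on success, non-zero error code on failure.
 */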
int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
{
	int ret = 0;

	if (val < slpc->min_freq || val > slpc->rp0_freq)
		return -EINVAL;

	mutex_lock(&slpc->lock);

	if (slpc->boost_freq != val) {
		/* Apply only if there are active waiters */
		if (atomic_read(&slpc->num_waiters)) {
			ret = slpc_force_min_freq(slpc, val);
			if (ret) {
				ret = -EIO;
				goto done;
			}
		}

		slpc->boost_freq = val;
	}

done:
	mutex_unlock(&slpc->lock);
	return ret;
}

void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
	/*
	 * Return min back to the softlimit.
	 * This is called during request retire,
	 * so we don't need to fail that if the
	 * set_param fails.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_dec_and_test(&slpc->num_waiters))
		slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
	mutex_unlock(&slpc->lock);
}

int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct slpc_shared_data *data = slpc->vaddr;
	struct slpc_task_state_data *slpc_tasks;
	intel_wakeref_t wakeref;
	int ret = 0;

	GEM_BUG_ON(!slpc->vma);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_query_task_state(slpc);

		if (!ret) {
			slpc_tasks = &data->task_state_data;

			drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
			drm_printf(p, "\tGTPERF task active: %s\n",
				   str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
			drm_printf(p, "\tMax freq: %u MHz\n",
				   slpc_decode_max_freq(slpc));
			drm_printf(p, "\tMin freq: %u MHz\n",
				   slpc_decode_min_freq(slpc));
			drm_printf(p, "\twaitboosts: %u\n",
				   slpc->num_boosts);
		}
	}

	return ret;
}

void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
{
	if (!slpc->vma)
		return;

	i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
}