// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_guc_slpc.h"
#include "gt/intel_gt.h"

static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
	return container_of(slpc, struct intel_guc, slpc);
}

static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	return guc_to_gt(slpc_to_guc(slpc));
}

static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
{
	return slpc_to_gt(slpc)->i915;
}

static bool __detect_slpc_supported(struct intel_guc *guc)
{
	/* GuC SLPC is unavailable for pre-Gen12 */
	return guc->submission_supported &&
		GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}

static bool __guc_slpc_selected(struct intel_guc *guc)
{
	if (!intel_guc_slpc_is_supported(guc))
		return false;

	return guc->submission_selected;
}

void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	slpc->supported = __detect_slpc_supported(guc);
	slpc->selected = __guc_slpc_selected(guc);
}

static void slpc_mem_set_param(struct slpc_shared_data *data,
			       u32 id, u32 value)
{
	GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
	/*
	 * When the flag bit is set, the corresponding value will be
	 * read and applied by SLPC.
	 */
	data->override_params.bits[id >> 5] |= (1 << (id % 32));
	data->override_params.values[id] = value;
}

static void slpc_mem_set_enabled(struct slpc_shared_data *data,
				 u8 enable_id, u8 disable_id)
{
	/*
	 * Enabling a param involves setting the enable_id
	 * to 1 and disable_id to 0.
	 */
	slpc_mem_set_param(data, enable_id, 1);
	slpc_mem_set_param(data, disable_id, 0);
}

static void slpc_mem_set_disabled(struct slpc_shared_data *data,
				  u8 enable_id, u8 disable_id)
{
	/*
	 * Disabling a param involves setting the enable_id
	 * to 0 and disable_id to 1.
	 */
	slpc_mem_set_param(data, disable_id, 1);
	slpc_mem_set_param(data, enable_id, 0);
}

int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	GEM_BUG_ON(slpc->vma);

	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
	if (unlikely(err)) {
		drm_err(&i915->drm,
			"Failed to allocate SLPC struct (err=%pe)\n",
			ERR_PTR(err));
		return err;
	}

	slpc->max_freq_softlimit = 0;
	slpc->min_freq_softlimit = 0;

	return err;
}

static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data;

	GEM_BUG_ON(!slpc->vma);

	drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
	data = slpc->vaddr;

	return data->header.global_state;
}

static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}
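/*
 * Note on the request layout above: every SLPC H2G message is the
 * GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST action followed by an event
 * descriptor (SLPC_EVENT() packs the event id together with its
 * argument count) and then the arguments themselves. intel_guc_send()
 * returns a negative errno on failure; a positive return would be
 * unexpected response data for a set-param, so it is folded into
 * -EPROTO here.
 */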
static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};

	return intel_guc_send(guc, request, ARRAY_SIZE(request));
}

static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
}

static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_query(guc, offset);
	if (unlikely(ret))
		drm_err(&i915->drm, "Failed to query task state (%pe)\n",
			ERR_PTR(ret));

	drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);

	return ret;
}

static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	ret = guc_action_slpc_set_param(guc, id, value);
	if (ret)
		drm_err(&i915->drm, "Failed to set param %d to %u (%pe)\n",
			id, value, ERR_PTR(ret));

	return ret;
}

static int slpc_unset_param(struct intel_guc_slpc *slpc,
			    u8 id)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	return guc_action_slpc_unset_param(guc, id);
}

static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
	switch (state) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
	return slpc_global_state_to_string(slpc_get_state(slpc));
}

static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int slpc_reset(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_reset(guc, offset);
	if (unlikely(ret < 0)) {
		drm_err(&i915->drm, "SLPC reset action failed (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	if (!ret) {
		if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
			drm_err(&i915->drm, "SLPC not enabled! State = %s\n",
				slpc_get_state_string(slpc));
			return -EIO;
		}
	}

	return 0;
}
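/*
 * The reset handshake above is asynchronous on the GuC side: the reset
 * event only kicks off SLPC initialization, so the host then polls the
 * global_state field of the shared data (via slpc_is_running()) for up
 * to SLPC_RESET_TIMEOUT_MS before declaring failure.
 */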
State = %s\n", 264 slpc_get_state_string(slpc)); 265 return -EIO; 266 } 267 } 268 269 return 0; 270 } 271 272 static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc) 273 { 274 struct slpc_shared_data *data = slpc->vaddr; 275 276 GEM_BUG_ON(!slpc->vma); 277 278 return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK, 279 data->task_state_data.freq) * 280 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER); 281 } 282 283 static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc) 284 { 285 struct slpc_shared_data *data = slpc->vaddr; 286 287 GEM_BUG_ON(!slpc->vma); 288 289 return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK, 290 data->task_state_data.freq) * 291 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER); 292 } 293 294 static void slpc_shared_data_reset(struct slpc_shared_data *data) 295 { 296 memset(data, 0, sizeof(struct slpc_shared_data)); 297 298 data->header.size = sizeof(struct slpc_shared_data); 299 300 /* Enable only GTPERF task, disable others */ 301 slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF, 302 SLPC_PARAM_TASK_DISABLE_GTPERF); 303 304 slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER, 305 SLPC_PARAM_TASK_DISABLE_BALANCER); 306 307 slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC, 308 SLPC_PARAM_TASK_DISABLE_DCC); 309 } 310 311 /** 312 * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC. 313 * @slpc: pointer to intel_guc_slpc. 314 * @val: frequency (MHz) 315 * 316 * This function will invoke GuC SLPC action to update the max frequency 317 * limit for unslice. 318 * 319 * Return: 0 on success, non-zero error code on failure. 320 */ 321 int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val) 322 { 323 struct drm_i915_private *i915 = slpc_to_i915(slpc); 324 intel_wakeref_t wakeref; 325 int ret; 326 327 if (val < slpc->min_freq || 328 val > slpc->rp0_freq || 329 val < slpc->min_freq_softlimit) 330 return -EINVAL; 331 332 with_intel_runtime_pm(&i915->runtime_pm, wakeref) { 333 ret = slpc_set_param(slpc, 334 SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ, 335 val); 336 337 /* Return standardized err code for sysfs calls */ 338 if (ret) 339 ret = -EIO; 340 } 341 342 if (!ret) 343 slpc->max_freq_softlimit = val; 344 345 return ret; 346 } 347 348 /** 349 * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC. 350 * @slpc: pointer to intel_guc_slpc. 351 * @val: pointer to val which will hold max frequency (MHz) 352 * 353 * This function will invoke GuC SLPC action to read the max frequency 354 * limit for unslice. 355 * 356 * Return: 0 on success, non-zero error code on failure. 357 */ 358 int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val) 359 { 360 struct drm_i915_private *i915 = slpc_to_i915(slpc); 361 intel_wakeref_t wakeref; 362 int ret = 0; 363 364 with_intel_runtime_pm(&i915->runtime_pm, wakeref) { 365 /* Force GuC to update task data */ 366 ret = slpc_query_task_state(slpc); 367 368 if (!ret) 369 *val = slpc_decode_max_freq(slpc); 370 } 371 372 return ret; 373 } 374 375 /** 376 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC. 377 * @slpc: pointer to intel_guc_slpc. 378 * @val: frequency (MHz) 379 * 380 * This function will invoke GuC SLPC action to update the min unslice 381 * frequency. 382 * 383 * Return: 0 on success, non-zero error code on failure. 
/**
 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the min unslice
 * frequency.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val > slpc->max_freq_softlimit)
		return -EINVAL;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->min_freq_softlimit = val;

	return ret;
}

/**
 * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold min frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the min frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_min_freq(slpc);
	}

	return ret;
}

void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
	u32 pm_intrmsk_mbz = 0;

	/*
	 * Allow GuC to receive ARAT timer expiry event.
	 * This interrupt register is set up by RPS code
	 * when host-based Turbo is enabled.
	 */
	pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;

	intel_uncore_rmw(gt->uncore,
			 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
}
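/*
 * Usage sketch for the setters above (illustrative only, not part of
 * this file): a sysfs store handler would typically parse the userspace
 * string and forward it, e.g.:
 *
 *	err = kstrtou32(buf, 0, &val);
 *	if (err)
 *		return err;
 *	err = intel_guc_slpc_set_max_freq(slpc, val);
 *	return err ?: count;
 *
 * The same pattern applies to the min-freq setter. Out-of-range
 * requests are rejected with -EINVAL before any GuC communication, and
 * GuC failures are reported as -EIO.
 */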
static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
	int ret = 0;

	/*
	 * Softlimits are initially equivalent to platform limits
	 * unless they have deviated from defaults, in which case,
	 * we retain the values and set min/max accordingly.
	 */
	if (!slpc->max_freq_softlimit)
		slpc->max_freq_softlimit = slpc->rp0_freq;
	else if (slpc->max_freq_softlimit != slpc->rp0_freq)
		ret = intel_guc_slpc_set_max_freq(slpc,
						  slpc->max_freq_softlimit);

	if (unlikely(ret))
		return ret;

	if (!slpc->min_freq_softlimit)
		slpc->min_freq_softlimit = slpc->min_freq;
	else if (slpc->min_freq_softlimit != slpc->min_freq)
		return intel_guc_slpc_set_min_freq(slpc,
						   slpc->min_freq_softlimit);

	return 0;
}

static int slpc_ignore_eff_freq(struct intel_guc_slpc *slpc, bool ignore)
{
	int ret = 0;

	if (ignore) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
				     ignore);
		if (!ret)
			return slpc_set_param(slpc,
					      SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
					      slpc->min_freq);
	} else {
		ret = slpc_unset_param(slpc,
				       SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY);
		if (!ret)
			return slpc_unset_param(slpc,
						SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ);
	}

	return ret;
}

static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
	/* Force SLPC to use platform rp0 */
	return slpc_set_param(slpc,
			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
			      slpc->rp0_freq);
}

static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
	u32 rp_state_cap;

	rp_state_cap = intel_uncore_read(slpc_to_gt(slpc)->uncore,
					 GEN6_RP_STATE_CAP);

	slpc->rp0_freq = REG_FIELD_GET(RP0_CAP_MASK, rp_state_cap) *
				       GT_FREQUENCY_MULTIPLIER;
	slpc->rp1_freq = REG_FIELD_GET(RP1_CAP_MASK, rp_state_cap) *
				       GT_FREQUENCY_MULTIPLIER;
	slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) *
				       GT_FREQUENCY_MULTIPLIER;
}
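/*
 * Terminology note for slpc_get_rp_values(): RP0 is the fused maximum
 * frequency, RP1 the efficient frequency and RPn the fused minimum, all
 * read from GEN6_RP_STATE_CAP and scaled to MHz. These fused limits
 * bound every soft limit that userspace may later request.
 */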
/**
 * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
 *
 * SLPC is enabled by setting up the shared data structure and
 * sending a reset event to GuC SLPC. Initial data is set up in
 * intel_guc_slpc_init. Here we send the reset event. We do
 * not currently need a slpc_disable since this is taken care
 * of automatically when a reset/suspend occurs and the GuC
 * CTB is destroyed.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(!slpc->vma);

	slpc_shared_data_reset(slpc->vaddr);

	ret = slpc_reset(slpc);
	if (unlikely(ret < 0)) {
		drm_err(&i915->drm, "SLPC Reset event returned (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	ret = slpc_query_task_state(slpc);
	if (unlikely(ret < 0))
		return ret;

	intel_guc_pm_intrmsk_enable(&i915->gt);

	slpc_get_rp_values(slpc);

	/* Ignore efficient freq and set min to platform min */
	ret = slpc_ignore_eff_freq(slpc, true);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC min to RPn (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	/* Set SLPC max limit to RP0 */
	ret = slpc_use_fused_rp0(slpc);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC max to RP0 (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	/* Revert SLPC min/max to softlimits if necessary */
	ret = slpc_set_softlimits(slpc);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC softlimits (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	return 0;
}

int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct slpc_shared_data *data = slpc->vaddr;
	struct slpc_task_state_data *slpc_tasks;
	intel_wakeref_t wakeref;
	int ret = 0;

	GEM_BUG_ON(!slpc->vma);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_query_task_state(slpc);

		if (!ret) {
			slpc_tasks = &data->task_state_data;

			drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
			drm_printf(p, "\tGTPERF task active: %s\n",
				   yesno(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
			drm_printf(p, "\tMax freq: %u MHz\n",
				   slpc_decode_max_freq(slpc));
			drm_printf(p, "\tMin freq: %u MHz\n",
				   slpc_decode_min_freq(slpc));
		}
	}

	return ret;
}

void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
{
	if (!slpc->vma)
		return;

	i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
}
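/*
 * Lifecycle summary (as implemented above):
 *
 *	intel_guc_slpc_init_early() - detect support/selection (driver load)
 *	intel_guc_slpc_init()       - allocate and map the shared-data vma
 *	intel_guc_slpc_enable()     - reset shared data, send the reset
 *	                              event, then apply min/max policy
 *	intel_guc_slpc_fini()       - unpin and release the vma
 *
 * There is deliberately no slpc_disable(): per the intel_guc_slpc_enable()
 * kerneldoc, GuC reset/suspend destroys the CTB, which stops SLPC as a
 * side effect.
 */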