// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
	return container_of(slpc, struct intel_guc, slpc);
}

static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	return guc_to_gt(slpc_to_guc(slpc));
}

static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
{
	return slpc_to_gt(slpc)->i915;
}

static bool __detect_slpc_supported(struct intel_guc *guc)
{
	/* GuC SLPC is unavailable for pre-Gen12 */
	return guc->submission_supported &&
		GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}

static bool __guc_slpc_selected(struct intel_guc *guc)
{
	if (!intel_guc_slpc_is_supported(guc))
		return false;

	return guc->submission_selected;
}

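/**
 * intel_guc_slpc_init_early() - Record SLPC support/selection status.
 * @slpc: pointer to intel_guc_slpc.
 *
 * Cache whether SLPC is supported on this platform and whether it has
 * been selected, both of which depend on GuC submission status.
 */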
void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	slpc->supported = __detect_slpc_supported(guc);
	slpc->selected = __guc_slpc_selected(guc);
}

static void slpc_mem_set_param(struct slpc_shared_data *data,
			       u32 id, u32 value)
{
	GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
	/*
	 * When the flag bit is set, the corresponding value will be
	 * read and applied by SLPC. For example, id 35 sets bit 3
	 * (35 % 32) of bits[1] (35 >> 5).
	 */
	data->override_params.bits[id >> 5] |= (1 << (id % 32));
	data->override_params.values[id] = value;
}

static void slpc_mem_set_enabled(struct slpc_shared_data *data,
				 u8 enable_id, u8 disable_id)
{
	/*
	 * Enabling a param involves setting the enable_id
	 * to 1 and disable_id to 0.
	 */
	slpc_mem_set_param(data, enable_id, 1);
	slpc_mem_set_param(data, disable_id, 0);
}

static void slpc_mem_set_disabled(struct slpc_shared_data *data,
				  u8 enable_id, u8 disable_id)
{
	/*
	 * Disabling a param involves setting the enable_id
	 * to 0 and disable_id to 1.
	 */
	slpc_mem_set_param(data, disable_id, 1);
	slpc_mem_set_param(data, enable_id, 0);
}

static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data;

	GEM_BUG_ON(!slpc->vma);

	drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
	data = slpc->vaddr;

	return data->header.global_state;
}

static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};

	return intel_guc_send(guc, request, ARRAY_SIZE(request));
}

static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
}

static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_query(guc, offset);
	if (unlikely(ret))
		drm_err(&i915->drm, "Failed to query task state (%pe)\n",
			ERR_PTR(ret));

	drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);

	return ret;
}

static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	ret = guc_action_slpc_set_param(guc, id, value);
	if (ret)
		drm_err(&i915->drm, "Failed to set param %d to %u (%pe)\n",
			id, value, ERR_PTR(ret));

	return ret;
}

static int slpc_unset_param(struct intel_guc_slpc *slpc,
			    u8 id)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	return guc_action_slpc_unset_param(guc, id);
}

static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	lockdep_assert_held(&slpc->lock);

	if (!intel_guc_is_ready(guc))
		return -ENODEV;

	/*
	 * This function is a little different from
	 * intel_guc_slpc_set_min_freq(). The softlimit is not updated
	 * here since this is used to temporarily change the min freq,
	 * for example, during a waitboost. The caller is responsible
	 * for checking bounds.
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     freq);
		if (ret)
			drm_err(&i915->drm, "Unable to force min freq to %u: %d\n",
				freq, ret);
	}

	return ret;
}

static void slpc_boost_work(struct work_struct *work)
{
	struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);

	/*
	 * Raise the min freq to boost. It's possible that this is
	 * greater than the current max, but it will certainly be
	 * limited by RP0. An error setting the min param is not fatal.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_read(&slpc->num_waiters)) {
		slpc_force_min_freq(slpc, slpc->boost_freq);
		slpc->num_boosts++;
	}
	mutex_unlock(&slpc->lock);
}

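/**
 * intel_guc_slpc_init() - Allocate and initialize the SLPC shared data.
 * @slpc: pointer to intel_guc_slpc.
 *
 * Allocate a GGTT-mapped buffer for the SLPC shared data and initialize
 * the softlimits, waitboost bookkeeping and the boost worker.
 *
 * Return: 0 on success, non-zero error code on failure.
 */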
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	GEM_BUG_ON(slpc->vma);

	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
	if (unlikely(err)) {
		drm_err(&i915->drm,
			"Failed to allocate SLPC struct (err=%pe)\n",
			ERR_PTR(err));
		return err;
	}

	slpc->max_freq_softlimit = 0;
	slpc->min_freq_softlimit = 0;

	slpc->boost_freq = 0;
	atomic_set(&slpc->num_waiters, 0);
	slpc->num_boosts = 0;

	mutex_init(&slpc->lock);
	INIT_WORK(&slpc->boost_work, slpc_boost_work);

	return err;
}

static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
	switch (state) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
	return slpc_global_state_to_string(slpc_get_state(slpc));
}

static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int slpc_reset(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_reset(guc, offset);
	if (unlikely(ret < 0)) {
		drm_err(&i915->drm, "SLPC reset action failed (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	/* ret can only be 0 here: a positive send result is folded into -EPROTO */
	if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
		drm_err(&i915->drm, "SLPC not enabled! State = %s\n",
			slpc_get_state_string(slpc));
		return -EIO;
	}

	return 0;
}

static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static void slpc_shared_data_reset(struct slpc_shared_data *data)
{
	memset(data, 0, sizeof(struct slpc_shared_data));

	data->header.size = sizeof(struct slpc_shared_data);

	/* Enable only GTPERF task, disable others */
	slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
			     SLPC_PARAM_TASK_DISABLE_GTPERF);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
			      SLPC_PARAM_TASK_DISABLE_BALANCER);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
			      SLPC_PARAM_TASK_DISABLE_DCC);
}

/**
 * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val < slpc->min_freq_softlimit)
		return -EINVAL;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->max_freq_softlimit = val;

	return ret;
}

/**
 * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold max frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_max_freq(slpc);
	}

	return ret;
}

/**
 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the min unslice
 * frequency.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val > slpc->max_freq_softlimit)
		return -EINVAL;

	/* Need a lock now since waitboost can be modifying min as well */
	mutex_lock(&slpc->lock);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->min_freq_softlimit = val;

	mutex_unlock(&slpc->lock);

	return ret;
}

/**
 * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold min frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the min frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_min_freq(slpc);
	}

	return ret;
}

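/**
 * intel_guc_pm_intrmsk_enable() - Allow PM interrupts to reach GuC.
 * @gt: pointer to intel_gt.
 *
 * Clear ARAT_EXPIRED_INTRMSK in GEN6_PMINTRMSK so that GuC can receive
 * the ARAT timer expiry event.
 */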
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
	u32 pm_intrmsk_mbz = 0;

	/*
	 * Allow GuC to receive the ARAT timer expiry event.
	 * This interrupt register is set up by the RPS code
	 * when host-based turbo is enabled.
	 */
	pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;

	intel_uncore_rmw(gt->uncore,
			 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
}

static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
	int ret = 0;

	/*
	 * Softlimits are initially equivalent to platform limits
	 * unless they have deviated from the defaults, in which case
	 * we retain the values and set min/max accordingly.
	 */
	if (!slpc->max_freq_softlimit)
		slpc->max_freq_softlimit = slpc->rp0_freq;
	else if (slpc->max_freq_softlimit != slpc->rp0_freq)
		ret = intel_guc_slpc_set_max_freq(slpc,
						  slpc->max_freq_softlimit);

	if (unlikely(ret))
		return ret;

	if (!slpc->min_freq_softlimit)
		slpc->min_freq_softlimit = slpc->min_freq;
	else if (slpc->min_freq_softlimit != slpc->min_freq)
		return intel_guc_slpc_set_min_freq(slpc,
						   slpc->min_freq_softlimit);

	return 0;
}

static int slpc_ignore_eff_freq(struct intel_guc_slpc *slpc, bool ignore)
{
	int ret = 0;

	if (ignore) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
				     ignore);
		if (!ret)
			return slpc_set_param(slpc,
					      SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
					      slpc->min_freq);
	} else {
		ret = slpc_unset_param(slpc,
				       SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY);
		if (!ret)
			return slpc_unset_param(slpc,
						SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ);
	}

	return ret;
}

static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
	/* Force SLPC to use the platform rp0 */
	return slpc_set_param(slpc,
			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
			      slpc->rp0_freq);
}

static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
	struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
	u32 rp_state_cap;

	rp_state_cap = intel_rps_read_state_cap(rps);

	slpc->rp0_freq = REG_FIELD_GET(RP0_CAP_MASK, rp_state_cap) *
					GT_FREQUENCY_MULTIPLIER;
	slpc->rp1_freq = REG_FIELD_GET(RP1_CAP_MASK, rp_state_cap) *
					GT_FREQUENCY_MULTIPLIER;
	slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) *
					GT_FREQUENCY_MULTIPLIER;

	if (!slpc->boost_freq)
		slpc->boost_freq = slpc->rp0_freq;
}

/**
 * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
 *
 * SLPC is enabled by setting up the shared data structure and
 * sending a reset event to GuC SLPC. Initial data is set up in
 * intel_guc_slpc_init; here we send the reset event. We do not
 * currently need an slpc_disable, since disabling is taken care
 * of automatically when a reset/suspend occurs and the GuC
 * CTB is destroyed.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(!slpc->vma);

	slpc_shared_data_reset(slpc->vaddr);

	ret = slpc_reset(slpc);
	if (unlikely(ret < 0)) {
		drm_err(&i915->drm, "SLPC Reset event returned (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	ret = slpc_query_task_state(slpc);
	if (unlikely(ret < 0))
		return ret;

	intel_guc_pm_intrmsk_enable(to_gt(i915));

	slpc_get_rp_values(slpc);

	/* Ignore efficient freq and set min to platform min */
	ret = slpc_ignore_eff_freq(slpc, true);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC min to RPn (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	/* Set SLPC max limit to RP0 */
	ret = slpc_use_fused_rp0(slpc);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC max to RP0 (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	/* Revert SLPC min/max to softlimits if necessary */
	ret = slpc_set_softlimits(slpc);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC softlimits (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	return 0;
}

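/**
 * intel_guc_slpc_set_boost_freq() - Set the waitboost frequency.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * Update the frequency used for waitboosting. If boosting is already
 * in progress (there are active waiters), the new value takes effect
 * immediately via a forced min freq update.
 *
 * Return: 0 on success, non-zero error code on failure.
 */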
int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
{
	int ret = 0;

	if (val < slpc->min_freq || val > slpc->rp0_freq)
		return -EINVAL;

	mutex_lock(&slpc->lock);

	if (slpc->boost_freq != val) {
		/* Apply only if there are active waiters */
		if (atomic_read(&slpc->num_waiters)) {
			ret = slpc_force_min_freq(slpc, val);
			if (ret) {
				ret = -EIO;
				goto done;
			}
		}

		slpc->boost_freq = val;
	}

done:
	mutex_unlock(&slpc->lock);
	return ret;
}

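/**
 * intel_guc_slpc_dec_waiters() - Drop one waitboost waiter.
 * @slpc: pointer to intel_guc_slpc.
 *
 * When the last waiter is gone, restore the min frequency to the
 * softlimit, ending the waitboost.
 */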
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
	/*
	 * Return min back to the softlimit. This is called during
	 * request retire, so we don't need to fail the retire if
	 * the set_param fails.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_dec_and_test(&slpc->num_waiters))
		slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
	mutex_unlock(&slpc->lock);
}

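/**
 * intel_guc_slpc_print_info() - Print SLPC state to a drm_printer.
 * @slpc: pointer to intel_guc_slpc.
 * @p: pointer to drm_printer.
 *
 * Query the SLPC task state, then print the global state, GTPERF task
 * status, decoded min/max frequencies and the waitboost count.
 *
 * Return: 0 on success, non-zero error code on failure.
 */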
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct slpc_shared_data *data = slpc->vaddr;
	struct slpc_task_state_data *slpc_tasks;
	intel_wakeref_t wakeref;
	int ret = 0;

	GEM_BUG_ON(!slpc->vma);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_query_task_state(slpc);

		if (!ret) {
			slpc_tasks = &data->task_state_data;

			drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
			drm_printf(p, "\tGTPERF task active: %s\n",
				   yesno(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
			drm_printf(p, "\tMax freq: %u MHz\n",
				   slpc_decode_max_freq(slpc));
			drm_printf(p, "\tMin freq: %u MHz\n",
				   slpc_decode_min_freq(slpc));
			drm_printf(p, "\twaitboosts: %u\n",
				   slpc->num_boosts);
		}
	}

	return ret;
}

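/**
 * intel_guc_slpc_fini() - Release SLPC resources.
 * @slpc: pointer to intel_guc_slpc.
 *
 * Unpin and release the vma holding the SLPC shared data, if allocated.
 */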
void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
{
	if (!slpc->vma)
		return;

	i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
}