// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
	return container_of(slpc, struct intel_guc, slpc);
}

static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	return guc_to_gt(slpc_to_guc(slpc));
}

static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
{
	return slpc_to_gt(slpc)->i915;
}

static bool __detect_slpc_supported(struct intel_guc *guc)
{
	/* GuC SLPC is unavailable for pre-Gen12 */
	return guc->submission_supported &&
		GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}

static bool __guc_slpc_selected(struct intel_guc *guc)
{
	if (!intel_guc_slpc_is_supported(guc))
		return false;

	return guc->submission_selected;
}

void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	slpc->supported = __detect_slpc_supported(guc);
	slpc->selected = __guc_slpc_selected(guc);
}

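/*
 * The override_params block in the shared data is a bitmask array (one
 * bit per parameter id, 32 ids per u32) plus a parallel values array
 * indexed directly by id. Setting the bit tells SLPC that the
 * corresponding value slot is valid.
 */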
static void slpc_mem_set_param(struct slpc_shared_data *data,
			       u32 id, u32 value)
{
	GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
	/*
	 * When the flag bit is set, the corresponding value will be
	 * read and applied by SLPC.
	 */
	data->override_params.bits[id >> 5] |= (1 << (id % 32));
	data->override_params.values[id] = value;
}

static void slpc_mem_set_enabled(struct slpc_shared_data *data,
				 u8 enable_id, u8 disable_id)
{
	/*
	 * Enabling a param involves setting the enable_id
	 * to 1 and disable_id to 0.
	 */
	slpc_mem_set_param(data, enable_id, 1);
	slpc_mem_set_param(data, disable_id, 0);
}

static void slpc_mem_set_disabled(struct slpc_shared_data *data,
				  u8 enable_id, u8 disable_id)
{
	/*
	 * Disabling a param involves setting the enable_id
	 * to 0 and disable_id to 1.
	 */
	slpc_mem_set_param(data, disable_id, 1);
	slpc_mem_set_param(data, enable_id, 0);
}

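/*
 * global_state is updated by GuC in the shared page; flush the CPU
 * cacheline first so we read a fresh value rather than a stale one.
 */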
static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data;

	GEM_BUG_ON(!slpc->vma);

	drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
	data = slpc->vaddr;

	return data->header.global_state;
}

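/*
 * SLPC H2G requests share one layout: the PC_SLPC_REQUEST action id,
 * an SLPC_EVENT() dword encoding the event id and argument count, then
 * the arguments themselves. A positive return from intel_guc_send()
 * (a nonzero GuC response) is mapped to -EPROTO.
 */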
static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};

	return intel_guc_send(guc, request, ARRAY_SIZE(request));
}

static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
}

static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

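/*
 * Ask GuC to dump its current task state into the shared data page at
 * the given GGTT offset, then flush CPU caches over the whole page so
 * the decode helpers below read what GuC actually wrote.
 */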
static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_query(guc, offset);
	if (unlikely(ret))
		i915_probe_error(i915, "Failed to query task state (%pe)\n",
				 ERR_PTR(ret));

	drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);

	return ret;
}

static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	ret = guc_action_slpc_set_param(guc, id, value);
	if (ret)
		i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
				 id, value, ERR_PTR(ret));

	return ret;
}

static int slpc_unset_param(struct intel_guc_slpc *slpc,
			    u8 id)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	return guc_action_slpc_unset_param(guc, id);
}

static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	lockdep_assert_held(&slpc->lock);

	if (!intel_guc_is_ready(guc))
		return -ENODEV;

	/*
	 * Unlike intel_guc_slpc_set_min_freq(), the softlimit is not
	 * updated here, since this is used to temporarily change the
	 * min freq, for example during a waitboost. The caller is
	 * responsible for checking bounds.
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     freq);
		if (ret)
			i915_probe_error(i915, "Unable to force min freq to %u: %d\n",
					 freq, ret);
	}

	return ret;
}

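/*
 * Boosting is done from a worker rather than at the request site:
 * forcing the min freq issues an H2G message, which can sleep, so it
 * is deferred from the wait path to process context here.
 */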
static void slpc_boost_work(struct work_struct *work)
{
	struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);

	/*
	 * Raise min freq to boost. It's possible that this is greater
	 * than the current max, but it will certainly be limited by
	 * RP0. An error setting the min param is not fatal.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_read(&slpc->num_waiters)) {
		slpc_force_min_freq(slpc, slpc->boost_freq);
		slpc->num_boosts++;
	}
	mutex_unlock(&slpc->lock);
}

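/**
 * intel_guc_slpc_init() - Allocate SLPC resources.
 * @slpc: pointer to intel_guc_slpc.
 *
 * Allocate and pin the shared data buffer in GGTT, map it into the CPU
 * address space and initialize the softlimit and waitboost bookkeeping.
 *
 * Return: 0 on success, non-zero error code on failure.
 */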
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	GEM_BUG_ON(slpc->vma);

	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
	if (unlikely(err)) {
		i915_probe_error(i915,
				 "Failed to allocate SLPC struct (err=%pe)\n",
				 ERR_PTR(err));
		return err;
	}

	slpc->max_freq_softlimit = 0;
	slpc->min_freq_softlimit = 0;

	slpc->boost_freq = 0;
	atomic_set(&slpc->num_waiters, 0);
	slpc->num_boosts = 0;

	mutex_init(&slpc->lock);
	INIT_WORK(&slpc->boost_work, slpc_boost_work);

	return err;
}

static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
	switch (state) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
	return slpc_global_state_to_string(slpc_get_state(slpc));
}

static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

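/*
 * The reset event is asynchronous: a successful H2G only means GuC
 * accepted the event, so poll global_state in the shared page until
 * SLPC reports RUNNING or we give up after SLPC_RESET_TIMEOUT_MS.
 */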
static int slpc_reset(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_reset(guc, offset);

	if (unlikely(ret < 0)) {
		i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	if (!ret) {
		if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
			i915_probe_error(i915, "SLPC not enabled! State = %s\n",
					 slpc_get_state_string(slpc));
			return -EIO;
		}
	}

	return 0;
}

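/*
 * Frequencies in task_state_data are stored in hardware units; convert
 * to MHz with GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER, matching the
 * scaling intel_gpu_freq() applies elsewhere.
 */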
static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static void slpc_shared_data_reset(struct slpc_shared_data *data)
{
	memset(data, 0, sizeof(struct slpc_shared_data));

	data->header.size = sizeof(struct slpc_shared_data);

	/* Enable only GTPERF task, disable others */
	slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
			     SLPC_PARAM_TASK_DISABLE_GTPERF);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
			      SLPC_PARAM_TASK_DISABLE_BALANCER);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
			      SLPC_PARAM_TASK_DISABLE_DCC);
}

/**
 * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val < slpc->min_freq_softlimit)
		return -EINVAL;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->max_freq_softlimit = val;

	return ret;
}

/**
 * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold max frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_max_freq(slpc);
	}

	return ret;
}

/**
 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the min unslice
 * frequency.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val > slpc->max_freq_softlimit)
		return -EINVAL;

	/* Need a lock now since waitboost can be modifying min as well */
	mutex_lock(&slpc->lock);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->min_freq_softlimit = val;

	mutex_unlock(&slpc->lock);

	return ret;
}

/**
 * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold min frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the min frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_min_freq(slpc);
	}

	return ret;
}

void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
	u32 pm_intrmsk_mbz = 0;

	/*
	 * Allow GuC to receive ARAT timer expiry event.
	 * This interrupt register is set up by the RPS code
	 * when host-based turbo is enabled.
	 */
	pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;

	intel_uncore_rmw(gt->uncore,
			 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
}

static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
	int ret = 0;

	/*
	 * Softlimits are initially equivalent to platform limits
	 * unless they have deviated from defaults, in which case
	 * we retain the values and set min/max accordingly.
	 */
	if (!slpc->max_freq_softlimit)
		slpc->max_freq_softlimit = slpc->rp0_freq;
	else if (slpc->max_freq_softlimit != slpc->rp0_freq)
		ret = intel_guc_slpc_set_max_freq(slpc,
						  slpc->max_freq_softlimit);

	if (unlikely(ret))
		return ret;

	if (!slpc->min_freq_softlimit)
		slpc->min_freq_softlimit = slpc->min_freq;
	else if (slpc->min_freq_softlimit != slpc->min_freq)
		return intel_guc_slpc_set_min_freq(slpc,
						   slpc->min_freq_softlimit);

	return 0;
}

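/*
 * When ignoring the efficient frequency, also pin the min param to the
 * platform min so SLPC does not keep requesting the efficient level;
 * when honoring it again, unset both overrides to restore defaults.
 */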
static int slpc_ignore_eff_freq(struct intel_guc_slpc *slpc, bool ignore)
{
	int ret = 0;

	if (ignore) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
				     ignore);
		if (!ret)
			return slpc_set_param(slpc,
					      SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
					      slpc->min_freq);
	} else {
		ret = slpc_unset_param(slpc,
				       SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY);
		if (!ret)
			return slpc_unset_param(slpc,
						SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ);
	}

	return ret;
}

static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
	/* Force SLPC to use platform rp0 */
	return slpc_set_param(slpc,
			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
			      slpc->rp0_freq);
}

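/*
 * Cache the RP0/RP1/min frequency caps, converted from hardware units
 * to MHz via the RPS helpers, so later limit checks don't need to read
 * the fuses again. Boost defaults to RP0 unless already configured.
 */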
static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
	struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
	struct intel_rps_freq_caps caps;

	gen6_rps_get_freq_caps(rps, &caps);
	slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
	slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
	slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);

	if (!slpc->boost_freq)
		slpc->boost_freq = slpc->rp0_freq;
}

/**
 * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
 *
 * SLPC is enabled by setting up the shared data structure and
 * sending a reset event to GuC SLPC. Initial data is set up in
 * intel_guc_slpc_init; here we send the reset event. We do not
 * currently need an slpc_disable, since this is taken care of
 * automatically when a reset/suspend occurs and the GuC CTB is
 * destroyed.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(!slpc->vma);

	slpc_shared_data_reset(slpc->vaddr);

	ret = slpc_reset(slpc);
	if (unlikely(ret < 0)) {
		i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	ret = slpc_query_task_state(slpc);
	if (unlikely(ret < 0))
		return ret;

	intel_guc_pm_intrmsk_enable(to_gt(i915));

	slpc_get_rp_values(slpc);

	/* Ignore efficient freq and set min to platform min */
	ret = slpc_ignore_eff_freq(slpc, true);
	if (unlikely(ret)) {
		i915_probe_error(i915, "Failed to set SLPC min to RPn (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	/* Set SLPC max limit to RP0 */
	ret = slpc_use_fused_rp0(slpc);
	if (unlikely(ret)) {
		i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	/* Revert SLPC min/max to softlimits if necessary */
	ret = slpc_set_softlimits(slpc);
	if (unlikely(ret)) {
		i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
				 ERR_PTR(ret));
		return ret;
	}

	return 0;
}

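/**
 * intel_guc_slpc_set_boost_freq() - Set the frequency used for waitboosting.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * The new boost frequency takes effect immediately only if there are
 * active waiters; otherwise it is simply recorded for the next boost.
 *
 * Return: 0 on success, non-zero error code on failure.
 */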
int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
{
	int ret = 0;

	if (val < slpc->min_freq || val > slpc->rp0_freq)
		return -EINVAL;

	mutex_lock(&slpc->lock);

	if (slpc->boost_freq != val) {
		/* Apply only if there are active waiters */
		if (atomic_read(&slpc->num_waiters)) {
			ret = slpc_force_min_freq(slpc, val);
			if (ret) {
				ret = -EIO;
				goto done;
			}
		}

		slpc->boost_freq = val;
	}

done:
	mutex_unlock(&slpc->lock);
	return ret;
}

void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
	/*
	 * Return min back to the softlimit. This is called during
	 * request retire, so we don't fail the retire if the
	 * set_param fails.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_dec_and_test(&slpc->num_waiters))
		slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
	mutex_unlock(&slpc->lock);
}

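/**
 * intel_guc_slpc_print_info() - Dump SLPC state for debugfs.
 * @slpc: pointer to intel_guc_slpc.
 * @p: drm_printer to emit the report to.
 *
 * Queries a fresh task state snapshot from GuC and prints the global
 * state, GTPERF task status, decoded min/max frequencies and the
 * waitboost count.
 *
 * Return: 0 on success, non-zero error code on failure.
 */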
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct slpc_shared_data *data = slpc->vaddr;
	struct slpc_task_state_data *slpc_tasks;
	intel_wakeref_t wakeref;
	int ret = 0;

	GEM_BUG_ON(!slpc->vma);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_query_task_state(slpc);

		if (!ret) {
			slpc_tasks = &data->task_state_data;

			drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
			drm_printf(p, "\tGTPERF task active: %s\n",
				   yesno(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
			drm_printf(p, "\tMax freq: %u MHz\n",
				   slpc_decode_max_freq(slpc));
			drm_printf(p, "\tMin freq: %u MHz\n",
				   slpc_decode_min_freq(slpc));
			drm_printf(p, "\twaitboosts: %u\n",
				   slpc->num_boosts);
		}
	}

	return ret;
}

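/**
 * intel_guc_slpc_fini() - Release SLPC resources.
 * @slpc: pointer to intel_guc_slpc.
 *
 * Unpin and release the shared data vma along with its CPU mapping.
 */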
void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
{
	if (!slpc->vma)
		return;

	i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
}