1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "intel_guc_slpc.h"
8 #include "gt/intel_gt.h"
9 #include "gt/intel_rps.h"
10 
11 static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
12 {
13 	return container_of(slpc, struct intel_guc, slpc);
14 }
15 
/* Resolve the GT that owns the GuC embedding this SLPC state. */
static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	return guc_to_gt(guc);
}
20 
21 static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
22 {
23 	return slpc_to_gt(slpc)->i915;
24 }
25 
26 static bool __detect_slpc_supported(struct intel_guc *guc)
27 {
28 	/* GuC SLPC is unavailable for pre-Gen12 */
29 	return guc->submission_supported &&
30 		GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
31 }
32 
33 static bool __guc_slpc_selected(struct intel_guc *guc)
34 {
35 	if (!intel_guc_slpc_is_supported(guc))
36 		return false;
37 
38 	return guc->submission_selected;
39 }
40 
41 void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
42 {
43 	struct intel_guc *guc = slpc_to_guc(slpc);
44 
45 	slpc->supported = __detect_slpc_supported(guc);
46 	slpc->selected = __guc_slpc_selected(guc);
47 }
48 
49 static void slpc_mem_set_param(struct slpc_shared_data *data,
50 			       u32 id, u32 value)
51 {
52 	GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
53 	/*
54 	 * When the flag bit is set, corresponding value will be read
55 	 * and applied by SLPC.
56 	 */
57 	data->override_params.bits[id >> 5] |= (1 << (id % 32));
58 	data->override_params.values[id] = value;
59 }
60 
/*
 * Enable a task via shared memory: each task has a paired enable/disable
 * knob, so enabling means writing 1 to the enable id and 0 to the
 * disable id.
 */
static void slpc_mem_set_enabled(struct slpc_shared_data *data,
				 u8 enable_id, u8 disable_id)
{
	/*
	 * Enabling a param involves setting the enable_id
	 * to 1 and disable_id to 0.
	 */
	slpc_mem_set_param(data, enable_id, 1);
	slpc_mem_set_param(data, disable_id, 0);
}
71 
/*
 * Disable a task via shared memory: mirror of slpc_mem_set_enabled() —
 * write 1 to the disable id and 0 to the enable id.
 */
static void slpc_mem_set_disabled(struct slpc_shared_data *data,
				  u8 enable_id, u8 disable_id)
{
	/*
	 * Disabling a param involves setting the enable_id
	 * to 0 and disable_id to 1.
	 */
	slpc_mem_set_param(data, disable_id, 1);
	slpc_mem_set_param(data, enable_id, 0);
}
82 
83 static u32 slpc_get_state(struct intel_guc_slpc *slpc)
84 {
85 	struct slpc_shared_data *data;
86 
87 	GEM_BUG_ON(!slpc->vma);
88 
89 	drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
90 	data = slpc->vaddr;
91 
92 	return data->header.global_state;
93 }
94 
95 static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
96 {
97 	u32 request[] = {
98 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
99 		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
100 		id,
101 		value,
102 	};
103 	int ret;
104 
105 	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
106 
107 	return ret > 0 ? -EPROTO : ret;
108 }
109 
110 static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
111 {
112 	u32 request[] = {
113 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
114 		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
115 		id,
116 	};
117 
118 	return intel_guc_send(guc, request, ARRAY_SIZE(request));
119 }
120 
121 static bool slpc_is_running(struct intel_guc_slpc *slpc)
122 {
123 	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
124 }
125 
126 static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
127 {
128 	u32 request[] = {
129 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
130 		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
131 		offset,
132 		0,
133 	};
134 	int ret;
135 
136 	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
137 
138 	return ret > 0 ? -EPROTO : ret;
139 }
140 
141 static int slpc_query_task_state(struct intel_guc_slpc *slpc)
142 {
143 	struct intel_guc *guc = slpc_to_guc(slpc);
144 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
145 	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
146 	int ret;
147 
148 	ret = guc_action_slpc_query(guc, offset);
149 	if (unlikely(ret))
150 		drm_err(&i915->drm, "Failed to query task state (%pe)\n",
151 			ERR_PTR(ret));
152 
153 	drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
154 
155 	return ret;
156 }
157 
158 static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
159 {
160 	struct intel_guc *guc = slpc_to_guc(slpc);
161 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
162 	int ret;
163 
164 	GEM_BUG_ON(id >= SLPC_MAX_PARAM);
165 
166 	ret = guc_action_slpc_set_param(guc, id, value);
167 	if (ret)
168 		drm_err(&i915->drm, "Failed to set param %d to %u (%pe)\n",
169 			id, value, ERR_PTR(ret));
170 
171 	return ret;
172 }
173 
174 static int slpc_unset_param(struct intel_guc_slpc *slpc,
175 			    u8 id)
176 {
177 	struct intel_guc *guc = slpc_to_guc(slpc);
178 
179 	GEM_BUG_ON(id >= SLPC_MAX_PARAM);
180 
181 	return guc_action_slpc_unset_param(guc, id);
182 }
183 
184 static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
185 {
186 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
187 	struct intel_guc *guc = slpc_to_guc(slpc);
188 	intel_wakeref_t wakeref;
189 	int ret = 0;
190 
191 	lockdep_assert_held(&slpc->lock);
192 
193 	if (!intel_guc_is_ready(guc))
194 		return -ENODEV;
195 
196 	/*
197 	 * This function is a little different as compared to
198 	 * intel_guc_slpc_set_min_freq(). Softlimit will not be updated
199 	 * here since this is used to temporarily change min freq,
200 	 * for example, during a waitboost. Caller is responsible for
201 	 * checking bounds.
202 	 */
203 
204 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
205 		ret = slpc_set_param(slpc,
206 				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
207 				     freq);
208 		if (ret)
209 			drm_err(&i915->drm, "Unable to force min freq to %u: %d",
210 				freq, ret);
211 	}
212 
213 	return ret;
214 }
215 
/*
 * Deferred waitboost handler: bump the SLPC min frequency to the boost
 * frequency while there are still active waiters.
 */
static void slpc_boost_work(struct work_struct *work)
{
	struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);

	/*
	 * Raise min freq to boost. It's possible that
	 * this is greater than current max. But it will
	 * certainly be limited by RP0. An error setting
	 * the min param is not fatal.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_read(&slpc->num_waiters)) {
		slpc_force_min_freq(slpc, slpc->boost_freq);
		slpc->num_boosts++;
	}
	mutex_unlock(&slpc->lock);
}
233 
234 int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
235 {
236 	struct intel_guc *guc = slpc_to_guc(slpc);
237 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
238 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
239 	int err;
240 
241 	GEM_BUG_ON(slpc->vma);
242 
243 	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
244 	if (unlikely(err)) {
245 		drm_err(&i915->drm,
246 			"Failed to allocate SLPC struct (err=%pe)\n",
247 			ERR_PTR(err));
248 		return err;
249 	}
250 
251 	slpc->max_freq_softlimit = 0;
252 	slpc->min_freq_softlimit = 0;
253 
254 	slpc->boost_freq = 0;
255 	atomic_set(&slpc->num_waiters, 0);
256 	slpc->num_boosts = 0;
257 
258 	mutex_init(&slpc->lock);
259 	INIT_WORK(&slpc->boost_work, slpc_boost_work);
260 
261 	return err;
262 }
263 
/* Human-readable name for an SLPC global state, used in logs/debugfs. */
static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
	switch (state) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}
283 
284 static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
285 {
286 	return slpc_global_state_to_string(slpc_get_state(slpc));
287 }
288 
289 static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
290 {
291 	u32 request[] = {
292 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
293 		SLPC_EVENT(SLPC_EVENT_RESET, 2),
294 		offset,
295 		0,
296 	};
297 	int ret;
298 
299 	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
300 
301 	return ret > 0 ? -EPROTO : ret;
302 }
303 
304 static int slpc_reset(struct intel_guc_slpc *slpc)
305 {
306 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
307 	struct intel_guc *guc = slpc_to_guc(slpc);
308 	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
309 	int ret;
310 
311 	ret = guc_action_slpc_reset(guc, offset);
312 
313 	if (unlikely(ret < 0)) {
314 		drm_err(&i915->drm, "SLPC reset action failed (%pe)\n",
315 			ERR_PTR(ret));
316 		return ret;
317 	}
318 
319 	if (!ret) {
320 		if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
321 			drm_err(&i915->drm, "SLPC not enabled! State = %s\n",
322 				slpc_get_state_string(slpc));
323 			return -EIO;
324 		}
325 	}
326 
327 	return 0;
328 }
329 
330 static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
331 {
332 	struct slpc_shared_data *data = slpc->vaddr;
333 
334 	GEM_BUG_ON(!slpc->vma);
335 
336 	return	DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
337 				  data->task_state_data.freq) *
338 				  GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
339 }
340 
341 static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
342 {
343 	struct slpc_shared_data *data = slpc->vaddr;
344 
345 	GEM_BUG_ON(!slpc->vma);
346 
347 	return	DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
348 				  data->task_state_data.freq) *
349 				  GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
350 }
351 
352 static void slpc_shared_data_reset(struct slpc_shared_data *data)
353 {
354 	memset(data, 0, sizeof(struct slpc_shared_data));
355 
356 	data->header.size = sizeof(struct slpc_shared_data);
357 
358 	/* Enable only GTPERF task, disable others */
359 	slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
360 			     SLPC_PARAM_TASK_DISABLE_GTPERF);
361 
362 	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
363 			      SLPC_PARAM_TASK_DISABLE_BALANCER);
364 
365 	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
366 			      SLPC_PARAM_TASK_DISABLE_DCC);
367 }
368 
369 /**
370  * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
371  * @slpc: pointer to intel_guc_slpc.
372  * @val: frequency (MHz)
373  *
374  * This function will invoke GuC SLPC action to update the max frequency
375  * limit for unslice.
376  *
377  * Return: 0 on success, non-zero error code on failure.
378  */
379 int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
380 {
381 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
382 	intel_wakeref_t wakeref;
383 	int ret;
384 
385 	if (val < slpc->min_freq ||
386 	    val > slpc->rp0_freq ||
387 	    val < slpc->min_freq_softlimit)
388 		return -EINVAL;
389 
390 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
391 		ret = slpc_set_param(slpc,
392 				     SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
393 				     val);
394 
395 		/* Return standardized err code for sysfs calls */
396 		if (ret)
397 			ret = -EIO;
398 	}
399 
400 	if (!ret)
401 		slpc->max_freq_softlimit = val;
402 
403 	return ret;
404 }
405 
406 /**
407  * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
408  * @slpc: pointer to intel_guc_slpc.
409  * @val: pointer to val which will hold max frequency (MHz)
410  *
411  * This function will invoke GuC SLPC action to read the max frequency
412  * limit for unslice.
413  *
414  * Return: 0 on success, non-zero error code on failure.
415  */
416 int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
417 {
418 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
419 	intel_wakeref_t wakeref;
420 	int ret = 0;
421 
422 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
423 		/* Force GuC to update task data */
424 		ret = slpc_query_task_state(slpc);
425 
426 		if (!ret)
427 			*val = slpc_decode_max_freq(slpc);
428 	}
429 
430 	return ret;
431 }
432 
433 /**
434  * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
435  * @slpc: pointer to intel_guc_slpc.
436  * @val: frequency (MHz)
437  *
438  * This function will invoke GuC SLPC action to update the min unslice
439  * frequency.
440  *
441  * Return: 0 on success, non-zero error code on failure.
442  */
443 int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
444 {
445 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
446 	intel_wakeref_t wakeref;
447 	int ret;
448 
449 	if (val < slpc->min_freq ||
450 	    val > slpc->rp0_freq ||
451 	    val > slpc->max_freq_softlimit)
452 		return -EINVAL;
453 
454 	/* Need a lock now since waitboost can be modifying min as well */
455 	mutex_lock(&slpc->lock);
456 
457 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
458 
459 		ret = slpc_set_param(slpc,
460 				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
461 				     val);
462 
463 		/* Return standardized err code for sysfs calls */
464 		if (ret)
465 			ret = -EIO;
466 	}
467 
468 	if (!ret)
469 		slpc->min_freq_softlimit = val;
470 
471 	mutex_unlock(&slpc->lock);
472 
473 	return ret;
474 }
475 
476 /**
477  * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
478  * @slpc: pointer to intel_guc_slpc.
479  * @val: pointer to val which will hold min frequency (MHz)
480  *
481  * This function will invoke GuC SLPC action to read the min frequency
482  * limit for unslice.
483  *
484  * Return: 0 on success, non-zero error code on failure.
485  */
486 int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
487 {
488 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
489 	intel_wakeref_t wakeref;
490 	int ret = 0;
491 
492 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
493 		/* Force GuC to update task data */
494 		ret = slpc_query_task_state(slpc);
495 
496 		if (!ret)
497 			*val = slpc_decode_min_freq(slpc);
498 	}
499 
500 	return ret;
501 }
502 
503 void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
504 {
505 	u32 pm_intrmsk_mbz = 0;
506 
507 	/*
508 	 * Allow GuC to receive ARAT timer expiry event.
509 	 * This interrupt register is setup by RPS code
510 	 * when host based Turbo is enabled.
511 	 */
512 	pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
513 
514 	intel_uncore_rmw(gt->uncore,
515 			 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
516 }
517 
518 static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
519 {
520 	int ret = 0;
521 
522 	/*
523 	 * Softlimits are initially equivalent to platform limits
524 	 * unless they have deviated from defaults, in which case,
525 	 * we retain the values and set min/max accordingly.
526 	 */
527 	if (!slpc->max_freq_softlimit)
528 		slpc->max_freq_softlimit = slpc->rp0_freq;
529 	else if (slpc->max_freq_softlimit != slpc->rp0_freq)
530 		ret = intel_guc_slpc_set_max_freq(slpc,
531 						  slpc->max_freq_softlimit);
532 
533 	if (unlikely(ret))
534 		return ret;
535 
536 	if (!slpc->min_freq_softlimit)
537 		slpc->min_freq_softlimit = slpc->min_freq;
538 	else if (slpc->min_freq_softlimit != slpc->min_freq)
539 		return intel_guc_slpc_set_min_freq(slpc,
540 						   slpc->min_freq_softlimit);
541 
542 	return 0;
543 }
544 
545 static int slpc_ignore_eff_freq(struct intel_guc_slpc *slpc, bool ignore)
546 {
547 	int ret = 0;
548 
549 	if (ignore) {
550 		ret = slpc_set_param(slpc,
551 				     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
552 				     ignore);
553 		if (!ret)
554 			return slpc_set_param(slpc,
555 					      SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
556 					      slpc->min_freq);
557 	} else {
558 		ret = slpc_unset_param(slpc,
559 				       SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY);
560 		if (!ret)
561 			return slpc_unset_param(slpc,
562 						SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ);
563 	}
564 
565 	return ret;
566 }
567 
568 static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
569 {
570 	/* Force SLPC to used platform rp0 */
571 	return slpc_set_param(slpc,
572 			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
573 			      slpc->rp0_freq);
574 }
575 
576 static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
577 {
578 	struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
579 	u32 rp_state_cap;
580 
581 	rp_state_cap = intel_rps_read_state_cap(rps);
582 
583 	slpc->rp0_freq = REG_FIELD_GET(RP0_CAP_MASK, rp_state_cap) *
584 					GT_FREQUENCY_MULTIPLIER;
585 	slpc->rp1_freq = REG_FIELD_GET(RP1_CAP_MASK, rp_state_cap) *
586 					GT_FREQUENCY_MULTIPLIER;
587 	slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) *
588 					GT_FREQUENCY_MULTIPLIER;
589 
590 	if (!slpc->boost_freq)
591 		slpc->boost_freq = slpc->rp0_freq;
592 }
593 
/**
 * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
 *
 * SLPC is enabled by setting up the shared data structure and
 * sending reset event to GuC SLPC. Initial data is setup in
 * intel_guc_slpc_init. Here we send the reset event. We do
 * not currently need a slpc_disable since this is taken care
 * of automatically when a reset/suspend occurs and the GuC
 * CTB is destroyed.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(!slpc->vma);

	slpc_shared_data_reset(slpc->vaddr);

	ret = slpc_reset(slpc);
	if (unlikely(ret < 0)) {
		drm_err(&i915->drm, "SLPC Reset event returned (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	ret = slpc_query_task_state(slpc);
	if (unlikely(ret < 0))
		return ret;

	intel_guc_pm_intrmsk_enable(to_gt(i915));

	slpc_get_rp_values(slpc);

	/* Ignore efficient freq and set min to platform min */
	ret = slpc_ignore_eff_freq(slpc, true);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC min to RPn (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	/* Set SLPC max limit to RP0 */
	ret = slpc_use_fused_rp0(slpc);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC max to RP0 (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	/* Revert SLPC min/max to softlimits if necessary */
	ret = slpc_set_softlimits(slpc);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC softlimits (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	return 0;
}
657 
658 int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
659 {
660 	int ret = 0;
661 
662 	if (val < slpc->min_freq || val > slpc->rp0_freq)
663 		return -EINVAL;
664 
665 	mutex_lock(&slpc->lock);
666 
667 	if (slpc->boost_freq != val) {
668 		/* Apply only if there are active waiters */
669 		if (atomic_read(&slpc->num_waiters)) {
670 			ret = slpc_force_min_freq(slpc, val);
671 			if (ret) {
672 				ret = -EIO;
673 				goto done;
674 			}
675 		}
676 
677 		slpc->boost_freq = val;
678 	}
679 
680 done:
681 	mutex_unlock(&slpc->lock);
682 	return ret;
683 }
684 
/*
 * Drop a waitboost waiter reference; once the last waiter goes away,
 * return the SLPC min frequency to the softlimit.
 */
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
	/*
	 * Return min back to the softlimit.
	 * This is called during request retire,
	 * so we don't need to fail that if the
	 * set_param fails.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_dec_and_test(&slpc->num_waiters))
		slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
	mutex_unlock(&slpc->lock);
}
698 
/*
 * Dump SLPC state to the given printer: global state, GTPERF task
 * status, decoded min/max frequencies and the waitboost count. The
 * task state is refreshed from GuC first, under a runtime PM wakeref.
 */
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct slpc_shared_data *data = slpc->vaddr;
	struct slpc_task_state_data *slpc_tasks;
	intel_wakeref_t wakeref;
	int ret = 0;

	GEM_BUG_ON(!slpc->vma);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_query_task_state(slpc);

		if (!ret) {
			slpc_tasks = &data->task_state_data;

			drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
			drm_printf(p, "\tGTPERF task active: %s\n",
				   yesno(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
			drm_printf(p, "\tMax freq: %u MHz\n",
				   slpc_decode_max_freq(slpc));
			drm_printf(p, "\tMin freq: %u MHz\n",
				   slpc_decode_min_freq(slpc));
			drm_printf(p, "\twaitboosts: %u\n",
				   slpc->num_boosts);
		}
	}

	return ret;
}
729 
730 void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
731 {
732 	if (!slpc->vma)
733 		return;
734 
735 	i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
736 }
737