// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"

static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
	return container_of(slpc, struct intel_guc, slpc);
}

static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	return guc_to_gt(slpc_to_guc(slpc));
}

static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
{
	return slpc_to_gt(slpc)->i915;
}

static bool __detect_slpc_supported(struct intel_guc *guc)
{
	/* GuC SLPC is unavailable for pre-Gen12 */
	return guc->submission_supported &&
		GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}

static bool __guc_slpc_selected(struct intel_guc *guc)
{
	if (!intel_guc_slpc_is_supported(guc))
		return false;

	return guc->submission_selected;
}

void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	slpc->supported = __detect_slpc_supported(guc);
	slpc->selected = __guc_slpc_selected(guc);
}

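/*
 * Parameter overrides live in the SLPC shared data page: each parameter has
 * a flag bit in the override_params bitmap (bit id in word id >> 5) and a
 * slot in the values array that SLPC consumes when it processes a reset.
 */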
static void slpc_mem_set_param(struct slpc_shared_data *data,
			       u32 id, u32 value)
{
	GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
	/*
	 * When the flag bit is set, the corresponding value will be read
	 * and applied by SLPC.
	 */
	data->override_params.bits[id >> 5] |= (1 << (id % 32));
	data->override_params.values[id] = value;
}

static void slpc_mem_set_enabled(struct slpc_shared_data *data,
				 u8 enable_id, u8 disable_id)
{
	/*
	 * Enabling a param involves setting the enable_id
	 * to 1 and disable_id to 0.
	 */
	slpc_mem_set_param(data, enable_id, 1);
	slpc_mem_set_param(data, disable_id, 0);
}

static void slpc_mem_set_disabled(struct slpc_shared_data *data,
				  u8 enable_id, u8 disable_id)
{
	/*
	 * Disabling a param involves setting the enable_id
	 * to 0 and disable_id to 1.
	 */
	slpc_mem_set_param(data, disable_id, 1);
	slpc_mem_set_param(data, enable_id, 0);
}

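/*
 * GuC updates the global state in the shared data page, so flush the CPU
 * cache before reading it to avoid returning a stale value.
 */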
static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data;

	GEM_BUG_ON(!slpc->vma);

	drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
	data = slpc->vaddr;

	return data->header.global_state;
}

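/*
 * SLPC H2G requests share a single GuC action: the first dword selects
 * GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST, the second encodes the SLPC event id
 * plus its argument count, and the remaining dwords carry the arguments.
 */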
static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
		id,
		value,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
		id,
	};

	return intel_guc_send(guc, request, ARRAY_SIZE(request));
}

static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
}

static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

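/*
 * Ask GuC to dump its current task state into the shared data buffer (passed
 * by GGTT offset), then flush the CPU cache so the decode helpers read
 * fresh values.
 */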
static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_query(guc, offset);
	if (unlikely(ret))
		drm_err(&i915->drm, "Failed to query task state (%pe)\n",
			ERR_PTR(ret));

	drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);

	return ret;
}

static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	ret = guc_action_slpc_set_param(guc, id, value);
	if (ret)
		drm_err(&i915->drm, "Failed to set param %d to %u (%pe)\n",
			id, value, ERR_PTR(ret));

	return ret;
}

static int slpc_unset_param(struct intel_guc_slpc *slpc,
			    u8 id)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	GEM_BUG_ON(id >= SLPC_MAX_PARAM);

	return guc_action_slpc_unset_param(guc, id);
}

static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	lockdep_assert_held(&slpc->lock);

	if (!intel_guc_is_ready(guc))
		return -ENODEV;

	/*
	 * This function is a little different from
	 * intel_guc_slpc_set_min_freq(). The softlimit is not updated
	 * here since this is used to temporarily change the min freq,
	 * for example, during a waitboost. The caller is responsible for
	 * checking bounds.
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     freq);
		if (ret)
			drm_err(&i915->drm, "Unable to force min freq to %u: %d\n",
				freq, ret);
	}

	return ret;
}

static void slpc_boost_work(struct work_struct *work)
{
	struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);

	/*
	 * Raise min freq to boost. It's possible that this is greater than
	 * the current max, but it will certainly be limited by RP0. An error
	 * setting the min param is not fatal.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_read(&slpc->num_waiters)) {
		slpc_force_min_freq(slpc, slpc->boost_freq);
		slpc->num_boosts++;
	}
	mutex_unlock(&slpc->lock);
}

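/*
 * Allocate the SLPC shared data buffer in the GGTT and map it for CPU
 * access; frequency softlimits and waitboost bookkeeping start out at their
 * defaults and are populated once SLPC is enabled.
 */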
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	GEM_BUG_ON(slpc->vma);

	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
	if (unlikely(err)) {
		drm_err(&i915->drm,
			"Failed to allocate SLPC struct (err=%pe)\n",
			ERR_PTR(err));
		return err;
	}

	slpc->max_freq_softlimit = 0;
	slpc->min_freq_softlimit = 0;

	slpc->boost_freq = 0;
	atomic_set(&slpc->num_waiters, 0);
	slpc->num_boosts = 0;

	mutex_init(&slpc->lock);
	INIT_WORK(&slpc->boost_work, slpc_boost_work);

	return err;
}

static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
	switch (state) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}

static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
	return slpc_global_state_to_string(slpc_get_state(slpc));
}

static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
{
	u32 request[] = {
		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
		SLPC_EVENT(SLPC_EVENT_RESET, 2),
		offset,
		0,
	};
	int ret;

	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));

	return ret > 0 ? -EPROTO : ret;
}

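/*
 * Send the reset event with the GGTT offset of the shared data page, then
 * wait for SLPC to report the RUNNING state before declaring success.
 */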
static int slpc_reset(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_reset(guc, offset);

	if (unlikely(ret < 0)) {
		drm_err(&i915->drm, "SLPC reset action failed (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	if (!ret) {
		if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
			drm_err(&i915->drm, "SLPC not enabled! State = %s\n",
				slpc_get_state_string(slpc));
			return -EIO;
		}
	}

	return 0;
}

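/*
 * The min/max frequencies in the task state data are stored in hardware
 * units; these helpers convert them to MHz using GT_FREQUENCY_MULTIPLIER
 * and GEN9_FREQ_SCALER, matching what the rest of the driver expects.
 */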
static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data = slpc->vaddr;

	GEM_BUG_ON(!slpc->vma);

	return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
				 data->task_state_data.freq) *
				 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}

static void slpc_shared_data_reset(struct slpc_shared_data *data)
{
	memset(data, 0, sizeof(struct slpc_shared_data));

	data->header.size = sizeof(struct slpc_shared_data);

	/* Enable only GTPERF task, disable others */
	slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
			     SLPC_PARAM_TASK_DISABLE_GTPERF);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
			      SLPC_PARAM_TASK_DISABLE_BALANCER);

	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
			      SLPC_PARAM_TASK_DISABLE_DCC);
}

/**
 * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val < slpc->min_freq_softlimit)
		return -EINVAL;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->max_freq_softlimit = val;

	return ret;
}

/**
 * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold max frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the max frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_max_freq(slpc);
	}

	return ret;
}

/**
 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: frequency (MHz)
 *
 * This function will invoke GuC SLPC action to update the min unslice
 * frequency.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret;

	if (val < slpc->min_freq ||
	    val > slpc->rp0_freq ||
	    val > slpc->max_freq_softlimit)
		return -EINVAL;

	/* Need a lock now since waitboost can be modifying min as well */
	mutex_lock(&slpc->lock);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     val);

		/* Return standardized err code for sysfs calls */
		if (ret)
			ret = -EIO;
	}

	if (!ret)
		slpc->min_freq_softlimit = val;

	mutex_unlock(&slpc->lock);

	return ret;
}

/**
 * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
 * @slpc: pointer to intel_guc_slpc.
 * @val: pointer to val which will hold min frequency (MHz)
 *
 * This function will invoke GuC SLPC action to read the min frequency
 * limit for unslice.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Force GuC to update task data */
		ret = slpc_query_task_state(slpc);

		if (!ret)
			*val = slpc_decode_min_freq(slpc);
	}

	return ret;
}

void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
	u32 pm_intrmsk_mbz = 0;

	/*
	 * Allow GuC to receive ARAT timer expiry event.
	 * This interrupt register is set up by RPS code
	 * when host based Turbo is enabled.
	 */
	pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;

	intel_uncore_rmw(gt->uncore,
			 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
}

static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
	int ret = 0;

	/*
	 * Softlimits are initially equivalent to platform limits
	 * unless they have deviated from defaults, in which case
	 * we retain the values and set min/max accordingly.
	 */
	if (!slpc->max_freq_softlimit)
		slpc->max_freq_softlimit = slpc->rp0_freq;
	else if (slpc->max_freq_softlimit != slpc->rp0_freq)
		ret = intel_guc_slpc_set_max_freq(slpc,
						  slpc->max_freq_softlimit);

	if (unlikely(ret))
		return ret;

	if (!slpc->min_freq_softlimit)
		slpc->min_freq_softlimit = slpc->min_freq;
	else if (slpc->min_freq_softlimit != slpc->min_freq)
		return intel_guc_slpc_set_min_freq(slpc,
						   slpc->min_freq_softlimit);

	return 0;
}

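/*
 * When asked to ignore the efficient frequency, also force the min frequency
 * down to the platform min; when honouring it again, unset both overrides so
 * SLPC goes back to choosing its own floor.
 */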
static int slpc_ignore_eff_freq(struct intel_guc_slpc *slpc, bool ignore)
{
	int ret = 0;

	if (ignore) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
				     ignore);
		if (!ret)
			return slpc_set_param(slpc,
					      SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
					      slpc->min_freq);
	} else {
		ret = slpc_unset_param(slpc,
				       SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY);
		if (!ret)
			return slpc_unset_param(slpc,
						SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ);
	}

	return ret;
}

static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
	/* Force SLPC to use platform rp0 */
	return slpc_set_param(slpc,
			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
			      slpc->rp0_freq);
}

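/*
 * Read the fused RP0/RP1/RPn limits from RP_STATE_CAP and convert them to
 * MHz; the boost frequency defaults to RP0 unless it was already configured.
 */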
static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
	u32 rp_state_cap;

	rp_state_cap = intel_uncore_read(slpc_to_gt(slpc)->uncore,
					 GEN6_RP_STATE_CAP);

	slpc->rp0_freq = REG_FIELD_GET(RP0_CAP_MASK, rp_state_cap) *
					GT_FREQUENCY_MULTIPLIER;
	slpc->rp1_freq = REG_FIELD_GET(RP1_CAP_MASK, rp_state_cap) *
					GT_FREQUENCY_MULTIPLIER;
	slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) *
					GT_FREQUENCY_MULTIPLIER;

	if (!slpc->boost_freq)
		slpc->boost_freq = slpc->rp0_freq;
}

/**
 * intel_guc_slpc_enable() - Start SLPC
 * @slpc: pointer to intel_guc_slpc.
 *
 * SLPC is enabled by setting up the shared data structure and
 * sending a reset event to GuC SLPC. Initial data is set up in
 * intel_guc_slpc_init. Here we send the reset event. We do
 * not currently need a slpc_disable since this is taken care
 * of automatically when a reset/suspend occurs and the GuC
 * CTB is destroyed.
 *
 * Return: 0 on success, non-zero error code on failure.
 */
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	int ret;

	GEM_BUG_ON(!slpc->vma);

	slpc_shared_data_reset(slpc->vaddr);

	ret = slpc_reset(slpc);
	if (unlikely(ret < 0)) {
		drm_err(&i915->drm, "SLPC Reset event returned (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	ret = slpc_query_task_state(slpc);
	if (unlikely(ret < 0))
		return ret;

	intel_guc_pm_intrmsk_enable(to_gt(i915));

	slpc_get_rp_values(slpc);

	/* Ignore efficient freq and set min to platform min */
	ret = slpc_ignore_eff_freq(slpc, true);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC min to RPn (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	/* Set SLPC max limit to RP0 */
	ret = slpc_use_fused_rp0(slpc);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC max to RP0 (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	/* Revert SLPC min/max to softlimits if necessary */
	ret = slpc_set_softlimits(slpc);
	if (unlikely(ret)) {
		drm_err(&i915->drm, "Failed to set SLPC softlimits (%pe)\n",
			ERR_PTR(ret));
		return ret;
	}

	return 0;
}

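/*
 * Update the boost frequency used by waitboosting. If boosts are already in
 * flight (num_waiters is non-zero), the new value is applied to the GuC min
 * immediately; otherwise it simply takes effect on the next boost.
 */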
int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
{
	int ret = 0;

	if (val < slpc->min_freq || val > slpc->rp0_freq)
		return -EINVAL;

	mutex_lock(&slpc->lock);

	if (slpc->boost_freq != val) {
		/* Apply only if there are active waiters */
		if (atomic_read(&slpc->num_waiters)) {
			ret = slpc_force_min_freq(slpc, val);
			if (ret) {
				ret = -EIO;
				goto done;
			}
		}

		slpc->boost_freq = val;
	}

done:
	mutex_unlock(&slpc->lock);
	return ret;
}

void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
	/*
	 * Return min back to the softlimit.
	 * This is called during request retire, so we don't need
	 * to fail the retire if the set_param fails.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_dec_and_test(&slpc->num_waiters))
		slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
	mutex_unlock(&slpc->lock);
}

int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct slpc_shared_data *data = slpc->vaddr;
	struct slpc_task_state_data *slpc_tasks;
	intel_wakeref_t wakeref;
	int ret = 0;

	GEM_BUG_ON(!slpc->vma);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_query_task_state(slpc);

		if (!ret) {
			slpc_tasks = &data->task_state_data;

			drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
			drm_printf(p, "\tGTPERF task active: %s\n",
				   yesno(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
			drm_printf(p, "\tMax freq: %u MHz\n",
				   slpc_decode_max_freq(slpc));
			drm_printf(p, "\tMin freq: %u MHz\n",
				   slpc_decode_min_freq(slpc));
			drm_printf(p, "\twaitboosts: %u\n",
				   slpc->num_boosts);
		}
	}

	return ret;
}

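/*
 * Release the shared data buffer allocated in intel_guc_slpc_init(); safe to
 * call even if init failed or never ran, since a NULL vma is a no-op.
 */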
void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
{
	if (!slpc->vma)
		return;

	i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
}