1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include <drm/drm_cache.h>
7 #include <linux/string_helpers.h>
8 
9 #include "i915_drv.h"
10 #include "i915_reg.h"
11 #include "intel_guc_slpc.h"
12 #include "intel_mchbar_regs.h"
13 #include "gt/intel_gt.h"
14 #include "gt/intel_gt_regs.h"
15 #include "gt/intel_rps.h"
16 
/* Resolve the SLPC state back to its containing GuC instance. */
static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
	return container_of(slpc, struct intel_guc, slpc);
}
21 
/* Resolve the SLPC state to the GT it belongs to. */
static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	return guc_to_gt(guc);
}
26 
27 static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
28 {
29 	return slpc_to_gt(slpc)->i915;
30 }
31 
32 static bool __detect_slpc_supported(struct intel_guc *guc)
33 {
34 	/* GuC SLPC is unavailable for pre-Gen12 */
35 	return guc->submission_supported &&
36 		GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
37 }
38 
39 static bool __guc_slpc_selected(struct intel_guc *guc)
40 {
41 	if (!intel_guc_slpc_is_supported(guc))
42 		return false;
43 
44 	return guc->submission_selected;
45 }
46 
/*
 * Latch SLPC support/selection flags at early init.
 *
 * NOTE(review): ->supported is written before ->selected;
 * __guc_slpc_selected() presumably consults the just-set supported
 * flag via intel_guc_slpc_is_supported() — keep this ordering.
 */
void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);

	slpc->supported = __detect_slpc_supported(guc);
	slpc->selected = __guc_slpc_selected(guc);
}
54 
55 static void slpc_mem_set_param(struct slpc_shared_data *data,
56 			       u32 id, u32 value)
57 {
58 	GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
59 	/*
60 	 * When the flag bit is set, corresponding value will be read
61 	 * and applied by SLPC.
62 	 */
63 	data->override_params.bits[id >> 5] |= (1 << (id % 32));
64 	data->override_params.values[id] = value;
65 }
66 
67 static void slpc_mem_set_enabled(struct slpc_shared_data *data,
68 				 u8 enable_id, u8 disable_id)
69 {
70 	/*
71 	 * Enabling a param involves setting the enable_id
72 	 * to 1 and disable_id to 0.
73 	 */
74 	slpc_mem_set_param(data, enable_id, 1);
75 	slpc_mem_set_param(data, disable_id, 0);
76 }
77 
78 static void slpc_mem_set_disabled(struct slpc_shared_data *data,
79 				  u8 enable_id, u8 disable_id)
80 {
81 	/*
82 	 * Disabling a param involves setting the enable_id
83 	 * to 0 and disable_id to 1.
84 	 */
85 	slpc_mem_set_param(data, disable_id, 1);
86 	slpc_mem_set_param(data, enable_id, 0);
87 }
88 
89 static u32 slpc_get_state(struct intel_guc_slpc *slpc)
90 {
91 	struct slpc_shared_data *data;
92 
93 	GEM_BUG_ON(!slpc->vma);
94 
95 	drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
96 	data = slpc->vaddr;
97 
98 	return data->header.global_state;
99 }
100 
101 static int guc_action_slpc_set_param_nb(struct intel_guc *guc, u8 id, u32 value)
102 {
103 	u32 request[] = {
104 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
105 		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
106 		id,
107 		value,
108 	};
109 	int ret;
110 
111 	ret = intel_guc_send_nb(guc, request, ARRAY_SIZE(request), 0);
112 
113 	return ret > 0 ? -EPROTO : ret;
114 }
115 
116 static int slpc_set_param_nb(struct intel_guc_slpc *slpc, u8 id, u32 value)
117 {
118 	struct intel_guc *guc = slpc_to_guc(slpc);
119 
120 	GEM_BUG_ON(id >= SLPC_MAX_PARAM);
121 
122 	return guc_action_slpc_set_param_nb(guc, id, value);
123 }
124 
125 static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
126 {
127 	u32 request[] = {
128 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
129 		SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
130 		id,
131 		value,
132 	};
133 	int ret;
134 
135 	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
136 
137 	return ret > 0 ? -EPROTO : ret;
138 }
139 
140 static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
141 {
142 	u32 request[] = {
143 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
144 		SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
145 		id,
146 	};
147 
148 	return intel_guc_send(guc, request, ARRAY_SIZE(request));
149 }
150 
/* Has SLPC reached the RUNNING global state in shared memory? */
static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
	return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
}
155 
156 static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
157 {
158 	u32 request[] = {
159 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
160 		SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
161 		offset,
162 		0,
163 	};
164 	int ret;
165 
166 	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
167 
168 	return ret > 0 ? -EPROTO : ret;
169 }
170 
/*
 * Request that GuC dump its current task state into the shared data
 * page, then invalidate the CPU's cached view of that page so reads
 * see what GuC just wrote. The flush is performed regardless of the
 * query result.
 */
static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
	int ret;

	ret = guc_action_slpc_query(guc, offset);
	if (unlikely(ret))
		i915_probe_error(i915, "Failed to query task state (%pe)\n",
				 ERR_PTR(ret));

	drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);

	return ret;
}
187 
188 static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
189 {
190 	struct intel_guc *guc = slpc_to_guc(slpc);
191 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
192 	int ret;
193 
194 	GEM_BUG_ON(id >= SLPC_MAX_PARAM);
195 
196 	ret = guc_action_slpc_set_param(guc, id, value);
197 	if (ret)
198 		i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
199 				 id, value, ERR_PTR(ret));
200 
201 	return ret;
202 }
203 
204 static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
205 {
206 	struct intel_guc *guc = slpc_to_guc(slpc);
207 
208 	GEM_BUG_ON(id >= SLPC_MAX_PARAM);
209 
210 	return guc_action_slpc_unset_param(guc, id);
211 }
212 
/*
 * Temporarily force the SLPC min frequency to @freq (MHz) via a
 * non-blocking H2G message, e.g. for waitboosting. Caller must hold
 * slpc->lock and is responsible for bounds checking; the cached
 * min softlimit is deliberately left untouched.
 */
static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct intel_guc *guc = slpc_to_guc(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	lockdep_assert_held(&slpc->lock);

	/* Can't talk to SLPC before GuC is up */
	if (!intel_guc_is_ready(guc))
		return -ENODEV;

	/*
	 * This function is a little different as compared to
	 * intel_guc_slpc_set_min_freq(). Softlimit will not be updated
	 * here since this is used to temporarily change min freq,
	 * for example, during a waitboost. Caller is responsible for
	 * checking bounds.
	 */

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Non-blocking request will avoid stalls */
		ret = slpc_set_param_nb(slpc,
					SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
					freq);
		if (ret)
			drm_notice(&i915->drm,
				   "Failed to send set_param for min freq(%d): (%d)\n",
				   freq, ret);
	}

	return ret;
}
246 
/*
 * Deferred waitboost handler: raise the SLPC min frequency to
 * boost_freq on behalf of waiting clients. Runs from slpc->boost_work.
 */
static void slpc_boost_work(struct work_struct *work)
{
	struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
	int err;

	/*
	 * Raise min freq to boost. It's possible that
	 * this is greater than current max. But it will
	 * certainly be limited by RP0. An error setting
	 * the min param is not fatal.
	 */
	mutex_lock(&slpc->lock);
	/* Re-check under the lock: the waiters may have left already */
	if (atomic_read(&slpc->num_waiters)) {
		err = slpc_force_min_freq(slpc, slpc->boost_freq);
		if (!err)
			slpc->num_boosts++;
	}
	mutex_unlock(&slpc->lock);
}
266 
267 int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
268 {
269 	struct intel_guc *guc = slpc_to_guc(slpc);
270 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
271 	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
272 	int err;
273 
274 	GEM_BUG_ON(slpc->vma);
275 
276 	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
277 	if (unlikely(err)) {
278 		i915_probe_error(i915,
279 				 "Failed to allocate SLPC struct (err=%pe)\n",
280 				 ERR_PTR(err));
281 		return err;
282 	}
283 
284 	slpc->max_freq_softlimit = 0;
285 	slpc->min_freq_softlimit = 0;
286 	slpc->min_is_rpmax = false;
287 
288 	slpc->boost_freq = 0;
289 	atomic_set(&slpc->num_waiters, 0);
290 	slpc->num_boosts = 0;
291 	slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
292 
293 	mutex_init(&slpc->lock);
294 	INIT_WORK(&slpc->boost_work, slpc_boost_work);
295 
296 	return err;
297 }
298 
/* Human-readable name for an SLPC global state, used in logs/debugfs. */
static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
	switch (state) {
	case SLPC_GLOBAL_STATE_NOT_RUNNING:
		return "not running";
	case SLPC_GLOBAL_STATE_INITIALIZING:
		return "initializing";
	case SLPC_GLOBAL_STATE_RESETTING:
		return "resetting";
	case SLPC_GLOBAL_STATE_RUNNING:
		return "running";
	case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
		return "shutting down";
	case SLPC_GLOBAL_STATE_ERROR:
		return "error";
	default:
		return "unknown";
	}
}
318 
/* Current SLPC global state as a string, decoded from shared memory. */
static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
	return slpc_global_state_to_string(slpc_get_state(slpc));
}
323 
324 static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
325 {
326 	u32 request[] = {
327 		GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
328 		SLPC_EVENT(SLPC_EVENT_RESET, 2),
329 		offset,
330 		0,
331 	};
332 	int ret;
333 
334 	ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
335 
336 	return ret > 0 ? -EPROTO : ret;
337 }
338 
339 static int slpc_reset(struct intel_guc_slpc *slpc)
340 {
341 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
342 	struct intel_guc *guc = slpc_to_guc(slpc);
343 	u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
344 	int ret;
345 
346 	ret = guc_action_slpc_reset(guc, offset);
347 
348 	if (unlikely(ret < 0)) {
349 		i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
350 				 ERR_PTR(ret));
351 		return ret;
352 	}
353 
354 	if (!ret) {
355 		if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
356 			i915_probe_error(i915, "SLPC not enabled! State = %s\n",
357 					 slpc_get_state_string(slpc));
358 			return -EIO;
359 		}
360 	}
361 
362 	return 0;
363 }
364 
365 static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
366 {
367 	struct slpc_shared_data *data = slpc->vaddr;
368 
369 	GEM_BUG_ON(!slpc->vma);
370 
371 	return	DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
372 				  data->task_state_data.freq) *
373 				  GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
374 }
375 
376 static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
377 {
378 	struct slpc_shared_data *data = slpc->vaddr;
379 
380 	GEM_BUG_ON(!slpc->vma);
381 
382 	return	DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
383 				  data->task_state_data.freq) *
384 				  GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
385 }
386 
387 static void slpc_shared_data_reset(struct slpc_shared_data *data)
388 {
389 	memset(data, 0, sizeof(struct slpc_shared_data));
390 
391 	data->header.size = sizeof(struct slpc_shared_data);
392 
393 	/* Enable only GTPERF task, disable others */
394 	slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
395 			     SLPC_PARAM_TASK_DISABLE_GTPERF);
396 
397 	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
398 			      SLPC_PARAM_TASK_DISABLE_BALANCER);
399 
400 	slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
401 			      SLPC_PARAM_TASK_DISABLE_DCC);
402 }
403 
404 /**
405  * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
406  * @slpc: pointer to intel_guc_slpc.
407  * @val: frequency (MHz)
408  *
409  * This function will invoke GuC SLPC action to update the max frequency
410  * limit for unslice.
411  *
412  * Return: 0 on success, non-zero error code on failure.
413  */
414 int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
415 {
416 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
417 	intel_wakeref_t wakeref;
418 	int ret;
419 
420 	if (val < slpc->min_freq ||
421 	    val > slpc->rp0_freq ||
422 	    val < slpc->min_freq_softlimit)
423 		return -EINVAL;
424 
425 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
426 		ret = slpc_set_param(slpc,
427 				     SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
428 				     val);
429 
430 		/* Return standardized err code for sysfs calls */
431 		if (ret)
432 			ret = -EIO;
433 	}
434 
435 	if (!ret)
436 		slpc->max_freq_softlimit = val;
437 
438 	return ret;
439 }
440 
441 /**
442  * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
443  * @slpc: pointer to intel_guc_slpc.
444  * @val: pointer to val which will hold max frequency (MHz)
445  *
446  * This function will invoke GuC SLPC action to read the max frequency
447  * limit for unslice.
448  *
449  * Return: 0 on success, non-zero error code on failure.
450  */
451 int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
452 {
453 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
454 	intel_wakeref_t wakeref;
455 	int ret = 0;
456 
457 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
458 		/* Force GuC to update task data */
459 		ret = slpc_query_task_state(slpc);
460 
461 		if (!ret)
462 			*val = slpc_decode_max_freq(slpc);
463 	}
464 
465 	return ret;
466 }
467 
468 /**
469  * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
470  * @slpc: pointer to intel_guc_slpc.
471  * @val: frequency (MHz)
472  *
473  * This function will invoke GuC SLPC action to update the min unslice
474  * frequency.
475  *
476  * Return: 0 on success, non-zero error code on failure.
477  */
478 int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
479 {
480 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
481 	intel_wakeref_t wakeref;
482 	int ret;
483 
484 	if (val < slpc->min_freq ||
485 	    val > slpc->rp0_freq ||
486 	    val > slpc->max_freq_softlimit)
487 		return -EINVAL;
488 
489 	/* Need a lock now since waitboost can be modifying min as well */
490 	mutex_lock(&slpc->lock);
491 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
492 
493 	/* Ignore efficient freq if lower min freq is requested */
494 	ret = slpc_set_param(slpc,
495 			     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
496 			     val < slpc->rp1_freq);
497 	if (ret) {
498 		i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n",
499 				 ERR_PTR(ret));
500 		goto out;
501 	}
502 
503 	ret = slpc_set_param(slpc,
504 			     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
505 			     val);
506 
507 	if (!ret)
508 		slpc->min_freq_softlimit = val;
509 
510 out:
511 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
512 	mutex_unlock(&slpc->lock);
513 
514 	/* Return standardized err code for sysfs calls */
515 	if (ret)
516 		ret = -EIO;
517 
518 	return ret;
519 }
520 
521 /**
522  * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
523  * @slpc: pointer to intel_guc_slpc.
524  * @val: pointer to val which will hold min frequency (MHz)
525  *
526  * This function will invoke GuC SLPC action to read the min frequency
527  * limit for unslice.
528  *
529  * Return: 0 on success, non-zero error code on failure.
530  */
531 int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
532 {
533 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
534 	intel_wakeref_t wakeref;
535 	int ret = 0;
536 
537 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
538 		/* Force GuC to update task data */
539 		ret = slpc_query_task_state(slpc);
540 
541 		if (!ret)
542 			*val = slpc_decode_min_freq(slpc);
543 	}
544 
545 	return ret;
546 }
547 
548 int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
549 {
550 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
551 	intel_wakeref_t wakeref;
552 	int ret = 0;
553 
554 	if (!HAS_MEDIA_RATIO_MODE(i915))
555 		return -ENODEV;
556 
557 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
558 		ret = slpc_set_param(slpc,
559 				     SLPC_PARAM_MEDIA_FF_RATIO_MODE,
560 				     val);
561 	return ret;
562 }
563 
564 void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
565 {
566 	u32 pm_intrmsk_mbz = 0;
567 
568 	/*
569 	 * Allow GuC to receive ARAT timer expiry event.
570 	 * This interrupt register is setup by RPS code
571 	 * when host based Turbo is enabled.
572 	 */
573 	pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
574 
575 	intel_uncore_rmw(gt->uncore,
576 			 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
577 }
578 
/*
 * Push the cached min/max softlimits to GuC, or initialize them from
 * the platform/GuC values on first run (a softlimit of 0 means "unset").
 */
static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
	int ret = 0;

	/*
	 * Softlimits are initially equivalent to platform limits
	 * unless they have deviated from defaults, in which case,
	 * we retain the values and set min/max accordingly.
	 */
	if (!slpc->max_freq_softlimit) {
		slpc->max_freq_softlimit = slpc->rp0_freq;
		slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
	} else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
		ret = intel_guc_slpc_set_max_freq(slpc,
						  slpc->max_freq_softlimit);
	}

	if (unlikely(ret))
		return ret;

	if (!slpc->min_freq_softlimit) {
		/* Min softlimit starts at whatever GuC currently reports */
		ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
		if (unlikely(ret))
			return ret;
		slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
	} else if (slpc->min_freq_softlimit != slpc->min_freq) {
		return intel_guc_slpc_set_min_freq(slpc,
						   slpc->min_freq_softlimit);
	}

	return 0;
}
611 
612 static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
613 {
614 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
615 	int slpc_min_freq;
616 	int ret;
617 
618 	ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
619 	if (ret) {
620 		drm_err(&i915->drm,
621 			"Failed to get min freq: (%d)\n",
622 			ret);
623 		return false;
624 	}
625 
626 	if (slpc_min_freq == SLPC_MAX_FREQ_MHZ)
627 		return true;
628 	else
629 		return false;
630 }
631 
632 static void update_server_min_softlimit(struct intel_guc_slpc *slpc)
633 {
634 	/* For server parts, SLPC min will be at RPMax.
635 	 * Use min softlimit to clamp it to RP0 instead.
636 	 */
637 	if (!slpc->min_freq_softlimit &&
638 	    is_slpc_min_freq_rpmax(slpc)) {
639 		slpc->min_is_rpmax = true;
640 		slpc->min_freq_softlimit = slpc->rp0_freq;
641 		(slpc_to_gt(slpc))->defaults.min_freq = slpc->min_freq_softlimit;
642 	}
643 }
644 
static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
	/* Force SLPC to use the fused platform RP0 as its max frequency */
	return slpc_set_param(slpc,
			      SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
			      slpc->rp0_freq);
}
652 
653 static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
654 {
655 	struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
656 	struct intel_rps_freq_caps caps;
657 
658 	gen6_rps_get_freq_caps(rps, &caps);
659 	slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
660 	slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
661 	slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);
662 
663 	if (!slpc->boost_freq)
664 		slpc->boost_freq = slpc->rp0_freq;
665 }
666 
667 /**
668  * intel_guc_slpc_override_gucrc_mode() - override GUCRC mode
669  * @slpc: pointer to intel_guc_slpc.
670  * @mode: new value of the mode.
671  *
672  * This function will override the GUCRC mode.
673  *
674  * Return: 0 on success, non-zero error code on failure.
675  */
676 int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
677 {
678 	int ret;
679 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
680 	intel_wakeref_t wakeref;
681 
682 	if (mode >= SLPC_GUCRC_MODE_MAX)
683 		return -EINVAL;
684 
685 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
686 		ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
687 		if (ret)
688 			drm_err(&i915->drm,
689 				"Override gucrc mode %d failed %d\n",
690 				mode, ret);
691 	}
692 
693 	return ret;
694 }
695 
696 int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
697 {
698 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
699 	intel_wakeref_t wakeref;
700 	int ret = 0;
701 
702 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
703 		ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
704 		if (ret)
705 			drm_err(&i915->drm,
706 				"Unsetting gucrc mode failed %d\n",
707 				ret);
708 	}
709 
710 	return ret;
711 }
712 
713 /*
714  * intel_guc_slpc_enable() - Start SLPC
715  * @slpc: pointer to intel_guc_slpc.
716  *
717  * SLPC is enabled by setting up the shared data structure and
718  * sending reset event to GuC SLPC. Initial data is setup in
719  * intel_guc_slpc_init. Here we send the reset event. We do
720  * not currently need a slpc_disable since this is taken care
721  * of automatically when a reset/suspend occurs and the GuC
722  * CTB is destroyed.
723  *
724  * Return: 0 on success, non-zero error code on failure.
725  */
726 int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
727 {
728 	struct drm_i915_private *i915 = slpc_to_i915(slpc);
729 	int ret;
730 
731 	GEM_BUG_ON(!slpc->vma);
732 
733 	slpc_shared_data_reset(slpc->vaddr);
734 
735 	ret = slpc_reset(slpc);
736 	if (unlikely(ret < 0)) {
737 		i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
738 				 ERR_PTR(ret));
739 		return ret;
740 	}
741 
742 	ret = slpc_query_task_state(slpc);
743 	if (unlikely(ret < 0))
744 		return ret;
745 
746 	intel_guc_pm_intrmsk_enable(to_gt(i915));
747 
748 	slpc_get_rp_values(slpc);
749 
750 	/* Handle the case where min=max=RPmax */
751 	update_server_min_softlimit(slpc);
752 
753 	/* Set SLPC max limit to RP0 */
754 	ret = slpc_use_fused_rp0(slpc);
755 	if (unlikely(ret)) {
756 		i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
757 				 ERR_PTR(ret));
758 		return ret;
759 	}
760 
761 	/* Revert SLPC min/max to softlimits if necessary */
762 	ret = slpc_set_softlimits(slpc);
763 	if (unlikely(ret)) {
764 		i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
765 				 ERR_PTR(ret));
766 		return ret;
767 	}
768 
769 	/* Set cached media freq ratio mode */
770 	intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
771 
772 	return 0;
773 }
774 
775 int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
776 {
777 	int ret = 0;
778 
779 	if (val < slpc->min_freq || val > slpc->rp0_freq)
780 		return -EINVAL;
781 
782 	mutex_lock(&slpc->lock);
783 
784 	if (slpc->boost_freq != val) {
785 		/* Apply only if there are active waiters */
786 		if (atomic_read(&slpc->num_waiters)) {
787 			ret = slpc_force_min_freq(slpc, val);
788 			if (ret) {
789 				ret = -EIO;
790 				goto done;
791 			}
792 		}
793 
794 		slpc->boost_freq = val;
795 	}
796 
797 done:
798 	mutex_unlock(&slpc->lock);
799 	return ret;
800 }
801 
/*
 * Drop one waitboost reference; when the last waiter goes away the
 * SLPC min frequency is restored to the cached softlimit.
 */
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
	/*
	 * Return min back to the softlimit.
	 * This is called during request retire,
	 * so we don't need to fail that if the
	 * set_param fails.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_dec_and_test(&slpc->num_waiters))
		slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
	mutex_unlock(&slpc->lock);
}
815 
/*
 * Print SLPC status (global state, GTPERF task status, decoded min/max
 * frequencies, waitboost count) into @p, refreshing the shared data
 * from GuC first. Returns 0 on success or the query's error code.
 */
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	struct slpc_shared_data *data = slpc->vaddr;
	struct slpc_task_state_data *slpc_tasks;
	intel_wakeref_t wakeref;
	int ret = 0;

	GEM_BUG_ON(!slpc->vma);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* Refresh the shared data page before decoding it */
		ret = slpc_query_task_state(slpc);

		if (!ret) {
			slpc_tasks = &data->task_state_data;

			drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
			drm_printf(p, "\tGTPERF task active: %s\n",
				   str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
			drm_printf(p, "\tMax freq: %u MHz\n",
				   slpc_decode_max_freq(slpc));
			drm_printf(p, "\tMin freq: %u MHz\n",
				   slpc_decode_min_freq(slpc));
			drm_printf(p, "\twaitboosts: %u\n",
				   slpc->num_boosts);
		}
	}

	return ret;
}
846 
847 void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
848 {
849 	if (!slpc->vma)
850 		return;
851 
852 	i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
853 }
854