xref: /openbmc/linux/drivers/platform/x86/amd/pmf/sps.c (revision e8069f5a)
// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Platform Management Framework (PMF) Driver
 *
 * Copyright (c) 2022, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
 */

#include "pmf.h"

static struct amd_pmf_static_slider_granular config_store;

#ifdef CONFIG_AMD_PMF_DEBUG
static const char *slider_as_str(unsigned int state)
{
	switch (state) {
	case POWER_MODE_PERFORMANCE:
		return "PERFORMANCE";
	case POWER_MODE_BALANCED_POWER:
		return "BALANCED_POWER";
	case POWER_MODE_POWER_SAVER:
		return "POWER_SAVER";
	default:
		return "Unknown Slider State";
	}
}

static const char *source_as_str(unsigned int state)
{
	switch (state) {
	case POWER_SOURCE_AC:
		return "AC";
	case POWER_SOURCE_DC:
		return "DC";
	default:
		return "Unknown Power State";
	}
}

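/* Dump the cached static slider defaults for every power source/mode pair (debug builds only). */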
static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data)
{
	int i, j;

	pr_debug("Static Slider Data - BEGIN\n");

	for (i = 0; i < POWER_SOURCE_MAX; i++) {
		for (j = 0; j < POWER_MODE_MAX; j++) {
			pr_debug("--- Source:%s Mode:%s ---\n", source_as_str(i), slider_as_str(j));
			pr_debug("SPL: %u mW\n", data->prop[i][j].spl);
			pr_debug("SPPT: %u mW\n", data->prop[i][j].sppt);
			pr_debug("SPPT_ApuOnly: %u mW\n", data->prop[i][j].sppt_apu_only);
			pr_debug("FPPT: %u mW\n", data->prop[i][j].fppt);
			pr_debug("STTMinLimit: %u mW\n", data->prop[i][j].stt_min);
			pr_debug("STT_SkinTempLimit_APU: %u C\n",
				 data->prop[i][j].stt_skin_temp[STT_TEMP_APU]);
			pr_debug("STT_SkinTempLimit_HS2: %u C\n",
				 data->prop[i][j].stt_skin_temp[STT_TEMP_HS2]);
		}
	}

	pr_debug("Static Slider Data - END\n");
}
#else
static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) {}
#endif

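/*
 * Fetch the static slider granular table from the BIOS (via the APMF
 * apmf_get_static_slider_granular() call) and cache it in config_store,
 * one entry per power source (AC/DC) and power mode.
 */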
static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
{
	struct apmf_static_slider_granular_output output;
	int i, j, idx = 0;

	memset(&config_store, 0, sizeof(config_store));
	apmf_get_static_slider_granular(dev, &output);

	for (i = 0; i < POWER_SOURCE_MAX; i++) {
		for (j = 0; j < POWER_MODE_MAX; j++) {
			config_store.prop[i][j].spl = output.prop[idx].spl;
			config_store.prop[i][j].sppt = output.prop[idx].sppt;
			config_store.prop[i][j].sppt_apu_only =
						output.prop[idx].sppt_apu_only;
			config_store.prop[i][j].fppt = output.prop[idx].fppt;
			config_store.prop[i][j].stt_min = output.prop[idx].stt_min;
			config_store.prop[i][j].stt_skin_temp[STT_TEMP_APU] =
					output.prop[idx].stt_skin_temp[STT_TEMP_APU];
			config_store.prop[i][j].stt_skin_temp[STT_TEMP_HS2] =
					output.prop[idx].stt_skin_temp[STT_TEMP_HS2];
			config_store.prop[i][j].fan_id = output.prop[idx].fan_id;
			idx++;
		}
	}
	amd_pmf_dump_sps_defaults(&config_store);
}

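/**
 * amd_pmf_update_slider - program or read back the slider power limits
 * @dev: AMD PMF device instance
 * @op: SLIDER_OP_SET issues the SET_* commands with values from config_store;
 *      SLIDER_OP_GET issues the GET_* commands and stores the results in @table
 * @idx: power mode index (performance, balanced power or power saver)
 * @table: destination for SLIDER_OP_GET; unused for SLIDER_OP_SET
 *
 * The entries used are those of the currently active power source (AC/DC).
 */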
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
			   struct amd_pmf_static_slider_granular *table)
{
	int src = amd_pmf_get_power_source();

	if (op == SLIDER_OP_SET) {
		amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
		amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
		amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
		amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
				 config_store.prop[src][idx].sppt_apu_only, NULL);
		amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
				 config_store.prop[src][idx].stt_min, NULL);
		amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
				 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
		amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
				 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
	} else if (op == SLIDER_OP_GET) {
		amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
		amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
		amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
		amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
				 &table->prop[src][idx].sppt_apu_only);
		amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
				 &table->prop[src][idx].stt_min);
		amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
				 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
		amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
				 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
	}
}

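/**
 * amd_pmf_set_sps_power_limits - apply the power limits for the current profile
 * @pmf: AMD PMF device instance
 *
 * Map the currently selected platform profile to a power mode and program the
 * corresponding slider values.
 *
 * Return: 0 on success, negative error code if the profile is not supported.
 */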
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
{
	int mode;

	mode = amd_pmf_get_pprof_modes(pmf);
	if (mode < 0)
		return mode;

	amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);

	return 0;
}

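/* Check whether the currently selected platform profile is "balanced". */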
bool is_pprof_balanced(struct amd_pmf_dev *pmf)
{
	return pmf->current_profile == PLATFORM_PROFILE_BALANCED;
}

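/* platform_profile ->profile_get() callback: report the cached profile. */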
static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
			       enum platform_profile_option *profile)
{
	struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);

	*profile = pmf->current_profile;
	return 0;
}

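/**
 * amd_pmf_get_pprof_modes - convert the current platform profile to a power mode
 * @pmf: AMD PMF device instance
 *
 * Return: the POWER_MODE_* index matching the current platform profile, or
 * -EOPNOTSUPP if the profile is not handled.
 */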
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
{
	int mode;

	switch (pmf->current_profile) {
	case PLATFORM_PROFILE_PERFORMANCE:
		mode = POWER_MODE_PERFORMANCE;
		break;
	case PLATFORM_PROFILE_BALANCED:
		mode = POWER_MODE_BALANCED_POWER;
		break;
	case PLATFORM_PROFILE_LOW_POWER:
		mode = POWER_MODE_POWER_SAVER;
		break;
	default:
		dev_err(pmf->dev, "Unknown Platform Profile.\n");
		return -EOPNOTSUPP;
	}

	return mode;
}

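/* platform_profile ->profile_set() callback: cache the new profile and apply it. */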
static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
			       enum platform_profile_option profile)
{
	struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);

	pmf->current_profile = profile;

	return amd_pmf_set_sps_power_limits(pmf);
}

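/**
 * amd_pmf_init_sps - initialize static slider (SPS) support
 * @dev: AMD PMF device instance
 *
 * Load the BIOS defaults, apply the balanced profile limits and register with
 * the platform_profile framework so that userspace can switch between the
 * low-power, balanced and performance profiles.
 *
 * Return: 0 on success, negative error code on failure.
 */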
int amd_pmf_init_sps(struct amd_pmf_dev *dev)
{
	int err;

	dev->current_profile = PLATFORM_PROFILE_BALANCED;
	amd_pmf_load_defaults_sps(dev);

	/* update SPS balanced power mode thermals */
	amd_pmf_set_sps_power_limits(dev);

	dev->pprof.profile_get = amd_pmf_profile_get;
	dev->pprof.profile_set = amd_pmf_profile_set;

	/* Setup supported modes */
	set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
	set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);

	/* Create platform_profile structure and register */
	err = platform_profile_register(&dev->pprof);
	if (err)
		dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
			err);

	return err;
}

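/* Unregister from the platform_profile framework on driver teardown. */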
void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
{
	platform_profile_remove();
}