xref: /openbmc/linux/drivers/platform/x86/amd/pmf/core.c (revision 146b6f68)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * AMD Platform Management Framework Driver
4  *
5  * Copyright (c) 2022, Advanced Micro Devices, Inc.
6  * All Rights Reserved.
7  *
8  * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
9  */
10 
11 #include <asm/amd_nb.h>
12 #include <linux/debugfs.h>
13 #include <linux/iopoll.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/platform_device.h>
17 #include <linux/power_supply.h>
18 #include "pmf.h"
19 
20 /* PMF-SMU communication registers */
21 #define AMD_PMF_REGISTER_MESSAGE	0xA18
22 #define AMD_PMF_REGISTER_RESPONSE	0xA78
23 #define AMD_PMF_REGISTER_ARGUMENT	0xA58
24 
25 /* Base address of SMU for mapping physical address to virtual address */
26 #define AMD_PMF_MAPPING_SIZE		0x01000
27 #define AMD_PMF_BASE_ADDR_OFFSET	0x10000
28 #define AMD_PMF_BASE_ADDR_LO		0x13B102E8
29 #define AMD_PMF_BASE_ADDR_HI		0x13B102EC
30 #define AMD_PMF_BASE_ADDR_LO_MASK	GENMASK(15, 0)
31 #define AMD_PMF_BASE_ADDR_HI_MASK	GENMASK(31, 20)
32 
33 /* SMU Response Codes */
34 #define AMD_PMF_RESULT_OK                    0x01
35 #define AMD_PMF_RESULT_CMD_REJECT_BUSY       0xFC
36 #define AMD_PMF_RESULT_CMD_REJECT_PREREQ     0xFD
37 #define AMD_PMF_RESULT_CMD_UNKNOWN           0xFE
38 #define AMD_PMF_RESULT_FAILED                0xFF
39 
40 /* List of supported CPU ids */
41 #define AMD_CPU_ID_RMB			0x14b5
42 #define AMD_CPU_ID_PS			0x14e8
43 
44 #define PMF_MSG_DELAY_MIN_US		50
45 #define RESPONSE_REGISTER_LOOP_MAX	20000
46 
47 #define DELAY_MIN_US	2000
48 #define DELAY_MAX_US	3000
49 
50 /* override Metrics Table sample size time (in ms) */
51 static int metrics_table_loop_ms = 1000;
52 module_param(metrics_table_loop_ms, int, 0644);
53 MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sample size time (default = 1000ms)");
54 
55 /* Force load on supported older platforms */
56 static bool force_load;
57 module_param(force_load, bool, 0444);
58 MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
59 
60 static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
61 {
62 	struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
63 
64 	if (event != PSY_EVENT_PROP_CHANGED)
65 		return NOTIFY_OK;
66 
67 	if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
68 	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
69 	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
70 		if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
71 			return NOTIFY_DONE;
72 	}
73 
74 	amd_pmf_set_sps_power_limits(pmf);
75 
76 	return NOTIFY_OK;
77 }
78 
79 static int current_power_limits_show(struct seq_file *seq, void *unused)
80 {
81 	struct amd_pmf_dev *dev = seq->private;
82 	struct amd_pmf_static_slider_granular table;
83 	int mode, src = 0;
84 
85 	mode = amd_pmf_get_pprof_modes(dev);
86 	if (mode < 0)
87 		return mode;
88 
89 	src = amd_pmf_get_power_source();
90 	amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
91 	seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
92 		   table.prop[src][mode].spl,
93 		   table.prop[src][mode].fppt,
94 		   table.prop[src][mode].sppt,
95 		   table.prop[src][mode].sppt_apu_only,
96 		   table.prop[src][mode].stt_min,
97 		   table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
98 		   table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
99 	return 0;
100 }
101 DEFINE_SHOW_ATTRIBUTE(current_power_limits);
102 
/* Tear down the driver's debugfs directory and all files beneath it */
static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
{
	debugfs_remove_recursive(dev->dbgfs_dir);
}
107 
/*
 * Expose the current slider power limits under
 * /sys/kernel/debug/amd_pmf/current_power_limits.
 * debugfs registration errors are intentionally not checked (debugfs is
 * best-effort and the helpers tolerate error-pointer parents).
 */
static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
{
	dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
	debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
			    &current_power_limits_fops);
}
114 
115 int amd_pmf_get_power_source(void)
116 {
117 	if (power_supply_is_system_supplied() > 0)
118 		return POWER_SOURCE_AC;
119 	else
120 		return POWER_SOURCE_DC;
121 }
122 
/*
 * Delayed-work handler: snapshots the PMFW metrics table into dev->m_table,
 * feeds the summed APU+dGPU power into whichever dynamic feature is enabled
 * (Auto Mode and/or CnQF), then re-arms itself every metrics_table_loop_ms.
 */
static void amd_pmf_get_metrics(struct work_struct *work)
{
	struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
	ktime_t time_elapsed_ms;
	int socket_power;

	mutex_lock(&dev->update_mutex);
	/*
	 * Transfer table contents: PMFW fills dev->buf, whose DRAM address it
	 * was given via amd_pmf_set_dram_addr(). The argument 7 selects the
	 * table to transfer -- presumably the metrics table ID; confirm
	 * against the PMFW command spec.
	 */
	memset(dev->buf, 0, sizeof(dev->m_table));
	amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
	memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));

	/* Time since the previous sample, used by the transition algorithms */
	time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
	/* Calculate the avg SoC power consumption */
	socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;

	if (dev->amt_enabled) {
		/* Apply the Auto Mode transition */
		amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
	}

	if (dev->cnqf_enabled) {
		/* Apply the CnQF transition */
		amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
	}

	dev->start_time = ktime_to_ms(ktime_get());
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
	mutex_unlock(&dev->update_mutex);
}
153 
/* Read a 32-bit PMF-SMU mailbox register at @reg_offset from the mapped base */
static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
{
	return ioread32(dev->regbase + reg_offset);
}
158 
/* Write a 32-bit value to the PMF-SMU mailbox register at @reg_offset */
static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
{
	iowrite32(val, dev->regbase + reg_offset);
}
163 
164 static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
165 {
166 	u32 value;
167 
168 	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
169 	dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);
170 
171 	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
172 	dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);
173 
174 	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
175 	dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
176 }
177 
/*
 * amd_pmf_send_cmd - issue one mailbox command to the PMF SMU firmware
 * @dev:     PMF driver data (regbase must be mapped)
 * @message: command ID written to the message register
 * @get:     when true, read the command's result from the argument register
 * @arg:     command argument
 * @data:    out parameter for the result; written only when @get is true
 *
 * Mailbox protocol (serialized by dev->lock, register order matters):
 * wait for a non-zero response (firmware idle), clear the response
 * register, write the argument, then write the message ID to trigger
 * execution, and finally poll the response register for the status code.
 *
 * Return: 0 on success; -ETIMEDOUT if the firmware never responds,
 * -EBUSY/-EINVAL/-EIO mapped from the SMU status codes.
 */
int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
{
	int rc;
	u32 val;

	mutex_lock(&dev->lock);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "failed to talk to SMU\n");
		goto out_unlock;
	}

	/* Write zero to response register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);

	/* Write argument into argument register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);

	/* Write message ID to message ID register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "SMU response timed out\n");
		goto out_unlock;
	}

	/* Map the SMU status code to an errno; rc is 0 here after the poll */
	switch (val) {
	case AMD_PMF_RESULT_OK:
		if (get) {
			/* PMFW may take longer time to return back the data */
			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
			*data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
		}
		break;
	case AMD_PMF_RESULT_CMD_REJECT_BUSY:
		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
		rc = -EBUSY;
		goto out_unlock;
	case AMD_PMF_RESULT_CMD_UNKNOWN:
		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
		rc = -EINVAL;
		goto out_unlock;
	case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
	case AMD_PMF_RESULT_FAILED:
	default:
		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
		rc = -EIO;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&dev->lock);
	/* Compiled out unless dynamic debug is enabled for this file */
	amd_pmf_dump_registers(dev);
	return rc;
}
241 
/* Root-complex PCI device IDs of the SoCs this driver supports */
static const struct pci_device_id pmf_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
	{ }
};
247 
248 static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev)
249 {
250 	u64 phys_addr;
251 	u32 hi, low;
252 
253 	phys_addr = virt_to_phys(dev->buf);
254 	hi = phys_addr >> 32;
255 	low = phys_addr & GENMASK(31, 0);
256 
257 	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
258 	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
259 }
260 
/*
 * Allocate the buffer shared with the PMFW, hand its physical address to
 * the firmware, and start the periodic metrics-collection work.
 * Return: 0 on success, -ENOMEM if the buffer allocation fails.
 */
int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
{
	/* Get Metrics Table Address */
	dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
	if (!dev->buf)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);

	/* Must happen after dev->buf exists: sends its physical address to PMFW */
	amd_pmf_set_dram_addr(dev);

	/*
	 * Start collecting the metrics data after a small delay
	 * or else, we might end up getting stale values from PMFW.
	 */
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));

	return 0;
}
280 
281 static int amd_pmf_resume_handler(struct device *dev)
282 {
283 	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
284 
285 	if (pdev->buf)
286 		amd_pmf_set_dram_addr(pdev);
287 
288 	return 0;
289 }
290 
291 static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler);
292 
/*
 * Enable the PMF features the platform firmware (APMF) advertises.
 * Auto Mode takes precedence over CnQF: CnQF is only initialized when
 * Auto Mode is not supported but a dynamic slider is.
 */
static void amd_pmf_init_features(struct amd_pmf_dev *dev)
{
	int ret;

	/* Enable Static Slider */
	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
		amd_pmf_init_sps(dev);
		/* Track AC/DC switches so the slider limits follow the power source */
		dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
		power_supply_reg_notifier(&dev->pwr_src_notifier);
		dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
	}

	/* Enable Auto Mode */
	if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
		amd_pmf_init_auto_mode(dev);
		dev_dbg(dev->dev, "Auto Mode Init done\n");
	} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
			  is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
		/* Enable Cool n Quiet Framework (CnQF) */
		ret = amd_pmf_init_cnqf(dev);
		if (ret)
			dev_warn(dev->dev, "CnQF Init failed\n");
	}
}
317 
/*
 * Disable whatever amd_pmf_init_features() enabled, keyed off the same
 * APMF capability checks so init and deinit stay symmetric.
 */
static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
{
	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
		/* Unregister the notifier before tearing SPS down */
		power_supply_unreg_notifier(&dev->pwr_src_notifier);
		amd_pmf_deinit_sps(dev);
	}

	if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
		amd_pmf_deinit_auto_mode(dev);
	} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
			  is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
		amd_pmf_deinit_cnqf(dev);
	}
}
332 
/*
 * ACPI IDs this driver binds to. driver_data 0x100 marks an older
 * platform that probe only accepts when the force_load parameter is set.
 */
static const struct acpi_device_id amd_pmf_acpi_ids[] = {
	{"AMDI0100", 0x100},
	{"AMDI0102", 0},
	{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
339 
/*
 * Probe: gate on the ACPI match (older platforms need force_load), verify
 * the root complex is a supported SoC, discover the SMU mailbox MMIO base
 * over SMN and map it, then bring up ACPI/APMF, features and debugfs.
 */
static int amd_pmf_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *id;
	struct amd_pmf_dev *dev;
	struct pci_dev *rdev;
	u32 base_addr_lo;
	u32 base_addr_hi;
	u64 base_addr;
	u32 val;
	int err;

	id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
	if (!id)
		return -ENODEV;

	/* driver_data 0x100 marks an older platform: require explicit opt-in */
	if (id->driver_data == 0x100 && !force_load)
		return -ENODEV;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->dev = &pdev->dev;

	/* The SoC root complex sits at 0000:00:00.0; its device ID identifies the CPU */
	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
		pci_dev_put(rdev);
		return -ENODEV;
	}

	dev->cpu_id = rdev->device;

	/* Fetch the two halves of the SMU base address over SMN */
	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
	if (err) {
		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
		pci_dev_put(rdev);
		return pcibios_err_to_errno(err);
	}

	/*
	 * NOTE(review): the LO register read is masked with the HI mask
	 * (bits 31:20) and the HI read with the LO mask (bits 15:0). The
	 * mask naming looks inverted -- confirm against the SMU register
	 * layout before touching this.
	 */
	base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;

	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
	if (err) {
		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
		pci_dev_put(rdev);
		return pcibios_err_to_errno(err);
	}

	base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
	pci_dev_put(rdev);
	base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

	/* devm_* mapping: unmapped automatically when the device unbinds */
	dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
				    AMD_PMF_MAPPING_SIZE);
	if (!dev->regbase)
		return -ENOMEM;

	mutex_init(&dev->lock);
	mutex_init(&dev->update_mutex);

	/* NOTE(review): apmf_acpi_init()'s result is ignored -- verify it cannot fail fatally */
	apmf_acpi_init(dev);
	platform_set_drvdata(pdev, dev);
	amd_pmf_init_features(dev);
	apmf_install_handler(dev);
	amd_pmf_dbgfs_register(dev);

	dev_info(dev->dev, "registered PMF device successfully\n");

	return 0;
}
410 
/*
 * Unbind: disable features (which stops the metrics work and notifier),
 * detach the APMF/ACPI pieces, remove debugfs, then release locks and the
 * metrics buffer. dev itself and the MMIO mapping are devm-managed.
 */
static void amd_pmf_remove(struct platform_device *pdev)
{
	struct amd_pmf_dev *dev = platform_get_drvdata(pdev);

	amd_pmf_deinit_features(dev);
	apmf_acpi_deinit(dev);
	amd_pmf_dbgfs_unregister(dev);
	mutex_destroy(&dev->lock);
	mutex_destroy(&dev->update_mutex);
	kfree(dev->buf);
}
422 
/* sysfs attribute groups attached to every bound device (CnQF feature attrs) */
static const struct attribute_group *amd_pmf_driver_groups[] = {
	&cnqf_feature_attribute_group,
	NULL,
};
427 
/* Platform driver glue; .remove_new is the void-returning remove callback */
static struct platform_driver amd_pmf_driver = {
	.driver = {
		.name = "amd-pmf",
		.acpi_match_table = amd_pmf_acpi_ids,
		.dev_groups = amd_pmf_driver_groups,
		.pm = pm_sleep_ptr(&amd_pmf_pm),
	},
	.probe = amd_pmf_probe,
	.remove_new = amd_pmf_remove,
};
module_platform_driver(amd_pmf_driver);
438 module_platform_driver(amd_pmf_driver);
439 
440 MODULE_LICENSE("GPL");
441 MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
442