// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) based CPUFreq Interface driver
 *
 * Copyright (C) 2018 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>

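/* Per-policy driver data: the SCMI performance domain ID and the CPU device */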
struct scmi_data {
	int domain_id;
	struct device *cpu_dev;
};

static const struct scmi_handle *handle;

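/*
 * Report the current frequency of @cpu in kHz, as read back from its SCMI
 * performance domain; returns 0 if the firmware query fails.
 */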
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
	struct scmi_perf_ops *perf_ops = handle->perf_ops;
	struct scmi_data *priv = policy->driver_data;
	unsigned long rate;
	int ret;

	ret = perf_ops->freq_get(handle, priv->domain_id, &rate, false);
	if (ret)
		return 0;
	return rate / 1000;
}

/*
 * perf_ops->freq_set is not synchronous: the actual OPP change happens
 * asynchronously, and a notification can be received if the corresponding
 * events have been subscribed to with the SCMI firmware.
 */
static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
	int ret;
	struct scmi_data *priv = policy->driver_data;
	struct scmi_perf_ops *perf_ops = handle->perf_ops;
	u64 freq = policy->freq_table[index].frequency;

	ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
	if (!ret)
		arch_set_freq_scale(policy->related_cpus, freq,
				    policy->cpuinfo.max_freq);
	return ret;
}

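/*
 * Fast frequency switch path: issue the request with the polled variant of
 * freq_set (final argument true) and return the target frequency on
 * success, or 0 on failure.
 */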
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct scmi_data *priv = policy->driver_data;
	struct scmi_perf_ops *perf_ops = handle->perf_ops;

	if (!perf_ops->freq_set(handle, priv->domain_id,
				target_freq * 1000, true)) {
		arch_set_freq_scale(policy->related_cpus, target_freq,
				    policy->cpuinfo.max_freq);
		return target_freq;
	}

	return 0;
}

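/*
 * Populate @cpumask with all CPUs sharing the same SCMI performance domain
 * as @cpu_dev.
 */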
static int
scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
	int cpu, domain, tdomain;
	struct device *tcpu_dev;

	domain = handle->perf_ops->device_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev)
			continue;

		tdomain = handle->perf_ops->device_domain_id(tcpu_dev);
		if (tdomain == domain)
			cpumask_set_cpu(cpu, cpumask);
	}

	return 0;
}

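/*
 * Energy Model callback, wired up via EM_DATA_CB() in scmi_cpufreq_init():
 * ask the firmware for the estimated power of the CPU's performance domain
 * at the requested frequency, converting between kHz (EM) and Hz (SCMI).
 */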
static int __maybe_unused
scmi_get_cpu_power(unsigned long *power, unsigned long *KHz,
		   struct device *cpu_dev)
{
	unsigned long Hz;
	int ret, domain;

	domain = handle->perf_ops->device_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	/* Get the power cost of the performance domain. */
	Hz = *KHz * 1000;
	ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power);
	if (ret)
		return ret;

	/* The EM framework specifies the frequency in KHz. */
	*KHz = Hz / 1000;

	return 0;
}

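/*
 * Per-policy initialisation: add the OPPs advertised by the SCMI firmware,
 * mark CPUs of the same performance domain as sharing them, build the
 * cpufreq frequency table and register an energy model for the policy CPUs.
 */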
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret, nr_opp;
	unsigned int latency;
	struct device *cpu_dev;
	struct scmi_data *priv;
	struct cpufreq_frequency_table *freq_table;
	struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}

	ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
	if (ret) {
		dev_warn(cpu_dev, "failed to add opps to the device\n");
		return ret;
	}

	ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
		return ret;
	}

	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
			__func__, ret);
		return ret;
	}

	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
	if (nr_opp <= 0) {
		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	priv->domain_id = handle->perf_ops->device_domain_id(cpu_dev);

	policy->driver_data = priv;
	policy->freq_table = freq_table;

	/* SCMI allows DVFS requests for any domain from any CPU */
	policy->dvfs_possible_from_any_cpu = true;

	latency = handle->perf_ops->transition_latency_get(handle, cpu_dev);
	if (!latency)
		latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = latency;

	policy->fast_switch_possible = true;

	em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus);

	return 0;

out_free_priv:
	kfree(priv);
out_free_opp:
	dev_pm_opp_remove_all_dynamic(cpu_dev);

	return ret;
}

static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;

	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
	kfree(priv);

	return 0;
}

static struct cpufreq_driver scmi_cpufreq_driver = {
	.name	= "scmi",
	.flags	= CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
		  CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		  CPUFREQ_IS_COOLING_DEV,
	.verify	= cpufreq_generic_frequency_table_verify,
	.attr	= cpufreq_generic_attr,
	.target_index	= scmi_cpufreq_set_target,
	.fast_switch	= scmi_cpufreq_fast_switch,
	.get	= scmi_cpufreq_get_rate,
	.init	= scmi_cpufreq_init,
	.exit	= scmi_cpufreq_exit,
};

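/*
 * SCMI device probe: cache the handle, check that it provides the
 * performance protocol operations and register the cpufreq driver.
 */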
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
	int ret;

	handle = sdev->handle;

	if (!handle || !handle->perf_ops)
		return -ENODEV;

	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
	if (ret) {
		dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n",
			__func__, ret);
	}

	return ret;
}

static void scmi_cpufreq_remove(struct scmi_device *sdev)
{
	cpufreq_unregister_driver(&scmi_cpufreq_driver);
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_PERF, "cpufreq" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_cpufreq_drv = {
	.name		= "scmi-cpufreq",
	.probe		= scmi_cpufreq_probe,
	.remove		= scmi_cpufreq_remove,
	.id_table	= scmi_id_table,
};
module_scmi_driver(scmi_cpufreq_drv);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");