// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) based CPUFreq Interface driver
 *
 * Copyright (C) 2018 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>

struct scmi_data {
	int domain_id;
	struct device *cpu_dev;
};

static const struct scmi_handle *handle;

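/*
 * Report the current frequency of the CPU's performance domain as seen by
 * the SCMI firmware. The firmware returns the rate in Hz while cpufreq
 * expects kHz, hence the division; 0 is returned if the query fails.
 */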
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
	const struct scmi_perf_ops *perf_ops = handle->perf_ops;
	struct scmi_data *priv = policy->driver_data;
	unsigned long rate;
	int ret;

	ret = perf_ops->freq_get(handle, priv->domain_id, &rate, false);
	if (ret)
		return 0;
	return rate / 1000;
}

/*
 * perf_ops->freq_set is not synchronous: the actual OPP change happens
 * asynchronously in the firmware, and completion can be notified if the
 * corresponding SCMI events have been subscribed to.
 */
static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct scmi_data *priv = policy->driver_data;
	const struct scmi_perf_ops *perf_ops = handle->perf_ops;
	u64 freq = policy->freq_table[index].frequency;

	return perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
}

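/*
 * Fast-switch callback: request the new frequency and, per the cpufreq
 * contract, return the frequency that was set on success or 0 on failure.
 * The final 'true' argument (unlike the 'false' used in the regular
 * set_target path above) selects the polled transfer mode, which is assumed
 * here to be the way to complete the request without sleeping.
 */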
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct scmi_data *priv = policy->driver_data;
	const struct scmi_perf_ops *perf_ops = handle->perf_ops;

	if (!perf_ops->freq_set(handle, priv->domain_id,
				target_freq * 1000, true))
		return target_freq;

	return 0;
}

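/*
 * Populate @cpumask with all other CPUs that belong to the same SCMI
 * performance domain as @cpu_dev, i.e. the CPUs whose frequency is
 * scaled together with this one.
 */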
static int
scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
	int cpu, domain, tdomain;
	struct device *tcpu_dev;

	domain = handle->perf_ops->device_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev)
			continue;

		tdomain = handle->perf_ops->device_domain_id(tcpu_dev);
		if (tdomain == domain)
			cpumask_set_cpu(cpu, cpumask);
	}

	return 0;
}

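/*
 * Energy Model callback: convert the requested frequency from kHz to Hz,
 * ask the firmware for the estimated power cost of the performance domain
 * at that operating point, and pass the (possibly adjusted) frequency back
 * in kHz. The power value is expressed in whatever scale the firmware
 * advertises (see the power_scale_mw query in scmi_cpufreq_init()).
 */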
static int __maybe_unused
scmi_get_cpu_power(unsigned long *power, unsigned long *KHz,
		   struct device *cpu_dev)
{
	unsigned long Hz;
	int ret, domain;

	domain = handle->perf_ops->device_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	/* Get the power cost of the performance domain. */
	Hz = *KHz * 1000;
	ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power);
	if (ret)
		return ret;

	/* The EM framework specifies the frequency in KHz. */
	*KHz = Hz / 1000;

	return 0;
}

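/*
 * Per-policy initialisation: add the OPPs advertised by the firmware,
 * work out which CPUs share the performance domain, build the cpufreq
 * frequency table, record the transition latency, advertise fast
 * switching when the firmware supports it and register an Energy Model
 * performance domain using the firmware's power scale.
 */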
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret, nr_opp;
	unsigned int latency;
	struct device *cpu_dev;
	struct scmi_data *priv;
	struct cpufreq_frequency_table *freq_table;
	struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
	bool power_scale_mw;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}

	ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
	if (ret) {
		dev_warn(cpu_dev, "failed to add opps to the device\n");
		return ret;
	}

	ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
		return ret;
	}

	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
			__func__, ret);
		return ret;
	}

	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
	if (nr_opp <= 0) {
		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	priv->domain_id = handle->perf_ops->device_domain_id(cpu_dev);

	policy->driver_data = priv;
	policy->freq_table = freq_table;

	/* SCMI allows DVFS request for any domain from any CPU */
	policy->dvfs_possible_from_any_cpu = true;

	latency = handle->perf_ops->transition_latency_get(handle, cpu_dev);
	if (!latency)
		latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = latency;

	policy->fast_switch_possible =
		handle->perf_ops->fast_switch_possible(handle, cpu_dev);

	power_scale_mw = handle->perf_ops->power_scale_mw_get(handle);
	em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
				    power_scale_mw);

	return 0;

out_free_priv:
	kfree(priv);
out_free_opp:
	dev_pm_opp_remove_all_dynamic(cpu_dev);

	return ret;
}

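/*
 * Per-policy teardown: free the cpufreq table, drop the dynamically
 * added OPPs and release the per-policy private data.
 */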
static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;

	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
	kfree(priv);

	return 0;
}

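/*
 * cpufreq driver callbacks. The fast_switch hook is always wired up here;
 * whether it is actually used is decided per policy through
 * policy->fast_switch_possible in scmi_cpufreq_init().
 */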
static struct cpufreq_driver scmi_cpufreq_driver = {
	.name	= "scmi",
	.flags	= CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
		  CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		  CPUFREQ_IS_COOLING_DEV,
	.verify	= cpufreq_generic_frequency_table_verify,
	.attr	= cpufreq_generic_attr,
	.target_index	= scmi_cpufreq_set_target,
	.fast_switch	= scmi_cpufreq_fast_switch,
	.get	= scmi_cpufreq_get_rate,
	.init	= scmi_cpufreq_init,
	.exit	= scmi_cpufreq_exit,
};

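/*
 * Bind to the SCMI performance protocol device: cache the handle, register
 * a dummy clock provider where the OPP core expects one (see below) and
 * then register the cpufreq driver itself.
 */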
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
	int ret;
	struct device *dev = &sdev->dev;

	handle = sdev->handle;

	if (!handle || !handle->perf_ops)
		return -ENODEV;

#ifdef CONFIG_COMMON_CLK
	/* dummy clock provider as needed by OPP if clocks property is used */
	if (of_find_property(dev->of_node, "#clock-cells", NULL))
		devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
#endif

	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
	if (ret) {
		dev_err(dev, "%s: registering cpufreq failed, err: %d\n",
			__func__, ret);
	}

	return ret;
}

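/* Teardown mirrors probe: just unregister the cpufreq driver. */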
static void scmi_cpufreq_remove(struct scmi_device *sdev)
{
	cpufreq_unregister_driver(&scmi_cpufreq_driver);
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_PERF, "cpufreq" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_cpufreq_drv = {
	.name		= "scmi-cpufreq",
	.probe		= scmi_cpufreq_probe,
	.remove		= scmi_cpufreq_remove,
	.id_table	= scmi_id_table,
};
module_scmi_driver(scmi_cpufreq_drv);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");