// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 - 2022, NVIDIA CORPORATION. All rights reserved
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/units.h>

#include <asm/smp_plat.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

#define KHZ 1000
#define REF_CLK_MHZ 408 /* 408 MHz */
#define US_DELAY 500
#define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ)
#define MAX_CNT ~0U

#define NDIV_MASK 0x1FF

#define CORE_OFFSET(cpu) ((cpu) * 8)
#define CMU_CLKS_BASE 0x2000
#define SCRATCH_FREQ_CORE_REG(data, cpu) ((data)->regs + CMU_CLKS_BASE + CORE_OFFSET(cpu))

#define MMCRAB_CLUSTER_BASE(cl) (0x30000 + ((cl) * 0x10000))
#define CLUSTER_ACTMON_BASE(data, cl) \
	((data)->regs + (MMCRAB_CLUSTER_BASE(cl) + (data)->soc->actmon_cntr_base))
#define CORE_ACTMON_CNTR_REG(data, cl, cpu) (CLUSTER_ACTMON_BASE(data, cl) + CORE_OFFSET(cpu))
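
/*
 * Worked example of the register layout above, using the Tegra234 values
 * defined later in this file (actmon_cntr_base = 0x9000, 4 CPUs per
 * cluster). For cluster 1, core 2:
 *   SCRATCH_FREQ_CORE_REG offset = 0x2000 + (1 * 4 + 2) * 8 = 0x2030
 *   CORE_ACTMON_CNTR_REG offset  = 0x30000 + 0x10000 + 0x9000 + 2 * 8 = 0x49010
 * Both are offsets from data->regs; the scratch register is indexed by a
 * linear MPIDR-derived id, the actmon counter by (cluster, core).
 */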

/* cpufreq transition latency */
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */

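/**
 * struct tegra_cpu_ctr - two clock-counter samples taken US_DELAY apart
 * @cpu: logical CPU the samples were taken on
 * @coreclk_cnt: second core-clock counter sample
 * @last_coreclk_cnt: first core-clock counter sample
 * @refclk_cnt: second 408 MHz reference-clock counter sample
 * @last_refclk_cnt: first 408 MHz reference-clock counter sample
 */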
struct tegra_cpu_ctr {
	u32 cpu;
	u32 coreclk_cnt, last_coreclk_cnt;
	u32 refclk_cnt, last_refclk_cnt;
};

struct read_counters_work {
	struct work_struct work;
	struct tegra_cpu_ctr c;
};

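/**
 * struct tegra_cpufreq_ops - SoC-specific callbacks
 * @read_counters: sample the ref-clock/core-clock counters for one CPU
 * @set_cpu_ndiv: write an ndiv request for all CPUs of a policy
 * @get_cpu_cluster_id: translate a logical CPU to (core id, cluster id)
 * @get_cpu_ndiv: read back the last ndiv value requested for a CPU
 */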
struct tegra_cpufreq_ops {
	void (*read_counters)(struct tegra_cpu_ctr *c);
	void (*set_cpu_ndiv)(struct cpufreq_policy *policy, u64 ndiv);
	void (*get_cpu_cluster_id)(u32 cpu, u32 *cpuid, u32 *clusterid);
	int (*get_cpu_ndiv)(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv);
};

struct tegra_cpufreq_soc {
	struct tegra_cpufreq_ops *ops;
	int maxcpus_per_cluster;
	unsigned int num_clusters;
	phys_addr_t actmon_cntr_base;
};

struct tegra194_cpufreq_data {
	void __iomem *regs;
	struct cpufreq_frequency_table **bpmp_luts;
	const struct tegra_cpufreq_soc *soc;
	bool icc_dram_bw_scaling;
};

static struct workqueue_struct *read_counters_wq;

static int tegra_cpufreq_set_bw(struct cpufreq_policy *policy, unsigned long freq_khz)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	struct dev_pm_opp *opp;
	struct device *dev;
	int ret;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	opp = dev_pm_opp_find_freq_exact(dev, freq_khz * KHZ, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);
	if (ret)
		data->icc_dram_bw_scaling = false;

	dev_pm_opp_put(opp);
	return ret;
}

static void tegra_get_cpu_mpidr(void *mpidr)
{
	*((u64 *)mpidr) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
}

static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
{
	u64 mpidr;

	smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);

	if (cpuid)
		*cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	if (clusterid)
		*clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 2);
}
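
/*
 * Illustration (hypothetical MPIDR value): on Tegra234 the core id sits in
 * affinity level 1 and the cluster id in level 2, so an MPIDR of 0x00010200
 * decodes to cpuid = (0x00010200 >> 8) & 0xff = 2 and
 * clusterid = (0x00010200 >> 16) & 0xff = 1.
 */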

static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	void __iomem *freq_core_reg;
	u64 mpidr_id;

	/* use physical id to get address of per core frequency register */
	mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
	freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);

	*ndiv = readl(freq_core_reg) & NDIV_MASK;

	return 0;
}

static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	void __iomem *freq_core_reg;
	u32 cpu, cpuid, clusterid;
	u64 mpidr_id;

	for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) {
		data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);

		/* use physical id to get address of per core frequency register */
		mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid;
		freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id);

		writel(ndiv, freq_core_reg);
	}
}

/*
 * This register provides access to two counter values with a single
 * 64-bit read. The counter values are used to determine the average
 * actual frequency a core has run at over a period of time.
 *	[63:32] PLLP counter: Counts at fixed frequency (408 MHz)
 *	[31:0] Core clock counter: Counts on every core clock cycle
 */
static void tegra234_read_counters(struct tegra_cpu_ctr *c)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	void __iomem *actmon_reg;
	u32 cpuid, clusterid;
	u64 val;

	data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid);
	actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid);

	val = readq(actmon_reg);
	c->last_refclk_cnt = upper_32_bits(val);
	c->last_coreclk_cnt = lower_32_bits(val);
	udelay(US_DELAY);
	val = readq(actmon_reg);
	c->refclk_cnt = upper_32_bits(val);
	c->coreclk_cnt = lower_32_bits(val);
}

static struct tegra_cpufreq_ops tegra234_cpufreq_ops = {
	.read_counters = tegra234_read_counters,
	.get_cpu_cluster_id = tegra234_get_cpu_cluster_id,
	.get_cpu_ndiv = tegra234_get_cpu_ndiv,
	.set_cpu_ndiv = tegra234_set_cpu_ndiv,
};

static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
	.ops = &tegra234_cpufreq_ops,
	.actmon_cntr_base = 0x9000,
	.maxcpus_per_cluster = 4,
	.num_clusters = 3,
};

static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
	.ops = &tegra234_cpufreq_ops,
	.actmon_cntr_base = 0x4000,
	.maxcpus_per_cluster = 8,
	.num_clusters = 1,
};

static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
{
	u64 mpidr;

	smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true);

	if (cpuid)
		*cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	if (clusterid)
		*clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}

/*
 * Read the per-core, read-only system register NVFREQ_FEEDBACK_EL1.
 * The register provides frequency feedback information to
 * determine the average actual frequency a core has run at over
 * a period of time.
 *	[31:0] PLLP counter: Counts at fixed frequency (408 MHz)
 *	[63:32] Core clock counter: counts on every core clock cycle
 *		where the core is architecturally clocking
 */
static u64 read_freq_feedback(void)
{
	u64 val = 0;

	asm volatile("mrs %0, s3_0_c15_c0_5" : "=r" (val) : );

	return val;
}

static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response *nltbl,
				   u16 ndiv)
{
	return nltbl->ref_clk_hz / KHZ * ndiv / (nltbl->pdiv * nltbl->mdiv);
}
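
/*
 * Worked example with hypothetical BPMP values: ref_clk_hz = 38400000
 * (38.4 MHz), pdiv = 1, mdiv = 1, ndiv = 52 gives
 * 38400000 / 1000 * 52 / (1 * 1) = 1996800 kHz, i.e. ~2 GHz.
 * Dividing by KHZ before multiplying keeps the 32-bit intermediate
 * result from overflowing for large ndiv values.
 */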

static void tegra194_read_counters(struct tegra_cpu_ctr *c)
{
	u64 val;

	val = read_freq_feedback();
	c->last_refclk_cnt = lower_32_bits(val);
	c->last_coreclk_cnt = upper_32_bits(val);
	udelay(US_DELAY);
	val = read_freq_feedback();
	c->refclk_cnt = lower_32_bits(val);
	c->coreclk_cnt = upper_32_bits(val);
}

static void tegra_read_counters(struct work_struct *work)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	struct read_counters_work *read_counters_work;
	struct tegra_cpu_ctr *c;

	/*
	 * ref_clk_counter (32-bit) runs on the constant 408 MHz pll_p
	 * clock, so it takes 2^32 / 408 MHz = 10526880 usec = ~10.527 sec
	 * to overflow.
	 *
	 * Likewise, core_clk_counter (32-bit) runs on the core clock,
	 * which is synchronized to crab_clk (cpu_crab_clk) running at the
	 * cluster frequency. Assuming a max cluster clock of ~2000 MHz,
	 * it takes 2^32 / 2000 MHz = ~2.147 sec to overflow.
	 */
	read_counters_work = container_of(work, struct read_counters_work,
					  work);
	c = &read_counters_work->c;

	data->soc->ops->read_counters(c);
}

/*
 * Return instantaneous CPU speed.
 * The instantaneous frequency is reconstructed by taking a sample on
 * every query:
 * - Read the core and ref clock counters
 * - Delay for X us
 * - Read both cycle counters again
 * - Compute the frequency from the counter deltas, using the
 *   ref_clk_counter delta as the time base
 * - Return kcycles/second, i.e. the frequency in KHz
 *
 * delta time period = x sec
 *                   = delta ref_clk_counter / (408 * 10^6) sec
 * freq in Hz = cycles/sec
 *            = delta cycles / x sec
 *            = (delta cycles * 408 * 10^6) / delta ref_clk_counter
 * in KHz     = (delta cycles * 408 * 10^3) / delta ref_clk_counter
 *
 * @cpu - logical cpu whose freq is to be read
 * Returns freq in KHz on success, 0 if cpu is offline
 */
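/*
 * Worked example with hypothetical counter deltas: over a ~500 us window,
 * delta ref_clk_counter = 204000 ticks (204000 / 408 MHz = 500 us) and
 * delta core clock cycles = 1000000 give
 * (1000000 * 408) / 204000 = 2000 MHz, returned as 2000000 KHz.
 */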
static unsigned int tegra194_calculate_speed(u32 cpu)
{
	struct read_counters_work read_counters_work;
	struct tegra_cpu_ctr c;
	u32 delta_refcnt;
	u32 delta_ccnt;
	u32 rate_mhz;

	/*
	 * udelay() is required to reconstruct cpu frequency over an
	 * observation window. Use a workqueue to call udelay() with
	 * interrupts enabled.
	 */
	read_counters_work.c.cpu = cpu;
	INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters);
	queue_work_on(cpu, read_counters_wq, &read_counters_work.work);
	flush_work(&read_counters_work.work);
	c = read_counters_work.c;

	if (c.coreclk_cnt < c.last_coreclk_cnt)
		delta_ccnt = c.coreclk_cnt + (MAX_CNT - c.last_coreclk_cnt);
	else
		delta_ccnt = c.coreclk_cnt - c.last_coreclk_cnt;
	if (!delta_ccnt)
		return 0;

	/* ref clock is 32 bits */
	if (c.refclk_cnt < c.last_refclk_cnt)
		delta_refcnt = c.refclk_cnt + (MAX_CNT - c.last_refclk_cnt);
	else
		delta_refcnt = c.refclk_cnt - c.last_refclk_cnt;
	if (!delta_refcnt) {
		pr_debug("cpufreq: %d is idle, delta_refcnt: 0\n", cpu);
		return 0;
	}
	rate_mhz = ((unsigned long)delta_ccnt * REF_CLK_MHZ) / delta_refcnt;

	return (rate_mhz * KHZ); /* in KHz */
}

static void tegra194_get_cpu_ndiv_sysreg(void *ndiv)
{
	u64 ndiv_val;

	asm volatile("mrs %0, s3_0_c15_c0_4" : "=r" (ndiv_val) : );

	*(u64 *)ndiv = ndiv_val;
}

static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{
	/* pass the output pointer itself so the sysreg helper fills *ndiv */
	return smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, ndiv, true);
}

static void tegra194_set_cpu_ndiv_sysreg(void *data)
{
	u64 ndiv_val = *(u64 *)data;

	asm volatile("msr s3_0_c15_c0_4, %0" : : "r" (ndiv_val));
}

static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv)
{
	on_each_cpu_mask(policy->cpus, tegra194_set_cpu_ndiv_sysreg, &ndiv, true);
}

static unsigned int tegra194_get_speed(u32 cpu)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	struct cpufreq_frequency_table *pos;
	u32 cpuid, clusterid;
	unsigned int rate;
	u64 ndiv;
	int ret;

	data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid);

	/* reconstruct actual cpu freq using counters */
	rate = tegra194_calculate_speed(cpu);

	/* get last written ndiv value */
	ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv);
	if (WARN_ON_ONCE(ret))
		return rate;

	/*
	 * If the reconstructed frequency is within an acceptable delta of
	 * the last written value, return the frequency corresponding to
	 * the last written ndiv value from freq_table. This is done to
	 * return a consistent value.
	 */
	cpufreq_for_each_valid_entry(pos, data->bpmp_luts[clusterid]) {
		if (pos->driver_data != ndiv)
			continue;

		if (abs(pos->frequency - rate) > 115200) { /* more than ~115 MHz apart */
			pr_warn("cpufreq: cpu%d,cur:%u,set:%u,set ndiv:%llu\n",
				cpu, rate, pos->frequency, ndiv);
		} else {
			rate = pos->frequency;
		}
		break;
	}
	return rate;
}

static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
					    struct cpufreq_frequency_table *bpmp_lut,
					    struct cpufreq_frequency_table **opp_table)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	struct cpufreq_frequency_table *freq_table = NULL;
	struct cpufreq_frequency_table *pos;
	struct device *cpu_dev;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret, max_opps;
	int j = 0;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu);
		return -ENODEV;
	}

	/* Initialize the OPP table from the operating-points-v2 property in DT */
	ret = dev_pm_opp_of_add_table_indexed(cpu_dev, 0);
	if (!ret) {
		max_opps = dev_pm_opp_get_opp_count(cpu_dev);
		if (max_opps <= 0) {
			dev_err(cpu_dev, "Failed to add OPPs\n");
			return max_opps;
		}

		/* Disable all opps and cross-validate against LUT later */
		for (rate = 0; ; rate++) {
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
			if (IS_ERR(opp))
				break;

			dev_pm_opp_put(opp);
			dev_pm_opp_disable(cpu_dev, rate);
		}
	} else {
		dev_err(cpu_dev, "Invalid or empty opp table in device tree\n");
		data->icc_dram_bw_scaling = false;
		return ret;
	}

	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
	if (!freq_table)
		return -ENOMEM;

	/*
	 * Cross-check the frequencies from the BPMP-FW LUT against the OPPs
	 * present in DT. Enable only those DT OPPs which are also present
	 * in the LUT.
	 */
	cpufreq_for_each_valid_entry(pos, bpmp_lut) {
		opp = dev_pm_opp_find_freq_exact(cpu_dev, pos->frequency * KHZ, false);
		if (IS_ERR(opp))
			continue;

		dev_pm_opp_put(opp);

		ret = dev_pm_opp_enable(cpu_dev, pos->frequency * KHZ);
		if (ret < 0) {
			kfree(freq_table);
			return ret;
		}

		freq_table[j].driver_data = pos->driver_data;
		freq_table[j].frequency = pos->frequency;
		j++;
	}

	freq_table[j].driver_data = pos->driver_data;
	freq_table[j].frequency = CPUFREQ_TABLE_END;

	*opp_table = &freq_table[0];

	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);

	return ret;
}

static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	int maxcpus_per_cluster = data->soc->maxcpus_per_cluster;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_frequency_table *bpmp_lut;
	u32 start_cpu, cpu;
	u32 clusterid;
	int ret;

	data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
	if (clusterid >= data->soc->num_clusters || !data->bpmp_luts[clusterid])
		return -EINVAL;

	start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
	/* set the same policy for all cpus in a cluster */
	for (cpu = start_cpu; cpu < (start_cpu + maxcpus_per_cluster); cpu++) {
		if (cpu_possible(cpu))
			cpumask_set_cpu(cpu, policy->cpus);
	}
	policy->cpuinfo.transition_latency = TEGRA_CPUFREQ_TRANSITION_LATENCY;

	bpmp_lut = data->bpmp_luts[clusterid];

	if (data->icc_dram_bw_scaling) {
		ret = tegra_cpufreq_init_cpufreq_table(policy, bpmp_lut, &freq_table);
		if (!ret) {
			policy->freq_table = freq_table;
			return 0;
		}
	}

	data->icc_dram_bw_scaling = false;
	policy->freq_table = bpmp_lut;
	pr_info("OPP tables missing from DT, EMC frequency scaling disabled\n");

	return 0;
}

static int tegra194_cpufreq_online(struct cpufreq_policy *policy)
{
	/* We did light-weight tear down earlier, nothing to do here */
	return 0;
}

static int tegra194_cpufreq_offline(struct cpufreq_policy *policy)
{
	/*
	 * Preserve policy->driver_data and don't free resources on
	 * light-weight tear down.
	 */

	return 0;
}

static int tegra194_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);

	return 0;
}

static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
				       unsigned int index)
{
	struct cpufreq_frequency_table *tbl = policy->freq_table + index;
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();

	/*
	 * Each core writes its frequency request to a per-core register.
	 * All cores in a cluster then run at the same frequency, which is
	 * the maximum of the values requested by the cores in that cluster.
	 */
	data->soc->ops->set_cpu_ndiv(policy, (u64)tbl->driver_data);

	if (data->icc_dram_bw_scaling)
		tegra_cpufreq_set_bw(policy, tbl->frequency);

	return 0;
}

static struct cpufreq_driver tegra194_cpufreq_driver = {
	.name = "tegra194",
	.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_IS_COOLING_DEV,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = tegra194_cpufreq_set_target,
	.get = tegra194_get_speed,
	.init = tegra194_cpufreq_init,
	.exit = tegra194_cpufreq_exit,
	.online = tegra194_cpufreq_online,
	.offline = tegra194_cpufreq_offline,
	.attr = cpufreq_generic_attr,
};

static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
	.read_counters = tegra194_read_counters,
	.get_cpu_cluster_id = tegra194_get_cpu_cluster_id,
	.get_cpu_ndiv = tegra194_get_cpu_ndiv,
	.set_cpu_ndiv = tegra194_set_cpu_ndiv,
};

static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
	.ops = &tegra194_cpufreq_ops,
	.maxcpus_per_cluster = 2,
	.num_clusters = 4,
};

static void tegra194_cpufreq_free_resources(void)
{
	destroy_workqueue(read_counters_wq);
}

static struct cpufreq_frequency_table *
tegra_cpufreq_bpmp_read_lut(struct platform_device *pdev, struct tegra_bpmp *bpmp,
			    unsigned int cluster_id)
{
	struct cpufreq_frequency_table *freq_table;
	struct mrq_cpu_ndiv_limits_response resp;
	unsigned int num_freqs, ndiv, delta_ndiv;
	struct mrq_cpu_ndiv_limits_request req;
	struct tegra_bpmp_message msg;
	u16 freq_table_step_size;
	int err, index;

	memset(&req, 0, sizeof(req));
	req.cluster_id = cluster_id;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_CPU_NDIV_LIMITS;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err)
		return ERR_PTR(err);
	if (msg.rx.ret == -BPMP_EINVAL) {
		/* Cluster not available */
		return NULL;
	}
	if (msg.rx.ret)
		return ERR_PTR(-EINVAL);

	/*
	 * Make sure frequency table step is a multiple of mdiv to match
	 * vhint table granularity.
	 */
	freq_table_step_size = resp.mdiv *
			       DIV_ROUND_UP(CPUFREQ_TBL_STEP_HZ, resp.ref_clk_hz);

	dev_dbg(&pdev->dev, "cluster %d: frequency table step size: %d\n",
		cluster_id, freq_table_step_size);

	delta_ndiv = resp.ndiv_max - resp.ndiv_min;

	if (unlikely(delta_ndiv == 0)) {
		num_freqs = 1;
	} else {
		/* We store both ndiv_min and ndiv_max hence the +1 */
		num_freqs = delta_ndiv / freq_table_step_size + 1;
	}

	num_freqs += (delta_ndiv % freq_table_step_size) ? 1 : 0;
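
	/*
	 * Worked example with hypothetical limits: ref_clk_hz = 38400000 and
	 * mdiv = 1 give freq_table_step_size = 1 * DIV_ROUND_UP(50000000,
	 * 38400000) = 2. With ndiv_min = 20 and ndiv_max = 120,
	 * delta_ndiv = 100, so num_freqs = 100 / 2 + 1 = 51 entries plus
	 * the CPUFREQ_TABLE_END sentinel allocated below.
	 */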

	freq_table = devm_kcalloc(&pdev->dev, num_freqs + 1,
				  sizeof(*freq_table), GFP_KERNEL);
	if (!freq_table)
		return ERR_PTR(-ENOMEM);

	for (index = 0, ndiv = resp.ndiv_min;
	     ndiv < resp.ndiv_max;
	     index++, ndiv += freq_table_step_size) {
		freq_table[index].driver_data = ndiv;
		freq_table[index].frequency = map_ndiv_to_freq(&resp, ndiv);
	}

	freq_table[index].driver_data = resp.ndiv_max;
	freq_table[index++].frequency = map_ndiv_to_freq(&resp, resp.ndiv_max);
	freq_table[index].frequency = CPUFREQ_TABLE_END;

	return freq_table;
}

static int tegra194_cpufreq_probe(struct platform_device *pdev)
{
	const struct tegra_cpufreq_soc *soc;
	struct tegra194_cpufreq_data *data;
	struct tegra_bpmp *bpmp;
	struct device *cpu_dev;
	int err, i;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	soc = of_device_get_match_data(&pdev->dev);

	if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
		data->soc = soc;
	} else {
		dev_err(&pdev->dev, "soc data missing\n");
		return -EINVAL;
	}

	data->bpmp_luts = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
				       sizeof(*data->bpmp_luts), GFP_KERNEL);
	if (!data->bpmp_luts)
		return -ENOMEM;

	if (soc->actmon_cntr_base) {
		/* mmio registers are used for frequency request and reconstruction */
		data->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(data->regs))
			return PTR_ERR(data->regs);
	}

	platform_set_drvdata(pdev, data);

	bpmp = tegra_bpmp_get(&pdev->dev);
	if (IS_ERR(bpmp))
		return PTR_ERR(bpmp);

	read_counters_wq = alloc_workqueue("read_counters_wq", __WQ_LEGACY, 1);
	if (!read_counters_wq) {
		dev_err(&pdev->dev, "failed to create workqueue\n");
		err = -EINVAL;
		goto put_bpmp;
	}

	for (i = 0; i < data->soc->num_clusters; i++) {
		data->bpmp_luts[i] = tegra_cpufreq_bpmp_read_lut(pdev, bpmp, i);
		if (IS_ERR(data->bpmp_luts[i])) {
			err = PTR_ERR(data->bpmp_luts[i]);
			goto err_free_res;
		}
	}

	tegra194_cpufreq_driver.driver_data = data;

	/* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
	cpu_dev = get_cpu_device(0);
	if (!cpu_dev) {
		err = -EPROBE_DEFER;
		goto err_free_res;
	}

	if (dev_pm_opp_of_get_opp_desc_node(cpu_dev)) {
		err = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
		if (!err)
			data->icc_dram_bw_scaling = true;
	}

	err = cpufreq_register_driver(&tegra194_cpufreq_driver);
	if (!err)
		goto put_bpmp;

err_free_res:
	tegra194_cpufreq_free_resources();
put_bpmp:
	tegra_bpmp_put(bpmp);
	return err;
}

static void tegra194_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&tegra194_cpufreq_driver);
	tegra194_cpufreq_free_resources();
}

static const struct of_device_id tegra194_cpufreq_of_match[] = {
	{ .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
	{ .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
	{ .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match);

static struct platform_driver tegra194_ccplex_driver = {
	.driver = {
		.name = "tegra194-cpufreq",
		.of_match_table = tegra194_cpufreq_of_match,
	},
	.probe = tegra194_cpufreq_probe,
	.remove_new = tegra194_cpufreq_remove,
};
module_platform_driver(tegra194_ccplex_driver);

MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
MODULE_AUTHOR("Sumit Gupta <sumitg@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra194 cpufreq driver");
MODULE_LICENSE("GPL v2");