// SPDX-License-Identifier: GPL-2.0-only
/*
 * A devfreq driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
 * Copyright (C) 2014 Google, Inc
 */

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
#include <linux/workqueue.h>

#include "governor.h"

#define ACTMON_GLB_STATUS					0x0
#define ACTMON_GLB_PERIOD_CTRL					0x4

#define ACTMON_DEV_CTRL						0x0
#define ACTMON_DEV_CTRL_K_VAL_SHIFT				10
#define ACTMON_DEV_CTRL_ENB_PERIODIC				BIT(18)
#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN			BIT(20)
#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN			BIT(21)
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT	23
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT	26
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN		BIT(29)
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN		BIT(30)
#define ACTMON_DEV_CTRL_ENB					BIT(31)

#define ACTMON_DEV_CTRL_STOP					0x00000000

#define ACTMON_DEV_UPPER_WMARK					0x4
#define ACTMON_DEV_LOWER_WMARK					0x8
#define ACTMON_DEV_INIT_AVG					0xc
#define ACTMON_DEV_AVG_UPPER_WMARK				0x10
#define ACTMON_DEV_AVG_LOWER_WMARK				0x14
#define ACTMON_DEV_COUNT_WEIGHT					0x18
#define ACTMON_DEV_AVG_COUNT					0x20
#define ACTMON_DEV_INTR_STATUS					0x24

#define ACTMON_INTR_STATUS_CLEAR				0xffffffff

#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER			BIT(31)
#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER			BIT(30)

#define ACTMON_ABOVE_WMARK_WINDOW				1
#define ACTMON_BELOW_WMARK_WINDOW				3
#define ACTMON_BOOST_FREQ_STEP					16000

/*
 * The activity counter is incremented once every 256 memory transactions,
 * and each transaction takes 4 EMC clocks on Tegra124, so the COUNT_WEIGHT
 * is 4 * 256 = 1024.
 */
#define ACTMON_COUNT_WEIGHT					0x400

/*
 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
 */
#define ACTMON_AVERAGE_WINDOW_LOG2			6
#define ACTMON_SAMPLING_PERIOD				12 /* ms */
#define ACTMON_DEFAULT_AVG_BAND				6  /* 1/10 of % */

#define KHZ							1000

#define KHZ_MAX						(ULONG_MAX / KHZ)

/* Assume that the bus is saturated if the utilization is 25% */
#define BUS_SATURATION_RATIO					25

/**
 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
 * device
 *
 * Coefficients and thresholds are percentages unless otherwise noted
 */
struct tegra_devfreq_device_config {
	u32		offset;
	u32		irq_mask;

	/* Factors applied to boost_freq every consecutive watermark breach */
	unsigned int	boost_up_coeff;
	unsigned int	boost_down_coeff;

	/* Define the watermark bounds when applied to the current avg */
	unsigned int	boost_up_threshold;
	unsigned int	boost_down_threshold;

	/*
	 * Threshold of activity (cycles translated to kHz) below which the
	 * CPU frequency isn't to be taken into account. This is to avoid
	 * increasing the EMC frequency when the CPU is very busy but not
	 * accessing the bus often.
	 */
	u32		avg_dependency_threshold;
};

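/*
 * The ACTMON channels monitored by this driver: MCALL covers all memory
 * controller clients, MCCPU covers only the accesses issued by the CPUs.
 */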
enum tegra_actmon_device {
	MCALL = 0,
	MCCPU,
};

static const struct tegra_devfreq_device_config actmon_device_configs[] = {
	{
		/* MCALL: All memory accesses (including from the CPUs) */
		.offset = 0x1c0,
		.irq_mask = 1 << 26,
		.boost_up_coeff = 200,
		.boost_down_coeff = 50,
		.boost_up_threshold = 60,
		.boost_down_threshold = 40,
	},
	{
		/* MCCPU: memory accesses from the CPUs */
		.offset = 0x200,
		.irq_mask = 1 << 25,
		.boost_up_coeff = 800,
		.boost_down_coeff = 40,
		.boost_up_threshold = 27,
		.boost_down_threshold = 10,
		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
	},
};

/**
 * struct tegra_devfreq_device - state specific to an ACTMON device
 *
 * Frequencies are in kHz.
 */
struct tegra_devfreq_device {
	const struct tegra_devfreq_device_config *config;
	void __iomem *regs;

	/* Average event count sampled in the last interrupt */
	u32 avg_count;

	/*
	 * Extra frequency to increase the target by due to consecutive
	 * watermark breaches.
	 */
	unsigned long boost_freq;

	/* Optimal frequency calculated from the stats for this device */
	unsigned long target_freq;
};

struct tegra_devfreq {
	struct devfreq		*devfreq;

	struct reset_control	*reset;
	struct clk		*clock;
	void __iomem		*regs;

	struct clk		*emc_clock;
	unsigned long		max_freq;
	unsigned long		cur_freq;
	struct notifier_block	clk_rate_change_nb;

	struct delayed_work	cpufreq_update_work;
	struct notifier_block	cpu_rate_change_nb;

	struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];

	unsigned int		irq;

	bool			started;
};

struct tegra_actmon_emc_ratio {
	unsigned long cpu_freq;
	unsigned long emc_freq;
};

static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
	{ 1400000,    KHZ_MAX },
	{ 1200000,    750000 },
	{ 1100000,    600000 },
	{ 1000000,    500000 },
	{  800000,    375000 },
	{  500000,    200000 },
	{  250000,    100000 },
};

static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
{
	return readl_relaxed(tegra->regs + offset);
}

static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
{
	writel_relaxed(val, tegra->regs + offset);
}

static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
{
	return readl_relaxed(dev->regs + offset);
}

static void device_writel(struct tegra_devfreq_device *dev, u32 val,
			  u32 offset)
{
	writel_relaxed(val, dev->regs + offset);
}

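/* Return pct% of val, clamped to U32_MAX to fit the watermark registers */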
static unsigned long do_percent(unsigned long long val, unsigned int pct)
{
	val = val * pct;
	do_div(val, 100);

	/*
	 * A high frequency, a large boosting percentage and a long polling
	 * interval can overflow a 32-bit value when the watermarks are
	 * calculated.
	 */
	return min_t(u64, val, U32_MAX);
}

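/*
 * Re-center the average-count watermarks around the last sampled average
 * count, using a band derived from ACTMON_DEFAULT_AVG_BAND and the polling
 * interval, so that an interrupt fires when the average drifts too far.
 */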
static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
					   struct tegra_devfreq_device *dev)
{
	u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
	u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
	u32 avg;

	avg = min(dev->avg_count, U32_MAX - band);
	device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);

	avg = max(dev->avg_count, band);
	device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
}

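/*
 * Program the raw-count watermarks as the boost-up/down percentage of the
 * number of cycles in one sampling period at the current EMC rate.
 */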
static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
				       struct tegra_devfreq_device *dev)
{
	u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;

	device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
		      ACTMON_DEV_UPPER_WMARK);

	device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
		      ACTMON_DEV_LOWER_WMARK);
}

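/*
 * Handle a per-device interrupt: sample the new average count, re-center the
 * average watermarks and grow or decay the boost frequency depending on
 * which consecutive watermark was breached.
 */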
static void actmon_isr_device(struct tegra_devfreq *tegra,
			      struct tegra_devfreq_device *dev)
{
	u32 intr_status, dev_ctrl;

	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
	tegra_devfreq_update_avg_wmark(tegra, dev);

	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);

	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
		/*
		 * new_boost = min(old_boost * up_coef + step, max_freq)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_up_coeff);
		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;

		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;

		if (dev->boost_freq >= tegra->max_freq) {
			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
			dev->boost_freq = tegra->max_freq;
		}
	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
		/*
		 * new_boost = old_boost * down_coef
		 * or 0 if (old_boost * down_coef < step / 2)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_down_coeff);

		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;

		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
			dev->boost_freq = 0;
		}
	}

	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);

	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
}

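/*
 * Look up the static EMC frequency floor that corresponds to the given CPU
 * frequency, capped at the maximum EMC rate.
 */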
static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
					    unsigned long cpu_freq)
{
	unsigned int i;
	const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;

	for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
		if (cpu_freq >= ratio->cpu_freq) {
			if (ratio->emc_freq >= tegra->max_freq)
				return tegra->max_freq;
			else
				return ratio->emc_freq;
		}
	}

	return 0;
}

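/*
 * Convert the averaged activity count into a target frequency in kHz, scaled
 * so that hitting the boost-up threshold corresponds to full utilization.
 */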
static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
					       struct tegra_devfreq_device *dev)
{
	unsigned int avg_sustain_coef;
	unsigned long target_freq;

	target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
	avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
	target_freq = do_percent(target_freq, avg_sustain_coef);

	return target_freq;
}

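/*
 * Compute the device's target frequency, adding the boost and, for devices
 * with an avg_dependency_threshold (MCCPU), enforcing the EMC floor derived
 * from the current CPU frequency.
 */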
static void actmon_update_target(struct tegra_devfreq *tegra,
				 struct tegra_devfreq_device *dev)
{
	unsigned long cpu_freq = 0;
	unsigned long static_cpu_emc_freq = 0;

	dev->target_freq = actmon_device_target_freq(tegra, dev);

	if (dev->config->avg_dependency_threshold &&
	    dev->config->avg_dependency_threshold <= dev->target_freq) {
		cpu_freq = cpufreq_quick_get(0);
		static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);

		dev->target_freq += dev->boost_freq;
		dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
	} else {
		dev->target_freq += dev->boost_freq;
	}
}

static irqreturn_t actmon_thread_isr(int irq, void *data)
{
	struct tegra_devfreq *tegra = data;
	bool handled = false;
	unsigned int i;
	u32 val;

	mutex_lock(&tegra->devfreq->lock);

	val = actmon_readl(tegra, ACTMON_GLB_STATUS);
	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		if (val & tegra->devices[i].config->irq_mask) {
			actmon_isr_device(tegra, tegra->devices + i);
			handled = true;
		}
	}

	if (handled)
		update_devfreq(tegra->devfreq);

	mutex_unlock(&tegra->devfreq->lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
				      unsigned long action, void *ptr)
{
	struct clk_notifier_data *data = ptr;
	struct tegra_devfreq *tegra;
	struct tegra_devfreq_device *dev;
	unsigned int i;

	if (action != POST_RATE_CHANGE)
		return NOTIFY_OK;

	tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);

	tegra->cur_freq = data->new_rate / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		dev = &tegra->devices[i];

		tegra_devfreq_update_wmark(tegra, dev);
	}

	return NOTIFY_OK;
}

static void tegra_actmon_delayed_update(struct work_struct *work)
{
	struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
						   cpufreq_update_work.work);

	mutex_lock(&tegra->devfreq->lock);
	update_devfreq(tegra->devfreq);
	mutex_unlock(&tegra->devfreq->lock);
}

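/*
 * Return the EMC frequency floor imposed by the given CPU frequency, or 0 if
 * the MCCPU activity is below the dependency threshold or already above the
 * floor.
 */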
static unsigned long
tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
				  unsigned int cpu_freq)
{
	struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
	unsigned long static_cpu_emc_freq, dev_freq;

	dev_freq = actmon_device_target_freq(tegra, actmon_dev);

	/* check whether CPU's freq is taken into account at all */
	if (dev_freq < actmon_dev->config->avg_dependency_threshold)
		return 0;

	static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);

	if (dev_freq >= static_cpu_emc_freq)
		return 0;

	return static_cpu_emc_freq;
}

static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
				      unsigned long action, void *ptr)
{
	struct cpufreq_freqs *freqs = ptr;
	struct tegra_devfreq *tegra;
	unsigned long old, new, delay;

	if (action != CPUFREQ_POSTCHANGE)
		return NOTIFY_OK;

	tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);

	/*
	 * Quickly check whether CPU frequency should be taken into account
	 * at all, without blocking CPUFreq's core.
	 */
	if (mutex_trylock(&tegra->devfreq->lock)) {
		old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
		new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
		mutex_unlock(&tegra->devfreq->lock);

		/*
		 * If CPU's frequency shouldn't be taken into account at
		 * the moment, then there is no need to update the devfreq's
		 * state because ISR will re-check CPU's frequency on the
		 * next interrupt.
		 */
		if (old == new)
			return NOTIFY_OK;
	}

	/*
	 * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
	 * to allow asynchronous notifications. This means we can't block
	 * here for too long, otherwise CPUFreq's core will complain with a
	 * warning splat.
	 */
	delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
	schedule_delayed_work(&tegra->cpufreq_update_work, delay);

	return NOTIFY_OK;
}

static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
					  struct tegra_devfreq_device *dev)
{
	u32 val = 0;

	/* reset boosting on governor's restart */
	dev->boost_freq = 0;

	dev->target_freq = tegra->cur_freq;

	dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);

	tegra_devfreq_update_avg_wmark(tegra, dev);
	tegra_devfreq_update_wmark(tegra, dev);

	device_writel(dev, ACTMON_COUNT_WEIGHT, ACTMON_DEV_COUNT_WEIGHT);
	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);

	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
	val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
	val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_ENB;

	device_writel(dev, val, ACTMON_DEV_CTRL);
}

static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
{
	struct tegra_devfreq_device *dev = tegra->devices;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
		device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
		device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
			      ACTMON_DEV_INTR_STATUS);
	}
}

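/*
 * (Re)start activity monitoring: program the sampling period, register the
 * EMC clock and cpufreq notifiers, configure every ACTMON device and enable
 * the interrupt.
 */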
static int tegra_actmon_resume(struct tegra_devfreq *tegra)
{
	unsigned int i;
	int err;

	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
		return 0;

	actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
		      ACTMON_GLB_PERIOD_CTRL);

	/*
	 * CLK notifications are needed in order to reconfigure the upper
	 * consecutive watermark in accordance with the actual clock rate
	 * to avoid unnecessary upper interrupts.
	 */
	err = clk_notifier_register(tegra->emc_clock,
				    &tegra->clk_rate_change_nb);
	if (err) {
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier\n");
		return err;
	}

	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
		tegra_actmon_configure_device(tegra, &tegra->devices[i]);

	/*
	 * We estimate the CPU's memory bandwidth requirement from the number
	 * of memory accesses and the system load, judged by the CPU's
	 * frequency. We also don't want to receive events about CPU
	 * frequency transitions while the governor is stopped, hence the
	 * notifier is registered dynamically.
	 */
	err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (err) {
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier: %d\n", err);
		goto err_stop;
	}

	enable_irq(tegra->irq);

	return 0;

err_stop:
	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);

	return err;
}

static int tegra_actmon_start(struct tegra_devfreq *tegra)
{
	int ret = 0;

	if (!tegra->started) {
		tegra->started = true;

		ret = tegra_actmon_resume(tegra);
		if (ret)
			tegra->started = false;
	}

	return ret;
}

static void tegra_actmon_pause(struct tegra_devfreq *tegra)
{
	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
		return;

	disable_irq(tegra->irq);

	cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
				    CPUFREQ_TRANSITION_NOTIFIER);

	cancel_delayed_work_sync(&tegra->cpufreq_update_work);

	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
}

static void tegra_actmon_stop(struct tegra_devfreq *tegra)
{
	tegra_actmon_pause(tegra);
	tegra->started = false;
}

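/*
 * Set the devfreq target by raising the EMC clock's minimum-rate constraint;
 * requesting a rate of 0 afterwards lets the clock framework settle on the
 * lowest rate that satisfies all currently imposed constraints.
 */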
static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
				u32 flags)
{
	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
	struct devfreq *devfreq = tegra->devfreq;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int err;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}
	rate = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	err = clk_set_min_rate(tegra->emc_clock, rate * KHZ);
	if (err)
		return err;

	err = clk_set_rate(tegra->emc_clock, 0);
	if (err)
		goto restore_min_rate;

	return 0;

restore_min_rate:
	clk_set_min_rate(tegra->emc_clock, devfreq->previous_freq);

	return err;
}

static int tegra_devfreq_get_dev_status(struct device *dev,
					struct devfreq_dev_status *stat)
{
	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
	struct tegra_devfreq_device *actmon_dev;
	unsigned long cur_freq;

	cur_freq = READ_ONCE(tegra->cur_freq);

	/* To be used by the tegra governor */
	stat->private_data = tegra;

	/* The below are to be used by the other governors */
	stat->current_frequency = cur_freq;

	actmon_dev = &tegra->devices[MCALL];

	/* Number of cycles spent on memory access */
	stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);

	/* The bus can be considered to be saturated way before 100% */
	stat->busy_time *= 100 / BUS_SATURATION_RATIO;

	/* Number of cycles in a sampling period */
	stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;

	stat->busy_time = min(stat->busy_time, stat->total_time);

	return 0;
}

static struct devfreq_dev_profile tegra_devfreq_profile = {
	.polling_ms	= ACTMON_SAMPLING_PERIOD,
	.target		= tegra_devfreq_target,
	.get_dev_status	= tegra_devfreq_get_dev_status,
};

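/* Pick the highest target frequency requested by the monitored devices */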
static int tegra_governor_get_target(struct devfreq *devfreq,
				     unsigned long *freq)
{
	struct devfreq_dev_status *stat;
	struct tegra_devfreq *tegra;
	struct tegra_devfreq_device *dev;
	unsigned long target_freq = 0;
	unsigned int i;
	int err;

	err = devfreq_update_stats(devfreq);
	if (err)
		return err;

	stat = &devfreq->last_status;

	tegra = stat->private_data;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		dev = &tegra->devices[i];

		actmon_update_target(tegra, dev);

		target_freq = max(target_freq, dev->target_freq);
	}

	*freq = target_freq;

	return 0;
}

static int tegra_governor_event_handler(struct devfreq *devfreq,
					unsigned int event, void *data)
{
	struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
	unsigned int *new_delay = data;
	int ret = 0;

	/*
	 * Couple devfreq-device with the governor early because it is
	 * needed at the moment of governor's start (used by ISR).
	 */
	tegra->devfreq = devfreq;

	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		ret = tegra_actmon_start(tegra);
		break;

	case DEVFREQ_GOV_STOP:
		tegra_actmon_stop(tegra);
		devfreq_monitor_stop(devfreq);
		break;

	case DEVFREQ_GOV_INTERVAL:
		/*
		 * ACTMON hardware supports up to 256 milliseconds for the
		 * sampling period.
		 */
		if (*new_delay > 256) {
			ret = -EINVAL;
			break;
		}

		tegra_actmon_pause(tegra);
		devfreq_interval_update(devfreq, new_delay);
		ret = tegra_actmon_resume(tegra);
		break;

	case DEVFREQ_GOV_SUSPEND:
		tegra_actmon_stop(tegra);
		devfreq_monitor_suspend(devfreq);
		break;

	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		ret = tegra_actmon_start(tegra);
		break;
	}

	return ret;
}

static struct devfreq_governor tegra_devfreq_governor = {
	.name = "tegra_actmon",
	.get_target_freq = tegra_governor_get_target,
	.event_handler = tegra_governor_event_handler,
	.immutable = true,
	.interrupt_driven = true,
};

static int tegra_devfreq_probe(struct platform_device *pdev)
{
	struct tegra_devfreq_device *dev;
	struct tegra_devfreq *tegra;
	struct devfreq *devfreq;
	unsigned int i;
	long rate;
	int err;

	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	tegra->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tegra->regs))
		return PTR_ERR(tegra->regs);

	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->reset)) {
		dev_err(&pdev->dev, "Failed to get reset\n");
		return PTR_ERR(tegra->reset);
	}

	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->clock)) {
		dev_err(&pdev->dev, "Failed to get actmon clock\n");
		return PTR_ERR(tegra->clock);
	}

	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
	if (IS_ERR(tegra->emc_clock)) {
		dev_err(&pdev->dev, "Failed to get emc clock\n");
		return PTR_ERR(tegra->emc_clock);
	}

	err = platform_get_irq(pdev, 0);
	if (err < 0) {
		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", err);
		return err;
	}
	tegra->irq = err;

	irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);

	err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
					actmon_thread_isr, IRQF_ONESHOT,
					"tegra-devfreq", tegra);
	if (err) {
		dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
		return err;
	}

	reset_control_assert(tegra->reset);

	err = clk_prepare_enable(tegra->clock);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to prepare and enable ACTMON clock\n");
		return err;
	}

	reset_control_deassert(tegra->reset);

	rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
	if (rate < 0) {
		dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
		err = rate;
		goto remove_opps;
	}

	tegra->max_freq = rate / KHZ;

	for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
		dev = tegra->devices + i;
		dev->config = actmon_device_configs + i;
		dev->regs = tegra->regs + dev->config->offset;
	}

	for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
		rate = clk_round_rate(tegra->emc_clock, rate);

		if (rate < 0) {
			dev_err(&pdev->dev,
				"Failed to round clock rate: %ld\n", rate);
			err = rate;
			goto remove_opps;
		}

		err = dev_pm_opp_add(&pdev->dev, rate / KHZ, 0);
		if (err) {
			dev_err(&pdev->dev, "Failed to add OPP: %d\n", err);
			goto remove_opps;
		}
	}

	platform_set_drvdata(pdev, tegra);

	tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
	tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;

	INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
			  tegra_actmon_delayed_update);

	err = devfreq_add_governor(&tegra_devfreq_governor);
	if (err) {
		dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
		goto remove_opps;
	}

	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
	tegra_devfreq_profile.initial_freq /= KHZ;

	devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
				     "tegra_actmon", NULL);
	if (IS_ERR(devfreq)) {
		err = PTR_ERR(devfreq);
		goto remove_governor;
	}

	return 0;

remove_governor:
	devfreq_remove_governor(&tegra_devfreq_governor);

remove_opps:
	dev_pm_opp_remove_all_dynamic(&pdev->dev);

	reset_control_reset(tegra->reset);
	clk_disable_unprepare(tegra->clock);

	return err;
}

static int tegra_devfreq_remove(struct platform_device *pdev)
{
	struct tegra_devfreq *tegra = platform_get_drvdata(pdev);

	devfreq_remove_device(tegra->devfreq);
	devfreq_remove_governor(&tegra_devfreq_governor);

	dev_pm_opp_remove_all_dynamic(&pdev->dev);

	reset_control_reset(tegra->reset);
	clk_disable_unprepare(tegra->clock);

	return 0;
}

static const struct of_device_id tegra_devfreq_of_match[] = {
	{ .compatible = "nvidia,tegra30-actmon" },
	{ .compatible = "nvidia,tegra124-actmon" },
	{ },
};

MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);

static struct platform_driver tegra_devfreq_driver = {
	.probe	= tegra_devfreq_probe,
	.remove	= tegra_devfreq_remove,
	.driver = {
		.name = "tegra-devfreq",
		.of_match_table = tegra_devfreq_of_match,
	},
};
module_platform_driver(tegra_devfreq_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Tegra devfreq driver");
MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");