1 // SPDX-License-Identifier: GPL-2.0+
2 //
3 // Copyright 2016 Freescale Semiconductor, Inc.
4 // Copyright 2017 NXP
5 
6 #include <linux/clk.h>
7 #include <linux/clockchips.h>
8 #include <linux/clocksource.h>
9 #include <linux/delay.h>
10 #include <linux/interrupt.h>
11 #include <linux/of_address.h>
12 #include <linux/of_irq.h>
13 #include <linux/sched_clock.h>
14 
/* TPM register offsets (relative to timer_base) and bit fields */
#define TPM_PARAM			0x4
/* PARAM[23:16] reports the implemented counter width in bits (16 or 32) */
#define TPM_PARAM_WIDTH_SHIFT		16
#define TPM_PARAM_WIDTH_MASK		(0xff << 16)
#define TPM_SC				0x10
/* SC.CMOD: clock the counter on every TPM counter clock tick */
#define TPM_SC_CMOD_INC_PER_CNT		(0x1 << 3)
/* SC.PS prescaler values: 0x3 = divide-by-8, 0x7 = divide-by-128 */
#define TPM_SC_CMOD_DIV_DEFAULT		0x3
#define TPM_SC_CMOD_DIV_MAX		0x7
/* SC.TOF timer-overflow flag; write-1-to-clear (see init sequence) */
#define TPM_SC_TOF_MASK			(0x1 << 7)
#define TPM_CNT				0x14
#define TPM_MOD				0x18
#define TPM_STATUS			0x1c
/* STATUS.CH0F: channel 0 event flag; write-1-to-clear */
#define TPM_STATUS_CH0F			BIT(0)
#define TPM_C0SC			0x20
/* C0SC.CHIE: channel 0 interrupt enable */
#define TPM_C0SC_CHIE			BIT(6)
/* C0SC channel mode field (bits [5:2]) */
#define TPM_C0SC_MODE_SHIFT		2
#define TPM_C0SC_MODE_MASK		0x3c
/* channel mode value used for software output compare */
#define TPM_C0SC_MODE_SW_COMPARE	0x4
/* C0SC.CHF channel event flag; write-1-to-clear */
#define TPM_C0SC_CHF_MASK		(0x1 << 7)
#define TPM_C0V				0x24

static int counter_width;	/* counter width in bits, probed from TPM_PARAM */
static int rating;		/* clocksource/clockevent rating based on width */
static void __iomem *timer_base;
static struct clock_event_device clockevent_tpm;
39 
40 static inline void tpm_timer_disable(void)
41 {
42 	unsigned int val;
43 
44 	/* channel disable */
45 	val = readl(timer_base + TPM_C0SC);
46 	val &= ~(TPM_C0SC_MODE_MASK | TPM_C0SC_CHIE);
47 	writel(val, timer_base + TPM_C0SC);
48 }
49 
50 static inline void tpm_timer_enable(void)
51 {
52 	unsigned int val;
53 
54 	/* channel enabled in sw compare mode */
55 	val = readl(timer_base + TPM_C0SC);
56 	val |= (TPM_C0SC_MODE_SW_COMPARE << TPM_C0SC_MODE_SHIFT) |
57 	       TPM_C0SC_CHIE;
58 	writel(val, timer_base + TPM_C0SC);
59 }
60 
static inline void tpm_irq_acknowledge(void)
{
	/* CH0F is write-1-to-clear: ack the channel 0 compare event */
	writel(TPM_STATUS_CH0F, timer_base + TPM_STATUS);
}
65 
/* Backs timer-based delays; registered in tpm_clocksource_init() */
static struct delay_timer tpm_delay_timer;

/* Raw read of the free-running up-counter (CNT register) */
static inline unsigned long tpm_read_counter(void)
{
	return readl(timer_base + TPM_CNT);
}
72 
/* delay_timer callback: current counter value for udelay() calibration */
static unsigned long tpm_read_current_timer(void)
{
	return tpm_read_counter();
}
77 
/*
 * sched_clock() backend; notrace because it can be called from
 * function-tracing context.
 */
static u64 notrace tpm_read_sched_clock(void)
{
	return tpm_read_counter();
}
82 
/*
 * Register the TPM counter as delay timer, sched_clock source and mmio
 * clocksource — all reading the same free-running CNT register.
 *
 * @rate: counter tick rate in Hz (clock rate after the prescaler).
 * Returns 0 on success or the clocksource_mmio_init() error code.
 */
static int __init tpm_clocksource_init(unsigned long rate)
{
	/* use the TPM counter for timer-based udelay() */
	tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
	tpm_delay_timer.freq = rate;
	register_current_timer_delay(&tpm_delay_timer);

	sched_clock_register(tpm_read_sched_clock, counter_width, rate);

	return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm",
				     rate, rating, counter_width,
				     clocksource_mmio_readl_up);
}
95 
96 static int tpm_set_next_event(unsigned long delta,
97 				struct clock_event_device *evt)
98 {
99 	unsigned long next, now;
100 
101 	next = tpm_read_counter();
102 	next += delta;
103 	writel(next, timer_base + TPM_C0V);
104 	now = tpm_read_counter();
105 
106 	/*
107 	 * NOTE: We observed in a very small probability, the bus fabric
108 	 * contention between GPU and A7 may results a few cycles delay
109 	 * of writing CNT registers which may cause the min_delta event got
110 	 * missed, so we need add a ETIME check here in case it happened.
111 	 */
112 	return (int)(next - now) <= 0 ? -ETIME : 0;
113 }
114 
/* clockevents callback: enter oneshot mode by enabling channel 0 compare */
static int tpm_set_state_oneshot(struct clock_event_device *evt)
{
	tpm_timer_enable();

	return 0;
}
121 
/* clockevents callback: stop event generation by disabling channel 0 */
static int tpm_set_state_shutdown(struct clock_event_device *evt)
{
	tpm_timer_disable();

	return 0;
}
128 
129 static irqreturn_t tpm_timer_interrupt(int irq, void *dev_id)
130 {
131 	struct clock_event_device *evt = dev_id;
132 
133 	tpm_irq_acknowledge();
134 
135 	evt->event_handler(evt);
136 
137 	return IRQ_HANDLED;
138 }
139 
static struct clock_event_device clockevent_tpm = {
	.name			= "i.MX7ULP TPM Timer",
	/* oneshot only: every event reprograms C0V in tpm_set_next_event() */
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.set_state_oneshot	= tpm_set_state_oneshot,
	.set_next_event		= tpm_set_next_event,
	.set_state_shutdown	= tpm_set_state_shutdown,
	/* .rating, .cpumask and .irq are filled in by tpm_clockevent_init() */
};
147 
148 static int __init tpm_clockevent_init(unsigned long rate, int irq)
149 {
150 	int ret;
151 
152 	ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
153 			  "i.MX7ULP TPM Timer", &clockevent_tpm);
154 
155 	clockevent_tpm.rating = rating;
156 	clockevent_tpm.cpumask = cpumask_of(0);
157 	clockevent_tpm.irq = irq;
158 	clockevents_config_and_register(&clockevent_tpm, rate, 300,
159 					GENMASK(counter_width - 1, 1));
160 
161 	return ret;
162 }
163 
164 static int __init tpm_timer_init(struct device_node *np)
165 {
166 	struct clk *ipg, *per;
167 	int irq, ret;
168 	u32 rate;
169 
170 	timer_base = of_iomap(np, 0);
171 	if (!timer_base) {
172 		pr_err("tpm: failed to get base address\n");
173 		return -ENXIO;
174 	}
175 
176 	irq = irq_of_parse_and_map(np, 0);
177 	if (!irq) {
178 		pr_err("tpm: failed to get irq\n");
179 		ret = -ENOENT;
180 		goto err_iomap;
181 	}
182 
183 	ipg = of_clk_get_by_name(np, "ipg");
184 	per = of_clk_get_by_name(np, "per");
185 	if (IS_ERR(ipg) || IS_ERR(per)) {
186 		pr_err("tpm: failed to get ipg or per clk\n");
187 		ret = -ENODEV;
188 		goto err_clk_get;
189 	}
190 
191 	/* enable clk before accessing registers */
192 	ret = clk_prepare_enable(ipg);
193 	if (ret) {
194 		pr_err("tpm: ipg clock enable failed (%d)\n", ret);
195 		goto err_clk_get;
196 	}
197 
198 	ret = clk_prepare_enable(per);
199 	if (ret) {
200 		pr_err("tpm: per clock enable failed (%d)\n", ret);
201 		goto err_per_clk_enable;
202 	}
203 
204 	counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK)
205 		>> TPM_PARAM_WIDTH_SHIFT;
206 	/* use rating 200 for 32-bit counter and 150 for 16-bit counter */
207 	rating = counter_width == 0x20 ? 200 : 150;
208 
209 	/*
210 	 * Initialize tpm module to a known state
211 	 * 1) Counter disabled
212 	 * 2) TPM counter operates in up counting mode
213 	 * 3) Timer Overflow Interrupt disabled
214 	 * 4) Channel0 disabled
215 	 * 5) DMA transfers disabled
216 	 */
217 	/* make sure counter is disabled */
218 	writel(0, timer_base + TPM_SC);
219 	/* TOF is W1C */
220 	writel(TPM_SC_TOF_MASK, timer_base + TPM_SC);
221 	writel(0, timer_base + TPM_CNT);
222 	/* CHF is W1C */
223 	writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC);
224 
225 	/*
226 	 * increase per cnt,
227 	 * div 8 for 32-bit counter and div 128 for 16-bit counter
228 	 */
229 	writel(TPM_SC_CMOD_INC_PER_CNT |
230 		(counter_width == 0x20 ?
231 		TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX),
232 		     timer_base + TPM_SC);
233 
234 	/* set MOD register to maximum for free running mode */
235 	writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD);
236 
237 	rate = clk_get_rate(per) >> 3;
238 	ret = tpm_clocksource_init(rate);
239 	if (ret)
240 		goto err_per_clk_enable;
241 
242 	ret = tpm_clockevent_init(rate, irq);
243 	if (ret)
244 		goto err_per_clk_enable;
245 
246 	return 0;
247 
248 err_per_clk_enable:
249 	clk_disable_unprepare(ipg);
250 err_clk_get:
251 	clk_put(per);
252 	clk_put(ipg);
253 err_iomap:
254 	iounmap(timer_base);
255 	return ret;
256 }
257 TIMER_OF_DECLARE(imx7ulp, "fsl,imx7ulp-tpm", tpm_timer_init);
258