xref: /openbmc/linux/drivers/net/ethernet/ti/icssg/icss_iep.c (revision 9144f784f852f9a125cabe9927b986d909bfa439)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
4  *
5  * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
6  *
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/err.h>
12 #include <linux/io.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_platform.h>
16 #include <linux/platform_device.h>
17 #include <linux/timekeeping.h>
18 #include <linux/interrupt.h>
19 #include <linux/of_irq.h>
20 
21 #include "icss_iep.h"
22 
23 #define IEP_MAX_DEF_INC		0xf
24 #define IEP_MAX_COMPEN_INC		0xfff
25 #define IEP_MAX_COMPEN_COUNT	0xffffff
26 
27 #define IEP_GLOBAL_CFG_CNT_ENABLE	BIT(0)
28 #define IEP_GLOBAL_CFG_DEFAULT_INC_MASK		GENMASK(7, 4)
29 #define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT	4
30 #define IEP_GLOBAL_CFG_COMPEN_INC_MASK		GENMASK(19, 8)
31 #define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT		8
32 
33 #define IEP_GLOBAL_STATUS_CNT_OVF	BIT(0)
34 
35 #define IEP_CMP_CFG_SHADOW_EN		BIT(17)
36 #define IEP_CMP_CFG_CMP0_RST_CNT_EN	BIT(0)
37 #define IEP_CMP_CFG_CMP_EN(cmp)		(GENMASK(16, 1) & (1 << ((cmp) + 1)))
38 
39 #define IEP_CMP_STATUS(cmp)		(1 << (cmp))
40 
41 #define IEP_SYNC_CTRL_SYNC_EN		BIT(0)
42 #define IEP_SYNC_CTRL_SYNC_N_EN(n)	(GENMASK(2, 1) & (BIT(1) << (n)))
43 
44 #define IEP_MIN_CMP	0
45 #define IEP_MAX_CMP	15
46 
47 #define ICSS_IEP_64BIT_COUNTER_SUPPORT		BIT(0)
48 #define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT	BIT(1)
49 #define ICSS_IEP_SHADOW_MODE_SUPPORT		BIT(2)
50 
51 #define LATCH_INDEX(ts_index)			((ts_index) + 6)
52 #define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n)	BIT(LATCH_INDEX(n))
53 #define IEP_CAP_CFG_CAP_ASYNC_EN(n)		BIT(LATCH_INDEX(n) + 10)
54 
/* Logical IEP register indices. Each entry is translated to the actual
 * SoC-specific MMIO offset through icss_iep_plat_data::reg_offs[].
 */
enum {
	ICSS_IEP_GLOBAL_CFG_REG,
	ICSS_IEP_GLOBAL_STATUS_REG,
	ICSS_IEP_COMPEN_REG,
	ICSS_IEP_SLOW_COMPEN_REG,
	ICSS_IEP_COUNT_REG0,
	ICSS_IEP_COUNT_REG1,
	ICSS_IEP_CAPTURE_CFG_REG,
	ICSS_IEP_CAPTURE_STAT_REG,

	ICSS_IEP_CAP6_RISE_REG0,
	ICSS_IEP_CAP6_RISE_REG1,

	ICSS_IEP_CAP7_RISE_REG0,
	ICSS_IEP_CAP7_RISE_REG1,

	ICSS_IEP_CMP_CFG_REG,
	ICSS_IEP_CMP_STAT_REG,
	ICSS_IEP_CMP0_REG0,
	ICSS_IEP_CMP0_REG1,
	ICSS_IEP_CMP1_REG0,
	ICSS_IEP_CMP1_REG1,

	ICSS_IEP_CMP8_REG0,
	ICSS_IEP_CMP8_REG1,
	ICSS_IEP_SYNC_CTRL_REG,
	ICSS_IEP_SYNC0_STAT_REG,
	ICSS_IEP_SYNC1_STAT_REG,
	ICSS_IEP_SYNC_PWIDTH_REG,
	ICSS_IEP_SYNC0_PERIOD_REG,
	ICSS_IEP_SYNC1_DELAY_REG,
	ICSS_IEP_SYNC_START_REG,
	ICSS_IEP_MAX_REGS,	/* count; sizes icss_iep_plat_data::reg_offs */
};
89 
/**
 * struct icss_iep_plat_data - Plat data to handle SoC variants
 * @config: Regmap configuration data
 *	    (NOTE(review): non-const pointer; could be constified together
 *	    with the regmap_config definition below)
 * @reg_offs: register offsets to capture offset differences across SoCs
 * @flags: Flags to represent IEP properties (ICSS_IEP_*_SUPPORT bits)
 */
struct icss_iep_plat_data {
	struct regmap_config *config;
	u32 reg_offs[ICSS_IEP_MAX_REGS];
	u32 flags;
};
101 
/* Driver-private state for one IEP instance. */
struct icss_iep {
	struct device *dev;
	void __iomem *base;	/* MMIO base used by the fast readl/writel paths */
	const struct icss_iep_plat_data *plat_data;
	struct regmap *map;
	struct device_node *client_np;	/* acquiring client's DT node, NULL when free */
	unsigned long refclk_freq;	/* IEP reference clock rate in Hz */
	int clk_tick_time;	/* one refclk tick time in ns */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;	/* NULL when no PHC is registered */
	struct mutex ptp_clk_mutex;	/* PHC access serializer */
	spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
	u32 def_inc;	/* default counter increment per refclk tick, ns */
	s16 slow_cmp_inc;	/* signed ns adjustment per slow-compensation cycle */
	u32 slow_cmp_count;	/* slow-compensation cycle length */
	const struct icss_iep_clockops *ops;	/* optional firmware-assisted ops */
	void *clockops_data;	/* cookie passed to ops callbacks */
	u32 cycle_time_ns;	/* CMP0 cycle time; 0 selects free-running mode */
	u32 perout_enabled;
	bool pps_enabled;
	int cap_cmp_irq;
	u64 period;	/* active periodic-output period in ns */
	u32 latch_enable;	/* bitmask of enabled extts latch inputs */
};
126 
127 /**
128  * icss_iep_get_count_hi() - Get the upper 32 bit IEP counter
129  * @iep: Pointer to structure representing IEP.
130  *
131  * Return: upper 32 bit IEP counter
132  */
icss_iep_get_count_hi(struct icss_iep * iep)133 int icss_iep_get_count_hi(struct icss_iep *iep)
134 {
135 	u32 val = 0;
136 
137 	if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
138 		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
139 
140 	return val;
141 }
142 EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);
143 
144 /**
145  * icss_iep_get_count_low() - Get the lower 32 bit IEP counter
146  * @iep: Pointer to structure representing IEP.
147  *
148  * Return: lower 32 bit IEP counter
149  */
icss_iep_get_count_low(struct icss_iep * iep)150 int icss_iep_get_count_low(struct icss_iep *iep)
151 {
152 	u32 val = 0;
153 
154 	if (iep)
155 		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
156 
157 	return val;
158 }
159 EXPORT_SYMBOL_GPL(icss_iep_get_count_low);
160 
161 /**
162  * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
163  * @iep: Pointer to structure representing IEP.
164  *
165  * Return: PTP clock index, -1 if not registered
166  */
icss_iep_get_ptp_clock_idx(struct icss_iep * iep)167 int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
168 {
169 	if (!iep || !iep->ptp_clock)
170 		return -1;
171 	return ptp_clock_index(iep->ptp_clock);
172 }
173 EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);
174 
/* Load a 64-bit nanosecond value into the IEP counter.
 * NOTE(review): the upper word is written before the lower word; this
 * ordering presumably matters for the hardware counter latch — confirm
 * against the IEP TRM before reordering.
 */
static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
{
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		writel(upper_32_bits(ns), iep->base +
		       iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
	writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
}
182 
183 static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
184 
/**
 * icss_iep_settime() - Set time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @ns: Time to be set in nanoseconds
 *
 * This API uses writel() instead of regmap_write() for write operations as
 * regmap_write() is too slow and this API is time sensitive.
 */
static void icss_iep_settime(struct icss_iep *iep, u64 ns)
{
	unsigned long flags;

	/* firmware-assisted mode delegates the whole operation */
	if (iep->ops && iep->ops->settime) {
		iep->ops->settime(iep->clockops_data, ns);
		return;
	}

	spin_lock_irqsave(&iep->irq_lock, flags);
	/* stop SYNC generation while the counter is rewritten so no pulse
	 * fires off a stale CMP1 value
	 */
	if (iep->pps_enabled || iep->perout_enabled)
		writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);

	icss_iep_set_counter(iep, ns);

	/* re-arm CMP1 on the next period boundary and re-enable SYNC0 */
	if (iep->pps_enabled || iep->perout_enabled) {
		icss_iep_update_to_next_boundary(iep, ns);
		writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
		       iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
	}
	spin_unlock_irqrestore(&iep->irq_lock, flags);
}
215 
/**
 * icss_iep_gettime() - Get time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @sts: Pointer to structure representing PTP system timestamp.
 *
 * This API uses readl() instead of regmap_read() for read operations as
 * regmap_read() is too slow and this API is time sensitive.
 *
 * Return: The current timestamp of the PTP clock using IEP driver
 */
static u64 icss_iep_gettime(struct icss_iep *iep,
			    struct ptp_system_timestamp *sts)
{
	/* ts_hi stays 0 on SoCs without the 64-bit counter */
	u32 ts_hi = 0, ts_lo;
	unsigned long flags;

	if (iep->ops && iep->ops->gettime)
		return iep->ops->gettime(iep->clockops_data, sts);

	/* use local_irq_x() to make it work for both RT/non-RT */
	local_irq_save(flags);

	/* no need to play with hi-lo, hi is latched when lo is read */
	ptp_read_system_prets(sts);
	ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
	ptp_read_system_postts(sts);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);

	local_irq_restore(flags);

	return (u64)ts_lo | (u64)ts_hi << 32;
}
249 
/* Start the IEP free-running counter. */
static void icss_iep_enable(struct icss_iep *iep)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_CNT_ENABLE,
			   IEP_GLOBAL_CFG_CNT_ENABLE);
}
256 
/* Stop the IEP counter; the count value is left untouched. */
static void icss_iep_disable(struct icss_iep *iep)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_CNT_ENABLE,
			   0);
}
263 
/* Configure cyclic (shadow) mode: the CMP0 event resets the counter every
 * iep->cycle_time_ns, giving a fixed-length hardware cycle. The counter is
 * stopped during reconfiguration and restarted from zero at the end.
 */
static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
{
	u32 cycle_time;
	int cmp;

	/* presumably compensates for the reset taking one default
	 * increment — TODO confirm against the IEP TRM
	 */
	cycle_time = iep->cycle_time_ns - iep->def_inc;

	icss_iep_disable(iep);

	/* disable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, 0);

	/* enable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);

	/* clear counters */
	icss_iep_set_counter(iep, 0);

	/* clear overflow status */
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
			   IEP_GLOBAL_STATUS_CNT_OVF,
			   IEP_GLOBAL_STATUS_CNT_OVF);

	/* clear compare status and disable all compare events
	 * NOTE(review): the loop runs cmp = 0..14, so CMP15 (IEP_MAX_CMP)
	 * is never cleared/disabled — confirm whether that is intentional.
	 */
	for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
		regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
				   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));

		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(cmp), 0);
	}

	/* enable reset counter on CMP0 event */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN);
	/* enable compare */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP_EN(0),
			   IEP_CMP_CFG_CMP_EN(0));

	/* set CMP0 value to cycle time
	 * NOTE(review): the upper CMP0 word is also written with cycle_time
	 * rather than 0 — verify this is what the hardware expects.
	 */
	regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);

	icss_iep_set_counter(iep, 0);
	icss_iep_enable(iep);
}
315 
/* Program the counter increment (ns) applied on every refclk tick. */
static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_DEFAULT_INC_MASK,
			   def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT);
}
322 
/* Program the counter increment (ns) used during a compensation event,
 * clamped to the width of the COMPEN_INC field.
 */
static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
{
	if (compen_inc > IEP_MAX_COMPEN_INC) {
		dev_err(regmap_get_device(iep->map),
			"%s: too high compensation inc %d\n",
			__func__, compen_inc);
		compen_inc = IEP_MAX_COMPEN_INC;
	}

	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_COMPEN_INC_MASK,
			   compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT);
}
337 
/* Program the compensation cycle count, clamped to the 24-bit COMPEN
 * register width.
 */
static void icss_iep_set_compensation_count(struct icss_iep *iep,
					    u32 compen_count)
{
	struct device *dev = regmap_get_device(iep->map);

	if (compen_count > IEP_MAX_COMPEN_COUNT) {
		/* %u: compen_count is u32; %d would misprint values > INT_MAX */
		dev_err(dev, "%s: too high compensation count %u\n",
			__func__, compen_count);
		compen_count = IEP_MAX_COMPEN_COUNT;
	}

	regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
}
351 
/* Program the slow-compensation cycle count (cycles between each
 * compensation event).
 */
static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
						 u32 compen_count)
{
	regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
}
357 
358 /* PTP PHC operations */
icss_iep_ptp_adjfine(struct ptp_clock_info * ptp,long scaled_ppm)359 static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
360 {
361 	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
362 	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
363 	u32 cyc_count;
364 	u16 cmp_inc;
365 
366 	mutex_lock(&iep->ptp_clk_mutex);
367 
368 	/* ppb is amount of frequency we want to adjust in 1GHz (billion)
369 	 * e.g. 100ppb means we need to speed up clock by 100Hz
370 	 * i.e. at end of 1 second (1 billion ns) clock time, we should be
371 	 * counting 100 more ns.
372 	 * We use IEP slow compensation to achieve continuous freq. adjustment.
373 	 * There are 2 parts. Cycle time and adjustment per cycle.
374 	 * Simplest case would be 1 sec Cycle time. Then adjustment
375 	 * pre cycle would be (def_inc + ppb) value.
376 	 * Cycle time will have to be chosen based on how worse the ppb is.
377 	 * e.g. smaller the ppb, cycle time has to be large.
378 	 * The minimum adjustment we can do is +-1ns per cycle so let's
379 	 * reduce the cycle time to get 1ns per cycle adjustment.
380 	 *	1ppb = 1sec cycle time & 1ns adjust
381 	 *	1000ppb = 1/1000 cycle time & 1ns adjust per cycle
382 	 */
383 
384 	if (iep->cycle_time_ns)
385 		iep->slow_cmp_inc = iep->clk_tick_time;	/* 4ns adj per cycle */
386 	else
387 		iep->slow_cmp_inc = 1;	/* 1ns adjust per cycle */
388 
389 	if (ppb < 0) {
390 		iep->slow_cmp_inc = -iep->slow_cmp_inc;
391 		ppb = -ppb;
392 	}
393 
394 	cyc_count = NSEC_PER_SEC;		/* 1s cycle time @1GHz */
395 	cyc_count /= ppb;		/* cycle time per ppb */
396 
397 	/* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
398 	if (!iep->cycle_time_ns)
399 		cyc_count /= iep->clk_tick_time;
400 	iep->slow_cmp_count = cyc_count;
401 
402 	/* iep->clk_tick_time is def_inc */
403 	cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
404 	icss_iep_set_compensation_inc(iep, cmp_inc);
405 	icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);
406 
407 	mutex_unlock(&iep->ptp_clk_mutex);
408 
409 	return 0;
410 }
411 
/* PHC .adjtime: shift the clock by @delta nanoseconds.
 *
 * Return: 0 always.
 */
static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);

	mutex_lock(&iep->ptp_clk_mutex);
	if (iep->ops && iep->ops->adjtime) {
		/* firmware-assisted adjustment */
		iep->ops->adjtime(iep->clockops_data, delta);
	} else {
		s64 now = icss_iep_gettime(iep, NULL);

		icss_iep_settime(iep, now + delta);
	}
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}
429 
icss_iep_ptp_gettimeex(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)430 static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
431 				  struct timespec64 *ts,
432 				  struct ptp_system_timestamp *sts)
433 {
434 	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
435 	u64 ns;
436 
437 	mutex_lock(&iep->ptp_clk_mutex);
438 	ns = icss_iep_gettime(iep, sts);
439 	*ts = ns_to_timespec64(ns);
440 	mutex_unlock(&iep->ptp_clk_mutex);
441 
442 	return 0;
443 }
444 
icss_iep_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)445 static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
446 				const struct timespec64 *ts)
447 {
448 	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
449 	u64 ns;
450 
451 	mutex_lock(&iep->ptp_clk_mutex);
452 	ns = timespec64_to_ns(ts);
453 	icss_iep_settime(iep, ns);
454 	mutex_unlock(&iep->ptp_clk_mutex);
455 
456 	return 0;
457 }
458 
/* Program CMP1 to the next iep->period boundary at or after @start_ns
 * (never in the past relative to the current counter).
 */
static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
{
	u64 ns, p_ns;
	u32 offset;

	ns = icss_iep_gettime(iep, NULL);
	if (start_ns < ns)
		start_ns = ns;
	p_ns = iep->period;
	/* Round up to next period boundary */
	start_ns += p_ns - 1;
	/* do_div() leaves the quotient in start_ns and returns the remainder */
	offset = do_div(start_ns, p_ns);
	start_ns = start_ns * p_ns;
	/* If it is too close to update, shift to next boundary */
	if (p_ns - offset < 10)
		start_ns += p_ns;

	regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
}
480 
/* Program the CMP1/SYNC0 hardware for periodic output (or tear it down).
 * Caller holds iep->irq_lock. With clockops, the firmware supplies the
 * compare value; otherwise CMP1 is armed from the request period.
 *
 * Return: 0 on success, negative error from ops->perout_enable() otherwise.
 */
static int icss_iep_perout_enable_hw(struct icss_iep *iep,
				     struct ptp_perout_request *req, int on)
{
	int ret;
	u64 cmp;

	if (iep->ops && iep->ops->perout_enable) {
		/* firmware computes the compare value for us */
		ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
		if (ret)
			return ret;

		if (on) {
			/* Configure CMP */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
			/* Configure SYNC, 1ms pulse width */
			regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
			regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
			regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
			/* Enable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
		} else {
			/* Disable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), 0);

			/* clear regs */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
		}
	} else {
		if (on) {
			u64 start_ns;

			/* NOTE(review): req->start is ignored here; the first
			 * edge is aligned to a period boundary instead —
			 * confirm this is the intended perout semantic.
			 */
			iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
				      req->period.nsec;
			start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
				   + req->period.nsec;
			icss_iep_update_to_next_boundary(iep, start_ns);

			/* Enable Sync in single shot mode  */
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
				     IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
			/* Enable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
		} else {
			/* Disable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), 0);

			/* clear CMP regs */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);

			/* Disable sync */
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
		}
	}

	return 0;
}
548 
/* Enable/disable periodic output. Mutually exclusive with PPS since both
 * drive CMP1/SYNC0.
 *
 * Return: 0 on success or no-op, -EBUSY while PPS is active, or the error
 * from icss_iep_perout_enable_hw().
 */
static int icss_iep_perout_enable(struct icss_iep *iep,
				  struct ptp_perout_request *req, int on)
{
	unsigned long flags;
	int ret = 0;

	mutex_lock(&iep->ptp_clk_mutex);

	/* CMP1/SYNC0 currently owned by PPS */
	if (iep->pps_enabled) {
		ret = -EBUSY;
		goto exit;
	}

	/* already in the requested state */
	if (iep->perout_enabled == !!on)
		goto exit;

	/* irq_lock serializes against the CMP IRQ path */
	spin_lock_irqsave(&iep->irq_lock, flags);
	ret = icss_iep_perout_enable_hw(iep, req, on);
	if (!ret)
		iep->perout_enabled = !!on;
	spin_unlock_irqrestore(&iep->irq_lock, flags);

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}
576 
icss_iep_pps_enable(struct icss_iep * iep,int on)577 static int icss_iep_pps_enable(struct icss_iep *iep, int on)
578 {
579 	struct ptp_clock_request rq;
580 	struct timespec64 ts;
581 	unsigned long flags;
582 	int ret = 0;
583 	u64 ns;
584 
585 	mutex_lock(&iep->ptp_clk_mutex);
586 
587 	if (iep->perout_enabled) {
588 		ret = -EBUSY;
589 		goto exit;
590 	}
591 
592 	if (iep->pps_enabled == !!on)
593 		goto exit;
594 
595 	spin_lock_irqsave(&iep->irq_lock, flags);
596 
597 	rq.perout.index = 0;
598 	if (on) {
599 		ns = icss_iep_gettime(iep, NULL);
600 		ts = ns_to_timespec64(ns);
601 		rq.perout.period.sec = 1;
602 		rq.perout.period.nsec = 0;
603 		rq.perout.start.sec = ts.tv_sec + 2;
604 		rq.perout.start.nsec = 0;
605 		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
606 	} else {
607 		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
608 	}
609 
610 	if (!ret)
611 		iep->pps_enabled = !!on;
612 
613 	spin_unlock_irqrestore(&iep->irq_lock, flags);
614 
615 exit:
616 	mutex_unlock(&iep->ptp_clk_mutex);
617 
618 	return ret;
619 }
620 
/* Enable/disable external timestamp capture on latch @index.
 *
 * Return: 0 on success or no-op, or the error from ops->extts_enable().
 */
static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
	u32 val, cap;
	int ret = 0;	/* was u32: ops->extts_enable() returns negative errnos */

	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->ops && iep->ops->extts_enable) {
		ret = iep->ops->extts_enable(iep->clockops_data, index, on);
		goto exit;
	}

	/* already in the requested state? */
	if (((iep->latch_enable & BIT(index)) >> index) == on)
		goto exit;

	regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
	cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
	if (on) {
		val |= cap;
		iep->latch_enable |= BIT(index);
	} else {
		val &= ~cap;
		iep->latch_enable &= ~BIT(index);
	}
	regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}
651 
icss_iep_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)652 static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
653 			       struct ptp_clock_request *rq, int on)
654 {
655 	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
656 
657 	switch (rq->type) {
658 	case PTP_CLK_REQ_PEROUT:
659 		return icss_iep_perout_enable(iep, &rq->perout, on);
660 	case PTP_CLK_REQ_PPS:
661 		return icss_iep_pps_enable(iep, on);
662 	case PTP_CLK_REQ_EXTTS:
663 		return icss_iep_extts_enable(iep, rq->extts.index, on);
664 	default:
665 		break;
666 	}
667 
668 	return -EOPNOTSUPP;
669 }
670 
/* Template PHC capabilities; copied per-instance in probe, and
 * n_per_out/pps/n_ext_ts are filled in by icss_iep_init() depending on
 * platform and clockops support.
 */
static struct ptp_clock_info icss_iep_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "ICSS IEP timer",
	.max_adj	= 10000000,
	.adjfine	= icss_iep_ptp_adjfine,
	.adjtime	= icss_iep_ptp_adjtime,
	.gettimex64	= icss_iep_ptp_gettimeex,
	.settime64	= icss_iep_ptp_settime,
	.enable		= icss_iep_ptp_enable,
};
681 
icss_iep_get_idx(struct device_node * np,int idx)682 struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
683 {
684 	struct platform_device *pdev;
685 	struct device_node *iep_np;
686 	struct icss_iep *iep;
687 
688 	iep_np = of_parse_phandle(np, "ti,iep", idx);
689 	if (!iep_np || !of_device_is_available(iep_np))
690 		return ERR_PTR(-ENODEV);
691 
692 	pdev = of_find_device_by_node(iep_np);
693 	of_node_put(iep_np);
694 
695 	if (!pdev)
696 		/* probably IEP not yet probed */
697 		return ERR_PTR(-EPROBE_DEFER);
698 
699 	iep = platform_get_drvdata(pdev);
700 	if (!iep)
701 		return ERR_PTR(-EPROBE_DEFER);
702 
703 	device_lock(iep->dev);
704 	if (iep->client_np) {
705 		device_unlock(iep->dev);
706 		dev_err(iep->dev, "IEP is already acquired by %s",
707 			iep->client_np->name);
708 		return ERR_PTR(-EBUSY);
709 	}
710 	iep->client_np = np;
711 	device_unlock(iep->dev);
712 	get_device(iep->dev);
713 
714 	return iep;
715 }
716 EXPORT_SYMBOL_GPL(icss_iep_get_idx);
717 
/* Acquire the first ("ti,iep" index 0) IEP referenced by @np.
 * See icss_iep_get_idx() for return values.
 */
struct icss_iep *icss_iep_get(struct device_node *np)
{
	return icss_iep_get_idx(np, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_get);
723 
/* Release an IEP acquired with icss_iep_get{,_idx}(): clear the client
 * ownership and drop the device reference taken at acquisition time.
 */
void icss_iep_put(struct icss_iep *iep)
{
	device_lock(iep->dev);
	iep->client_np = NULL;
	device_unlock(iep->dev);
	put_device(iep->dev);
}
EXPORT_SYMBOL_GPL(icss_iep_put);
732 
/* Initialize and start the IEP for firmware use: free-running counter with
 * default increments, compensation neutralized, no PTP clock registered.
 */
void icss_iep_init_fw(struct icss_iep *iep)
{
	/* start IEP for FW use in raw 64bit mode, no PTP support */
	iep->clk_tick_time = iep->def_inc;
	iep->cycle_time_ns = 0;
	iep->ops = NULL;
	iep->clockops_data = NULL;
	icss_iep_set_default_inc(iep, iep->def_inc);
	/* compen inc == def_inc makes compensation events a no-op */
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	icss_iep_enable(iep);
	icss_iep_settime(iep, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_init_fw);
752 
/* Stop the IEP counter after firmware use. */
void icss_iep_exit_fw(struct icss_iep *iep)
{
	icss_iep_disable(iep);
}
EXPORT_SYMBOL_GPL(icss_iep_exit_fw);
758 
/**
 * icss_iep_init() - Initialize the IEP for PTP use and register a PHC
 * @iep: Pointer to structure representing IEP.
 * @clkops: optional firmware-assisted clock operations (may be NULL)
 * @clockops_data: opaque cookie passed to @clkops callbacks
 * @cycle_time_ns: non-zero selects cyclic (shadow) mode with this period
 *
 * Return: 0 on success, negative error from ptp_clock_register() otherwise.
 */
int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
		  void *clockops_data, u32 cycle_time_ns)
{
	int ret = 0;

	iep->cycle_time_ns = cycle_time_ns;
	iep->clk_tick_time = iep->def_inc;
	iep->ops = clkops;
	iep->clockops_data = clockops_data;
	icss_iep_set_default_inc(iep, iep->def_inc);
	/* compen inc == def_inc: start with frequency adjustment neutral */
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	/* perout/PPS/extts are only advertised with both the 64-bit counter
	 * and slow compensation support, and only when clockops provide the
	 * corresponding callbacks
	 */
	if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
	    !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
		goto skip_perout;

	if (iep->ops && iep->ops->perout_enable) {
		iep->ptp_info.n_per_out = 1;
		iep->ptp_info.pps = 1;
	}

	if (iep->ops && iep->ops->extts_enable)
		iep->ptp_info.n_ext_ts = 2;

skip_perout:
	if (cycle_time_ns)
		icss_iep_enable_shadow_mode(iep);
	else
		icss_iep_enable(iep);
	icss_iep_settime(iep, ktime_get_real_ns());

	iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
	if (IS_ERR(iep->ptp_clock)) {
		ret = PTR_ERR(iep->ptp_clock);
		iep->ptp_clock = NULL;
		dev_err(iep->dev, "Failed to register ptp clk %d\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(icss_iep_init);
805 
/* Tear down PTP use of the IEP: unregister the PHC, stop the counter and
 * disable any still-active PPS/periodic output.
 *
 * NOTE(review): the counter is disabled before the PPS/perout teardown,
 * and the perout path passes a NULL request — harmless for the register
 * path (req is not dereferenced when disabling), but it would reach
 * ops->perout_enable() with req == NULL; confirm clockops implementations
 * tolerate that.
 *
 * Return: 0 always.
 */
int icss_iep_exit(struct icss_iep *iep)
{
	if (iep->ptp_clock) {
		ptp_clock_unregister(iep->ptp_clock);
		iep->ptp_clock = NULL;
	}
	icss_iep_disable(iep);

	if (iep->pps_enabled)
		icss_iep_pps_enable(iep, false);
	else if (iep->perout_enabled)
		icss_iep_perout_enable(iep, NULL, false);

	return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);
822 
icss_iep_probe(struct platform_device * pdev)823 static int icss_iep_probe(struct platform_device *pdev)
824 {
825 	struct device *dev = &pdev->dev;
826 	struct icss_iep *iep;
827 	struct clk *iep_clk;
828 
829 	iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
830 	if (!iep)
831 		return -ENOMEM;
832 
833 	iep->dev = dev;
834 	iep->base = devm_platform_ioremap_resource(pdev, 0);
835 	if (IS_ERR(iep->base))
836 		return -ENODEV;
837 
838 	iep_clk = devm_clk_get(dev, NULL);
839 	if (IS_ERR(iep_clk))
840 		return PTR_ERR(iep_clk);
841 
842 	iep->refclk_freq = clk_get_rate(iep_clk);
843 
844 	iep->def_inc = NSEC_PER_SEC / iep->refclk_freq;	/* ns per clock tick */
845 	if (iep->def_inc > IEP_MAX_DEF_INC) {
846 		dev_err(dev, "Failed to set def_inc %d.  IEP_clock is too slow to be supported\n",
847 			iep->def_inc);
848 		return -EINVAL;
849 	}
850 
851 	iep->plat_data = device_get_match_data(dev);
852 	if (!iep->plat_data)
853 		return -EINVAL;
854 
855 	iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
856 	if (IS_ERR(iep->map)) {
857 		dev_err(dev, "Failed to create regmap for IEP %ld\n",
858 			PTR_ERR(iep->map));
859 		return PTR_ERR(iep->map);
860 	}
861 
862 	iep->ptp_info = icss_iep_ptp_info;
863 	mutex_init(&iep->ptp_clk_mutex);
864 	spin_lock_init(&iep->irq_lock);
865 	dev_set_drvdata(dev, iep);
866 	icss_iep_disable(iep);
867 
868 	return 0;
869 }
870 
am654_icss_iep_valid_reg(struct device * dev,unsigned int reg)871 static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
872 {
873 	switch (reg) {
874 	case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
875 		return true;
876 	default:
877 		return false;
878 	}
879 
880 	return false;
881 }
882 
/* regmap .reg_write: translate the logical register index to the
 * SoC-specific MMIO offset and write the value.
 */
static int icss_iep_regmap_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct icss_iep *iep = context;

	writel(val, iep->base + iep->plat_data->reg_offs[reg]);

	return 0;
}
892 
/* regmap .reg_read: translate the logical register index to the
 * SoC-specific MMIO offset and read the value.
 */
static int icss_iep_regmap_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct icss_iep *iep = context;

	*val = readl(iep->base + iep->plat_data->reg_offs[reg]);

	return 0;
}
902 
/* MMIO-backed regmap over the logical register enum.
 * NOTE(review): not const because icss_iep_plat_data::config is a
 * non-const pointer; both could be constified together.
 */
static struct regmap_config am654_icss_iep_regmap_config = {
	.name = "icss iep",
	.reg_stride = 1,
	.reg_write = icss_iep_regmap_write,
	.reg_read = icss_iep_regmap_read,
	.writeable_reg = am654_icss_iep_valid_reg,
	.readable_reg = am654_icss_iep_valid_reg,
	.fast_io = 1,
};
912 
/* AM654-class ICSSG IEP: 64-bit counter, slow compensation and shadow
 * mode supported; SoC-specific MMIO offset for each logical register.
 */
static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
	.flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
		 ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
		 ICSS_IEP_SHADOW_MODE_SUPPORT,
	.reg_offs = {
		[ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
		[ICSS_IEP_COMPEN_REG] = 0x08,
		[ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
		[ICSS_IEP_COUNT_REG0] = 0x10,
		[ICSS_IEP_COUNT_REG1] = 0x14,
		[ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
		[ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,

		[ICSS_IEP_CAP6_RISE_REG0] = 0x50,
		[ICSS_IEP_CAP6_RISE_REG1] = 0x54,

		[ICSS_IEP_CAP7_RISE_REG0] = 0x60,
		[ICSS_IEP_CAP7_RISE_REG1] = 0x64,

		[ICSS_IEP_CMP_CFG_REG] = 0x70,
		[ICSS_IEP_CMP_STAT_REG] = 0x74,
		[ICSS_IEP_CMP0_REG0] = 0x78,
		[ICSS_IEP_CMP0_REG1] = 0x7c,
		[ICSS_IEP_CMP1_REG0] = 0x80,
		[ICSS_IEP_CMP1_REG1] = 0x84,

		[ICSS_IEP_CMP8_REG0] = 0xc0,
		[ICSS_IEP_CMP8_REG1] = 0xc4,
		[ICSS_IEP_SYNC_CTRL_REG] = 0x180,
		[ICSS_IEP_SYNC0_STAT_REG] = 0x188,
		[ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
		[ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
		[ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
		[ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
		[ICSS_IEP_SYNC_START_REG] = 0x19c,
	},
	.config = &am654_icss_iep_regmap_config,
};
951 
static const struct of_device_id icss_iep_of_match[] = {
	{
		.compatible = "ti,am654-icss-iep",
		.data = &am654_icss_iep_plat_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);

/* No .remove callback: all resources are devm-managed and probe leaves
 * the counter disabled; clients stop the IEP via icss_iep_exit().
 */
static struct platform_driver icss_iep_driver = {
	.driver = {
		.name = "icss-iep",
		.of_match_table = icss_iep_of_match,
	},
	.probe = icss_iep_probe,
};
module_platform_driver(icss_iep_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI ICSS IEP driver");
MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
974