// SPDX-License-Identifier: GPL-2.0

/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
 *
 * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/timekeeping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>

#include "icss_iep.h"

#define IEP_MAX_DEF_INC		0xf
#define IEP_MAX_COMPEN_INC	0xfff
#define IEP_MAX_COMPEN_COUNT	0xffffff

#define IEP_GLOBAL_CFG_CNT_ENABLE		BIT(0)
#define IEP_GLOBAL_CFG_DEFAULT_INC_MASK		GENMASK(7, 4)
#define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT	4
#define IEP_GLOBAL_CFG_COMPEN_INC_MASK		GENMASK(19, 8)
#define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT		8

#define IEP_GLOBAL_STATUS_CNT_OVF	BIT(0)

#define IEP_CMP_CFG_SHADOW_EN		BIT(17)
#define IEP_CMP_CFG_CMP0_RST_CNT_EN	BIT(0)
#define IEP_CMP_CFG_CMP_EN(cmp)		(GENMASK(16, 1) & (1 << ((cmp) + 1)))
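/* CMP0..CMP15 enables occupy bits 1..16 of the CMP_CFG register; bit 0
 * instead enables resetting the counter on a CMP0 event.
 */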

#define IEP_CMP_STATUS(cmp)		(1 << (cmp))

#define IEP_SYNC_CTRL_SYNC_EN		BIT(0)
#define IEP_SYNC_CTRL_SYNC_N_EN(n)	(GENMASK(2, 1) & (BIT(1) << (n)))

#define IEP_MIN_CMP	0
#define IEP_MAX_CMP	15

#define ICSS_IEP_64BIT_COUNTER_SUPPORT		BIT(0)
#define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT	BIT(1)
#define ICSS_IEP_SHADOW_MODE_SUPPORT		BIT(2)

#define LATCH_INDEX(ts_index)			((ts_index) + 6)
#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n)	BIT(LATCH_INDEX(n))
#define IEP_CAP_CFG_CAP_ASYNC_EN(n)		BIT(LATCH_INDEX(n) + 10)
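/* The two external timestamp inputs use IEP capture events 6 and 7:
 * LATCH_INDEX() maps ts_index 0 to CAP6 and ts_index 1 to CAP7 (see the
 * ICSS_IEP_CAP6/7_RISE_REG* offsets below).
 */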

enum {
	ICSS_IEP_GLOBAL_CFG_REG,
	ICSS_IEP_GLOBAL_STATUS_REG,
	ICSS_IEP_COMPEN_REG,
	ICSS_IEP_SLOW_COMPEN_REG,
	ICSS_IEP_COUNT_REG0,
	ICSS_IEP_COUNT_REG1,
	ICSS_IEP_CAPTURE_CFG_REG,
	ICSS_IEP_CAPTURE_STAT_REG,

	ICSS_IEP_CAP6_RISE_REG0,
	ICSS_IEP_CAP6_RISE_REG1,

	ICSS_IEP_CAP7_RISE_REG0,
	ICSS_IEP_CAP7_RISE_REG1,

	ICSS_IEP_CMP_CFG_REG,
	ICSS_IEP_CMP_STAT_REG,
	ICSS_IEP_CMP0_REG0,
	ICSS_IEP_CMP0_REG1,
	ICSS_IEP_CMP1_REG0,
	ICSS_IEP_CMP1_REG1,

	ICSS_IEP_CMP8_REG0,
	ICSS_IEP_CMP8_REG1,
	ICSS_IEP_SYNC_CTRL_REG,
	ICSS_IEP_SYNC0_STAT_REG,
	ICSS_IEP_SYNC1_STAT_REG,
	ICSS_IEP_SYNC_PWIDTH_REG,
	ICSS_IEP_SYNC0_PERIOD_REG,
	ICSS_IEP_SYNC1_DELAY_REG,
	ICSS_IEP_SYNC_START_REG,
	ICSS_IEP_MAX_REGS,
};

/**
 * struct icss_iep_plat_data - Plat data to handle SoC variants
 * @config: Regmap configuration data
 * @reg_offs: register offsets to capture offset differences across SoCs
 * @flags: Flags to represent IEP properties
 */
struct icss_iep_plat_data {
	struct regmap_config *config;
	u32 reg_offs[ICSS_IEP_MAX_REGS];
	u32 flags;
};

struct icss_iep {
	struct device *dev;
	void __iomem *base;
	const struct icss_iep_plat_data *plat_data;
	struct regmap *map;
	struct device_node *client_np;
	unsigned long refclk_freq;
	int clk_tick_time;	/* one refclk tick time in ns */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct mutex ptp_clk_mutex;	/* PHC access serializer */
	u32 def_inc;
	s16 slow_cmp_inc;
	u32 slow_cmp_count;
	const struct icss_iep_clockops *ops;
	void *clockops_data;
	u32 cycle_time_ns;
	u32 perout_enabled;
	bool pps_enabled;
	int cap_cmp_irq;
	u64 period;
	u32 latch_enable;
};

/**
 * icss_iep_get_count_hi() - Get the upper 32 bits of the IEP counter
 * @iep: Pointer to structure representing IEP.
 *
 * Return: upper 32 bits of the IEP counter
 */
int icss_iep_get_count_hi(struct icss_iep *iep)
{
	u32 val = 0;

	if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);

	return val;
}
EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);

/**
 * icss_iep_get_count_low() - Get the lower 32 bits of the IEP counter
 * @iep: Pointer to structure representing IEP.
 *
 * Return: lower 32 bits of the IEP counter
 */
int icss_iep_get_count_low(struct icss_iep *iep)
{
	u32 val = 0;

	if (iep)
		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);

	return val;
}
EXPORT_SYMBOL_GPL(icss_iep_get_count_low);

/**
 * icss_iep_get_ptp_clock_idx() - Get the PTP clock index of the IEP's PHC
 * @iep: Pointer to structure representing IEP.
 *
 * Return: PTP clock index, -1 if not registered
 */
int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
{
	if (!iep || !iep->ptp_clock)
		return -1;
	return ptp_clock_index(iep->ptp_clock);
}
EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);

static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
{
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		writel(upper_32_bits(ns), iep->base +
		       iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
	writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
}

static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);

/**
 * icss_iep_settime() - Set time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @ns: Time to be set in nanoseconds
 *
 * This API uses writel() instead of regmap_write() for write operations as
 * regmap_write() is too slow and this API is time sensitive.
 */
static void icss_iep_settime(struct icss_iep *iep, u64 ns)
{
	if (iep->ops && iep->ops->settime) {
		iep->ops->settime(iep->clockops_data, ns);
		return;
	}

	if (iep->pps_enabled || iep->perout_enabled)
		writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);

	icss_iep_set_counter(iep, ns);

	if (iep->pps_enabled || iep->perout_enabled) {
		icss_iep_update_to_next_boundary(iep, ns);
		writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
		       iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
	}
}

/**
 * icss_iep_gettime() - Get time of the PTP clock using IEP driver
 * @iep: Pointer to structure representing IEP.
 * @sts: Pointer to structure representing PTP system timestamp.
 *
 * This API uses readl() instead of regmap_read() for read operations as
 * regmap_read() is too slow and this API is time sensitive.
 *
 * Return: The current timestamp of the PTP clock using IEP driver
 */
static u64 icss_iep_gettime(struct icss_iep *iep,
			    struct ptp_system_timestamp *sts)
{
	u32 ts_hi = 0, ts_lo;
	unsigned long flags;

	if (iep->ops && iep->ops->gettime)
		return iep->ops->gettime(iep->clockops_data, sts);

	/* use local_irq_x() to make it work for both RT/non-RT */
	local_irq_save(flags);

	/* no need to play with hi-lo, hi is latched when lo is read */
	ptp_read_system_prets(sts);
	ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
	ptp_read_system_postts(sts);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);

	local_irq_restore(flags);

	return (u64)ts_lo | (u64)ts_hi << 32;
}

static void icss_iep_enable(struct icss_iep *iep)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_CNT_ENABLE,
			   IEP_GLOBAL_CFG_CNT_ENABLE);
}

static void icss_iep_disable(struct icss_iep *iep)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_CNT_ENABLE,
			   0);
}

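/* Shadow-mode setup below: CMP0 is programmed to the firmware cycle time
 * and a CMP0 match resets the counter, so the IEP counts one cycle_time_ns
 * period at a time instead of free running.
 */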
static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
{
	u32 cycle_time;
	int cmp;

	cycle_time = iep->cycle_time_ns - iep->def_inc;

	icss_iep_disable(iep);

	/* disable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, 0);

	/* enable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);

	/* clear counters */
	icss_iep_set_counter(iep, 0);

	/* clear overflow status */
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
			   IEP_GLOBAL_STATUS_CNT_OVF,
			   IEP_GLOBAL_STATUS_CNT_OVF);

	/* clear compare status */
	for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
		regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
				   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));

		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(cmp), 0);
	}

	/* enable reset counter on CMP0 event */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN);
	/* enable compare */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP_EN(0),
			   IEP_CMP_CFG_CMP_EN(0));

	/* set CMP0 value to cycle time */
	regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);

	icss_iep_set_counter(iep, 0);
	icss_iep_enable(iep);
}

static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
{
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_DEFAULT_INC_MASK,
			   def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT);
}

static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
{
	struct device *dev = regmap_get_device(iep->map);

	if (compen_inc > IEP_MAX_COMPEN_INC) {
		dev_err(dev, "%s: too high compensation inc %d\n",
			__func__, compen_inc);
		compen_inc = IEP_MAX_COMPEN_INC;
	}

	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_COMPEN_INC_MASK,
			   compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT);
}

static void icss_iep_set_compensation_count(struct icss_iep *iep,
					    u32 compen_count)
{
	struct device *dev = regmap_get_device(iep->map);

	if (compen_count > IEP_MAX_COMPEN_COUNT) {
		dev_err(dev, "%s: too high compensation count %d\n",
			__func__, compen_count);
		compen_count = IEP_MAX_COMPEN_COUNT;
	}

	regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
}

static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
						 u32 compen_count)
{
	regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
}

/* PTP PHC operations */
static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u32 cyc_count;
	u16 cmp_inc;

	mutex_lock(&iep->ptp_clk_mutex);

	/* ppb is the amount of frequency adjustment requested, in parts per
	 * billion relative to 1 GHz (a billion ns per second).
	 * e.g. 100 ppb means we need to speed the clock up by 100 Hz,
	 * i.e. at the end of 1 second (1 billion ns) of clock time we
	 * should have counted 100 more ns.
	 * We use IEP slow compensation to achieve continuous freq. adjustment.
	 * There are 2 parts: cycle time and adjustment per cycle.
	 * The simplest case would be a 1 sec cycle time; the adjustment
	 * per cycle would then be (def_inc + ppb).
	 * The cycle time must be chosen based on the magnitude of the ppb:
	 * the smaller the ppb, the larger the cycle time.
	 * The minimum adjustment we can do is +-1 ns per cycle, so
	 * reduce the cycle time to get a 1 ns per cycle adjustment:
	 * 1 ppb = 1 sec cycle time & 1 ns adjust per cycle
	 * 1000 ppb = 1/1000 sec cycle time & 1 ns adjust per cycle
	 */
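	/* Worked example (a sketch, assuming a 250 MHz refclk, i.e.
	 * def_inc = clk_tick_time = 4 ns, and cycle_time_ns == 0):
	 * ppb = 100 gives slow_cmp_inc = 1 and
	 * cyc_count = 1e9 / 100 / 4 = 2,500,000, i.e. one extra ns is
	 * inserted every 2.5M IEP cycles (every 10 ms), adding 100 ns/s.
	 */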

	if (iep->cycle_time_ns)
		iep->slow_cmp_inc = iep->clk_tick_time;	/* 4ns adj per cycle */
	else
		iep->slow_cmp_inc = 1;	/* 1ns adjust per cycle */

	if (ppb < 0) {
		iep->slow_cmp_inc = -iep->slow_cmp_inc;
		ppb = -ppb;
	}

	if (!ppb) {
		/* No frequency offset requested: program the nominal
		 * increment so compensation cycles have no net effect and
		 * skip the division by zero below.
		 */
		icss_iep_set_compensation_inc(iep, iep->clk_tick_time);
		goto out;
	}

	cyc_count = NSEC_PER_SEC;		/* 1s cycle time @1GHz */
	cyc_count /= ppb;			/* cycle time per ppb */

	/* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
	if (!iep->cycle_time_ns)
		cyc_count /= iep->clk_tick_time;
	iep->slow_cmp_count = cyc_count;

	/* iep->clk_tick_time is def_inc */
	cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
	icss_iep_set_compensation_inc(iep, cmp_inc);
	icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);

out:
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	s64 ns;

	mutex_lock(&iep->ptp_clk_mutex);
	if (iep->ops && iep->ops->adjtime) {
		iep->ops->adjtime(iep->clockops_data, delta);
	} else {
		ns = icss_iep_gettime(iep, NULL);
		ns += delta;
		icss_iep_settime(iep, ns);
	}
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	u64 ns;

	mutex_lock(&iep->ptp_clk_mutex);
	ns = icss_iep_gettime(iep, sts);
	*ts = ns_to_timespec64(ns);
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
	u64 ns;

	mutex_lock(&iep->ptp_clk_mutex);
	ns = timespec64_to_ns(ts);
	icss_iep_settime(iep, ns);
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}

static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
{
	u64 ns, p_ns;
	u32 offset;

	ns = icss_iep_gettime(iep, NULL);
	if (start_ns < ns)
		start_ns = ns;
	p_ns = iep->period;
	/* Round up to the next period boundary: after do_div(), start_ns
	 * holds the quotient and offset the remainder, so quotient * p_ns
	 * is the boundary.
	 */
	start_ns += p_ns - 1;
	offset = do_div(start_ns, p_ns);
	start_ns = start_ns * p_ns;
	/* If it is too close to update, shift to next boundary */
	if (p_ns - offset < 10)
		start_ns += p_ns;

	regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
}
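
/* Periodic output wiring (as used below): CMP1 holds the time of the next
 * period boundary and its match drives the SYNC0 output; pulse width and
 * start are programmed in IEP clock ticks, hence the div_u64() by def_inc.
 */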

static int icss_iep_perout_enable_hw(struct icss_iep *iep,
				     struct ptp_perout_request *req, int on)
{
	struct timespec64 ts;
	u64 ns_start;
	u64 ns_width;
	int ret;
	u64 cmp;

	if (!on) {
		/* Disable CMP 1 */
		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(1), 0);

		/* clear CMP regs */
		regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
		if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
			regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);

		/* Disable sync */
		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);

		return 0;
	}

	/* Calculate width of the signal for PPS/PEROUT handling */
	ts.tv_sec = req->on.sec;
	ts.tv_nsec = req->on.nsec;
	ns_width = timespec64_to_ns(&ts);

	if (req->flags & PTP_PEROUT_PHASE) {
		ts.tv_sec = req->phase.sec;
		ts.tv_nsec = req->phase.nsec;
		ns_start = timespec64_to_ns(&ts);
	} else {
		ns_start = 0;
	}

	if (iep->ops && iep->ops->perout_enable) {
		ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
		if (ret)
			return ret;

		/* Configure CMP */
		regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
		if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
			regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
		/* Configure SYNC, based on req on width */
		regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
			     div_u64(ns_width, iep->def_inc));
		regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
		regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
			     div_u64(ns_start, iep->def_inc));
		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
		/* Enable CMP 1 */
		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
	} else {
		u64 start_ns;

		iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
			      req->period.nsec;
		start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
			   + req->period.nsec;
		icss_iep_update_to_next_boundary(iep, start_ns);

		regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
			     div_u64(ns_width, iep->def_inc));
		regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
			     div_u64(ns_start, iep->def_inc));
		/* Enable Sync in single shot mode */
		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
			     IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
		/* Enable CMP 1 */
		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
	}

	return 0;
}

static int icss_iep_perout_enable(struct icss_iep *iep,
				  struct ptp_perout_request *req, int on)
{
	int ret = 0;

	if (!on)
		goto disable;

	/* Reject requests with unsupported flags */
	if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
			   PTP_PEROUT_PHASE))
		return -EOPNOTSUPP;

	/* Set default "on" time (1ms) for the signal if not passed by the app */
	if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
		req->on.sec = 0;
		req->on.nsec = NSEC_PER_MSEC;
	}

disable:
	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->pps_enabled) {
		ret = -EBUSY;
		goto exit;
	}

	if (iep->perout_enabled == !!on)
		goto exit;

	ret = icss_iep_perout_enable_hw(iep, req, on);
	if (!ret)
		iep->perout_enabled = !!on;

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}
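
/* PPS is implemented on top of the periodic output machinery: a 1 s period
 * starting on a full second at least one second in the future, with a 1 ms
 * wide pulse.
 */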

static int icss_iep_pps_enable(struct icss_iep *iep, int on)
{
	struct ptp_clock_request rq;
	struct timespec64 ts;
	int ret = 0;
	u64 ns;

	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->perout_enabled) {
		ret = -EBUSY;
		goto exit;
	}

	if (iep->pps_enabled == !!on)
		goto exit;

	rq.perout.index = 0;
	if (on) {
		ns = icss_iep_gettime(iep, NULL);
		ts = ns_to_timespec64(ns);
		rq.perout.flags = 0;
		rq.perout.period.sec = 1;
		rq.perout.period.nsec = 0;
		rq.perout.start.sec = ts.tv_sec + 2;
		rq.perout.start.nsec = 0;
		rq.perout.on.sec = 0;
		rq.perout.on.nsec = NSEC_PER_MSEC;
		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
	} else {
		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
	}

	if (!ret)
		iep->pps_enabled = !!on;

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}

static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
	u32 val, cap, ret = 0;

	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->ops && iep->ops->extts_enable) {
		ret = iep->ops->extts_enable(iep->clockops_data, index, on);
		goto exit;
	}

	if (((iep->latch_enable & BIT(index)) >> index) == on)
		goto exit;

	regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
	cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
	if (on) {
		val |= cap;
		iep->latch_enable |= BIT(index);
	} else {
		val &= ~cap;
		iep->latch_enable &= ~BIT(index);
	}
	regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}

static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
			       struct ptp_clock_request *rq, int on)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		return icss_iep_perout_enable(iep, &rq->perout, on);
	case PTP_CLK_REQ_PPS:
		return icss_iep_pps_enable(iep, on);
	case PTP_CLK_REQ_EXTTS:
		return icss_iep_extts_enable(iep, rq->extts.index, on);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

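/* The PHC registered from this template is exposed to user space as
 * /dev/ptp<N>, where <N> matches icss_iep_get_ptp_clock_idx().
 */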
static struct ptp_clock_info icss_iep_ptp_info = {
	.owner = THIS_MODULE,
	.name = "ICSS IEP timer",
	.max_adj = 10000000,
	.adjfine = icss_iep_ptp_adjfine,
	.adjtime = icss_iep_ptp_adjtime,
	.gettimex64 = icss_iep_ptp_gettimeex,
	.settime64 = icss_iep_ptp_settime,
	.enable = icss_iep_ptp_enable,
};

struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
{
	struct platform_device *pdev;
	struct device_node *iep_np;
	struct icss_iep *iep;

	iep_np = of_parse_phandle(np, "ti,iep", idx);
	if (!iep_np || !of_device_is_available(iep_np))
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(iep_np);
	of_node_put(iep_np);

	if (!pdev)
		/* probably IEP not yet probed */
		return ERR_PTR(-EPROBE_DEFER);

	iep = platform_get_drvdata(pdev);
	if (!iep)
		return ERR_PTR(-EPROBE_DEFER);

	device_lock(iep->dev);
	if (iep->client_np) {
		device_unlock(iep->dev);
		dev_err(iep->dev, "IEP is already acquired by %s",
			iep->client_np->name);
		return ERR_PTR(-EBUSY);
	}
	iep->client_np = np;
	device_unlock(iep->dev);
	get_device(iep->dev);

	return iep;
}
EXPORT_SYMBOL_GPL(icss_iep_get_idx);

struct icss_iep *icss_iep_get(struct device_node *np)
{
	return icss_iep_get_idx(np, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_get);

void icss_iep_put(struct icss_iep *iep)
{
	device_lock(iep->dev);
	iep->client_np = NULL;
	device_unlock(iep->dev);
	put_device(iep->dev);
}
EXPORT_SYMBOL_GPL(icss_iep_put);
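
/* Typical client usage, as a sketch (error handling omitted; my_clkops,
 * my_data and cycle_time_ns stand in for the client's own clockops,
 * private data and cycle time):
 *
 *	iep = icss_iep_get(client_np);
 *	icss_iep_init(iep, &my_clkops, my_data, cycle_time_ns);
 *	...
 *	icss_iep_exit(iep);
 *	icss_iep_put(iep);
 */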

void icss_iep_init_fw(struct icss_iep *iep)
{
	/* start IEP for FW use in raw 64bit mode, no PTP support */
	iep->clk_tick_time = iep->def_inc;
	iep->cycle_time_ns = 0;
	iep->ops = NULL;
	iep->clockops_data = NULL;
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	icss_iep_enable(iep);
	icss_iep_settime(iep, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_init_fw);

void icss_iep_exit_fw(struct icss_iep *iep)
{
	icss_iep_disable(iep);
}
EXPORT_SYMBOL_GPL(icss_iep_exit_fw);

int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
		  void *clockops_data, u32 cycle_time_ns)
{
	int ret = 0;

	iep->cycle_time_ns = cycle_time_ns;
	iep->clk_tick_time = iep->def_inc;
	iep->ops = clkops;
	iep->clockops_data = clockops_data;
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
	    !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
		goto skip_perout;

	if (iep->ops && iep->ops->perout_enable) {
		iep->ptp_info.n_per_out = 1;
		iep->ptp_info.pps = 1;
	}

	if (iep->ops && iep->ops->extts_enable)
		iep->ptp_info.n_ext_ts = 2;

skip_perout:
	if (cycle_time_ns)
		icss_iep_enable_shadow_mode(iep);
	else
		icss_iep_enable(iep);
	icss_iep_settime(iep, ktime_get_real_ns());

	iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
	if (IS_ERR(iep->ptp_clock)) {
		ret = PTR_ERR(iep->ptp_clock);
		iep->ptp_clock = NULL;
		dev_err(iep->dev, "Failed to register ptp clk %d\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(icss_iep_init);

int icss_iep_exit(struct icss_iep *iep)
{
	if (iep->ptp_clock) {
		ptp_clock_unregister(iep->ptp_clock);
		iep->ptp_clock = NULL;
	}
	icss_iep_disable(iep);

	if (iep->pps_enabled)
		icss_iep_pps_enable(iep, false);
	else if (iep->perout_enabled)
		icss_iep_perout_enable(iep, NULL, false);

	return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);

static int icss_iep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct icss_iep *iep;
	struct clk *iep_clk;

	iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
	if (!iep)
		return -ENOMEM;

	iep->dev = dev;
	iep->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iep->base))
		return -ENODEV;

	iep_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(iep_clk))
		return PTR_ERR(iep_clk);

	iep->refclk_freq = clk_get_rate(iep_clk);

	iep->def_inc = NSEC_PER_SEC / iep->refclk_freq;	/* ns per clock tick */
	if (iep->def_inc > IEP_MAX_DEF_INC) {
		dev_err(dev, "Failed to set def_inc %d. IEP clock is too slow to be supported\n",
			iep->def_inc);
		return -EINVAL;
	}

	iep->plat_data = device_get_match_data(dev);
	if (!iep->plat_data)
		return -EINVAL;

	iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
	if (IS_ERR(iep->map)) {
		dev_err(dev, "Failed to create regmap for IEP %ld\n",
			PTR_ERR(iep->map));
		return PTR_ERR(iep->map);
	}

	iep->ptp_info = icss_iep_ptp_info;
	mutex_init(&iep->ptp_clk_mutex);
	dev_set_drvdata(dev, iep);
	icss_iep_disable(iep);

	return 0;
}

static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
		return true;
	default:
		return false;
	}
}

static int icss_iep_regmap_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct icss_iep *iep = context;

	writel(val, iep->base + iep->plat_data->reg_offs[reg]);

	return 0;
}

static int icss_iep_regmap_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct icss_iep *iep = context;

	*val = readl(iep->base + iep->plat_data->reg_offs[reg]);

	return 0;
}
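
/* Indirect reg_read/reg_write accessors let a single regmap description
 * cover SoCs whose IEP register offsets differ; the per-SoC offsets live
 * in plat_data->reg_offs.
 */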

static struct regmap_config am654_icss_iep_regmap_config = {
	.name = "icss iep",
	.reg_stride = 1,
	.reg_write = icss_iep_regmap_write,
	.reg_read = icss_iep_regmap_read,
	.writeable_reg = am654_icss_iep_valid_reg,
	.readable_reg = am654_icss_iep_valid_reg,
	.fast_io = 1,
};

static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
	.flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
		 ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
		 ICSS_IEP_SHADOW_MODE_SUPPORT,
	.reg_offs = {
		[ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
		[ICSS_IEP_COMPEN_REG] = 0x08,
		[ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
		[ICSS_IEP_COUNT_REG0] = 0x10,
		[ICSS_IEP_COUNT_REG1] = 0x14,
		[ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
		[ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,

		[ICSS_IEP_CAP6_RISE_REG0] = 0x50,
		[ICSS_IEP_CAP6_RISE_REG1] = 0x54,

		[ICSS_IEP_CAP7_RISE_REG0] = 0x60,
		[ICSS_IEP_CAP7_RISE_REG1] = 0x64,

		[ICSS_IEP_CMP_CFG_REG] = 0x70,
		[ICSS_IEP_CMP_STAT_REG] = 0x74,
		[ICSS_IEP_CMP0_REG0] = 0x78,
		[ICSS_IEP_CMP0_REG1] = 0x7c,
		[ICSS_IEP_CMP1_REG0] = 0x80,
		[ICSS_IEP_CMP1_REG1] = 0x84,

		[ICSS_IEP_CMP8_REG0] = 0xc0,
		[ICSS_IEP_CMP8_REG1] = 0xc4,
		[ICSS_IEP_SYNC_CTRL_REG] = 0x180,
		[ICSS_IEP_SYNC0_STAT_REG] = 0x188,
		[ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
		[ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
		[ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
		[ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
		[ICSS_IEP_SYNC_START_REG] = 0x19c,
	},
	.config = &am654_icss_iep_regmap_config,
};

static const struct of_device_id icss_iep_of_match[] = {
	{
		.compatible = "ti,am654-icss-iep",
		.data = &am654_icss_iep_plat_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);

static struct platform_driver icss_iep_driver = {
	.driver = {
		.name = "icss-iep",
		.of_match_table = icss_iep_of_match,
	},
	.probe = icss_iep_probe,
};
module_platform_driver(icss_iep_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI ICSS IEP driver");
MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");