xref: /openbmc/linux/drivers/net/ethernet/ti/icssg/icss_iep.c (revision 0f9b4c3ca5fdf3e177266ef994071b1a03f07318)
1  // SPDX-License-Identifier: GPL-2.0
2  
3  /* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
4   *
5   * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
6   *
7   */
8  
9  #include <linux/bitops.h>
10  #include <linux/clk.h>
11  #include <linux/err.h>
12  #include <linux/io.h>
13  #include <linux/module.h>
14  #include <linux/of.h>
15  #include <linux/of_platform.h>
16  #include <linux/platform_device.h>
17  #include <linux/timekeeping.h>
18  #include <linux/interrupt.h>
19  #include <linux/of_irq.h>
20  
21  #include "icss_iep.h"
22  
/* Limits imposed by the field widths in the IEP GLOBAL_CFG register */
#define IEP_MAX_DEF_INC		0xf
#define IEP_MAX_COMPEN_INC		0xfff
#define IEP_MAX_COMPEN_COUNT	0xffffff

#define IEP_GLOBAL_CFG_CNT_ENABLE	BIT(0)
#define IEP_GLOBAL_CFG_DEFAULT_INC_MASK		GENMASK(7, 4)
#define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT	4
#define IEP_GLOBAL_CFG_COMPEN_INC_MASK		GENMASK(19, 8)
#define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT		8

#define IEP_GLOBAL_STATUS_CNT_OVF	BIT(0)

#define IEP_CMP_CFG_SHADOW_EN		BIT(17)
#define IEP_CMP_CFG_CMP0_RST_CNT_EN	BIT(0)
/* CMP enable bits occupy [16:1]; the mask silently clamps out-of-range cmp */
#define IEP_CMP_CFG_CMP_EN(cmp)		(GENMASK(16, 1) & (1 << ((cmp) + 1)))

#define IEP_CMP_STATUS(cmp)		(1 << (cmp))

#define IEP_SYNC_CTRL_SYNC_EN		BIT(0)
/* SYNC0/SYNC1 enables occupy bits [2:1]; the mask clamps out-of-range n */
#define IEP_SYNC_CTRL_SYNC_N_EN(n)	(GENMASK(2, 1) & (BIT(1) << (n)))

#define IEP_MIN_CMP	0
#define IEP_MAX_CMP	15

/* Bits for icss_iep_plat_data::flags describing per-SoC IEP capabilities */
#define ICSS_IEP_64BIT_COUNTER_SUPPORT		BIT(0)
#define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT	BIT(1)
#define ICSS_IEP_SHADOW_MODE_SUPPORT		BIT(2)

/* extts index n is serviced by hardware capture channel CAP6+n */
#define LATCH_INDEX(ts_index)			((ts_index) + 6)
#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n)	BIT(LATCH_INDEX(n))
#define IEP_CAP_CFG_CAP_ASYNC_EN(n)		BIT(LATCH_INDEX(n) + 10)
54  
/* Logical register indices used with the regmap; the per-SoC MMIO offset for
 * each index lives in icss_iep_plat_data::reg_offs.
 */
enum {
	ICSS_IEP_GLOBAL_CFG_REG,
	ICSS_IEP_GLOBAL_STATUS_REG,
	ICSS_IEP_COMPEN_REG,
	ICSS_IEP_SLOW_COMPEN_REG,
	ICSS_IEP_COUNT_REG0,
	ICSS_IEP_COUNT_REG1,
	ICSS_IEP_CAPTURE_CFG_REG,
	ICSS_IEP_CAPTURE_STAT_REG,

	ICSS_IEP_CAP6_RISE_REG0,
	ICSS_IEP_CAP6_RISE_REG1,

	ICSS_IEP_CAP7_RISE_REG0,
	ICSS_IEP_CAP7_RISE_REG1,

	ICSS_IEP_CMP_CFG_REG,
	ICSS_IEP_CMP_STAT_REG,
	ICSS_IEP_CMP0_REG0,
	ICSS_IEP_CMP0_REG1,
	ICSS_IEP_CMP1_REG0,
	ICSS_IEP_CMP1_REG1,

	ICSS_IEP_CMP8_REG0,
	ICSS_IEP_CMP8_REG1,
	ICSS_IEP_SYNC_CTRL_REG,
	ICSS_IEP_SYNC0_STAT_REG,
	ICSS_IEP_SYNC1_STAT_REG,
	ICSS_IEP_SYNC_PWIDTH_REG,
	ICSS_IEP_SYNC0_PERIOD_REG,
	ICSS_IEP_SYNC1_DELAY_REG,
	ICSS_IEP_SYNC_START_REG,
	ICSS_IEP_MAX_REGS,
};
89  
/**
 * struct icss_iep_plat_data - Plat data to handle SoC variants
 * @config: Regmap configuration data
 * @reg_offs: register offsets to capture offset differences across SoCs,
 *            indexed by the ICSS_IEP_*_REG enum
 * @flags: Flags to represent IEP properties (ICSS_IEP_*_SUPPORT bits)
 */
struct icss_iep_plat_data {
	struct regmap_config *config;
	u32 reg_offs[ICSS_IEP_MAX_REGS];
	u32 flags;
};
101  
/* Runtime state of one IEP instance */
struct icss_iep {
	struct device *dev;
	void __iomem *base;
	const struct icss_iep_plat_data *plat_data;
	struct regmap *map;
	struct device_node *client_np;	/* exclusive owner, see icss_iep_get_idx() */
	unsigned long refclk_freq;	/* functional clock rate in Hz */
	int clk_tick_time;	/* one refclk tick time in ns */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	struct mutex ptp_clk_mutex;	/* PHC access serializer */
	u32 def_inc;		/* nominal counter increment per tick, ns */
	s16 slow_cmp_inc;	/* signed ns tweak applied per slow-comp cycle */
	u32 slow_cmp_count;	/* slow compensation cycle count */
	const struct icss_iep_clockops *ops;	/* optional firmware-assisted clock ops */
	void *clockops_data;	/* opaque cookie passed to @ops callbacks */
	u32 cycle_time_ns;	/* shadow-mode cycle time; 0 = free running */
	u32 perout_enabled;
	bool pps_enabled;
	int cap_cmp_irq;	/* NOTE(review): not used in this file - confirm consumer */
	u64 period;		/* perout/PPS period in ns */
	u32 latch_enable;	/* bitmask of enabled extts latches */
};
125  
126  /**
127   * icss_iep_get_count_hi() - Get the upper 32 bit IEP counter
128   * @iep: Pointer to structure representing IEP.
129   *
130   * Return: upper 32 bit IEP counter
131   */
icss_iep_get_count_hi(struct icss_iep * iep)132  int icss_iep_get_count_hi(struct icss_iep *iep)
133  {
134  	u32 val = 0;
135  
136  	if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
137  		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
138  
139  	return val;
140  }
141  EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);
142  
143  /**
144   * icss_iep_get_count_low() - Get the lower 32 bit IEP counter
145   * @iep: Pointer to structure representing IEP.
146   *
147   * Return: lower 32 bit IEP counter
148   */
icss_iep_get_count_low(struct icss_iep * iep)149  int icss_iep_get_count_low(struct icss_iep *iep)
150  {
151  	u32 val = 0;
152  
153  	if (iep)
154  		val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
155  
156  	return val;
157  }
158  EXPORT_SYMBOL_GPL(icss_iep_get_count_low);
159  
160  /**
161   * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
162   * @iep: Pointer to structure representing IEP.
163   *
164   * Return: PTP clock index, -1 if not registered
165   */
icss_iep_get_ptp_clock_idx(struct icss_iep * iep)166  int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
167  {
168  	if (!iep || !iep->ptp_clock)
169  		return -1;
170  	return ptp_clock_index(iep->ptp_clock);
171  }
172  EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);
173  
/* Load a raw 64-bit value into the IEP counter.  The upper word is written
 * first; it only exists on 64-bit capable instances.
 */
static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
{
	const u32 *offs = iep->plat_data->reg_offs;

	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		writel(upper_32_bits(ns), iep->base + offs[ICSS_IEP_COUNT_REG1]);

	writel(lower_32_bits(ns), iep->base + offs[ICSS_IEP_COUNT_REG0]);
}
181  
182  static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
183  
184  /**
185   * icss_iep_settime() - Set time of the PTP clock using IEP driver
186   * @iep: Pointer to structure representing IEP.
187   * @ns: Time to be set in nanoseconds
188   *
189   * This API uses writel() instead of regmap_write() for write operations as
190   * regmap_write() is too slow and this API is time sensitive.
191   */
icss_iep_settime(struct icss_iep * iep,u64 ns)192  static void icss_iep_settime(struct icss_iep *iep, u64 ns)
193  {
194  	if (iep->ops && iep->ops->settime) {
195  		iep->ops->settime(iep->clockops_data, ns);
196  		return;
197  	}
198  
199  	if (iep->pps_enabled || iep->perout_enabled)
200  		writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
201  
202  	icss_iep_set_counter(iep, ns);
203  
204  	if (iep->pps_enabled || iep->perout_enabled) {
205  		icss_iep_update_to_next_boundary(iep, ns);
206  		writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
207  		       iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
208  	}
209  }
210  
211  /**
212   * icss_iep_gettime() - Get time of the PTP clock using IEP driver
213   * @iep: Pointer to structure representing IEP.
214   * @sts: Pointer to structure representing PTP system timestamp.
215   *
216   * This API uses readl() instead of regmap_read() for read operations as
217   * regmap_read() is too slow and this API is time sensitive.
218   *
219   * Return: The current timestamp of the PTP clock using IEP driver
220   */
icss_iep_gettime(struct icss_iep * iep,struct ptp_system_timestamp * sts)221  static u64 icss_iep_gettime(struct icss_iep *iep,
222  			    struct ptp_system_timestamp *sts)
223  {
224  	u32 ts_hi = 0, ts_lo;
225  	unsigned long flags;
226  
227  	if (iep->ops && iep->ops->gettime)
228  		return iep->ops->gettime(iep->clockops_data, sts);
229  
230  	/* use local_irq_x() to make it work for both RT/non-RT */
231  	local_irq_save(flags);
232  
233  	/* no need to play with hi-lo, hi is latched when lo is read */
234  	ptp_read_system_prets(sts);
235  	ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
236  	ptp_read_system_postts(sts);
237  	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
238  		ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
239  
240  	local_irq_restore(flags);
241  
242  	return (u64)ts_lo | (u64)ts_hi << 32;
243  }
244  
icss_iep_enable(struct icss_iep * iep)245  static void icss_iep_enable(struct icss_iep *iep)
246  {
247  	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
248  			   IEP_GLOBAL_CFG_CNT_ENABLE,
249  			   IEP_GLOBAL_CFG_CNT_ENABLE);
250  }
251  
icss_iep_disable(struct icss_iep * iep)252  static void icss_iep_disable(struct icss_iep *iep)
253  {
254  	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
255  			   IEP_GLOBAL_CFG_CNT_ENABLE,
256  			   0);
257  }
258  
/* Put the IEP into shadow (cyclic) mode: CMP0 resets the counter every
 * cycle_time_ns so the counter stays phase-locked to the configured cycle.
 */
static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
{
	u32 cycle_time;
	int cmp;

	/* CMP0 is programmed one default increment short of the cycle time -
	 * presumably to account for the reset taking effect on the next tick;
	 * TODO confirm against the SoC TRM.
	 */
	cycle_time = iep->cycle_time_ns - iep->def_inc;

	icss_iep_disable(iep);

	/* disable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, 0);

	/* enable shadow mode */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);

	/* clear counters */
	icss_iep_set_counter(iep, 0);

	/* clear overflow status (write-one-to-clear) */
	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
			   IEP_GLOBAL_STATUS_CNT_OVF,
			   IEP_GLOBAL_STATUS_CNT_OVF);

	/* clear compare status and disable every compare event.
	 * NOTE(review): the loop covers cmp 0..14 only, so CMP15
	 * (IEP_MAX_CMP) is never cleared/disabled - confirm whether
	 * "<=" was intended here.
	 */
	for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
		regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
				   IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));

		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
				   IEP_CMP_CFG_CMP_EN(cmp), 0);
	}

	/* enable reset counter on CMP0 event */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN,
			   IEP_CMP_CFG_CMP0_RST_CNT_EN);
	/* enable compare */
	regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
			   IEP_CMP_CFG_CMP_EN(0),
			   IEP_CMP_CFG_CMP_EN(0));

	/* set CMP0 value to cycle time.
	 * NOTE(review): the 64-bit branch writes the same 32-bit cycle_time
	 * into CMP0_REG1 (the upper word) - verify the upper word should not
	 * simply be 0.
	 */
	regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);

	icss_iep_set_counter(iep, 0);
	icss_iep_enable(iep);
}
310  
/* Program the nominal per-tick counter increment (DEFAULT_INC field) */
static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
{
	u32 field = (u32)def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT;

	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_DEFAULT_INC_MASK, field);
}
317  
/* Program the per-compensation-cycle increment, clamped to the 12-bit field */
static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
{
	u16 inc = compen_inc;

	if (inc > IEP_MAX_COMPEN_INC) {
		dev_err(regmap_get_device(iep->map),
			"%s: too high compensation inc %d\n", __func__, inc);
		inc = IEP_MAX_COMPEN_INC;
	}

	regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
			   IEP_GLOBAL_CFG_COMPEN_INC_MASK,
			   (u32)inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT);
}
332  
/* Program the fast compensation cycle count, clamped to the 24-bit field */
static void icss_iep_set_compensation_count(struct icss_iep *iep,
					    u32 compen_count)
{
	u32 count = compen_count;

	if (count > IEP_MAX_COMPEN_COUNT) {
		dev_err(regmap_get_device(iep->map),
			"%s: too high compensation count %d\n",
			__func__, count);
		count = IEP_MAX_COMPEN_COUNT;
	}

	regmap_write(iep->map, ICSS_IEP_COMPEN_REG, count);
}
346  
/* Program the slow compensation cycle count; callers gate this on
 * ICSS_IEP_SLOW_COMPEN_REG_SUPPORT where the register may be absent.
 */
static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
						 u32 compen_count)
{
	regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
}
352  
353  /* PTP PHC operations */
icss_iep_ptp_adjfine(struct ptp_clock_info * ptp,long scaled_ppm)354  static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
355  {
356  	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
357  	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
358  	u32 cyc_count;
359  	u16 cmp_inc;
360  
361  	mutex_lock(&iep->ptp_clk_mutex);
362  
363  	/* ppb is amount of frequency we want to adjust in 1GHz (billion)
364  	 * e.g. 100ppb means we need to speed up clock by 100Hz
365  	 * i.e. at end of 1 second (1 billion ns) clock time, we should be
366  	 * counting 100 more ns.
367  	 * We use IEP slow compensation to achieve continuous freq. adjustment.
368  	 * There are 2 parts. Cycle time and adjustment per cycle.
369  	 * Simplest case would be 1 sec Cycle time. Then adjustment
370  	 * pre cycle would be (def_inc + ppb) value.
371  	 * Cycle time will have to be chosen based on how worse the ppb is.
372  	 * e.g. smaller the ppb, cycle time has to be large.
373  	 * The minimum adjustment we can do is +-1ns per cycle so let's
374  	 * reduce the cycle time to get 1ns per cycle adjustment.
375  	 *	1ppb = 1sec cycle time & 1ns adjust
376  	 *	1000ppb = 1/1000 cycle time & 1ns adjust per cycle
377  	 */
378  
379  	if (iep->cycle_time_ns)
380  		iep->slow_cmp_inc = iep->clk_tick_time;	/* 4ns adj per cycle */
381  	else
382  		iep->slow_cmp_inc = 1;	/* 1ns adjust per cycle */
383  
384  	if (ppb < 0) {
385  		iep->slow_cmp_inc = -iep->slow_cmp_inc;
386  		ppb = -ppb;
387  	}
388  
389  	cyc_count = NSEC_PER_SEC;		/* 1s cycle time @1GHz */
390  	cyc_count /= ppb;		/* cycle time per ppb */
391  
392  	/* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
393  	if (!iep->cycle_time_ns)
394  		cyc_count /= iep->clk_tick_time;
395  	iep->slow_cmp_count = cyc_count;
396  
397  	/* iep->clk_tick_time is def_inc */
398  	cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
399  	icss_iep_set_compensation_inc(iep, cmp_inc);
400  	icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);
401  
402  	mutex_unlock(&iep->ptp_clk_mutex);
403  
404  	return 0;
405  }
406  
/* .adjtime PHC callback: step the clock by @delta ns, either via firmware
 * clockops or with a read-modify-write of the counter.
 */
static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);

	mutex_lock(&iep->ptp_clk_mutex);
	if (iep->ops && iep->ops->adjtime)
		iep->ops->adjtime(iep->clockops_data, delta);
	else
		icss_iep_settime(iep, icss_iep_gettime(iep, NULL) + delta);
	mutex_unlock(&iep->ptp_clk_mutex);

	return 0;
}
424  
icss_iep_ptp_gettimeex(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)425  static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
426  				  struct timespec64 *ts,
427  				  struct ptp_system_timestamp *sts)
428  {
429  	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
430  	u64 ns;
431  
432  	mutex_lock(&iep->ptp_clk_mutex);
433  	ns = icss_iep_gettime(iep, sts);
434  	*ts = ns_to_timespec64(ns);
435  	mutex_unlock(&iep->ptp_clk_mutex);
436  
437  	return 0;
438  }
439  
icss_iep_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)440  static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
441  				const struct timespec64 *ts)
442  {
443  	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
444  	u64 ns;
445  
446  	mutex_lock(&iep->ptp_clk_mutex);
447  	ns = timespec64_to_ns(ts);
448  	icss_iep_settime(iep, ns);
449  	mutex_unlock(&iep->ptp_clk_mutex);
450  
451  	return 0;
452  }
453  
/* Program CMP1 with the first period boundary at or after @start_ns (and not
 * in the past relative to the current counter).
 */
static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
{
	u64 ns, p_ns;
	u32 offset;

	ns = icss_iep_gettime(iep, NULL);
	if (start_ns < ns)
		start_ns = ns;
	p_ns = iep->period;
	/* Round up to next period boundary */
	start_ns += p_ns - 1;
	/* do_div() divides start_ns in place and returns the remainder.
	 * NOTE(review): do_div() takes a 32-bit divisor, so a period of
	 * 2^32 ns or more would be truncated - confirm callers keep
	 * iep->period below that.
	 */
	offset = do_div(start_ns, p_ns);
	start_ns = start_ns * p_ns;
	/* If it is too close to update, shift to next boundary */
	if (p_ns - offset < 10)
		start_ns += p_ns;

	regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
	if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
		regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
}
475  
/* Arm or tear down the CMP1/SYNC0 machinery that generates periodic output
 * pulses.  With firmware clockops the firmware picks the compare value;
 * otherwise CMP1 is programmed directly from the requested period.
 */
static int icss_iep_perout_enable_hw(struct icss_iep *iep,
				     struct ptp_perout_request *req, int on)
{
	int ret;
	u64 cmp;

	if (iep->ops && iep->ops->perout_enable) {
		/* cmp is produced by the firmware callback when enabling */
		ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
		if (ret)
			return ret;

		if (on) {
			/* Configure CMP */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
			/* Configure SYNC, 1ms pulse width */
			regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
			regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
			regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
			/* Enable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
		} else {
			/* Disable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), 0);

			/* clear regs */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
		}
	} else {
		if (on) {
			u64 start_ns;

			/* NOTE(review): req->start is ignored here; the first
			 * pulse is aligned to a period boundary instead -
			 * confirm this is intended for the PPS use case.
			 */
			iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
				      req->period.nsec;
			start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
				   + req->period.nsec;
			icss_iep_update_to_next_boundary(iep, start_ns);

			/* Enable Sync in single shot mode  */
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
				     IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
			/* Enable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
		} else {
			/* Disable CMP 1 */
			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
					   IEP_CMP_CFG_CMP_EN(1), 0);

			/* clear CMP regs */
			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);

			/* Disable sync */
			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
		}
	}

	return 0;
}
543  
/* Periodic output via the PTP API is deliberately not exposed to userspace;
 * the hardware path (icss_iep_perout_enable_hw()) is only used internally
 * for PPS.  Note @req may be NULL here (see icss_iep_exit()).
 */
static int icss_iep_perout_enable(struct icss_iep *iep,
				  struct ptp_perout_request *req, int on)
{
	return -EOPNOTSUPP;
}
549  
icss_iep_pps_enable(struct icss_iep * iep,int on)550  static int icss_iep_pps_enable(struct icss_iep *iep, int on)
551  {
552  	struct ptp_clock_request rq;
553  	struct timespec64 ts;
554  	int ret = 0;
555  	u64 ns;
556  
557  	mutex_lock(&iep->ptp_clk_mutex);
558  
559  	if (iep->perout_enabled) {
560  		ret = -EBUSY;
561  		goto exit;
562  	}
563  
564  	if (iep->pps_enabled == !!on)
565  		goto exit;
566  
567  	rq.perout.index = 0;
568  	if (on) {
569  		ns = icss_iep_gettime(iep, NULL);
570  		ts = ns_to_timespec64(ns);
571  		rq.perout.period.sec = 1;
572  		rq.perout.period.nsec = 0;
573  		rq.perout.start.sec = ts.tv_sec + 2;
574  		rq.perout.start.nsec = 0;
575  		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
576  	} else {
577  		ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
578  	}
579  
580  	if (!ret)
581  		iep->pps_enabled = !!on;
582  
583  exit:
584  	mutex_unlock(&iep->ptp_clk_mutex);
585  
586  	return ret;
587  }
588  
/* Enable/disable external timestamp capture for latch @index, either via
 * firmware clockops or directly in the CAPTURE_CFG register.
 * Fix: @ret was declared u32 although ops->extts_enable() returns an int
 * (negative errnos); declare it as int so error codes keep their type.
 */
static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
	u32 val, cap;
	int ret = 0;

	mutex_lock(&iep->ptp_clk_mutex);

	if (iep->ops && iep->ops->extts_enable) {
		ret = iep->ops->extts_enable(iep->clockops_data, index, on);
		goto exit;
	}

	/* Nothing to do if the latch is already in the requested state */
	if (((iep->latch_enable & BIT(index)) >> index) == on)
		goto exit;

	regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
	cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
	if (on) {
		val |= cap;
		iep->latch_enable |= BIT(index);
	} else {
		val &= ~cap;
		iep->latch_enable &= ~BIT(index);
	}
	regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);

exit:
	mutex_unlock(&iep->ptp_clk_mutex);

	return ret;
}
619  
icss_iep_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)620  static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
621  			       struct ptp_clock_request *rq, int on)
622  {
623  	struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
624  
625  	switch (rq->type) {
626  	case PTP_CLK_REQ_PEROUT:
627  		return icss_iep_perout_enable(iep, &rq->perout, on);
628  	case PTP_CLK_REQ_PPS:
629  		return icss_iep_pps_enable(iep, on);
630  	case PTP_CLK_REQ_EXTTS:
631  		return icss_iep_extts_enable(iep, rq->extts.index, on);
632  	default:
633  		break;
634  	}
635  
636  	return -EOPNOTSUPP;
637  }
638  
/* Template ptp_clock_info, copied into each instance at probe time; per-IEP
 * capabilities (n_per_out, pps, n_ext_ts) are patched in icss_iep_init().
 */
static struct ptp_clock_info icss_iep_ptp_info = {
	.owner		= THIS_MODULE,
	.name		= "ICSS IEP timer",
	.max_adj	= 10000000,
	.adjfine	= icss_iep_ptp_adjfine,
	.adjtime	= icss_iep_ptp_adjtime,
	.gettimex64	= icss_iep_ptp_gettimeex,
	.settime64	= icss_iep_ptp_settime,
	.enable		= icss_iep_ptp_enable,
};
649  
icss_iep_get_idx(struct device_node * np,int idx)650  struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
651  {
652  	struct platform_device *pdev;
653  	struct device_node *iep_np;
654  	struct icss_iep *iep;
655  
656  	iep_np = of_parse_phandle(np, "ti,iep", idx);
657  	if (!iep_np || !of_device_is_available(iep_np))
658  		return ERR_PTR(-ENODEV);
659  
660  	pdev = of_find_device_by_node(iep_np);
661  	of_node_put(iep_np);
662  
663  	if (!pdev)
664  		/* probably IEP not yet probed */
665  		return ERR_PTR(-EPROBE_DEFER);
666  
667  	iep = platform_get_drvdata(pdev);
668  	if (!iep)
669  		return ERR_PTR(-EPROBE_DEFER);
670  
671  	device_lock(iep->dev);
672  	if (iep->client_np) {
673  		device_unlock(iep->dev);
674  		dev_err(iep->dev, "IEP is already acquired by %s",
675  			iep->client_np->name);
676  		return ERR_PTR(-EBUSY);
677  	}
678  	iep->client_np = np;
679  	device_unlock(iep->dev);
680  	get_device(iep->dev);
681  
682  	return iep;
683  }
684  EXPORT_SYMBOL_GPL(icss_iep_get_idx);
685  
/**
 * icss_iep_get() - Acquire the first ("ti,iep" index 0) IEP instance for @np
 * @np: client device node
 *
 * Return: IEP handle or ERR_PTR, see icss_iep_get_idx().
 */
struct icss_iep *icss_iep_get(struct device_node *np)
{
	return icss_iep_get_idx(np, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_get);
691  
/**
 * icss_iep_put() - Release an IEP acquired with icss_iep_get{,_idx}()
 * @iep: IEP handle to release
 *
 * Clears the exclusive-client marker and drops the device reference taken
 * at acquisition time.
 */
void icss_iep_put(struct icss_iep *iep)
{
	device_lock(iep->dev);
	iep->client_np = NULL;
	device_unlock(iep->dev);
	put_device(iep->dev);
}
EXPORT_SYMBOL_GPL(icss_iep_put);
700  
/* Bring the IEP up for firmware use: free-running raw 64-bit counter with
 * neutral compensation and no PTP clock registration.
 */
void icss_iep_init_fw(struct icss_iep *iep)
{
	/* start IEP for FW use in raw 64bit mode, no PTP support */
	iep->clk_tick_time = iep->def_inc;
	iep->cycle_time_ns = 0;
	iep->ops = NULL;
	iep->clockops_data = NULL;
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	/* counter must be running before the time is loaded */
	icss_iep_enable(iep);
	icss_iep_settime(iep, 0);
}
EXPORT_SYMBOL_GPL(icss_iep_init_fw);
720  
/* Counterpart of icss_iep_init_fw(): stop the counter used by firmware */
void icss_iep_exit_fw(struct icss_iep *iep)
{
	icss_iep_disable(iep);
}
EXPORT_SYMBOL_GPL(icss_iep_exit_fw);
726  
/* Bring the IEP up for PHC use: program increments, optionally enable shadow
 * (cyclic) mode, load wall-clock time and register the PTP clock.
 * @clkops/@clockops_data select the firmware-assisted paths; perout/PPS and
 * extts capabilities are only advertised when the matching clockops exist
 * and the SoC has both 64-bit counter and slow-compensation support.
 */
int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
		  void *clockops_data, u32 cycle_time_ns)
{
	int ret = 0;

	iep->cycle_time_ns = cycle_time_ns;
	iep->clk_tick_time = iep->def_inc;
	iep->ops = clkops;
	iep->clockops_data = clockops_data;
	icss_iep_set_default_inc(iep, iep->def_inc);
	icss_iep_set_compensation_inc(iep, iep->def_inc);
	icss_iep_set_compensation_count(iep, 0);
	regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
	regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
	if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
		icss_iep_set_slow_compensation_count(iep, 0);

	if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
	    !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
		goto skip_perout;

	if (iep->ops && iep->ops->perout_enable) {
		iep->ptp_info.n_per_out = 1;
		iep->ptp_info.pps = 1;
	}

	if (iep->ops && iep->ops->extts_enable)
		iep->ptp_info.n_ext_ts = 2;

skip_perout:
	if (cycle_time_ns)
		icss_iep_enable_shadow_mode(iep);
	else
		icss_iep_enable(iep);
	/* seed the PHC with current system wall-clock time */
	icss_iep_settime(iep, ktime_get_real_ns());

	iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
	if (IS_ERR(iep->ptp_clock)) {
		ret = PTR_ERR(iep->ptp_clock);
		iep->ptp_clock = NULL;
		dev_err(iep->dev, "Failed to register ptp clk %d\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(icss_iep_init);
773  
/* Counterpart of icss_iep_init(): unregister the PTP clock, stop the counter
 * and tear down any active PPS/perout output.
 */
int icss_iep_exit(struct icss_iep *iep)
{
	if (iep->ptp_clock) {
		ptp_clock_unregister(iep->ptp_clock);
		iep->ptp_clock = NULL;
	}
	icss_iep_disable(iep);

	if (iep->pps_enabled)
		icss_iep_pps_enable(iep, false);
	else if (iep->perout_enabled)
		/* NOTE(review): icss_iep_perout_enable() is a stub returning
		 * -EOPNOTSUPP, so an enabled perout would not actually be
		 * turned off here - confirm perout_enabled can never be set.
		 */
		icss_iep_perout_enable(iep, NULL, false);

	return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);
790  
icss_iep_probe(struct platform_device * pdev)791  static int icss_iep_probe(struct platform_device *pdev)
792  {
793  	struct device *dev = &pdev->dev;
794  	struct icss_iep *iep;
795  	struct clk *iep_clk;
796  
797  	iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
798  	if (!iep)
799  		return -ENOMEM;
800  
801  	iep->dev = dev;
802  	iep->base = devm_platform_ioremap_resource(pdev, 0);
803  	if (IS_ERR(iep->base))
804  		return -ENODEV;
805  
806  	iep_clk = devm_clk_get(dev, NULL);
807  	if (IS_ERR(iep_clk))
808  		return PTR_ERR(iep_clk);
809  
810  	iep->refclk_freq = clk_get_rate(iep_clk);
811  
812  	iep->def_inc = NSEC_PER_SEC / iep->refclk_freq;	/* ns per clock tick */
813  	if (iep->def_inc > IEP_MAX_DEF_INC) {
814  		dev_err(dev, "Failed to set def_inc %d.  IEP_clock is too slow to be supported\n",
815  			iep->def_inc);
816  		return -EINVAL;
817  	}
818  
819  	iep->plat_data = device_get_match_data(dev);
820  	if (!iep->plat_data)
821  		return -EINVAL;
822  
823  	iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
824  	if (IS_ERR(iep->map)) {
825  		dev_err(dev, "Failed to create regmap for IEP %ld\n",
826  			PTR_ERR(iep->map));
827  		return PTR_ERR(iep->map);
828  	}
829  
830  	iep->ptp_info = icss_iep_ptp_info;
831  	mutex_init(&iep->ptp_clk_mutex);
832  	dev_set_drvdata(dev, iep);
833  	icss_iep_disable(iep);
834  
835  	return 0;
836  }
837  
am654_icss_iep_valid_reg(struct device * dev,unsigned int reg)838  static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
839  {
840  	switch (reg) {
841  	case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
842  		return true;
843  	default:
844  		return false;
845  	}
846  
847  	return false;
848  }
849  
icss_iep_regmap_write(void * context,unsigned int reg,unsigned int val)850  static int icss_iep_regmap_write(void *context, unsigned int reg,
851  				 unsigned int val)
852  {
853  	struct icss_iep *iep = context;
854  
855  	writel(val, iep->base + iep->plat_data->reg_offs[reg]);
856  
857  	return 0;
858  }
859  
icss_iep_regmap_read(void * context,unsigned int reg,unsigned int * val)860  static int icss_iep_regmap_read(void *context, unsigned int reg,
861  				unsigned int *val)
862  {
863  	struct icss_iep *iep = context;
864  
865  	*val = readl(iep->base + iep->plat_data->reg_offs[reg]);
866  
867  	return 0;
868  }
869  
/* Custom-accessor regmap (no bus): reads/writes go through the hooks above,
 * which index plat_data->reg_offs; fast_io selects spinlock-based locking.
 */
static struct regmap_config am654_icss_iep_regmap_config = {
	.name = "icss iep",
	.reg_stride = 1,
	.reg_write = icss_iep_regmap_write,
	.reg_read = icss_iep_regmap_read,
	.writeable_reg = am654_icss_iep_valid_reg,
	.readable_reg = am654_icss_iep_valid_reg,
	.fast_io = 1,
};
879  
/* AM654 register layout and capability flags (64-bit counter, slow
 * compensation and shadow mode are all present on this SoC).
 */
static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
	.flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
		 ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
		 ICSS_IEP_SHADOW_MODE_SUPPORT,
	.reg_offs = {
		[ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
		[ICSS_IEP_COMPEN_REG] = 0x08,
		[ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
		[ICSS_IEP_COUNT_REG0] = 0x10,
		[ICSS_IEP_COUNT_REG1] = 0x14,
		[ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
		[ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,

		[ICSS_IEP_CAP6_RISE_REG0] = 0x50,
		[ICSS_IEP_CAP6_RISE_REG1] = 0x54,

		[ICSS_IEP_CAP7_RISE_REG0] = 0x60,
		[ICSS_IEP_CAP7_RISE_REG1] = 0x64,

		[ICSS_IEP_CMP_CFG_REG] = 0x70,
		[ICSS_IEP_CMP_STAT_REG] = 0x74,
		[ICSS_IEP_CMP0_REG0] = 0x78,
		[ICSS_IEP_CMP0_REG1] = 0x7c,
		[ICSS_IEP_CMP1_REG0] = 0x80,
		[ICSS_IEP_CMP1_REG1] = 0x84,

		[ICSS_IEP_CMP8_REG0] = 0xc0,
		[ICSS_IEP_CMP8_REG1] = 0xc4,
		[ICSS_IEP_SYNC_CTRL_REG] = 0x180,
		[ICSS_IEP_SYNC0_STAT_REG] = 0x188,
		[ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
		[ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
		[ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
		[ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
		[ICSS_IEP_SYNC_START_REG] = 0x19c,
	},
	.config = &am654_icss_iep_regmap_config,
};
918  
/* DT match table; .data supplies the per-SoC plat_data consumed in probe */
static const struct of_device_id icss_iep_of_match[] = {
	{
		.compatible = "ti,am654-icss-iep",
		.data = &am654_icss_iep_plat_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);
927  
/* No .remove: all resources are devm-managed and clients are expected to
 * call icss_iep_exit()/icss_iep_exit_fw() before the device goes away.
 */
static struct platform_driver icss_iep_driver = {
	.driver = {
		.name = "icss-iep",
		.of_match_table = icss_iep_of_match,
	},
	.probe = icss_iep_probe,
};
module_platform_driver(icss_iep_driver);
936  
937  MODULE_LICENSE("GPL");
938  MODULE_DESCRIPTION("TI ICSS IEP driver");
939  MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
940  MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
941