// SPDX-License-Identifier: GPL-2.0
/*
 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
 *
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include "fec.h"

/* FEC 1588 register bits */
#define FEC_T_CTRL_SLAVE                0x00002000
#define FEC_T_CTRL_CAPTURE              0x00000800
#define FEC_T_CTRL_RESTART              0x00000200
#define FEC_T_CTRL_PERIOD_RST           0x00000030
#define FEC_T_CTRL_PERIOD_EN		0x00000010
#define FEC_T_CTRL_ENABLE               0x00000001

#define FEC_T_INC_MASK                  0x0000007f
#define FEC_T_INC_OFFSET                0
#define FEC_T_INC_CORR_MASK             0x00007f00
#define FEC_T_INC_CORR_OFFSET           8

#define FEC_T_CTRL_PINPER		0x00000080
#define FEC_T_TF0_MASK			0x00000001
#define FEC_T_TF0_OFFSET		0
#define FEC_T_TF1_MASK			0x00000002
#define FEC_T_TF1_OFFSET		1
#define FEC_T_TF2_MASK			0x00000004
#define FEC_T_TF2_OFFSET		2
#define FEC_T_TF3_MASK			0x00000008
#define FEC_T_TF3_OFFSET		3
#define FEC_T_TDRE_MASK			0x00000001
#define FEC_T_TDRE_OFFSET		0
#define FEC_T_TMODE_MASK		0x0000003C
#define FEC_T_TMODE_OFFSET		2
#define FEC_T_TIE_MASK			0x00000040
#define FEC_T_TIE_OFFSET		6
#define FEC_T_TF_MASK			0x00000080
#define FEC_T_TF_OFFSET			7

#define FEC_ATIME_CTRL		0x400
#define FEC_ATIME		0x404
#define FEC_ATIME_EVT_OFFSET	0x408
#define FEC_ATIME_EVT_PERIOD	0x40c
#define FEC_ATIME_CORR		0x410
#define FEC_ATIME_INC		0x414
#define FEC_TS_TIMESTAMP	0x418

#define FEC_TGSR		0x604
#define FEC_TCSR(n)		(0x608 + n * 0x08)
#define FEC_TCCR(n)		(0x60C + n * 0x08)
#define MAX_TIMER_CHANNEL	3
#define FEC_TMODE_TOGGLE	0x05
#define FEC_HIGH_PULSE		0x0F

#define FEC_CC_MULT	(1 << 31)
#define FEC_COUNTER_PERIOD	(1 << 31)
#define PPS_OUTPUT_RELOAD_PERIOD	NSEC_PER_SEC
#define FEC_CHANNEL_0		0
#define DEFAULT_PPS_CHANNEL	FEC_CHANNEL_0

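/* The free-running ATIME counter counts nanoseconds: each PTP clock cycle it
 * advances by fep->ptp_inc (NSEC_PER_SEC / clock rate) and it wraps at
 * FEC_COUNTER_PERIOD (2^31 ns, roughly 2.1 s).  With cc.shift = 31,
 * FEC_CC_MULT (2^31) makes the cyclecounter cycles-to-nanoseconds conversion
 * an exact identity for this 31-bit, nanosecond-resolution counter.
 */
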
/**
 * fec_ptp_enable_pps - enable or disable PPS output on a timer channel
 * @fep: the fec_enet_private structure handle
 * @enable: enable the channel pps output
 *
 * This function enables the PPS output on the timer channel.
 */
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
	unsigned long flags;
	u32 val, tempval;
	struct timespec64 ts;
	u64 ns;

	if (fep->pps_enable == enable)
		return 0;

	fep->pps_channel = DEFAULT_PPS_CHANNEL;
	fep->reload_period = PPS_OUTPUT_RELOAD_PERIOD;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	if (enable) {
		/* Clear any pending capture or output compare interrupt
		 * status on the channel.
		 */
		writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* It is recommended to make sure the TMODE field in the
		 * TCSR register reads back as cleared before the first
		 * compare value is written into the TCCR register, so keep
		 * clearing it until it does.
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		do {
			val &= ~(FEC_T_TMODE_MASK);
			writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
			val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		} while (val & FEC_T_TMODE_MASK);

		/* Dummy read to update the time counter before we sample it */
		timecounter_read(&fep->tc);
		/* We want the first compare event to fire at the next second
		 * boundary, so we need to know the current PTP time and how
		 * many nanoseconds remain until the next second.  The
		 * remaining time is NSEC_PER_SEC - ts.tv_nsec; adding it to
		 * the current counter value gives the next second boundary.
		 */
		tempval = fep->cc.read(&fep->cc);
		/* Convert the ptp local counter to 1588 timestamp */
		ns = timecounter_cyc2time(&fep->tc, tempval);
		ts = ns_to_timespec64(ns);

		/* tempval is less than 3 seconds, so val is less than
		 * 4 seconds; the 32-bit calculation cannot overflow.
		 */
		val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;
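		/* Illustrative example (not taken from real hardware): if the
		 * converted PTP time is 100.3 s, ts.tv_nsec is
		 * 0.3 * NSEC_PER_SEC and val lands 0.7 s worth of counter
		 * ticks past tempval, i.e. on the 101 s boundary.
		 */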

		/* Need to consider the case where the current time is very
		 * close to the second boundary, i.e. NSEC_PER_SEC -
		 * ts.tv_nsec is almost zero (for example 20 ns).  Since the
		 * timer keeps running while we compute the first compare
		 * event, the remaining nanoseconds may run out before the
		 * compare value is calculated and written into the TCCR
		 * register.  To avoid this, program the compare event for
		 * the second after next.  The timer is 31 bits wide and
		 * wraps after roughly 2 seconds, so targeting the second
		 * after next is safe.
		 */
		val += NSEC_PER_SEC;

		/* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
		 * ptp counter, which may cause a 32-bit wrap.  Since
		 * (NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 seconds,
		 * the wrap cannot cause a problem; only an offset larger
		 * than fep->cc.mask would be an error.
		 */
		val &= fep->cc.mask;
		writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));

		/* Calculate the second compare event timestamp */
		fep->next_counter = (val + fep->reload_period) & fep->cc.mask;

		/* Enable the compare event output (PINPER) */
		val = readl(fep->hwp + FEC_ATIME_CTRL);
		val |= FEC_T_CTRL_PINPER;
		writel(val, fep->hwp + FEC_ATIME_CTRL);

		/* Compare channel setting. */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
		val &= ~(1 << FEC_T_TDRE_OFFSET);
		val &= ~(FEC_T_TMODE_MASK);
		val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
		writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* Write the second compare event timestamp and calculate
		 * the third timestamp.  Refer to the TCCR register details
		 * in the reference manual.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
		fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	} else {
		writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
	}

	fep->pps_enable = enable;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_read - read raw cycle counter (to be used by time counter)
 * @cc: the cyclecounter structure
 *
 * This function triggers a capture of the hardware counter and reads it
 * back; it is called by the cyclecounter structure to construct a
 * nanosecond time counter from the arbitrary fixed point registers.
 */
static u64 fec_ptp_read(const struct cyclecounter *cc)
{
	struct fec_enet_private *fep =
		container_of(cc, struct fec_enet_private, cc);
	u32 tempval;

	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
	tempval |= FEC_T_CTRL_CAPTURE;
	writel(tempval, fep->hwp + FEC_ATIME_CTRL);
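
	/* Some SoCs need a short delay after requesting the capture before
	 * ATIME holds a valid snapshot (see FEC_QUIRK_BUG_CAPTURE).
	 */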
	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
		udelay(1);

	return readl(fep->hwp + FEC_ATIME);
}

/**
 * fec_ptp_start_cyclecounter - create the cycle counter from hw
 * @ndev: network device
 *
 * This function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycles registers in the hardware.
 */
void fec_ptp_start_cyclecounter(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;
	int inc;

	inc = 1000000000 / fep->cycle_speed;

	/* grab the ptp lock */
	spin_lock_irqsave(&fep->tmreg_lock, flags);

	/* 1ns counter */
	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);

	/* use 31-bit timer counter */
	writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);

	writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
		fep->hwp + FEC_ATIME_CTRL);

	memset(&fep->cc, 0, sizeof(fep->cc));
	fep->cc.read = fec_ptp_read;
	fep->cc.mask = CLOCKSOURCE_MASK(31);
	fep->cc.shift = 31;
	fep->cc.mult = FEC_CC_MULT;

	/* reset the ns time counter */
	timecounter_init(&fep->tc, &fep->cc, 0);
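	/* The PHC therefore starts at time zero after every
	 * (re)initialization; user space is expected to set and discipline
	 * it afterwards via the settime64/adjfreq callbacks.
	 */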

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}

/**
 * fec_ptp_adjfreq - adjust ptp cycle frequency
 * @ptp: the ptp clock structure
 * @ppb: parts per billion adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated ppb from the base frequency.
 *
 * Because the ENET hardware frequency adjustment is complex,
 * a software method is used to perform it.
 */
static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	unsigned long flags;
	int neg_adj = 0;
	u32 i, tmp;
	u32 corr_inc, corr_period;
	u32 corr_ns;
	u64 lhs, rhs;

	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	if (ppb == 0)
		return 0;

	if (ppb < 0) {
		ppb = -ppb;
		neg_adj = 1;
	}

	/* In theory the correction must satisfy
	 * corr_inc / (corr_period * fep->ptp_inc) = ppb / NSEC_PER_SEC.
	 * Try to find a corr_inc between 1 and fep->ptp_inc that meets
	 * the adjustment requirement.
	 */
	lhs = NSEC_PER_SEC;
	rhs = (u64)ppb * (u64)fep->ptp_inc;
	for (i = 1; i <= fep->ptp_inc; i++) {
		if (lhs >= rhs) {
			corr_inc = i;
			corr_period = div_u64(lhs, rhs);
			break;
		}
		lhs += NSEC_PER_SEC;
	}
	/* Not found? Fall back to the largest correction: adjust by a full
	 * increment on every clock cycle.
	 */
	if (i > fep->ptp_inc) {
		corr_inc = fep->ptp_inc;
		corr_period = 1;
	}
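
	/* Worked example (assuming a 125 MHz PTP clock, i.e. ptp_inc = 8 ns)
	 * for ppb = 100: rhs = 100 * 8 = 800, so the first iteration already
	 * satisfies lhs >= rhs and gives corr_inc = 1 and
	 * corr_period = NSEC_PER_SEC / 800 = 1250000.  One extra nanosecond
	 * every 1250000 cycles at 125 MHz is 100 ns per second, i.e. 100 ppb.
	 */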

	if (neg_adj)
		corr_ns = fep->ptp_inc - corr_inc;
	else
		corr_ns = fep->ptp_inc + corr_inc;
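	/* Every corr_period timer clock cycles the hardware applies corr_ns
	 * (INC_CORR) instead of the nominal increment (INC), so the counter
	 * gains or loses corr_inc ns per correction period.
	 */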

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
	writel(tmp, fep->hwp + FEC_ATIME_INC);
	corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
	writel(corr_period, fep->hwp + FEC_ATIME_CORR);
	/* dummy read to update the timer. */
	timecounter_read(&fep->tc);

	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

/**
 * fec_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset to adjust the cycle counter by
 *
 * Adjust the PTP time by adding the signed delta to the timecounter.
 */
static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	timecounter_adjtime(&fep->tc, delta);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}

357 
358 /**
359  * fec_ptp_gettime
360  * @ptp: the ptp clock structure
361  * @ts: timespec structure to hold the current time value
362  *
363  * read the timecounter and return the correct value on ns,
364  * after converting it into a struct timespec.
365  */
366 static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
367 {
368 	struct fec_enet_private *fep =
369 	    container_of(ptp, struct fec_enet_private, ptp_caps);
370 	u64 ns;
371 	unsigned long flags;
372 
373 	spin_lock_irqsave(&fep->tmreg_lock, flags);
374 	/* Check the ptp clock */
375 	if (!fep->ptp_clk_on) {
376 		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
377 		return -EINVAL;
378 	}
379 	ns = timecounter_read(&fep->tc);
380 	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
381 
382 	*ts = ns_to_timespec64(ns);
383 
384 	return 0;
385 }

/**
 * fec_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec containing the new time for the cycle counter
 *
 * reset the timecounter to use a new base value instead of the kernel
 * wall timer value.
 */
static int fec_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec64 *ts)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);

	u64 ns;
	unsigned long flags;
	u32 counter;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	/* Check the ptp clock */
	if (!fep->ptp_clk_on) {
		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
		return -EINVAL;
	}

	ns = timespec64_to_ns(ts);
	/* Get the timer value based on timestamp.
	 * Update the counter with the masked value.
	 */
	counter = ns & fep->cc.mask;

	writel(counter, fep->hwp + FEC_ATIME);
	timecounter_init(&fep->tc, &fep->cc, ns);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
	return 0;
}

/**
 * fec_ptp_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 *
 * Enable or disable an ancillary clock feature; only PPS output
 * is supported.
 */
static int fec_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct fec_enet_private *fep =
	    container_of(ptp, struct fec_enet_private, ptp_caps);
	int ret = 0;

	if (rq->type == PTP_CLK_REQ_PPS) {
		ret = fec_ptp_enable_pps(fep, on);

		return ret;
	}
	return -EOPNOTSUPP;
}

/**
 * fec_ptp_disable_hwts - disable hardware time stamping
 * @ndev: pointer to net_device
 */
void fec_ptp_disable_hwts(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fep->hwts_tx_en = 0;
	fep->hwts_rx_en = 0;
}

int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		fep->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		fep->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		fep->hwts_rx_en = 0;
		break;

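	/* The driver does not implement per-packet-type filtering: any other
	 * filter request enables timestamping of all received frames, and
	 * HWTSTAMP_FILTER_ALL is reported back to user space.
	 */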
	default:
		fep->hwts_rx_en = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	    -EFAULT : 0;
}

int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (fep->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/*
 * fec_time_keep - call timecounter_read at least once per second so that
 *                 counter wraps are never missed: the free-running hardware
 *                 counter is only 31 bits wide and wraps after roughly
 *                 2 seconds.
 */
static void fec_time_keep(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
	unsigned long flags;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	if (fep->ptp_clk_on) {
		timecounter_read(&fep->tc);
	}
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	schedule_delayed_work(&fep->time_keep, HZ);
}

/* This function checks the pps event and reloads the timer compare counter. */
static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u8 channel = fep->pps_channel;
	struct ptp_clock_event event;

	val = readl(fep->hwp + FEC_TCSR(channel));
	if (val & FEC_T_TF_MASK) {
		/* Write the compare value for the event after next (not the
		 * next one; see the TCCR register description in the spec)
		 * to the register.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
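		/* The TF (timer flag) bit is write-1-to-clear; val still has
		 * it set from the read above, so writing val back clears the
		 * event.  Repeat until the flag reads back as cleared.
		 */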
		do {
			writel(val, fep->hwp + FEC_TCSR(channel));
		} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);

		/* Update the compare counter for the following event */
		fep->next_counter = (fep->next_counter + fep->reload_period) &
				fep->cc.mask;

		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(fep->ptp_clock, &event);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/**
 * fec_ptp_init
 * @pdev: The FEC network adapter
 * @irq_idx: the interrupt index
 *
 * This function performs the required steps for enabling ptp
 * support. If ptp support has already been loaded it simply calls the
 * cyclecounter init routine and exits.
 */
void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int irq;
	int ret;

	fep->ptp_caps.owner = THIS_MODULE;
	strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));

	fep->ptp_caps.max_adj = 250000000;
	fep->ptp_caps.n_alarm = 0;
	fep->ptp_caps.n_ext_ts = 0;
	fep->ptp_caps.n_per_out = 0;
	fep->ptp_caps.n_pins = 0;
	fep->ptp_caps.pps = 1;
	fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
	fep->ptp_caps.adjtime = fec_ptp_adjtime;
	fep->ptp_caps.gettime64 = fec_ptp_gettime;
	fep->ptp_caps.settime64 = fec_ptp_settime;
	fep->ptp_caps.enable = fec_ptp_enable;

	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
	if (!fep->cycle_speed) {
		fep->cycle_speed = NSEC_PER_SEC;
		dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
	}
	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
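	/* ptp_inc is the nominal number of nanoseconds the hardware counter
	 * advances per PTP clock cycle; it is also the upper bound for the
	 * per-cycle correction programmed by fec_ptp_adjfreq().
	 */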

	fec_ptp_start_cyclecounter(ndev);

	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);

	irq = platform_get_irq_byname_optional(pdev, "pps");
	if (irq < 0)
		irq = platform_get_irq_optional(pdev, irq_idx);
	/* Failure to get an irq is not fatal; only the PTP_CLOCK_PPS clock
	 * events will be unavailable.
	 */
	if (irq >= 0) {
		ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
				       0, pdev->name, ndev);
		if (ret < 0)
			dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
				 ret);
	}

	fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
	if (IS_ERR(fep->ptp_clock)) {
		fep->ptp_clock = NULL;
		dev_err(&pdev->dev, "ptp_clock_register failed\n");
	}

	schedule_delayed_work(&fep->time_keep, HZ);
}

void fec_ptp_stop(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->pps_enable)
		fec_ptp_enable_pps(fep, 0);

	cancel_delayed_work_sync(&fep->time_keep);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
}

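/**
 * fec_ptp_save_state - snapshot the PHC time and frequency correction
 * @fep: the fec_enet_private structure handle
 *
 * Records the current PHC time, the matching system time and the ATIME
 * correction registers so that fec_ptp_restore_state() can later rebuild
 * the clock state (for example across a controller reset).
 */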
void fec_ptp_save_state(struct fec_enet_private *fep)
{
	u32 atime_inc_corr;

	fec_ptp_gettime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
	fep->ptp_saved_state.ns_sys = ktime_get_ns();

	fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
	atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
	fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
}

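/**
 * fec_ptp_restore_state - restore the state saved by fec_ptp_save_state()
 * @fep: the fec_enet_private structure handle
 *
 * Reprograms the saved correction registers and sets the PHC to the saved
 * time plus the system time that has elapsed since the state was saved.
 */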
int fec_ptp_restore_state(struct fec_enet_private *fep)
{
	u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
	u64 ns_sys;

	writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
	atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
	writel(atime_inc, fep->hwp + FEC_ATIME_INC);

	ns_sys = ktime_get_ns() - fep->ptp_saved_state.ns_sys;
	timespec64_add_ns(&fep->ptp_saved_state.ts_phc, ns_sys);
	return fec_ptp_settime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
}