/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <linux/ptp_clock_kernel.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
#include "en.h"
#include "clock.h"

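/* Fixed-point shift used by the internal cyclecounter: free-running counter
 * cycles are converted to nanoseconds as (cycles * cycles.mult) >> shift.
 */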
enum {
	MLX5_CYCLES_SHIFT	= 23
};

enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};

enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};

enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};

enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
};

static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
}

static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}

static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
		return -EOPNOTSUPP;

	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
				    MLX5_REG_MTUTC, 0, 1);
}

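/* Read the 64-bit internal (or real-time) counter as two 32-bit MMIO reads.
 * The high word is read before and after the low word; if it changed, the
 * low word wrapped and is sampled again. When requested, PTP system
 * timestamps bracket the low-word read.
 */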
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}

static u64 read_internal_timer(const struct cyclecounter *cc)
{
	struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
	struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);

	return mlx5_read_time(mdev, NULL, false) & cc->mask;
}

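/* Publish the current timecounter state to the clock info page shared with
 * userspace (via mlx5_ib). The sign field acts as a sequence counter, so
 * readers can detect and retry reads that race with an update.
 */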
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult   = timer->cycles.mult;
	clock_info->nsec   = timer->tc.nsec;
	clock_info->frac   = timer->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}

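/* Work item scheduled from the PPS event handler: program the next output
 * edge (MTPPS time_stamp) for every pin with a pending start time.
 */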
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}

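/* Periodically read the timecounter so that nanoseconds accumulate before
 * the free-running counter wraps around.
 */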
static void mlx5_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5_core_dev *mdev;
	struct mlx5_timer *timer;
	struct mlx5_clock *clock;
	unsigned long flags;

	timer = container_of(dwork, struct mlx5_timer, overflow_work);
	clock = container_of(timer, struct mlx5_clock, timer);
	mdev = container_of(clock, struct mlx5_core_dev, clock);

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);
	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
}

static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
				      const struct timespec64 *ts)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
	    ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
	MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
	MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	err = mlx5_ptp_settime_real_time(mdev, ts);
	if (err)
		return err;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

static
struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
					      struct ptp_system_timestamp *sts)
{
	struct timespec64 ts;
	u64 time;

	time = mlx5_read_time(mdev, sts, true);
	ts = ns_to_timespec64(time);
	return ts;
}

static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			     struct ptp_system_timestamp *sts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 cycles, ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	if (mlx5_real_time_mode(mdev)) {
		*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
		goto out;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	cycles = mlx5_read_time(mdev, sts, false);
	ns = timecounter_cyc2time(&timer->tc, cycles);
	write_sequnlock_irqrestore(&clock->lock, flags);
	*ts = ns_to_timespec64(ns);
out:
	return 0;
}

static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	/* The HW time adjustment is limited to a signed 16-bit delta.
	 * For larger offsets, fall back to an absolute settime.
	 */
	if (delta < S16_MIN || delta > S16_MAX) {
		struct timespec64 ts;
		s64 ns;

		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
		ns = timespec64_to_ns(&ts) + delta;
		ts = ns_to_timespec64(ns);
		return mlx5_ptp_settime_real_time(mdev, &ts);
	}

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
	MLX5_SET(mtutc_reg, in, time_adjustment, delta);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	err = mlx5_ptp_adjtime_real_time(mdev, delta);
	if (err)
		return err;
	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&timer->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
	MLX5_SET(mtutc_reg, in, freq_adjustment, freq);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int neg_adj = 0;
	u32 diff;
	u64 adj;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	err = mlx5_ptp_adjfreq_real_time(mdev, delta);
	if (err)
		return err;

	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}

	adj = timer->nominal_c_mult;
	adj *= delta;
	diff = div_u64(adj, 1000000000ULL);

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
				       timer->nominal_c_mult + diff;
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}

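/* Convert an absolute target time in nanoseconds into an absolute value of
 * the free-running internal counter, using the current mult/shift.
 */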
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}

static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
{
	struct timespec64 ts = {};
	s64 target_ns;

	ts.tv_sec = sec;
	target_ns = timespec64_to_ns(&ts);

	return find_target_cycles(mdev, target_ns);
}

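/* In real time mode the periodic output start time carries the whole seconds
 * in the upper 32 bits of the MTPPS time_stamp; start.nsec must be zero
 * (checked by the caller), so the lower 32 bits stay zero.
 */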
static u64 perout_conf_real_time(s64 sec)
{
	return (u64)sec << 32;
}

static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	bool rt_mode = mlx5_real_time_mode(mdev);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	struct timespec64 ts;
	u32 field_select = 0;
	u64 time_stamp = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;
	s64 ns;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->perout.flags)
		return -EOPNOTSUPP;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	field_select = MLX5_MTPPS_FS_ENABLE;
	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		s64 sec = rq->perout.start.sec;

		if (rq->perout.start.nsec)
			return -EINVAL;

		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		ns = timespec64_to_ns(&ts);

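		/* Only a 1 sec period (1PPS output) is supported. */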
		if ((ns >> 1) != 500000000LL)
			return -EINVAL;

		if (rt_mode && sec > U32_MAX)
			return -EINVAL;

		time_stamp = rt_mode ? perout_conf_real_time(sec) :
				       perout_conf_internal_timer(mdev, sec);

		field_select |= MLX5_MTPPS_FS_PIN_MODE |
				MLX5_MTPPS_FS_PATTERN |
				MLX5_MTPPS_FS_TIME_STAMP;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	if (rt_mode)
		return 0;

	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}

static int mlx5_pps_configure(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq,
			      int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);

	clock->pps_info.enabled = !!on;
	return 0;
}

static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq,
			   int on)
{
	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return mlx5_extts_configure(ptp, rq, on);
	case PTP_CLK_REQ_PEROUT:
		return mlx5_perout_configure(ptp, rq, on);
	case PTP_CLK_REQ_PPS:
		return mlx5_pps_configure(ptp, rq, on);
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};

static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			   enum ptp_pin_function func, unsigned int chan)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);

	switch (func) {
	case PTP_PF_NONE:
		return 0;
	case PTP_PF_EXTTS:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
	case PTP_PF_PEROUT:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_ptp",
	.max_adj	= 100000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= mlx5_ptp_adjfreq,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};

static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	MLX5_SET(mtpps_reg, in, pin, pin);

	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
}

static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);

	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
	u8 mode;
	int err;

	err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
	if (err || !MLX5_GET(mtpps_reg, out, enable))
		return PTP_PF_NONE;

	mode = MLX5_GET(mtpps_reg, out, pin_mode);

	if (mode == MLX5_PIN_MODE_IN)
		return PTP_PF_EXTTS;
	else if (mode == MLX5_PIN_MODE_OUT)
		return PTP_PF_PEROUT;

	return PTP_PF_NONE;
}

static void mlx5_init_pin_config(struct mlx5_clock *clock)
{
	int i;

	if (!clock->ptp_info.n_pins)
		return;

	clock->ptp_info.pin_config =
			kcalloc(clock->ptp_info.n_pins,
				sizeof(*clock->ptp_info.pin_config),
				GFP_KERNEL);
	if (!clock->ptp_info.pin_config)
		return;
	clock->ptp_info.enable = mlx5_ptp_enable;
	clock->ptp_info.verify = mlx5_ptp_verify;
	clock->ptp_info.pps = 1;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		snprintf(clock->ptp_info.pin_config[i].name,
			 sizeof(clock->ptp_info.pin_config[i].name),
			 "mlx5_pps%d", i);
		clock->ptp_info.pin_config[i].index = i;
		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
		clock->ptp_info.pin_config[i].chan = 0;
	}
}

static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}

static void ts_next_sec(struct timespec64 *ts)
{
	ts->tv_sec += 1;
	ts->tv_nsec = 0;
}

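/* Compute the internal counter value at the start of the next second, used
 * to re-arm the periodic output after each PPS event.
 */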
static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
					struct mlx5_clock *clock)
{
	struct timespec64 ts;
	s64 target_ns;

	mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
	ts_next_sec(&ts);
	target_ns = timespec64_to_ns(&ts);

	return find_target_cycles(mdev, target_ns);
}

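/* PPS event handler: for input (EXTTS) pins report the captured timestamp to
 * the PTP core; for output (PEROUT) pins schedule the work that re-arms the
 * next pulse.
 */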
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
						be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
						  be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		ns = perout_conf_next_event_timer(mdev, clock);
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		schedule_work(&clock->pps_info.out_work);
		break;
	default:
		mlx5_core_err(mdev, "Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}

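/* Set up the cyclecounter/timecounter over the 41-bit free-running device
 * counter, using the device frequency (in kHz) reported by firmware.
 */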
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = MLX5_CYCLES_SHIFT;
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	timer->nominal_c_mult = timer->cycles.mult;
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}

static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate the period (in jiffies) at which to run the overflow
	 * watchdog, to make sure the counter is read at least twice every
	 * wrap around.
	 * The period is derived from the minimum of the max HW cycles count
	 * (the clock source mask) and the max number of cycles that can be
	 * multiplied by the clock multiplier without exceeding 64 bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
	if (timer->overflow_period)
		schedule_delayed_work(&timer->overflow_work, 0);
	else
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is not scheduled\n");

	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}

static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_ib_clock_info *info;
	struct mlx5_timer *timer;

	mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
	if (!mdev->clock_info) {
		mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
		return;
	}

	info = mdev->clock_info;
	timer = &clock->timer;

	info->nsec = timer->tc.nsec;
	info->cycles = timer->tc.cycle_last;
	info->mask = timer->cycles.mask;
	info->mult = timer->nominal_c_mult;
	info->shift = timer->cycles.shift;
	info->frac = timer->tc.frac;
}

static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(clock);
	clock->ptp_info = mlx5_ptp_clock_info;

	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_ptp_settime(&clock->ptp_info, &ts);
	}
}

static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_PPS_CAP(mdev))
		return;

	mlx5_get_pps_caps(mdev);
	mlx5_init_pin_config(clock);
}

void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	mlx5_init_timer_clock(mdev);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	mlx5_init_pps(mdev);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}

void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->timer.overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}