1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <linux/ptp_clock_kernel.h>
36 #include <rdma/mlx5-abi.h>
37 #include "lib/eq.h"
38 #include "en.h"
39 #include "clock.h"
40 
enum {
	/* Fixed-point shift for the free-running cycle counter's
	 * cyclecounter; the matching mult is derived from the device
	 * frequency via clocksource_khz2mult() in mlx5_timecounter_init().
	 */
	MLX5_CYCLES_SHIFT	= 31
};
44 
/* MTPPS pin_mode field values: pin direction (external timestamp input vs
 * periodic/pulse output).
 */
enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};
49 
/* MTPPS pattern field values for output pins. */
enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};
54 
/* MTPPSE event generation modes ("REPETETIVE" spelling kept as-is; it is
 * referenced at every mlx5_set_mtppse() call site in this file).
 */
enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};
60 
/* MTPPS field_select bits: each bit tells firmware which register fields
 * of the write are valid and should be applied.
 */
enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
	MLX5_MTPPS_FS_NPPS_PERIOD               = BIT(0x9),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS     = BIT(0xa),
};
71 
/* Limits (in nanoseconds) accepted by the MTUTC ADJUST_TIME operation.
 * The basic range is the signed 16-bit time_adjustment field; devices with
 * the extended-range capability accept +/-200us.
 */
enum {
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN          = S16_MIN,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX          = S16_MAX,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};
78 
79 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
80 {
81 	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
82 }
83 
84 static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
85 {
86 	return (mlx5_real_time_mode(mdev) &&
87 		MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
88 		MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
89 }
90 
91 static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
92 {
93 	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
94 }
95 
96 static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
97 {
98 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
99 	struct mlx5_core_dev *mdev;
100 
101 	mdev = container_of(clock, struct mlx5_core_dev, clock);
102 
103 	return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
104 		       MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
105 			     MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
106 }
107 
108 static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
109 {
110 	s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);
111 
112 	if (delta < -max || delta > max)
113 		return false;
114 
115 	return true;
116 }
117 
118 static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
119 {
120 	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
121 
122 	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
123 		return -EOPNOTSUPP;
124 
125 	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
126 				    MLX5_REG_MTUTC, 0, 1);
127 }
128 
/* Read the 64-bit device time from the initialization segment, either the
 * real-time clock or the free-running internal timer (@real_time).
 *
 * The value is exposed as two 32-bit MMIO words. The high word is sampled
 * before and after the low word; if it changed, the low word wrapped
 * between reads and is re-read so it pairs with the second high sample.
 * @sts (may be NULL) captures system timestamps immediately around the
 * low-word read for PTP cross-timestamping.
 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* low word wrapped; re-read so it matches timer_h1 */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	/* real-time is a packed sec/nsec format, internal timer raw cycles */
	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}
154 
155 static u64 read_internal_timer(const struct cyclecounter *cc)
156 {
157 	struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
158 	struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
159 	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
160 						  clock);
161 
162 	return mlx5_read_time(mdev, NULL, false) & cc->mask;
163 }
164 
/* Publish the current timecounter state to the shared page userspace maps
 * (mlx5_ib). A seqcount-like protocol on clock_info->sign lets readers
 * detect a torn update: sign is made odd (KERNEL_UPDATING) before the
 * fields are written and advanced past it afterwards.
 *
 * Callers must hold clock->lock (write side).
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	/* page allocation may have failed at init; nothing to publish */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	/* mark update-in-progress; full barrier before touching fields */
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult   = timer->cycles.mult;
	clock_info->nsec   = timer->tc.nsec;
	clock_info->frac   = timer->tc.frac;

	/* release the new (even) sequence value after the field writes */
	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
188 
/* Workqueue handler: arm the next PPS output event on every pin that has a
 * pending start timestamp (set by mlx5_pps_event). The timestamp is taken
 * and cleared under clock->lock, but the MTPPS register write is done
 * outside the lock since register access may sleep.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		/* atomically consume the pending start time for this pin */
		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
217 
218 static void mlx5_timestamp_overflow(struct work_struct *work)
219 {
220 	struct delayed_work *dwork = to_delayed_work(work);
221 	struct mlx5_core_dev *mdev;
222 	struct mlx5_timer *timer;
223 	struct mlx5_clock *clock;
224 	unsigned long flags;
225 
226 	timer = container_of(dwork, struct mlx5_timer, overflow_work);
227 	clock = container_of(timer, struct mlx5_clock, timer);
228 	mdev = container_of(clock, struct mlx5_core_dev, clock);
229 
230 	write_seqlock_irqsave(&clock->lock, flags);
231 	timecounter_read(&timer->tc);
232 	mlx5_update_clock_info_page(mdev);
233 	write_sequnlock_irqrestore(&clock->lock, flags);
234 	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
235 }
236 
237 static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
238 				      const struct timespec64 *ts)
239 {
240 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
241 
242 	if (!mlx5_modify_mtutc_allowed(mdev))
243 		return 0;
244 
245 	if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
246 	    ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
247 		return -EINVAL;
248 
249 	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
250 	MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
251 	MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
252 
253 	return mlx5_set_mtutc(mdev, in, sizeof(in));
254 }
255 
256 static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
257 {
258 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
259 	struct mlx5_timer *timer = &clock->timer;
260 	struct mlx5_core_dev *mdev;
261 	unsigned long flags;
262 	int err;
263 
264 	mdev = container_of(clock, struct mlx5_core_dev, clock);
265 	err = mlx5_ptp_settime_real_time(mdev, ts);
266 	if (err)
267 		return err;
268 
269 	write_seqlock_irqsave(&clock->lock, flags);
270 	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
271 	mlx5_update_clock_info_page(mdev);
272 	write_sequnlock_irqrestore(&clock->lock, flags);
273 
274 	return 0;
275 }
276 
277 static
278 struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
279 					      struct ptp_system_timestamp *sts)
280 {
281 	struct timespec64 ts;
282 	u64 time;
283 
284 	time = mlx5_read_time(mdev, sts, true);
285 	ts = ns_to_timespec64(time);
286 	return ts;
287 }
288 
289 static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
290 			     struct ptp_system_timestamp *sts)
291 {
292 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
293 	struct mlx5_timer *timer = &clock->timer;
294 	struct mlx5_core_dev *mdev;
295 	unsigned long flags;
296 	u64 cycles, ns;
297 
298 	mdev = container_of(clock, struct mlx5_core_dev, clock);
299 	if (mlx5_real_time_mode(mdev)) {
300 		*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
301 		goto out;
302 	}
303 
304 	write_seqlock_irqsave(&clock->lock, flags);
305 	cycles = mlx5_read_time(mdev, sts, false);
306 	ns = timecounter_cyc2time(&timer->tc, cycles);
307 	write_sequnlock_irqrestore(&clock->lock, flags);
308 	*ts = ns_to_timespec64(ns);
309 out:
310 	return 0;
311 }
312 
313 static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
314 {
315 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
316 
317 	if (!mlx5_modify_mtutc_allowed(mdev))
318 		return 0;
319 
320 	/* HW time adjustment range is checked. If out of range, settime instead */
321 	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
322 		struct timespec64 ts;
323 		s64 ns;
324 
325 		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
326 		ns = timespec64_to_ns(&ts) + delta;
327 		ts = ns_to_timespec64(ns);
328 		return mlx5_ptp_settime_real_time(mdev, &ts);
329 	}
330 
331 	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
332 	MLX5_SET(mtutc_reg, in, time_adjustment, delta);
333 
334 	return mlx5_set_mtutc(mdev, in, sizeof(in));
335 }
336 
337 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
338 {
339 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
340 	struct mlx5_timer *timer = &clock->timer;
341 	struct mlx5_core_dev *mdev;
342 	unsigned long flags;
343 	int err;
344 
345 	mdev = container_of(clock, struct mlx5_core_dev, clock);
346 
347 	err = mlx5_ptp_adjtime_real_time(mdev, delta);
348 	if (err)
349 		return err;
350 	write_seqlock_irqsave(&clock->lock, flags);
351 	timecounter_adjtime(&timer->tc, delta);
352 	mlx5_update_clock_info_page(mdev);
353 	write_sequnlock_irqrestore(&clock->lock, flags);
354 
355 	return 0;
356 }
357 
358 static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
359 {
360 	return mlx5_ptp_adjtime(ptp, delta);
361 }
362 
363 static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
364 {
365 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
366 
367 	if (!mlx5_modify_mtutc_allowed(mdev))
368 		return 0;
369 
370 	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
371 
372 	if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units)) {
373 		MLX5_SET(mtutc_reg, in, freq_adj_units,
374 			 MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
375 		MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm);
376 	} else {
377 		MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
378 		MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
379 	}
380 
381 	return mlx5_set_mtutc(mdev, in, sizeof(in));
382 }
383 
384 static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
385 {
386 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
387 	struct mlx5_timer *timer = &clock->timer;
388 	struct mlx5_core_dev *mdev;
389 	unsigned long flags;
390 	u32 mult;
391 	int err;
392 
393 	mdev = container_of(clock, struct mlx5_core_dev, clock);
394 
395 	err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
396 	if (err)
397 		return err;
398 
399 	mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
400 
401 	write_seqlock_irqsave(&clock->lock, flags);
402 	timecounter_read(&timer->tc);
403 	timer->cycles.mult = mult;
404 	mlx5_update_clock_info_page(mdev);
405 	write_sequnlock_irqrestore(&clock->lock, flags);
406 
407 	return 0;
408 }
409 
/* PTP_CLK_REQ_EXTTS handler: (de)configure a pin for external timestamp
 * capture via the MTPPS register, then (dis)arm event generation for the
 * pin via MTPPSE.
 *
 * Returns -EOPNOTSUPP for missing PPS capability or unsupported flag
 * combinations, -EINVAL for a bad pin index, -EBUSY if the pin is not
 * assigned to EXTTS, or the register-access error.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern selects the captured edge: 0 rising, 1 falling */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* arm (or disable) repetitive event reporting for this pin */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
471 
/* Translate an absolute nanosecond target into a raw cycle-counter value
 * by measuring the ns distance from "now" and scaling it back to cycles
 * with the current mult/shift.
 *
 * NOTE(review): nsec_delta is u64, so a target_ns earlier than "now" would
 * wrap — callers appear to always pass a future target; confirm before
 * reusing elsewhere.
 */
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	/* invert the cyclecounter scaling: cycles = ns << shift / mult */
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}
492 
493 static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
494 {
495 	struct timespec64 ts = {};
496 	s64 target_ns;
497 
498 	ts.tv_sec = sec;
499 	target_ns = timespec64_to_ns(&ts);
500 
501 	return find_target_cycles(mdev, target_ns);
502 }
503 
504 static u64 perout_conf_real_time(s64 sec, u32 nsec)
505 {
506 	return (u64)nsec | (u64)sec << 32;
507 }
508 
/* Validate a basic 1PPS periodic-output request and compute the start
 * timestamp in the format the active clock mode expects.
 *
 * Only a 1 second period is supported. (The ">> 1" comparison also lets
 * exactly 1s + 1ns through, since both halve to 500000000.)
 */
static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
			    u64 *time_stamp, bool real_time)
{
	struct timespec64 ts;
	s64 ns;

	ts.tv_nsec = rq->perout.period.nsec;
	ts.tv_sec = rq->perout.period.sec;
	ns = timespec64_to_ns(&ts);

	if ((ns >> 1) != 500000000LL)
		return -EINVAL;

	/* start is sec-granular: real-time packs sec<<32, internal-timer
	 * mode converts the target second to raw cycles
	 */
	*time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
		      perout_conf_internal_timer(mdev, rq->perout.start.sec);

	return 0;
}
527 
/* Widest pulse the out_pulse_duration_ns register field can encode. */
#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)
/* Compute the NPPS output pulse width in ns: the requested on-time when
 * PTP_PEROUT_DUTY_CYCLE is set, otherwise half the period. Validates the
 * result against the device minimum and the register field width.
 */
static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
					       struct ptp_clock_request *rq,
					       u32 *out_pulse_duration_ns)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	u32 out_pulse_duration;
	struct timespec64 ts;

	if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
		ts.tv_sec = rq->perout.on.sec;
		ts.tv_nsec = rq->perout.on.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts);
	} else {
		/* out_pulse_duration_ns should be up to 50% of the
		 * pulse period as default
		 */
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
	}

	if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
	    out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
		mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
			      out_pulse_duration, pps_info->min_out_pulse_duration_ns,
			      MLX5_MAX_PULSE_DURATION);
		return -EINVAL;
	}
	*out_pulse_duration_ns = out_pulse_duration;

	return 0;
}
561 
/* Configure an N-pulses-per-second request (real-time mode only): validate
 * the period against the device minimum, compute the pulse duration, and
 * fill the HW-format period/start values. Extends *field_select with the
 * NPPS-specific register fields.
 */
static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
				      u32 *field_select, u32 *out_pulse_duration_ns,
				      u64 *period, u64 *time_stamp)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	struct ptp_clock_time *time = &rq->perout.start;
	struct timespec64 ts;

	ts.tv_sec = rq->perout.period.sec;
	ts.tv_nsec = rq->perout.period.nsec;
	if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
		mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
			      pps_info->min_npps_period);
		return -EINVAL;
	}
	*period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);

	if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
		return -EINVAL;

	*time_stamp = perout_conf_real_time(time->sec, time->nsec);
	*field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
			 MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;

	return 0;
}
588 
589 static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
590 {
591 	return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
592 		(mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
593 }
594 
595 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
596 				 struct ptp_clock_request *rq,
597 				 int on)
598 {
599 	struct mlx5_clock *clock =
600 			container_of(ptp, struct mlx5_clock, ptp_info);
601 	struct mlx5_core_dev *mdev =
602 			container_of(clock, struct mlx5_core_dev, clock);
603 	bool rt_mode = mlx5_real_time_mode(mdev);
604 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
605 	u32 out_pulse_duration_ns = 0;
606 	u32 field_select = 0;
607 	u64 npps_period = 0;
608 	u64 time_stamp = 0;
609 	u8 pin_mode = 0;
610 	u8 pattern = 0;
611 	int pin = -1;
612 	int err = 0;
613 
614 	if (!MLX5_PPS_CAP(mdev))
615 		return -EOPNOTSUPP;
616 
617 	/* Reject requests with unsupported flags */
618 	if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
619 		return -EOPNOTSUPP;
620 
621 	if (rq->perout.index >= clock->ptp_info.n_pins)
622 		return -EINVAL;
623 
624 	field_select = MLX5_MTPPS_FS_ENABLE;
625 	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
626 	if (pin < 0)
627 		return -EBUSY;
628 
629 	if (on) {
630 		bool rt_mode = mlx5_real_time_mode(mdev);
631 
632 		pin_mode = MLX5_PIN_MODE_OUT;
633 		pattern = MLX5_OUT_PATTERN_PERIODIC;
634 
635 		if (rt_mode &&  rq->perout.start.sec > U32_MAX)
636 			return -EINVAL;
637 
638 		field_select |= MLX5_MTPPS_FS_PIN_MODE |
639 				MLX5_MTPPS_FS_PATTERN |
640 				MLX5_MTPPS_FS_TIME_STAMP;
641 
642 		if (mlx5_npps_real_time_supported(mdev))
643 			err = perout_conf_npps_real_time(mdev, rq, &field_select,
644 							 &out_pulse_duration_ns, &npps_period,
645 							 &time_stamp);
646 		else
647 			err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
648 		if (err)
649 			return err;
650 	}
651 
652 	MLX5_SET(mtpps_reg, in, pin, pin);
653 	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
654 	MLX5_SET(mtpps_reg, in, pattern, pattern);
655 	MLX5_SET(mtpps_reg, in, enable, on);
656 	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
657 	MLX5_SET(mtpps_reg, in, field_select, field_select);
658 	MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
659 	MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
660 	err = mlx5_set_mtpps(mdev, in, sizeof(in));
661 	if (err)
662 		return err;
663 
664 	if (rt_mode)
665 		return 0;
666 
667 	return mlx5_set_mtppse(mdev, pin, 0,
668 			       MLX5_EVENT_MODE_REPETETIVE & on);
669 }
670 
671 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
672 			      struct ptp_clock_request *rq,
673 			      int on)
674 {
675 	struct mlx5_clock *clock =
676 			container_of(ptp, struct mlx5_clock, ptp_info);
677 
678 	clock->pps_info.enabled = !!on;
679 	return 0;
680 }
681 
682 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
683 			   struct ptp_clock_request *rq,
684 			   int on)
685 {
686 	switch (rq->type) {
687 	case PTP_CLK_REQ_EXTTS:
688 		return mlx5_extts_configure(ptp, rq, on);
689 	case PTP_CLK_REQ_PEROUT:
690 		return mlx5_perout_configure(ptp, rq, on);
691 	case PTP_CLK_REQ_PPS:
692 		return mlx5_pps_configure(ptp, rq, on);
693 	default:
694 		return -EOPNOTSUPP;
695 	}
696 	return 0;
697 }
698 
/* Per-pin capability bits reported in the MTPPS cap_pin_N_mode fields. */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
703 
704 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
705 			   enum ptp_pin_function func, unsigned int chan)
706 {
707 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
708 						ptp_info);
709 
710 	switch (func) {
711 	case PTP_PF_NONE:
712 		return 0;
713 	case PTP_PF_EXTTS:
714 		return !(clock->pps_info.pin_caps[pin] &
715 			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
716 	case PTP_PF_PEROUT:
717 		return !(clock->pps_info.pin_caps[pin] &
718 			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
719 	default:
720 		return -EOPNOTSUPP;
721 	}
722 }
723 
/* Template ptp_clock_info, copied into each device's clock at init.
 * n_pins/n_ext_ts/n_per_out/pps and the enable/verify callbacks stay zero
 * here and are filled in by mlx5_get_pps_caps()/mlx5_init_pin_config()
 * when the device has PPS capability.
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_ptp",
	.max_adj	= 50000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfine	= mlx5_ptp_adjfine,
	.adjphase	= mlx5_ptp_adjphase,
	.getmaxphase    = mlx5_ptp_getmaxphase,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};
742 
/* Query the MTPPS register contents for a single pin into @mtpps. */
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	MLX5_SET(mtpps_reg, in, pin, pin);

	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
753 
754 static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
755 {
756 	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
757 
758 	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
759 	u8 mode;
760 	int err;
761 
762 	err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
763 	if (err || !MLX5_GET(mtpps_reg, out, enable))
764 		return PTP_PF_NONE;
765 
766 	mode = MLX5_GET(mtpps_reg, out, pin_mode);
767 
768 	if (mode == MLX5_PIN_MODE_IN)
769 		return PTP_PF_EXTTS;
770 	else if (mode == MLX5_PIN_MODE_OUT)
771 		return PTP_PF_PEROUT;
772 
773 	return PTP_PF_NONE;
774 }
775 
776 static void mlx5_init_pin_config(struct mlx5_clock *clock)
777 {
778 	int i;
779 
780 	if (!clock->ptp_info.n_pins)
781 		return;
782 
783 	clock->ptp_info.pin_config =
784 			kcalloc(clock->ptp_info.n_pins,
785 				sizeof(*clock->ptp_info.pin_config),
786 				GFP_KERNEL);
787 	if (!clock->ptp_info.pin_config)
788 		return;
789 	clock->ptp_info.enable = mlx5_ptp_enable;
790 	clock->ptp_info.verify = mlx5_ptp_verify;
791 	clock->ptp_info.pps = 1;
792 
793 	for (i = 0; i < clock->ptp_info.n_pins; i++) {
794 		snprintf(clock->ptp_info.pin_config[i].name,
795 			 sizeof(clock->ptp_info.pin_config[i].name),
796 			 "mlx5_pps%d", i);
797 		clock->ptp_info.pin_config[i].index = i;
798 		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
799 		clock->ptp_info.pin_config[i].chan = 0;
800 	}
801 }
802 
/* Cache the device's PPS capabilities from MTPPS: pin counts, minimum
 * NPPS period / pulse duration (when the features exist), and each pin's
 * mode-support bits.
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	/* limits are reported as log2; expand to nanoseconds */
	if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
		clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_npps_period);
	if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
		clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_out_pulse_duration_ns);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
833 
834 static void ts_next_sec(struct timespec64 *ts)
835 {
836 	ts->tv_sec += 1;
837 	ts->tv_nsec = 0;
838 }
839 
840 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
841 					struct mlx5_clock *clock)
842 {
843 	struct timespec64 ts;
844 	s64 target_ns;
845 
846 	mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
847 	ts_next_sec(&ts);
848 	target_ns = timespec64_to_ns(&ts);
849 
850 	return find_target_cycles(mdev, target_ns);
851 }
852 
/* EQ notifier for PPS events. For an EXTTS pin, convert the HW timestamp
 * to nanoseconds (real-time or timecounter path) and deliver it either as
 * a PPSUSR or EXTTS event depending on whether PPS is enabled. For a
 * PEROUT pin, queue the next per-second event time for mlx5_pps_out to
 * program into HW.
 */
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
						be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
						  be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		/* re-arm: stash the next-second target and defer the
		 * (sleeping) register write to the workqueue
		 */
		ns = perout_conf_next_event_timer(mdev, clock);
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		schedule_work(&clock->pps_info.out_work);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}
898 
/* Set up the cyclecounter (mult/shift from the device frequency, 41-bit
 * counter mask) and start the timecounter at the current wall-clock time.
 */
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = MLX5_CYCLES_SHIFT;
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	/* keep the unadjusted mult so adjfine can rescale from nominal */
	timer->nominal_c_mult = timer->cycles.mult;
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}
916 
/* Compute (in jiffies) how often the overflow worker must run so the
 * timecounter is folded before either the HW counter or the 64-bit ns
 * product can wrap, then kick off the periodic work.
 */
static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	/* convert ns to jiffies */
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
	if (timer->overflow_period)
		schedule_delayed_work(&timer->overflow_work, 0);
	else
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is not scheduled\n");

	/* expose the period to userspace via the shared clock-info page */
	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}
951 
952 static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
953 {
954 	struct mlx5_clock *clock = &mdev->clock;
955 	struct mlx5_ib_clock_info *info;
956 	struct mlx5_timer *timer;
957 
958 	mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
959 	if (!mdev->clock_info) {
960 		mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
961 		return;
962 	}
963 
964 	info = mdev->clock_info;
965 	timer = &clock->timer;
966 
967 	info->nsec = timer->tc.nsec;
968 	info->cycles = timer->tc.cycle_last;
969 	info->mask = timer->cycles.mask;
970 	info->mult = timer->nominal_c_mult;
971 	info->shift = timer->cycles.shift;
972 	info->frac = timer->tc.frac;
973 }
974 
/* Initialize the SW timecounter path: cyclecounter, userspace clock-info
 * page, overflow worker and the ptp_clock_info template. In real-time
 * mode also seed the HW real-time clock from the system clock.
 */
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(clock);
	clock->ptp_info = mlx5_ptp_clock_info;

	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_ptp_settime(&clock->ptp_info, &ts);
	}
}
991 
992 static void mlx5_init_pps(struct mlx5_core_dev *mdev)
993 {
994 	struct mlx5_clock *clock = &mdev->clock;
995 
996 	if (!MLX5_PPS_CAP(mdev))
997 		return;
998 
999 	mlx5_get_pps_caps(mdev);
1000 	mlx5_init_pin_config(clock);
1001 }
1002 
/* Top-level HW clock init: set up the timecounter, PPS pins, register the
 * PTP clock with the kernel and subscribe to PPS EQ events. A failed
 * ptp_clock_register is tolerated (clock->ptp stays NULL); a missing
 * device frequency aborts the whole init.
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	mlx5_init_timer_clock(mdev);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	mlx5_init_pps(mdev);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}
1033 
/* Tear down everything mlx5_init_clock set up, in reverse order: notifier,
 * PTP clock, pending work items, the shared clock-info page and the pin
 * table. No-op if init bailed out early on a missing device frequency.
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	/* flush workers that may still reference the clock state */
	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->timer.overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}
1057