/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <linux/ptp_clock_kernel.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
#include "en.h"
#include "clock.h"

enum {
	MLX5_CYCLES_SHIFT	= 23
};

enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};

enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};

enum {
	MLX5_EVENT_MODE_DISABLE		= 0x0,
	MLX5_EVENT_MODE_REPETITIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};

enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
	MLX5_MTPPS_FS_NPPS_PERIOD		= BIT(0x9),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS	= BIT(0xa),
};

static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
}

static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
{
	return (mlx5_real_time_mode(mdev) &&
		MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
		MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
}

static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}

static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
		return -EOPNOTSUPP;

	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
				    MLX5_REG_MTUTC, 0, 1);
}

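/*
 * Read the 64-bit device timer (or the real-time clock) from the init
 * segment as two 32-bit halves. The high half is sampled before and after
 * the low half; if it changed, the low half wrapped in between and is
 * re-read so the combined value is consistent.
 */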
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}

static u64 read_internal_timer(const struct cyclecounter *cc)
{
	struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
	struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);

	return mlx5_read_time(mdev, NULL, false) & cc->mask;
}

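/*
 * Mirror the current timecounter state into the clock-info page that
 * mlx5_ib exposes read-only to user space. The sign field acts as a
 * sequence count: the KERNEL_UPDATING flag is set before the fields are
 * written and the count is advanced past it afterwards, so a reader that
 * observes the flag, or a change in the count across its reads, retries.
 */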
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult   = timer->cycles.mult;
	clock_info->nsec   = timer->tc.nsec;
	clock_info->frac   = timer->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}

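/*
 * Work handler that re-arms one-shot periodic outputs: for every pin with
 * a pending start timestamp (set from the PPS event handler), program that
 * timestamp into the MTPPS register and clear the pending value.
 */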
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}

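/*
 * Periodic work that reads the timecounter often enough that the cycle
 * counter never wraps more than once between two reads, keeping
 * cycle-to-nanosecond conversions unambiguous.
 */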
static void mlx5_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5_core_dev *mdev;
	struct mlx5_timer *timer;
	struct mlx5_clock *clock;
	unsigned long flags;

	timer = container_of(dwork, struct mlx5_timer, overflow_work);
	clock = container_of(timer, struct mlx5_clock, timer);
	mdev = container_of(clock, struct mlx5_core_dev, clock);

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);
	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
}

static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
				      const struct timespec64 *ts)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
	    ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
		return -EINVAL;

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
	MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
	MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	err = mlx5_ptp_settime_real_time(mdev, ts);
	if (err)
		return err;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

static
struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
					      struct ptp_system_timestamp *sts)
{
	struct timespec64 ts;
	u64 time;

	time = mlx5_read_time(mdev, sts, true);
	ts = ns_to_timespec64(time);
	return ts;
}

static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			     struct ptp_system_timestamp *sts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 cycles, ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	if (mlx5_real_time_mode(mdev)) {
		*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
		goto out;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	cycles = mlx5_read_time(mdev, sts, false);
	ns = timecounter_cyc2time(&timer->tc, cycles);
	write_sequnlock_irqrestore(&clock->lock, flags);
	*ts = ns_to_timespec64(ns);
out:
	return 0;
}

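/*
 * Apply a phase offset to the device real-time clock. The MTUTC
 * time_adjustment field is a signed 16-bit value, so deltas outside that
 * range fall back to reading the clock and doing an absolute settime of
 * the current time plus the delta.
 */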
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	/* HW time adjustment range is s16. If out of range, use settime instead */
	if (delta < S16_MIN || delta > S16_MAX) {
		struct timespec64 ts;
		s64 ns;

		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
		ns = timespec64_to_ns(&ts) + delta;
		ts = ns_to_timespec64(ns);
		return mlx5_ptp_settime_real_time(mdev, &ts);
	}

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
	MLX5_SET(mtutc_reg, in, time_adjustment, delta);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	err = mlx5_ptp_adjtime_real_time(mdev, delta);
	if (err)
		return err;
	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&timer->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
	MLX5_SET(mtutc_reg, in, freq_adjustment, freq);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u32 mult;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	err = mlx5_ptp_adjfreq_real_time(mdev, scaled_ppm_to_ppb(scaled_ppm));
	if (err)
		return err;

	mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	timer->cycles.mult = mult;
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETITIVE & on);
}

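/*
 * Translate a target time in nanoseconds into a value of the free-running
 * cycle counter: read the counter now, convert to nanoseconds via the
 * timecounter, and add the remaining delta converted back to cycles with
 * the current mult/shift scaling.
 */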
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}

static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
{
	struct timespec64 ts = {};
	s64 target_ns;

	ts.tv_sec = sec;
	target_ns = timespec64_to_ns(&ts);

	return find_target_cycles(mdev, target_ns);
}

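/*
 * Pack a real-time timestamp as seconds in the upper 32 bits and
 * nanoseconds in the lower 32 bits; this is the format written to the
 * MTPPS time_stamp and npps_period fields when real-time mode is used.
 */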
static u64 perout_conf_real_time(s64 sec, u32 nsec)
{
	return (u64)nsec | (u64)sec << 32;
}

static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
			    u64 *time_stamp, bool real_time)
{
	struct timespec64 ts;
	s64 ns;

	ts.tv_nsec = rq->perout.period.nsec;
	ts.tv_sec = rq->perout.period.sec;
	ns = timespec64_to_ns(&ts);

	if ((ns >> 1) != 500000000LL)
		return -EINVAL;

	*time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
		      perout_conf_internal_timer(mdev, rq->perout.start.sec);

	return 0;
}

#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)
static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
					       struct ptp_clock_request *rq,
					       u32 *out_pulse_duration_ns)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	u32 out_pulse_duration;
	struct timespec64 ts;

	if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
		ts.tv_sec = rq->perout.on.sec;
		ts.tv_nsec = rq->perout.on.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts);
	} else {
		/* By default, use an out_pulse_duration_ns of 50% of the
		 * pulse period.
		 */
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
	}

	if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
	    out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
		mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
			      out_pulse_duration, pps_info->min_out_pulse_duration_ns,
			      MLX5_MAX_PULSE_DURATION);
		return -EINVAL;
	}
	*out_pulse_duration_ns = out_pulse_duration;

	return 0;
}

static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
				      u32 *field_select, u32 *out_pulse_duration_ns,
				      u64 *period, u64 *time_stamp)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	struct ptp_clock_time *time = &rq->perout.start;
	struct timespec64 ts;

	ts.tv_sec = rq->perout.period.sec;
	ts.tv_nsec = rq->perout.period.nsec;
	if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
		mlx5_core_err(mdev, "NPPS period is lower than the minimal npps period %llu\n",
			      pps_info->min_npps_period);
		return -EINVAL;
	}
	*period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);

	if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
		return -EINVAL;

	*time_stamp = perout_conf_real_time(time->sec, time->nsec);
	*field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
			 MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;

	return 0;
}

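/*
 * Returns true when the requested periodic-output flags are NOT supported:
 * without nPPS real-time support no flags are accepted, and with it only
 * PTP_PEROUT_DUTY_CYCLE is.
 */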
static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
{
	return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
		(mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
}

static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	bool rt_mode = mlx5_real_time_mode(mdev);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 out_pulse_duration_ns = 0;
	u32 field_select = 0;
	u64 npps_period = 0;
	u64 time_stamp = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
		return -EOPNOTSUPP;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	field_select = MLX5_MTPPS_FS_ENABLE;
	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;

		if (rt_mode && rq->perout.start.sec > U32_MAX)
			return -EINVAL;

		field_select |= MLX5_MTPPS_FS_PIN_MODE |
				MLX5_MTPPS_FS_PATTERN |
				MLX5_MTPPS_FS_TIME_STAMP;

		if (mlx5_npps_real_time_supported(mdev))
			err = perout_conf_npps_real_time(mdev, rq, &field_select,
							 &out_pulse_duration_ns, &npps_period,
							 &time_stamp);
		else
			err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
		if (err)
			return err;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);
	MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
	MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	if (rt_mode)
		return 0;

	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETITIVE & on);
}

static int mlx5_pps_configure(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq,
			      int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);

	clock->pps_info.enabled = !!on;
	return 0;
}

static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq,
			   int on)
{
	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return mlx5_extts_configure(ptp, rq, on);
	case PTP_CLK_REQ_PEROUT:
		return mlx5_perout_configure(ptp, rq, on);
	case PTP_CLK_REQ_PPS:
		return mlx5_pps_configure(ptp, rq, on);
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};

static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			   enum ptp_pin_function func, unsigned int chan)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);

	switch (func) {
	case PTP_PF_NONE:
		return 0;
	case PTP_PF_EXTTS:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
	case PTP_PF_PEROUT:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_ptp",
	.max_adj	= 50000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfine	= mlx5_ptp_adjfine,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};

static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	MLX5_SET(mtpps_reg, in, pin, pin);

	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
}

static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
	u8 mode;
	int err;

	err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
	if (err || !MLX5_GET(mtpps_reg, out, enable))
		return PTP_PF_NONE;

	mode = MLX5_GET(mtpps_reg, out, pin_mode);

	if (mode == MLX5_PIN_MODE_IN)
		return PTP_PF_EXTTS;
	else if (mode == MLX5_PIN_MODE_OUT)
		return PTP_PF_PEROUT;

	return PTP_PF_NONE;
}

static void mlx5_init_pin_config(struct mlx5_clock *clock)
{
	int i;

	if (!clock->ptp_info.n_pins)
		return;

	clock->ptp_info.pin_config =
			kcalloc(clock->ptp_info.n_pins,
				sizeof(*clock->ptp_info.pin_config),
				GFP_KERNEL);
	if (!clock->ptp_info.pin_config)
		return;
	clock->ptp_info.enable = mlx5_ptp_enable;
	clock->ptp_info.verify = mlx5_ptp_verify;
	clock->ptp_info.pps = 1;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		snprintf(clock->ptp_info.pin_config[i].name,
			 sizeof(clock->ptp_info.pin_config[i].name),
			 "mlx5_pps%d", i);
		clock->ptp_info.pin_config[i].index = i;
		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
		clock->ptp_info.pin_config[i].chan = 0;
	}
}

static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
		clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_npps_period);
	if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
		clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_out_pulse_duration_ns);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}

static void ts_next_sec(struct timespec64 *ts)
{
	ts->tv_sec += 1;
	ts->tv_nsec = 0;
}

static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
					struct mlx5_clock *clock)
{
	struct timespec64 ts;
	s64 target_ns;

	mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
	ts_next_sec(&ts);
	target_ns = timespec64_to_ns(&ts);

	return find_target_cycles(mdev, target_ns);
}

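/*
 * Notifier invoked from the EQ layer on a PPS event. For an input (EXTTS)
 * pin, convert the hardware timestamp and deliver a PTP event (or a PPSUSR
 * event when PPS is enabled). For an output pin, compute the cycle count of
 * the next second boundary and defer re-arming to the out_work handler.
 */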
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
						be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
						  be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		ns = perout_conf_next_event_timer(mdev, clock);
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		schedule_work(&clock->pps_info.out_work);
		break;
	default:
		mlx5_core_err(mdev, "Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}

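/*
 * Set up the cyclecounter/timecounter pair over the free-running device
 * timer: the mult/shift pair is derived from device_frequency_khz with a
 * fixed shift of MLX5_CYCLES_SHIFT, and the counter is treated as 41 bits
 * wide. The timecounter starts from the current wall-clock time.
 */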
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = MLX5_CYCLES_SHIFT;
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	timer->nominal_c_mult = timer->cycles.mult;
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}

static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate the period at which to run the overflow watchdog, so
	 * that the counter is read at least twice per wrap-around.
	 * The period is derived from the minimum of the maximum HW cycle
	 * count (the clock source mask) and the maximum number of cycles
	 * that can be multiplied by the clock multiplier without the
	 * result exceeding 64 bits.
	 */
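	/* Rough, hypothetical example (the real numbers depend on
	 * device_frequency_khz): with a 156.25 MHz counter, shift = 23 and a
	 * mult of roughly 6.4 * 2^23, 2^63 / mult is about 1.7e11 cycles,
	 * which is below mask / 3 (about 7.3e11 cycles), so the watchdog
	 * runs roughly every 1.1e12 ns, i.e. about every 18 minutes.
	 */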
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
	if (timer->overflow_period)
		schedule_delayed_work(&timer->overflow_work, 0);
	else
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is not scheduled\n");

	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}

static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_ib_clock_info *info;
	struct mlx5_timer *timer;

	mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
	if (!mdev->clock_info) {
		mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
		return;
	}

	info = mdev->clock_info;
	timer = &clock->timer;

	info->nsec = timer->tc.nsec;
	info->cycles = timer->tc.cycle_last;
	info->mask = timer->cycles.mask;
	info->mult = timer->nominal_c_mult;
	info->shift = timer->cycles.shift;
	info->frac = timer->tc.frac;
}

static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(clock);
	clock->ptp_info = mlx5_ptp_clock_info;

	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_ptp_settime(&clock->ptp_info, &ts);
	}
}

static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_PPS_CAP(mdev))
		return;

	mlx5_get_pps_caps(mdev);
	mlx5_init_pin_config(clock);
}

void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	mlx5_init_timer_clock(mdev);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	mlx5_init_pps(mdev);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}

void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->timer.overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}