/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/ptp_clock_kernel.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
#include "en.h"
#include "clock.h"

enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};

enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};

enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};

enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
	MLX5_MTPPS_FS_NPPS_PERIOD		= BIT(0x9),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS	= BIT(0xa),
};

enum {
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN          = S16_MIN,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX          = S16_MAX,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
};

static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
}

static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
{
	return (mlx5_real_time_mode(mdev) &&
		MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
		MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
}

static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}

static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
{
	/* Optimal shift constant leads to corrections above just 1 scaled ppm.
	 *
	 * Two sets of equations are needed to derive the optimal shift
	 * constant for the cyclecounter.
	 *
	 *    dev_freq_khz * 1000 / 2^shift_constant = 1 scaled_ppm
	 *    ppb = scaled_ppm * 1000 / 2^16
	 *
	 * Using the two equations together
	 *
	 *    dev_freq_khz * 1000 / 1 scaled_ppm = 2^shift_constant
	 *    dev_freq_khz * 2^16 / 1 ppb = 2^shift_constant
	 *    dev_freq_khz = 2^(shift_constant - 16)
	 *
	 * then yields
	 *
	 *    shift_constant = ilog2(dev_freq_khz) + 16
	 */
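	/* Worked example (hypothetical value): for dev_freq_khz = 156250,
	 * ilog2(156250) + 16 = 17 + 16 = 33.  The min() below additionally
	 * caps the shift so that the mult computed by clocksource_khz2mult()
	 * still fits in a u32; for that same frequency the cap is
	 * ilog2((U32_MAX / NSEC_PER_MSEC) * 156250) = 29, which is the value
	 * actually used.
	 */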

	return min(ilog2(dev_freq_khz) + 16,
		   ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
}

static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
		       MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
		       MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
}

static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
{
	s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);

	if (delta < -max || delta > max)
		return false;

	return true;
}

static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
		return -EOPNOTSUPP;

	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
				    MLX5_REG_MTUTC, 0, 1);
}

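/* Read the 64-bit device time from a pair of 32-bit BAR registers.  The high
 * word is sampled before and after the low word; if it changed in between,
 * the low word wrapped during the read and is sampled again so that it is
 * paired with the newer high word.
 */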
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}

static u64 read_internal_timer(const struct cyclecounter *cc)
{
	struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
	struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);

	return mlx5_read_time(mdev, NULL, false) & cc->mask;
}

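/* Publish the current timecounter state to the clock info page shared with
 * user space (mlx5_ib).  The sign field acts as a sequence counter: it is
 * flagged with MLX5_IB_CLOCK_INFO_KERNEL_UPDATING while the fields are being
 * rewritten and advanced past it afterwards, so readers can detect a
 * concurrent update and retry.
 */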
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult   = timer->cycles.mult;
	clock_info->nsec   = timer->tc.nsec;
	clock_info->frac   = timer->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}

static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}

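/* Periodic work that reads the timecounter often enough that the 41-bit
 * hardware cycle counter cannot wrap unnoticed between two reads, and keeps
 * the shared clock info page in sync.  If the device is in an internal error
 * state the read is skipped, but the work is still re-armed.
 */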
static void mlx5_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5_core_dev *mdev;
	struct mlx5_timer *timer;
	struct mlx5_clock *clock;
	unsigned long flags;

	timer = container_of(dwork, struct mlx5_timer, overflow_work);
	clock = container_of(timer, struct mlx5_clock, timer);
	mdev = container_of(clock, struct mlx5_core_dev, clock);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

out:
	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
}

static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
				      const struct timespec64 *ts)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
	    ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
		return -EINVAL;

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
	MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
	MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	err = mlx5_ptp_settime_real_time(mdev, ts);
	if (err)
		return err;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

static
struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
					      struct ptp_system_timestamp *sts)
{
	struct timespec64 ts;
	u64 time;

	time = mlx5_read_time(mdev, sts, true);
	ts = ns_to_timespec64(time);
	return ts;
}

static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			     struct ptp_system_timestamp *sts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 cycles, ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	if (mlx5_real_time_mode(mdev)) {
		*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
		goto out;
	}

	write_seqlock_irqsave(&clock->lock, flags);
	cycles = mlx5_read_time(mdev, sts, false);
	ns = timecounter_cyc2time(&timer->tc, cycles);
	write_sequnlock_irqrestore(&clock->lock, flags);
	*ts = ns_to_timespec64(ns);
out:
	return 0;
}

static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	/* HW time adjustment range is checked. If out of range, settime instead */
	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
		struct timespec64 ts;
		s64 ns;

		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
		ns = timespec64_to_ns(&ts) + delta;
		ts = ns_to_timespec64(ns);
		return mlx5_ptp_settime_real_time(mdev, &ts);
	}

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
	MLX5_SET(mtutc_reg, in, time_adjustment, delta);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	err = mlx5_ptp_adjtime_real_time(mdev, delta);
	if (err)
		return err;
	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&timer->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	return mlx5_ptp_adjtime_real_time(mdev, delta);
}

static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);

	if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units)) {
		MLX5_SET(mtutc_reg, in, freq_adj_units,
			 MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
		MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm);
	} else {
		MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
		MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
	}

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}

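/* Frequency adjustment: mirror the request to the hardware real-time clock
 * (a no-op when MTUTC modification is not supported) and then rescale the
 * free-running cyclecounter multiplier from its nominal value by the
 * requested scaled-ppm amount.
 */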
static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u32 mult;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
	if (err)
		return err;

	mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	timer->cycles.mult = mult;
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}

static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}

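/* Translate a target time on the free-running timecounter (in ns) into an
 * absolute hardware cycle value, by extrapolating from the current cycle
 * count with the current mult/shift factors.
 */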
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}

static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
{
	struct timespec64 ts = {};
	s64 target_ns;

	ts.tv_sec = sec;
	target_ns = timespec64_to_ns(&ts);

	return find_target_cycles(mdev, target_ns);
}

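/* In real-time mode the MTPPS time_stamp and npps_period values are built as
 * seconds in the upper 32 bits and nanoseconds in the lower 32 bits.
 */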
static u64 perout_conf_real_time(s64 sec, u32 nsec)
{
	return (u64)nsec | (u64)sec << 32;
}

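/* Without the NPPS capability only a 1 Hz periodic output can be generated,
 * so the requested period must be one second; the start time is then encoded
 * either as a real-time value or as an internal-timer cycle count.
 */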
static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
			    u64 *time_stamp, bool real_time)
{
	struct timespec64 ts;
	s64 ns;

	ts.tv_nsec = rq->perout.period.nsec;
	ts.tv_sec = rq->perout.period.sec;
	ns = timespec64_to_ns(&ts);

	if ((ns >> 1) != 500000000LL)
		return -EINVAL;

	*time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
		      perout_conf_internal_timer(mdev, rq->perout.start.sec);

	return 0;
}

#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)
static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
					       struct ptp_clock_request *rq,
					       u32 *out_pulse_duration_ns)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	u32 out_pulse_duration;
	struct timespec64 ts;

	if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
		ts.tv_sec = rq->perout.on.sec;
		ts.tv_nsec = rq->perout.on.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts);
	} else {
		/* out_pulse_duration_ns should be up to 50% of the
		 * pulse period as default
		 */
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
	}

	if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
	    out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
		mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
			      out_pulse_duration, pps_info->min_out_pulse_duration_ns,
			      MLX5_MAX_PULSE_DURATION);
		return -EINVAL;
	}
	*out_pulse_duration_ns = out_pulse_duration;

	return 0;
}

static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
				      u32 *field_select, u32 *out_pulse_duration_ns,
				      u64 *period, u64 *time_stamp)
{
	struct mlx5_pps *pps_info = &mdev->clock.pps_info;
	struct ptp_clock_time *time = &rq->perout.start;
	struct timespec64 ts;

	ts.tv_sec = rq->perout.period.sec;
	ts.tv_nsec = rq->perout.period.nsec;
	if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
		mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
			      pps_info->min_npps_period);
		return -EINVAL;
	}
	*period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);

	if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
		return -EINVAL;

	*time_stamp = perout_conf_real_time(time->sec, time->nsec);
	*field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
			 MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;

	return 0;
}

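/* Returns true when the request carries flags the device cannot honour: any
 * flag at all without NPPS real-time support, or anything beyond
 * PTP_PEROUT_DUTY_CYCLE with it.
 */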
static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
{
	return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
		(mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
}

static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	bool rt_mode = mlx5_real_time_mode(mdev);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 out_pulse_duration_ns = 0;
	u32 field_select = 0;
	u64 npps_period = 0;
	u64 time_stamp = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
		return -EOPNOTSUPP;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	field_select = MLX5_MTPPS_FS_ENABLE;
	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		bool rt_mode = mlx5_real_time_mode(mdev);

		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;

		if (rt_mode && rq->perout.start.sec > U32_MAX)
			return -EINVAL;

		field_select |= MLX5_MTPPS_FS_PIN_MODE |
				MLX5_MTPPS_FS_PATTERN |
				MLX5_MTPPS_FS_TIME_STAMP;

		if (mlx5_npps_real_time_supported(mdev))
			err = perout_conf_npps_real_time(mdev, rq, &field_select,
							 &out_pulse_duration_ns, &npps_period,
							 &time_stamp);
		else
			err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
		if (err)
			return err;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);
	MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
	MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	if (rt_mode)
		return 0;

	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}

static int mlx5_pps_configure(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq,
			      int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);

	clock->pps_info.enabled = !!on;
	return 0;
}

static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq,
			   int on)
{
	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return mlx5_extts_configure(ptp, rq, on);
	case PTP_CLK_REQ_PEROUT:
		return mlx5_perout_configure(ptp, rq, on);
	case PTP_CLK_REQ_PPS:
		return mlx5_pps_configure(ptp, rq, on);
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};

static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			   enum ptp_pin_function func, unsigned int chan)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);

	switch (func) {
	case PTP_PF_NONE:
		return 0;
	case PTP_PF_EXTTS:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
	case PTP_PF_PEROUT:
		return !(clock->pps_info.pin_caps[pin] &
			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_ptp",
	.max_adj	= 50000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfine	= mlx5_ptp_adjfine,
	.adjphase	= mlx5_ptp_adjphase,
	.getmaxphase    = mlx5_ptp_getmaxphase,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};

static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	MLX5_SET(mtpps_reg, in, pin, pin);

	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
}

static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);

	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
	u8 mode;
	int err;

	err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
	if (err || !MLX5_GET(mtpps_reg, out, enable))
		return PTP_PF_NONE;

	mode = MLX5_GET(mtpps_reg, out, pin_mode);

	if (mode == MLX5_PIN_MODE_IN)
		return PTP_PF_EXTTS;
	else if (mode == MLX5_PIN_MODE_OUT)
		return PTP_PF_PEROUT;

	return PTP_PF_NONE;
}

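/* Allocate one pin_config entry per pin reported by the device, name the pins
 * mlx5_pps0..N, and seed each pin's function from the mode currently
 * programmed in the MTPPS register.  This also wires up the enable/verify
 * callbacks and advertises PPS support.
 */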
static void mlx5_init_pin_config(struct mlx5_clock *clock)
{
	int i;

	if (!clock->ptp_info.n_pins)
		return;

	clock->ptp_info.pin_config =
			kcalloc(clock->ptp_info.n_pins,
				sizeof(*clock->ptp_info.pin_config),
				GFP_KERNEL);
	if (!clock->ptp_info.pin_config)
		return;
	clock->ptp_info.enable = mlx5_ptp_enable;
	clock->ptp_info.verify = mlx5_ptp_verify;
	clock->ptp_info.pps = 1;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		snprintf(clock->ptp_info.pin_config[i].name,
			 sizeof(clock->ptp_info.pin_config[i].name),
			 "mlx5_pps%d", i);
		clock->ptp_info.pin_config[i].index = i;
		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
		clock->ptp_info.pin_config[i].chan = 0;
	}
}

static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
		clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_npps_period);
	if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
		clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
								cap_log_min_out_pulse_duration_ns);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}

static void ts_next_sec(struct timespec64 *ts)
{
	ts->tv_sec += 1;
	ts->tv_nsec = 0;
}

static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
					struct mlx5_clock *clock)
{
	struct timespec64 ts;
	s64 target_ns;

	mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
	ts_next_sec(&ts);
	target_ns = timespec64_to_ns(&ts);

	return find_target_cycles(mdev, target_ns);
}

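/* PPS event notifier.  For an EXTTS pin the hardware timestamp is converted
 * to nanoseconds and forwarded to the PTP core, either as a PPS event (when
 * PPS is enabled) or as an external timestamp event.  For a PEROUT pin the
 * next one-second boundary is computed and handed to out_work, which
 * reprograms the pulse via MTPPS.
 */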
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
						be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
						  be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		ns = perout_conf_next_event_timer(mdev, clock);
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		schedule_work(&clock->pps_info.out_work);
		break;
	default:
		mlx5_core_err(mdev, "Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}

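/* Set up the software cyclecounter/timecounter over the 41-bit free-running
 * device counter, using a shift/mult pair derived from device_frequency_khz,
 * and start it at the current wall-clock time.
 */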
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = mlx5_ptp_shift_constant(dev_freq);
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	timer->nominal_c_mult = timer->cycles.mult;
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}

static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate the period (in jiffies) for the overflow watchdog - to
	 * make sure the counter is read at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
	if (timer->overflow_period)
		schedule_delayed_work(&timer->overflow_work, 0);
	else
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is not scheduled\n");

	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}

static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_ib_clock_info *info;
	struct mlx5_timer *timer;

	mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
	if (!mdev->clock_info) {
		mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
		return;
	}

	info = mdev->clock_info;
	timer = &clock->timer;

	info->nsec = timer->tc.nsec;
	info->cycles = timer->tc.cycle_last;
	info->mask = timer->cycles.mask;
	info->mult = timer->nominal_c_mult;
	info->shift = timer->cycles.shift;
	info->frac = timer->tc.frac;
}

static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(clock);
	clock->ptp_info = mlx5_ptp_clock_info;

	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_ptp_settime(&clock->ptp_info, &ts);
	}
}

static void mlx5_init_pps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_PPS_CAP(mdev))
		return;

	mlx5_get_pps_caps(mdev);
	mlx5_init_pin_config(clock);
}

void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	mlx5_init_timer_clock(mdev);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	mlx5_init_pps(mdev);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}

void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->timer.overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	kfree(clock->ptp_info.pin_config);
}