1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <linux/ptp_clock_kernel.h>
36 #include <rdma/mlx5-abi.h>
37 #include "lib/eq.h"
38 #include "en.h"
39 #include "clock.h"
40 
/* Fixed-point shift used by the cycle counter; trades counter range for
 * precision in the cycles->ns multiplication (see mlx5_timecounter_init()).
 */
enum {
	MLX5_CYCLES_SHIFT	= 23
};

/* MTPPS register pin_mode values: 1PPS pin direction. */
enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};

/* MTPPS register out_pattern values for output pins. */
enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};

/* MTPPSE register event-generation modes. */
enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};

/* MTPPS field_select bits: select which register fields a write updates. */
enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
};
69 
70 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
71 {
72 	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
73 }
74 
75 static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
76 {
77 	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
78 }
79 
80 static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
81 {
82 	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
83 
84 	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
85 		return -EOPNOTSUPP;
86 
87 	return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
88 				    MLX5_REG_MTUTC, 0, 1);
89 }
90 
/* Read the 64-bit HW clock (real-time UTC register or free-running internal
 * timer) from the init segment as two 32-bit MMIO reads.
 *
 * The high word is read before and after the low word; if it changed, the
 * low word wrapped between the reads and is re-read. @sts, when non-NULL,
 * brackets the low-word read with system timestamps for PTP_SYS_OFFSET.
 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around: low word rolled over between the two high
		 * reads; the low word read after timer_h1 is consistent
		 * with timer_h1.
		 */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	/* Real-time is a sec/nsec pair (REAL_TIME_TO_NS); internal timer is
	 * a plain 64-bit cycle count.
	 */
	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}
116 
117 static u64 read_internal_timer(const struct cyclecounter *cc)
118 {
119 	struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
120 	struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
121 	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
122 						  clock);
123 
124 	return mlx5_read_time(mdev, NULL, false) & cc->mask;
125 }
126 
/* Publish the current timecounter state to the page shared read-only with
 * userspace (mlx5_ib clock info mmap).
 *
 * Uses a seqcount-like protocol on clock_info->sign: the KERNEL_UPDATING
 * bit is set (with a full barrier) before the fields are written, and the
 * final release-store advances sign past the update with the bit cleared
 * again, so userspace readers can detect and retry torn reads.
 * Caller must hold clock->lock (all call sites are under the seqlock).
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	/* Page allocation may have failed at init; nothing to publish then */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult   = timer->cycles.mult;
	clock_info->nsec   = timer->tc.nsec;
	clock_info->frac   = timer->tc.frac;

	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
150 
151 static void mlx5_pps_out(struct work_struct *work)
152 {
153 	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
154 						 out_work);
155 	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
156 						pps_info);
157 	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
158 						  clock);
159 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
160 	unsigned long flags;
161 	int i;
162 
163 	for (i = 0; i < clock->ptp_info.n_pins; i++) {
164 		u64 tstart;
165 
166 		write_seqlock_irqsave(&clock->lock, flags);
167 		tstart = clock->pps_info.start[i];
168 		clock->pps_info.start[i] = 0;
169 		write_sequnlock_irqrestore(&clock->lock, flags);
170 		if (!tstart)
171 			continue;
172 
173 		MLX5_SET(mtpps_reg, in, pin, i);
174 		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
175 		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
176 		mlx5_set_mtpps(mdev, in, sizeof(in));
177 	}
178 }
179 
180 static void mlx5_timestamp_overflow(struct work_struct *work)
181 {
182 	struct delayed_work *dwork = to_delayed_work(work);
183 	struct mlx5_core_dev *mdev;
184 	struct mlx5_timer *timer;
185 	struct mlx5_clock *clock;
186 	unsigned long flags;
187 
188 	timer = container_of(dwork, struct mlx5_timer, overflow_work);
189 	clock = container_of(timer, struct mlx5_clock, timer);
190 	mdev = container_of(clock, struct mlx5_core_dev, clock);
191 
192 	write_seqlock_irqsave(&clock->lock, flags);
193 	timecounter_read(&timer->tc);
194 	mlx5_update_clock_info_page(mdev);
195 	write_sequnlock_irqrestore(&clock->lock, flags);
196 	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
197 }
198 
199 static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
200 				      const struct timespec64 *ts)
201 {
202 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
203 
204 	if (!mlx5_modify_mtutc_allowed(mdev))
205 		return 0;
206 
207 	if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
208 	    ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
209 		return -EINVAL;
210 
211 	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
212 	MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
213 	MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
214 
215 	return mlx5_set_mtutc(mdev, in, sizeof(in));
216 }
217 
218 static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
219 {
220 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
221 	struct mlx5_timer *timer = &clock->timer;
222 	struct mlx5_core_dev *mdev;
223 	unsigned long flags;
224 	int err;
225 
226 	mdev = container_of(clock, struct mlx5_core_dev, clock);
227 	err = mlx5_ptp_settime_real_time(mdev, ts);
228 	if (err)
229 		return err;
230 
231 	write_seqlock_irqsave(&clock->lock, flags);
232 	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
233 	mlx5_update_clock_info_page(mdev);
234 	write_sequnlock_irqrestore(&clock->lock, flags);
235 
236 	return 0;
237 }
238 
239 static
240 struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
241 					      struct ptp_system_timestamp *sts)
242 {
243 	struct timespec64 ts;
244 	u64 time;
245 
246 	time = mlx5_read_time(mdev, sts, true);
247 	ts = ns_to_timespec64(time);
248 	return ts;
249 }
250 
251 static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
252 			     struct ptp_system_timestamp *sts)
253 {
254 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
255 	struct mlx5_timer *timer = &clock->timer;
256 	struct mlx5_core_dev *mdev;
257 	unsigned long flags;
258 	u64 cycles, ns;
259 
260 	mdev = container_of(clock, struct mlx5_core_dev, clock);
261 	if (mlx5_real_time_mode(mdev)) {
262 		*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
263 		goto out;
264 	}
265 
266 	write_seqlock_irqsave(&clock->lock, flags);
267 	cycles = mlx5_read_time(mdev, sts, false);
268 	ns = timecounter_cyc2time(&timer->tc, cycles);
269 	write_sequnlock_irqrestore(&clock->lock, flags);
270 	*ts = ns_to_timespec64(ns);
271 out:
272 	return 0;
273 }
274 
275 static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
276 {
277 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
278 
279 	if (!mlx5_modify_mtutc_allowed(mdev))
280 		return 0;
281 
282 	/* HW time adjustment range is s16. If out of range, settime instead */
283 	if (delta < S16_MIN || delta > S16_MAX) {
284 		struct timespec64 ts;
285 		s64 ns;
286 
287 		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
288 		ns = timespec64_to_ns(&ts) + delta;
289 		ts = ns_to_timespec64(ns);
290 		return mlx5_ptp_settime_real_time(mdev, &ts);
291 	}
292 
293 	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
294 	MLX5_SET(mtutc_reg, in, time_adjustment, delta);
295 
296 	return mlx5_set_mtutc(mdev, in, sizeof(in));
297 }
298 
299 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
300 {
301 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
302 	struct mlx5_timer *timer = &clock->timer;
303 	struct mlx5_core_dev *mdev;
304 	unsigned long flags;
305 	int err;
306 
307 	mdev = container_of(clock, struct mlx5_core_dev, clock);
308 
309 	err = mlx5_ptp_adjtime_real_time(mdev, delta);
310 	if (err)
311 		return err;
312 	write_seqlock_irqsave(&clock->lock, flags);
313 	timecounter_adjtime(&timer->tc, delta);
314 	mlx5_update_clock_info_page(mdev);
315 	write_sequnlock_irqrestore(&clock->lock, flags);
316 
317 	return 0;
318 }
319 
320 static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
321 {
322 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
323 
324 	if (!mlx5_modify_mtutc_allowed(mdev))
325 		return 0;
326 
327 	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
328 	MLX5_SET(mtutc_reg, in, freq_adjustment, freq);
329 
330 	return mlx5_set_mtutc(mdev, in, sizeof(in));
331 }
332 
333 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
334 {
335 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
336 	struct mlx5_timer *timer = &clock->timer;
337 	struct mlx5_core_dev *mdev;
338 	unsigned long flags;
339 	int neg_adj = 0;
340 	u32 diff;
341 	u64 adj;
342 	int err;
343 
344 	mdev = container_of(clock, struct mlx5_core_dev, clock);
345 	err = mlx5_ptp_adjfreq_real_time(mdev, delta);
346 	if (err)
347 		return err;
348 
349 	if (delta < 0) {
350 		neg_adj = 1;
351 		delta = -delta;
352 	}
353 
354 	adj = timer->nominal_c_mult;
355 	adj *= delta;
356 	diff = div_u64(adj, 1000000000ULL);
357 
358 	write_seqlock_irqsave(&clock->lock, flags);
359 	timecounter_read(&timer->tc);
360 	timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
361 				       timer->nominal_c_mult + diff;
362 	mlx5_update_clock_info_page(mdev);
363 	write_sequnlock_irqrestore(&clock->lock, flags);
364 
365 	return 0;
366 }
367 
/* Configure a pin for external timestamping (EXTTS).
 *
 * Validates the request flags, resolves the pin assigned to the channel,
 * programs MTPPS (direction/pattern/enable) and arms repetitive event
 * generation via MTPPSE. Returns 0 on success, -EOPNOTSUPP when 1PPS is
 * unsupported or flags are invalid, -EINVAL for a bad index, -EBUSY when
 * no pin is assigned, or a register-access error.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern selects the triggering edge: 1 = falling */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		/* Disable: only touch the enable field */
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* Arm (or disarm, when !on) repetitive event reporting for the pin */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
429 
/* Translate an absolute target time on the SW timecounter timeline (ns)
 * into the corresponding raw value of the free-running HW counter.
 */
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	/* NOTE(review): nsec_delta is u64 — a target_ns earlier than the
	 * current time wraps here. Callers appear to pass only future
	 * times (next-second events) — confirm.
	 */
	nsec_delta = target_ns - nsec_now;
	/* cycles = ns << shift / mult — inverse of the cyclecounter's
	 * ns = cycles * mult >> shift scaling.
	 */
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}
450 
451 static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev,
452 				      s64 sec, u32 nsec)
453 {
454 	struct timespec64 ts;
455 	s64 target_ns;
456 
457 	ts.tv_sec = sec;
458 	ts.tv_nsec = nsec;
459 	target_ns = timespec64_to_ns(&ts);
460 
461 	return find_target_cycles(mdev, target_ns);
462 }
463 
464 static u64 perout_conf_real_time(s64 sec, u32 nsec)
465 {
466 	return (u64)nsec | (u64)sec << 32;
467 }
468 
/* Configure a pin for periodic output (PEROUT).
 *
 * Only a 1-second period is supported by HW. The start time is programmed
 * either in real-time format (sec<<32|nsec) or as an internal-timer cycle
 * value depending on the device's timestamping mode. Returns 0 on success,
 * -EOPNOTSUPP/-EINVAL/-EBUSY on validation failure, or a register error.
 */
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	struct timespec64 ts;
	u32 field_select = 0;
	u64 time_stamp = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;
	s64 ns;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->perout.flags)
		return -EOPNOTSUPP;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	field_select = MLX5_MTPPS_FS_ENABLE;
	if (on) {
		bool rt_mode = mlx5_real_time_mode(mdev);
		u32 nsec;
		s64 sec;

		pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
		if (pin < 0)
			return -EBUSY;

		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		ns = timespec64_to_ns(&ts);

		/* HW only emits a 1 Hz output: require a ~1s period.
		 * NOTE(review): the >>1 comparison also accepts 1s + 1ns.
		 */
		if ((ns >> 1) != 500000000LL)
			return -EINVAL;

		nsec = rq->perout.start.nsec;
		sec = rq->perout.start.sec;

		/* Real-time format stores seconds in a 32-bit field */
		if (rt_mode && sec > U32_MAX)
			return -EINVAL;

		time_stamp = rt_mode ? perout_conf_real_time(sec, nsec) :
				       perout_conf_internal_timer(mdev, sec, nsec);

		field_select |= MLX5_MTPPS_FS_PIN_MODE |
				MLX5_MTPPS_FS_PATTERN |
				MLX5_MTPPS_FS_TIME_STAMP;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* Arm (or disarm, when !on) repetitive event reporting for the pin */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
544 
545 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
546 			      struct ptp_clock_request *rq,
547 			      int on)
548 {
549 	struct mlx5_clock *clock =
550 			container_of(ptp, struct mlx5_clock, ptp_info);
551 
552 	clock->pps_info.enabled = !!on;
553 	return 0;
554 }
555 
556 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
557 			   struct ptp_clock_request *rq,
558 			   int on)
559 {
560 	switch (rq->type) {
561 	case PTP_CLK_REQ_EXTTS:
562 		return mlx5_extts_configure(ptp, rq, on);
563 	case PTP_CLK_REQ_PEROUT:
564 		return mlx5_perout_configure(ptp, rq, on);
565 	case PTP_CLK_REQ_PPS:
566 		return mlx5_pps_configure(ptp, rq, on);
567 	default:
568 		return -EOPNOTSUPP;
569 	}
570 	return 0;
571 }
572 
/* MTPPS cap_pin_x_mode bits: which 1PPS directions a pin supports. */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
577 
578 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
579 			   enum ptp_pin_function func, unsigned int chan)
580 {
581 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
582 						ptp_info);
583 
584 	switch (func) {
585 	case PTP_PF_NONE:
586 		return 0;
587 	case PTP_PF_EXTTS:
588 		return !(clock->pps_info.pin_caps[pin] &
589 			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
590 	case PTP_PF_PEROUT:
591 		return !(clock->pps_info.pin_caps[pin] &
592 			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
593 	default:
594 		return -EOPNOTSUPP;
595 	}
596 }
597 
/* Template ptp_clock_info, copied into clock->ptp_info at init time.
 * n_pins/n_ext_ts/n_per_out and the enable/verify/pps fields start zeroed
 * and are filled in later when 1PPS capabilities are discovered (see
 * mlx5_get_pps_caps() and mlx5_init_pin_config()).
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_ptp",
	.max_adj	= 100000000,	/* max frequency adjustment, in ppb */
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= mlx5_ptp_adjfreq,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};
614 
615 static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
616 				     u32 *mtpps, u32 mtpps_size)
617 {
618 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
619 
620 	MLX5_SET(mtpps_reg, in, pin, pin);
621 
622 	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
623 				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
624 }
625 
626 static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
627 {
628 	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
629 
630 	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
631 	u8 mode;
632 	int err;
633 
634 	err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
635 	if (err || !MLX5_GET(mtpps_reg, out, enable))
636 		return PTP_PF_NONE;
637 
638 	mode = MLX5_GET(mtpps_reg, out, pin_mode);
639 
640 	if (mode == MLX5_PIN_MODE_IN)
641 		return PTP_PF_EXTTS;
642 	else if (mode == MLX5_PIN_MODE_OUT)
643 		return PTP_PF_PEROUT;
644 
645 	return PTP_PF_NONE;
646 }
647 
648 static int mlx5_init_pin_config(struct mlx5_clock *clock)
649 {
650 	int i;
651 
652 	clock->ptp_info.pin_config =
653 			kcalloc(clock->ptp_info.n_pins,
654 				sizeof(*clock->ptp_info.pin_config),
655 				GFP_KERNEL);
656 	if (!clock->ptp_info.pin_config)
657 		return -ENOMEM;
658 	clock->ptp_info.enable = mlx5_ptp_enable;
659 	clock->ptp_info.verify = mlx5_ptp_verify;
660 	clock->ptp_info.pps = 1;
661 
662 	for (i = 0; i < clock->ptp_info.n_pins; i++) {
663 		snprintf(clock->ptp_info.pin_config[i].name,
664 			 sizeof(clock->ptp_info.pin_config[i].name),
665 			 "mlx5_pps%d", i);
666 		clock->ptp_info.pin_config[i].index = i;
667 		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
668 		clock->ptp_info.pin_config[i].chan = 0;
669 	}
670 
671 	return 0;
672 }
673 
/* Query MTPPS capabilities: pin counts and the per-pin mode-support
 * bitmasks (consumed by mlx5_ptp_verify()).
 * Note: the per-pin cap fields have distinct names in the register
 * layout, hence the unrolled reads.
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
697 
698 static void ts_next_sec(struct timespec64 *ts)
699 {
700 	ts->tv_sec += 1;
701 	ts->tv_nsec = 0;
702 }
703 
704 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
705 					struct mlx5_clock *clock)
706 {
707 	bool rt_mode = mlx5_real_time_mode(mdev);
708 	struct timespec64 ts;
709 	s64 target_ns;
710 
711 	if (rt_mode)
712 		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
713 	else
714 		mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
715 
716 	ts_next_sec(&ts);
717 	target_ns = timespec64_to_ns(&ts);
718 
719 	return rt_mode ? perout_conf_real_time(ts.tv_sec, ts.tv_nsec) :
720 			 find_target_cycles(mdev, target_ns);
721 }
722 
/* Notifier callback for PPS_EVENT EQEs.
 *
 * For an EXTTS pin: convert the HW timestamp to nanoseconds (real-time or
 * timecounter domain) and deliver either a PPSUSR or EXTTS event to the
 * ptp core. For a PEROUT pin: compute the next whole-second event time,
 * stash it, and defer the MTPPS reprogramming to mlx5_pps_out().
 */
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
						be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
						  be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		ns = perout_conf_next_event_timer(mdev, clock);
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		/* Register writes can sleep; do them from the workqueue */
		schedule_work(&clock->pps_info.out_work);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}
768 
/* Set up the SW cyclecounter/timecounter over the device's free-running
 * internal timer and seed it with the current wall-clock time.
 */
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = MLX5_CYCLES_SHIFT;
	/* mult/shift convert device cycles (dev_freq kHz) to nanoseconds */
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	/* Keep the unadjusted mult; adjfreq scales relative to this */
	timer->nominal_c_mult = timer->cycles.mult;
	/* Use 41 bits of the counter for timecounter arithmetic */
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}
786 
/* Compute the overflow-work period and kick off the periodic work that
 * keeps the timecounter ahead of HW counter wrap-around.
 */
static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	/* Convert nanoseconds to jiffies (NSEC_PER_SEC / HZ = ns per jiffy) */
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
	if (timer->overflow_period)
		schedule_delayed_work(&timer->overflow_work, 0);
	else
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is not scheduled\n");

	/* Expose the period to userspace via the shared clock-info page */
	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}
821 
822 static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
823 {
824 	struct mlx5_clock *clock = &mdev->clock;
825 	struct mlx5_ib_clock_info *info;
826 	struct mlx5_timer *timer;
827 
828 	mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
829 	if (!mdev->clock_info) {
830 		mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
831 		return;
832 	}
833 
834 	info = mdev->clock_info;
835 	timer = &clock->timer;
836 
837 	info->nsec = timer->tc.nsec;
838 	info->cycles = timer->tc.cycle_last;
839 	info->mask = timer->cycles.mask;
840 	info->mult = timer->nominal_c_mult;
841 	info->shift = timer->cycles.shift;
842 	info->frac = timer->tc.frac;
843 }
844 
845 static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
846 {
847 	struct mlx5_clock *clock = &mdev->clock;
848 
849 	mlx5_timecounter_init(mdev);
850 	mlx5_init_clock_info(mdev);
851 	mlx5_init_overflow_period(clock);
852 	clock->ptp_info = mlx5_ptp_clock_info;
853 
854 	if (mlx5_real_time_mode(mdev)) {
855 		struct timespec64 ts;
856 
857 		ktime_get_real_ts64(&ts);
858 		mlx5_ptp_settime(&clock->ptp_info, &ts);
859 	}
860 }
861 
/* Driver entry point: bring up the HW clock, 1PPS pin support, the PHC
 * registration and the PPS event notifier. Safe to call when the device
 * lacks a usable frequency capability (init is skipped with a warning).
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	mlx5_init_timer_clock(mdev);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Configure the PHC.
	 * NOTE(review): ptp_info was already copied from the template in
	 * mlx5_init_timer_clock(); this second assignment looks redundant
	 * but is harmless — confirm before removing.
	 */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	if (MLX5_PPS_CAP(mdev))
		mlx5_get_pps_caps(mdev);
	if (clock->ptp_info.n_pins)
		mlx5_init_pin_config(clock);

	/* PHC registration failure is non-fatal; clock->ptp stays NULL */
	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}
895 
/* Tear down everything mlx5_init_clock() set up, in reverse order:
 * unregister the event notifier first so no new work is scheduled, then
 * the PHC, then flush the outstanding work items, then free the shared
 * clock-info page and pin table.
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	/* Mirrors the early-out in mlx5_init_clock(): nothing was set up */
	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->timer.overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	/* kfree(NULL) is a no-op when pin_config was never allocated */
	kfree(clock->ptp_info.pin_config);
}
919