1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
3
4 #include <linux/ptp_clock_kernel.h>
5 #include <linux/clocksource.h>
6 #include <linux/timecounter.h>
7 #include <linux/spinlock.h>
8 #include <linux/device.h>
9 #include <linux/rhashtable.h>
10 #include <linux/ptp_classify.h>
11 #include <linux/if_ether.h>
12 #include <linux/if_vlan.h>
13 #include <linux/net_tstamp.h>
14 #include <linux/refcount.h>
15
16 #include "spectrum.h"
17 #include "spectrum_ptp.h"
18 #include "core.h"
19 #include "txheader.h"
20
21 #define MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT 29
22 #define MLXSW_SP1_PTP_CLOCK_FREQ_KHZ 156257 /* 6.4nSec */
23 #define MLXSW_SP1_PTP_CLOCK_MASK 64
24
25 #define MLXSW_SP1_PTP_HT_GC_INTERVAL 500 /* ms */
26
27 /* How long, approximately, should the unmatched entries stay in the hash table
28 * before they are collected. Should be evenly divisible by the GC interval.
29 */
30 #define MLXSW_SP1_PTP_HT_GC_TIMEOUT 1000 /* ms */
31
32 struct mlxsw_sp_ptp_state {
33 struct mlxsw_sp *mlxsw_sp;
34 };
35
36 struct mlxsw_sp1_ptp_state {
37 struct mlxsw_sp_ptp_state common;
38 struct rhltable unmatched_ht;
39 spinlock_t unmatched_lock; /* protects the HT */
40 struct delayed_work ht_gc_dw;
41 u32 gc_cycle;
42 };
43
44 struct mlxsw_sp2_ptp_state {
45 struct mlxsw_sp_ptp_state common;
46 refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
47 * enabled.
48 */
49 struct hwtstamp_config config;
50 struct mutex lock; /* Protects 'config' and HW configuration. */
51 };
52
53 struct mlxsw_sp1_ptp_key {
54 u16 local_port;
55 u8 message_type;
56 u16 sequence_id;
57 u8 domain_number;
58 bool ingress;
59 };
60
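/* An unmatched entry holds whichever half of a PTP event - the packet or its
 * timestamp - arrived first, until the other half shows up or the entry is
 * garbage collected.
 */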
61 struct mlxsw_sp1_ptp_unmatched {
62 struct mlxsw_sp1_ptp_key key;
63 struct rhlist_head ht_node;
64 struct rcu_head rcu;
65 struct sk_buff *skb;
66 u64 timestamp;
67 u32 gc_cycle;
68 };
69
70 static const struct rhashtable_params mlxsw_sp1_ptp_unmatched_ht_params = {
71 .key_len = sizeof_field(struct mlxsw_sp1_ptp_unmatched, key),
72 .key_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, key),
73 .head_offset = offsetof(struct mlxsw_sp1_ptp_unmatched, ht_node),
74 };
75
76 struct mlxsw_sp_ptp_clock {
77 struct mlxsw_core *core;
78 struct ptp_clock *ptp;
79 struct ptp_clock_info ptp_info;
80 };
81
82 struct mlxsw_sp1_ptp_clock {
83 struct mlxsw_sp_ptp_clock common;
84 spinlock_t lock; /* protect this structure */
85 struct cyclecounter cycles;
86 struct timecounter tc;
87 u32 nominal_c_mult;
88 unsigned long overflow_period;
89 struct delayed_work overflow_work;
90 };
91
92 static struct mlxsw_sp1_ptp_state *
mlxsw_sp1_ptp_state(struct mlxsw_sp *mlxsw_sp)
94 {
95 return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp1_ptp_state,
96 common);
97 }
98
99 static struct mlxsw_sp2_ptp_state *
mlxsw_sp2_ptp_state(struct mlxsw_sp *mlxsw_sp)
101 {
102 return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp2_ptp_state,
103 common);
104 }
105
106 static struct mlxsw_sp1_ptp_clock *
mlxsw_sp1_ptp_clock(struct ptp_clock_info *ptp)
108 {
109 return container_of(ptp, struct mlxsw_sp1_ptp_clock, common.ptp_info);
110 }
111
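/* Read the 64-bit free-running counter. The low and high words are read
 * separately, so if the high word changed in between, re-read the low word to
 * get a consistent value.
 */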
static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp1_ptp_clock *clock,
113 struct ptp_system_timestamp *sts)
114 {
115 struct mlxsw_core *mlxsw_core = clock->common.core;
116 u32 frc_h1, frc_h2, frc_l;
117
118 frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
119 ptp_read_system_prets(sts);
120 frc_l = mlxsw_core_read_frc_l(mlxsw_core);
121 ptp_read_system_postts(sts);
122 frc_h2 = mlxsw_core_read_frc_h(mlxsw_core);
123
124 if (frc_h1 != frc_h2) {
125 /* wrap around */
126 ptp_read_system_prets(sts);
127 frc_l = mlxsw_core_read_frc_l(mlxsw_core);
128 ptp_read_system_postts(sts);
129 }
130
131 return (u64) frc_l | (u64) frc_h2 << 32;
132 }
133
static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
135 {
136 struct mlxsw_sp1_ptp_clock *clock =
137 container_of(cc, struct mlxsw_sp1_ptp_clock, cycles);
138
139 return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
140 }
141
142 static int
mlxsw_sp_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
144 {
145 struct mlxsw_core *mlxsw_core = clock->core;
146 char mtutc_pl[MLXSW_REG_MTUTC_LEN];
147
148 mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
149 freq_adj, 0, 0, 0);
150 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
151 }
152
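/* Convert a nanosecond value to raw counter cycles, i.e. the inverse of the
 * timecounter's cycles-to-nanoseconds conversion: cycles = (nsec << shift) / mult.
 */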
static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
154 {
155 u64 cycles = (u64) nsec;
156
157 cycles <<= tc->cc->shift;
158 cycles = div_u64(cycles, tc->cc->mult);
159
160 return cycles;
161 }
162
163 static int
mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp1_ptp_clock *clock, u64 nsec)
165 {
166 struct mlxsw_core *mlxsw_core = clock->common.core;
167 u64 next_sec, next_sec_in_nsec, cycles;
168 char mtutc_pl[MLXSW_REG_MTUTC_LEN];
169 char mtpps_pl[MLXSW_REG_MTPPS_LEN];
170 int err;
171
172 next_sec = div_u64(nsec, NSEC_PER_SEC) + 1;
173 next_sec_in_nsec = next_sec * NSEC_PER_SEC;
174
175 spin_lock_bh(&clock->lock);
176 cycles = mlxsw_sp1_ptp_ns2cycles(&clock->tc, next_sec_in_nsec);
177 spin_unlock_bh(&clock->lock);
178
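/* Program the MTPPS virtual pin with the cycle count of the next whole
 * second, then request MTUTC to set the UTC time at that next second.
 */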
179 mlxsw_reg_mtpps_vpin_pack(mtpps_pl, cycles);
180 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtpps), mtpps_pl);
181 if (err)
182 return err;
183
184 mlxsw_reg_mtutc_pack(mtutc_pl,
185 MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
186 0, next_sec, 0, 0);
187 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
188 }
189
static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
191 {
192 struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
193 s32 ppb;
194
195 ppb = scaled_ppm_to_ppb(scaled_ppm);
196
197 spin_lock_bh(&clock->lock);
198 timecounter_read(&clock->tc);
199 clock->cycles.mult = adjust_by_scaled_ppm(clock->nominal_c_mult,
200 scaled_ppm);
201 spin_unlock_bh(&clock->lock);
202
203 return mlxsw_sp_ptp_phc_adjfreq(&clock->common, ppb);
204 }
205
static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
207 {
208 struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
209 u64 nsec;
210
211 spin_lock_bh(&clock->lock);
212 timecounter_adjtime(&clock->tc, delta);
213 nsec = timecounter_read(&clock->tc);
214 spin_unlock_bh(&clock->lock);
215
216 return mlxsw_sp1_ptp_phc_settime(clock, nsec);
217 }
218
static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
220 struct timespec64 *ts,
221 struct ptp_system_timestamp *sts)
222 {
223 struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
224 u64 cycles, nsec;
225
226 spin_lock_bh(&clock->lock);
227 cycles = __mlxsw_sp1_ptp_read_frc(clock, sts);
228 nsec = timecounter_cyc2time(&clock->tc, cycles);
229 spin_unlock_bh(&clock->lock);
230
231 *ts = ns_to_timespec64(nsec);
232
233 return 0;
234 }
235
static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
237 const struct timespec64 *ts)
238 {
239 struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
240 u64 nsec = timespec64_to_ns(ts);
241
242 spin_lock_bh(&clock->lock);
243 timecounter_init(&clock->tc, &clock->cycles, nsec);
244 nsec = timecounter_read(&clock->tc);
245 spin_unlock_bh(&clock->lock);
246
247 return mlxsw_sp1_ptp_phc_settime(clock, nsec);
248 }
249
250 static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
251 .owner = THIS_MODULE,
252 .name = "mlxsw_sp_clock",
253 .max_adj = 100000000,
254 .adjfine = mlxsw_sp1_ptp_adjfine,
255 .adjtime = mlxsw_sp1_ptp_adjtime,
256 .gettimex64 = mlxsw_sp1_ptp_gettimex,
257 .settime64 = mlxsw_sp1_ptp_settime,
258 };
259
static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
261 {
262 struct delayed_work *dwork = to_delayed_work(work);
263 struct mlxsw_sp1_ptp_clock *clock;
264
265 clock = container_of(dwork, struct mlxsw_sp1_ptp_clock, overflow_work);
266
267 spin_lock_bh(&clock->lock);
268 timecounter_read(&clock->tc);
269 spin_unlock_bh(&clock->lock);
270 mlxsw_core_schedule_dw(&clock->overflow_work, clock->overflow_period);
271 }
272
273 struct mlxsw_sp_ptp_clock *
mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
275 {
276 u64 overflow_cycles, nsec, frac = 0;
277 struct mlxsw_sp1_ptp_clock *clock;
278 int err;
279
280 clock = kzalloc(sizeof(*clock), GFP_KERNEL);
281 if (!clock)
282 return ERR_PTR(-ENOMEM);
283
284 spin_lock_init(&clock->lock);
285 clock->cycles.read = mlxsw_sp1_ptp_read_frc;
286 clock->cycles.shift = MLXSW_SP1_PTP_CLOCK_CYCLES_SHIFT;
287 clock->cycles.mult = clocksource_khz2mult(MLXSW_SP1_PTP_CLOCK_FREQ_KHZ,
288 clock->cycles.shift);
289 clock->nominal_c_mult = clock->cycles.mult;
290 clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
291 clock->common.core = mlxsw_sp->core;
292
293 timecounter_init(&clock->tc, &clock->cycles, 0);
294
/* Calculate the period for the overflow watchdog, to make sure the counter is
 * checked at least twice every wrap-around.
 * The period is derived from the minimum of the maximum HW cycle count (the
 * clock source mask) and the maximum number of cycles that can be multiplied
 * by the clock multiplier without the result exceeding 64 bits.
 */
302 overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
303 overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
304
305 nsec = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, 0, &frac);
306 clock->overflow_period = nsecs_to_jiffies(nsec);
307
308 INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
309 mlxsw_core_schedule_dw(&clock->overflow_work, 0);
310
311 clock->common.ptp_info = mlxsw_sp1_ptp_clock_info;
312 clock->common.ptp = ptp_clock_register(&clock->common.ptp_info, dev);
313 if (IS_ERR(clock->common.ptp)) {
314 err = PTR_ERR(clock->common.ptp);
315 dev_err(dev, "ptp_clock_register failed %d\n", err);
316 goto err_ptp_clock_register;
317 }
318
319 return &clock->common;
320
321 err_ptp_clock_register:
322 cancel_delayed_work_sync(&clock->overflow_work);
323 kfree(clock);
324 return ERR_PTR(err);
325 }
326
void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock_common)
328 {
329 struct mlxsw_sp1_ptp_clock *clock =
330 container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
331
332 ptp_clock_unregister(clock_common->ptp);
333 cancel_delayed_work_sync(&clock->overflow_work);
334 kfree(clock);
335 }
336
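/* Read the UTC clock as seconds and nanoseconds. If the seconds counter
 * advanced between the two reads, re-read the nanoseconds to get a consistent
 * value.
 */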
static u64 mlxsw_sp2_ptp_read_utc(struct mlxsw_sp_ptp_clock *clock,
338 struct ptp_system_timestamp *sts)
339 {
340 struct mlxsw_core *mlxsw_core = clock->core;
341 u32 utc_sec1, utc_sec2, utc_nsec;
342
343 utc_sec1 = mlxsw_core_read_utc_sec(mlxsw_core);
344 ptp_read_system_prets(sts);
345 utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
346 ptp_read_system_postts(sts);
347 utc_sec2 = mlxsw_core_read_utc_sec(mlxsw_core);
348
349 if (utc_sec1 != utc_sec2) {
350 /* Wrap around. */
351 ptp_read_system_prets(sts);
352 utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
353 ptp_read_system_postts(sts);
354 }
355
356 return (u64)utc_sec2 * NSEC_PER_SEC + utc_nsec;
357 }
358
359 static int
mlxsw_sp2_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
361 {
362 struct mlxsw_core *mlxsw_core = clock->core;
363 char mtutc_pl[MLXSW_REG_MTUTC_LEN];
364 u32 sec, nsec_rem;
365
366 sec = div_u64_rem(nsec, NSEC_PER_SEC, &nsec_rem);
367 mlxsw_reg_mtutc_pack(mtutc_pl,
368 MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE,
369 0, sec, nsec_rem, 0);
370 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
371 }
372
static int mlxsw_sp2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
374 {
375 struct mlxsw_sp_ptp_clock *clock =
376 container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
377 s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
378
/* In Spectrum-2 and newer ASICs, the frequency adjustment in MTUTC is
 * reversed: positive values decrease the frequency. Adjust the sign of the
 * ppb value to match this behavior.
 */
383 return mlxsw_sp_ptp_phc_adjfreq(clock, -ppb);
384 }
385
static int mlxsw_sp2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
387 {
388 struct mlxsw_sp_ptp_clock *clock =
389 container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
390 struct mlxsw_core *mlxsw_core = clock->core;
391 char mtutc_pl[MLXSW_REG_MTUTC_LEN];
392
393 /* HW time adjustment range is s16. If out of range, set time instead. */
394 if (delta < S16_MIN || delta > S16_MAX) {
395 u64 nsec;
396
397 nsec = mlxsw_sp2_ptp_read_utc(clock, NULL);
398 nsec += delta;
399
400 return mlxsw_sp2_ptp_phc_settime(clock, nsec);
401 }
402
403 mlxsw_reg_mtutc_pack(mtutc_pl,
404 MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
405 0, 0, 0, delta);
406 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
407 }
408
static int mlxsw_sp2_ptp_gettimex(struct ptp_clock_info *ptp,
410 struct timespec64 *ts,
411 struct ptp_system_timestamp *sts)
412 {
413 struct mlxsw_sp_ptp_clock *clock =
414 container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
415 u64 nsec;
416
417 nsec = mlxsw_sp2_ptp_read_utc(clock, sts);
418 *ts = ns_to_timespec64(nsec);
419
420 return 0;
421 }
422
static int mlxsw_sp2_ptp_settime(struct ptp_clock_info *ptp,
424 const struct timespec64 *ts)
425 {
426 struct mlxsw_sp_ptp_clock *clock =
427 container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
428 u64 nsec = timespec64_to_ns(ts);
429
430 return mlxsw_sp2_ptp_phc_settime(clock, nsec);
431 }
432
433 static const struct ptp_clock_info mlxsw_sp2_ptp_clock_info = {
434 .owner = THIS_MODULE,
435 .name = "mlxsw_sp_clock",
436 .max_adj = MLXSW_REG_MTUTC_MAX_FREQ_ADJ,
437 .adjfine = mlxsw_sp2_ptp_adjfine,
438 .adjtime = mlxsw_sp2_ptp_adjtime,
439 .gettimex64 = mlxsw_sp2_ptp_gettimex,
440 .settime64 = mlxsw_sp2_ptp_settime,
441 };
442
443 struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
445 {
446 struct mlxsw_sp_ptp_clock *clock;
447 int err;
448
449 clock = kzalloc(sizeof(*clock), GFP_KERNEL);
450 if (!clock)
451 return ERR_PTR(-ENOMEM);
452
453 clock->core = mlxsw_sp->core;
454
455 clock->ptp_info = mlxsw_sp2_ptp_clock_info;
456
457 err = mlxsw_sp2_ptp_phc_settime(clock, 0);
458 if (err) {
459 dev_err(dev, "setting UTC time failed %d\n", err);
460 goto err_ptp_phc_settime;
461 }
462
463 clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
464 if (IS_ERR(clock->ptp)) {
465 err = PTR_ERR(clock->ptp);
466 dev_err(dev, "ptp_clock_register failed %d\n", err);
467 goto err_ptp_clock_register;
468 }
469
470 return clock;
471
472 err_ptp_clock_register:
473 err_ptp_phc_settime:
474 kfree(clock);
475 return ERR_PTR(err);
476 }
477
void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
479 {
480 ptp_clock_unregister(clock->ptp);
481 kfree(clock);
482 }
483
static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
485 u8 *p_domain_number,
486 u8 *p_message_type,
487 u16 *p_sequence_id)
488 {
489 unsigned int ptp_class;
490 struct ptp_header *hdr;
491
492 ptp_class = ptp_classify_raw(skb);
493
494 switch (ptp_class & PTP_CLASS_VMASK) {
495 case PTP_CLASS_V1:
496 case PTP_CLASS_V2:
497 break;
498 default:
499 return -ERANGE;
500 }
501
502 hdr = ptp_parse_header(skb, ptp_class);
503 if (!hdr)
504 return -EINVAL;
505
506 *p_message_type = ptp_get_msgtype(hdr, ptp_class);
507 *p_domain_number = hdr->domain_number;
508 *p_sequence_id = be16_to_cpu(hdr->sequence_id);
509
510 return 0;
511 }
512
/* Returns 0 on successful insertion into the hash table, or a negative error
 * code otherwise.
 */
516 static int
mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
518 struct mlxsw_sp1_ptp_key key,
519 struct sk_buff *skb,
520 u64 timestamp)
521 {
522 int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
523 struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
524 struct mlxsw_sp1_ptp_unmatched *unmatched;
525 int err;
526
527 unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
528 if (!unmatched)
529 return -ENOMEM;
530
531 unmatched->key = key;
532 unmatched->skb = skb;
533 unmatched->timestamp = timestamp;
534 unmatched->gc_cycle = ptp_state->gc_cycle + cycles;
535
536 err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
537 mlxsw_sp1_ptp_unmatched_ht_params);
538 if (err)
539 kfree(unmatched);
540
541 return err;
542 }
543
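/* Look up the unmatched entries for the given key. Return the last entry in
 * the chain, if any, and report the number of entries with this key through
 * p_length.
 */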
544 static struct mlxsw_sp1_ptp_unmatched *
mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
546 struct mlxsw_sp1_ptp_key key, int *p_length)
547 {
548 struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
549 struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
550 struct rhlist_head *tmp, *list;
551 int length = 0;
552
553 list = rhltable_lookup(&ptp_state->unmatched_ht, &key,
554 mlxsw_sp1_ptp_unmatched_ht_params);
555 rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
556 last = unmatched;
557 length++;
558 }
559
560 *p_length = length;
561 return last;
562 }
563
564 static int
mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
566 struct mlxsw_sp1_ptp_unmatched *unmatched)
567 {
568 struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
569
570 return rhltable_remove(&ptp_state->unmatched_ht,
571 &unmatched->ht_node,
572 mlxsw_sp1_ptp_unmatched_ht_params);
573 }
574
575 /* This function is called in the following scenarios:
576 *
577 * 1) When a packet is matched with its timestamp.
 * 2) In several situations when it is necessary to immediately pass on
579 * an SKB without a timestamp.
580 * 3) From GC indirectly through mlxsw_sp1_ptp_unmatched_finish().
581 * This case is similar to 2) above.
582 */
static void mlxsw_sp1_ptp_packet_finish(struct mlxsw_sp *mlxsw_sp,
584 struct sk_buff *skb, u16 local_port,
585 bool ingress,
586 struct skb_shared_hwtstamps *hwtstamps)
587 {
588 struct mlxsw_sp_port *mlxsw_sp_port;
589
590 /* Between capturing the packet and finishing it, there is a window of
591 * opportunity for the originating port to go away (e.g. due to a
592 * split). Also make sure the SKB device reference is still valid.
593 */
594 mlxsw_sp_port = mlxsw_sp->ports[local_port];
595 if (!(mlxsw_sp_port && (!skb->dev || skb->dev == mlxsw_sp_port->dev))) {
596 dev_kfree_skb_any(skb);
597 return;
598 }
599
600 if (ingress) {
601 if (hwtstamps)
602 *skb_hwtstamps(skb) = *hwtstamps;
603 mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
604 } else {
605 /* skb_tstamp_tx() allows hwtstamps to be NULL. */
606 skb_tstamp_tx(skb, hwtstamps);
607 dev_kfree_skb_any(skb);
608 }
609 }
610
static void mlxsw_sp1_packet_timestamp(struct mlxsw_sp *mlxsw_sp,
612 struct mlxsw_sp1_ptp_key key,
613 struct sk_buff *skb,
614 u64 timestamp)
615 {
616 struct mlxsw_sp_ptp_clock *clock_common = mlxsw_sp->clock;
617 struct mlxsw_sp1_ptp_clock *clock =
618 container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
619
620 struct skb_shared_hwtstamps hwtstamps;
621 u64 nsec;
622
623 spin_lock_bh(&clock->lock);
624 nsec = timecounter_cyc2time(&clock->tc, timestamp);
625 spin_unlock_bh(&clock->lock);
626
627 hwtstamps.hwtstamp = ns_to_ktime(nsec);
628 mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
629 key.local_port, key.ingress, &hwtstamps);
630 }
631
632 static void
mlxsw_sp1_ptp_unmatched_finish(struct mlxsw_sp *mlxsw_sp,
634 struct mlxsw_sp1_ptp_unmatched *unmatched)
635 {
636 if (unmatched->skb && unmatched->timestamp)
637 mlxsw_sp1_packet_timestamp(mlxsw_sp, unmatched->key,
638 unmatched->skb,
639 unmatched->timestamp);
640 else if (unmatched->skb)
641 mlxsw_sp1_ptp_packet_finish(mlxsw_sp, unmatched->skb,
642 unmatched->key.local_port,
643 unmatched->key.ingress, NULL);
644 kfree_rcu(unmatched, rcu);
645 }
646
static void mlxsw_sp1_ptp_unmatched_free_fn(void *ptr, void *arg)
648 {
649 struct mlxsw_sp1_ptp_unmatched *unmatched = ptr;
650
/* This is invoked at a point where the ports are already gone. Nothing can be
 * done with whatever is left in the HT except freeing it.
 */
654 if (unmatched->skb)
655 dev_kfree_skb_any(unmatched->skb);
656 kfree_rcu(unmatched, rcu);
657 }
658
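/* Called with either a packet or a timestamp for the given key. If the
 * counterpart is already waiting in the hash table, complete the pair;
 * otherwise store this half until the other one arrives.
 */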
static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
660 struct mlxsw_sp1_ptp_key key,
661 struct sk_buff *skb, u64 timestamp)
662 {
663 struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
664 struct mlxsw_sp1_ptp_unmatched *unmatched;
665 int length;
666 int err;
667
668 rcu_read_lock();
669
670 spin_lock(&ptp_state->unmatched_lock);
671
672 unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
673 if (skb && unmatched && unmatched->timestamp) {
674 unmatched->skb = skb;
675 } else if (timestamp && unmatched && unmatched->skb) {
676 unmatched->timestamp = timestamp;
677 } else {
/* Either there is no entry to match, or the one that is there is
 * incompatible.
 */
681 if (length < 100)
682 err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
683 skb, timestamp);
684 else
685 err = -E2BIG;
686 if (err && skb)
687 mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
688 key.local_port,
689 key.ingress, NULL);
690 unmatched = NULL;
691 }
692
693 if (unmatched) {
694 err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
695 WARN_ON_ONCE(err);
696 }
697
698 spin_unlock(&ptp_state->unmatched_lock);
699
700 if (unmatched)
701 mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
702
703 rcu_read_unlock();
704 }
705
static void mlxsw_sp1_ptp_got_packet(struct mlxsw_sp *mlxsw_sp,
707 struct sk_buff *skb, u16 local_port,
708 bool ingress)
709 {
710 struct mlxsw_sp_port *mlxsw_sp_port;
711 struct mlxsw_sp1_ptp_key key;
712 u8 types;
713 int err;
714
715 mlxsw_sp_port = mlxsw_sp->ports[local_port];
716 if (!mlxsw_sp_port)
717 goto immediate;
718
719 types = ingress ? mlxsw_sp_port->ptp.ing_types :
720 mlxsw_sp_port->ptp.egr_types;
721 if (!types)
722 goto immediate;
723
724 memset(&key, 0, sizeof(key));
725 key.local_port = local_port;
726 key.ingress = ingress;
727
728 err = mlxsw_sp_ptp_parse(skb, &key.domain_number, &key.message_type,
729 &key.sequence_id);
730 if (err)
731 goto immediate;
732
733 /* For packets whose timestamping was not enabled on this port, don't
734 * bother trying to match the timestamp.
735 */
736 if (!((1 << key.message_type) & types))
737 goto immediate;
738
739 mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, skb, 0);
740 return;
741
742 immediate:
743 mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, local_port, ingress, NULL);
744 }
745
void mlxsw_sp1_ptp_got_timestamp(struct mlxsw_sp *mlxsw_sp, bool ingress,
747 u16 local_port, u8 message_type,
748 u8 domain_number, u16 sequence_id,
749 u64 timestamp)
750 {
751 struct mlxsw_sp_port *mlxsw_sp_port;
752 struct mlxsw_sp1_ptp_key key;
753 u8 types;
754
755 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
756 return;
757 mlxsw_sp_port = mlxsw_sp->ports[local_port];
758 if (!mlxsw_sp_port)
759 return;
760
761 types = ingress ? mlxsw_sp_port->ptp.ing_types :
762 mlxsw_sp_port->ptp.egr_types;
763
764 /* For message types whose timestamping was not enabled on this port,
765 * don't bother with the timestamp.
766 */
767 if (!((1 << message_type) & types))
768 return;
769
770 memset(&key, 0, sizeof(key));
771 key.local_port = local_port;
772 key.domain_number = domain_number;
773 key.message_type = message_type;
774 key.sequence_id = sequence_id;
775 key.ingress = ingress;
776
777 mlxsw_sp1_ptp_got_piece(mlxsw_sp, key, NULL, timestamp);
778 }
779
void mlxsw_sp1_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
781 u16 local_port)
782 {
783 skb_reset_mac_header(skb);
784 mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, true);
785 }
786
void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
788 struct sk_buff *skb, u16 local_port)
789 {
790 mlxsw_sp1_ptp_got_packet(mlxsw_sp, skb, local_port, false);
791 }
792
793 static void
mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp1_ptp_state *ptp_state,
795 struct mlxsw_sp1_ptp_unmatched *unmatched)
796 {
797 struct mlxsw_sp *mlxsw_sp = ptp_state->common.mlxsw_sp;
798 struct mlxsw_sp_ptp_port_dir_stats *stats;
799 struct mlxsw_sp_port *mlxsw_sp_port;
800 int err;
801
802 /* If an unmatched entry has an SKB, it has to be handed over to the
803 * networking stack. This is usually done from a trap handler, which is
804 * invoked in a softirq context. Here we are going to do it in process
805 * context. If that were to be interrupted by a softirq, it could cause
806 * a deadlock when an attempt is made to take an already-taken lock
807 * somewhere along the sending path. Disable softirqs to prevent this.
808 */
809 local_bh_disable();
810
811 spin_lock(&ptp_state->unmatched_lock);
812 err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node,
813 mlxsw_sp1_ptp_unmatched_ht_params);
814 spin_unlock(&ptp_state->unmatched_lock);
815
816 if (err)
817 /* The packet was matched with timestamp during the walk. */
818 goto out;
819
820 mlxsw_sp_port = mlxsw_sp->ports[unmatched->key.local_port];
821 if (mlxsw_sp_port) {
822 stats = unmatched->key.ingress ?
823 &mlxsw_sp_port->ptp.stats.rx_gcd :
824 &mlxsw_sp_port->ptp.stats.tx_gcd;
825 if (unmatched->skb)
826 stats->packets++;
827 else
828 stats->timestamps++;
829 }
830
831 /* mlxsw_sp1_ptp_unmatched_finish() invokes netif_receive_skb(). While
832 * the comment at that function states that it can only be called in
833 * soft IRQ context, this pattern of local_bh_disable() +
834 * netif_receive_skb(), in process context, is seen elsewhere in the
835 * kernel, notably in pktgen.
836 */
837 mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
838
839 out:
840 local_bh_enable();
841 }
842
static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
844 {
845 struct delayed_work *dwork = to_delayed_work(work);
846 struct mlxsw_sp1_ptp_unmatched *unmatched;
847 struct mlxsw_sp1_ptp_state *ptp_state;
848 struct rhashtable_iter iter;
849 u32 gc_cycle;
850 void *obj;
851
852 ptp_state = container_of(dwork, struct mlxsw_sp1_ptp_state, ht_gc_dw);
853 gc_cycle = ptp_state->gc_cycle++;
854
855 rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
856 rhashtable_walk_start(&iter);
857 while ((obj = rhashtable_walk_next(&iter))) {
858 if (IS_ERR(obj))
859 continue;
860
861 unmatched = obj;
862 if (unmatched->gc_cycle <= gc_cycle)
863 mlxsw_sp1_ptp_ht_gc_collect(ptp_state, unmatched);
864 }
865 rhashtable_walk_stop(&iter);
866 rhashtable_walk_exit(&iter);
867
868 mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
869 MLXSW_SP1_PTP_HT_GC_INTERVAL);
870 }
871
static int mlxsw_sp_ptp_mtptpt_set(struct mlxsw_sp *mlxsw_sp,
873 enum mlxsw_reg_mtptpt_trap_id trap_id,
874 u16 message_type)
875 {
876 char mtptpt_pl[MLXSW_REG_MTPTPT_LEN];
877
878 mlxsw_reg_mtptpt_pack(mtptpt_pl, trap_id, message_type);
879 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtptpt), mtptpt_pl);
880 }
881
static int mlxsw_sp1_ptp_set_fifo_clr_on_trap(struct mlxsw_sp *mlxsw_sp,
883 bool clr)
884 {
885 char mogcr_pl[MLXSW_REG_MOGCR_LEN] = {0};
886 int err;
887
888 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
889 if (err)
890 return err;
891
892 mlxsw_reg_mogcr_ptp_iftc_set(mogcr_pl, clr);
893 mlxsw_reg_mogcr_ptp_eftc_set(mogcr_pl, clr);
894 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
895 }
896
static int mlxsw_sp1_ptp_mtpppc_set(struct mlxsw_sp *mlxsw_sp,
898 u16 ing_types, u16 egr_types)
899 {
900 char mtpppc_pl[MLXSW_REG_MTPPPC_LEN];
901
902 mlxsw_reg_mtpppc_pack(mtpppc_pl, ing_types, egr_types);
903 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpppc), mtpppc_pl);
904 }
905
906 struct mlxsw_sp1_ptp_shaper_params {
907 u32 ethtool_speed;
908 enum mlxsw_reg_qpsc_port_speed port_speed;
909 u8 shaper_time_exp;
910 u8 shaper_time_mantissa;
911 u8 shaper_inc;
912 u8 shaper_bs;
913 u8 port_to_shaper_credits;
914 int ing_timestamp_inc;
915 int egr_timestamp_inc;
916 };
917
918 static const struct mlxsw_sp1_ptp_shaper_params
919 mlxsw_sp1_ptp_shaper_params[] = {
920 {
921 .ethtool_speed = SPEED_100,
922 .port_speed = MLXSW_REG_QPSC_PORT_SPEED_100M,
923 .shaper_time_exp = 4,
924 .shaper_time_mantissa = 12,
925 .shaper_inc = 9,
926 .shaper_bs = 1,
927 .port_to_shaper_credits = 1,
928 .ing_timestamp_inc = -313,
929 .egr_timestamp_inc = 313,
930 },
931 {
932 .ethtool_speed = SPEED_1000,
933 .port_speed = MLXSW_REG_QPSC_PORT_SPEED_1G,
934 .shaper_time_exp = 0,
935 .shaper_time_mantissa = 12,
936 .shaper_inc = 6,
937 .shaper_bs = 0,
938 .port_to_shaper_credits = 1,
939 .ing_timestamp_inc = -35,
940 .egr_timestamp_inc = 35,
941 },
942 {
943 .ethtool_speed = SPEED_10000,
944 .port_speed = MLXSW_REG_QPSC_PORT_SPEED_10G,
945 .shaper_time_exp = 0,
946 .shaper_time_mantissa = 2,
947 .shaper_inc = 14,
948 .shaper_bs = 1,
949 .port_to_shaper_credits = 1,
950 .ing_timestamp_inc = -11,
951 .egr_timestamp_inc = 11,
952 },
953 {
954 .ethtool_speed = SPEED_25000,
955 .port_speed = MLXSW_REG_QPSC_PORT_SPEED_25G,
956 .shaper_time_exp = 0,
957 .shaper_time_mantissa = 0,
958 .shaper_inc = 11,
959 .shaper_bs = 1,
960 .port_to_shaper_credits = 1,
961 .ing_timestamp_inc = -14,
962 .egr_timestamp_inc = 14,
963 },
964 };
965
966 #define MLXSW_SP1_PTP_SHAPER_PARAMS_LEN ARRAY_SIZE(mlxsw_sp1_ptp_shaper_params)
967
static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
969 {
970 const struct mlxsw_sp1_ptp_shaper_params *params;
971 char qpsc_pl[MLXSW_REG_QPSC_LEN];
972 int i, err;
973
974 for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
975 params = &mlxsw_sp1_ptp_shaper_params[i];
976 mlxsw_reg_qpsc_pack(qpsc_pl, params->port_speed,
977 params->shaper_time_exp,
978 params->shaper_time_mantissa,
979 params->shaper_inc, params->shaper_bs,
980 params->port_to_shaper_credits,
981 params->ing_timestamp_inc,
982 params->egr_timestamp_inc);
983 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpsc), qpsc_pl);
984 if (err)
985 return err;
986 }
987
988 return 0;
989 }
990
static int mlxsw_sp_ptp_traps_set(struct mlxsw_sp *mlxsw_sp)
992 {
993 u16 event_message_type;
994 int err;
995
996 /* Deliver these message types as PTP0. */
997 event_message_type = BIT(PTP_MSGTYPE_SYNC) |
998 BIT(PTP_MSGTYPE_DELAY_REQ) |
999 BIT(PTP_MSGTYPE_PDELAY_REQ) |
1000 BIT(PTP_MSGTYPE_PDELAY_RESP);
1001
1002 err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
1003 event_message_type);
1004 if (err)
1005 return err;
1006
1007 /* Everything else is PTP1. */
1008 err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
1009 ~event_message_type);
1010 if (err)
1011 goto err_mtptpt1_set;
1012
1013 return 0;
1014
1015 err_mtptpt1_set:
1016 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
1017 return err;
1018 }
1019
static void mlxsw_sp_ptp_traps_unset(struct mlxsw_sp *mlxsw_sp)
1021 {
1022 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
1023 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
1024 }
1025
struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
1027 {
1028 struct mlxsw_sp1_ptp_state *ptp_state;
1029 int err;
1030
1031 err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
1032 if (err)
1033 return ERR_PTR(err);
1034
1035 ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
1036 if (!ptp_state)
1037 return ERR_PTR(-ENOMEM);
1038 ptp_state->common.mlxsw_sp = mlxsw_sp;
1039
1040 spin_lock_init(&ptp_state->unmatched_lock);
1041
1042 err = rhltable_init(&ptp_state->unmatched_ht,
1043 &mlxsw_sp1_ptp_unmatched_ht_params);
1044 if (err)
1045 goto err_hashtable_init;
1046
1047 err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
1048 if (err)
1049 goto err_ptp_traps_set;
1050
1051 err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
1052 if (err)
1053 goto err_fifo_clr;
1054
1055 INIT_DELAYED_WORK(&ptp_state->ht_gc_dw, mlxsw_sp1_ptp_ht_gc);
1056 mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
1057 MLXSW_SP1_PTP_HT_GC_INTERVAL);
1058 return &ptp_state->common;
1059
1060 err_fifo_clr:
1061 mlxsw_sp_ptp_traps_unset(mlxsw_sp);
1062 err_ptp_traps_set:
1063 rhltable_destroy(&ptp_state->unmatched_ht);
1064 err_hashtable_init:
1065 kfree(ptp_state);
1066 return ERR_PTR(err);
1067 }
1068
void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
1070 {
1071 struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
1072 struct mlxsw_sp1_ptp_state *ptp_state;
1073
1074 ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
1075
1076 cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
1077 mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
1078 mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
1079 mlxsw_sp_ptp_traps_unset(mlxsw_sp);
1080 rhltable_free_and_destroy(&ptp_state->unmatched_ht,
1081 &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
1082 kfree(ptp_state);
1083 }
1084
int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1086 struct hwtstamp_config *config)
1087 {
1088 *config = mlxsw_sp_port->ptp.hwtstamp_config;
1089 return 0;
1090 }
1091
1092 static int
mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
1094 u16 *p_ing_types, u16 *p_egr_types,
1095 enum hwtstamp_rx_filters *p_rx_filter)
1096 {
1097 enum hwtstamp_rx_filters rx_filter = config->rx_filter;
1098 enum hwtstamp_tx_types tx_type = config->tx_type;
1099 u16 ing_types = 0x00;
1100 u16 egr_types = 0x00;
1101
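/* The ingress/egress type masks have one bit per PTP messageType value:
 * bit 0 is Sync, bit 1 is Delay_Req, bits 0-3 cover all event messages, and
 * 0xff covers general messages as well.
 */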
1102 switch (tx_type) {
1103 case HWTSTAMP_TX_OFF:
1104 egr_types = 0x00;
1105 break;
1106 case HWTSTAMP_TX_ON:
1107 egr_types = 0xff;
1108 break;
1109 case HWTSTAMP_TX_ONESTEP_SYNC:
1110 case HWTSTAMP_TX_ONESTEP_P2P:
1111 return -ERANGE;
1112 default:
1113 return -EINVAL;
1114 }
1115
1116 switch (rx_filter) {
1117 case HWTSTAMP_FILTER_NONE:
1118 ing_types = 0x00;
1119 break;
1120 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1121 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1122 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1123 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1124 ing_types = 0x01;
1125 break;
1126 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1127 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1128 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1129 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1130 ing_types = 0x02;
1131 break;
1132 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1133 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1134 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1135 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1136 ing_types = 0x0f;
1137 break;
1138 case HWTSTAMP_FILTER_ALL:
1139 ing_types = 0xff;
1140 break;
1141 case HWTSTAMP_FILTER_SOME:
1142 case HWTSTAMP_FILTER_NTP_ALL:
1143 return -ERANGE;
1144 default:
1145 return -EINVAL;
1146 }
1147
1148 *p_ing_types = ing_types;
1149 *p_egr_types = egr_types;
1150 *p_rx_filter = rx_filter;
1151 return 0;
1152 }
1153
static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
1155 u16 ing_types, u16 egr_types)
1156 {
1157 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1158 struct mlxsw_sp_port *tmp;
1159 u16 orig_ing_types = 0;
1160 u16 orig_egr_types = 0;
1161 int err;
1162 int i;
1163
1164 /* MTPPPC configures timestamping globally, not per port. Find the
1165 * configuration that contains all configured timestamping requests.
1166 */
1167 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1168 tmp = mlxsw_sp->ports[i];
1169 if (tmp) {
1170 orig_ing_types |= tmp->ptp.ing_types;
1171 orig_egr_types |= tmp->ptp.egr_types;
1172 }
1173 if (tmp && tmp != mlxsw_sp_port) {
1174 ing_types |= tmp->ptp.ing_types;
1175 egr_types |= tmp->ptp.egr_types;
1176 }
1177 }
1178
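/* The parsing depth is shared: increase it when timestamping is enabled on
 * the first port, and decrease it when it is disabled on the last one.
 */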
1179 if ((ing_types || egr_types) && !(orig_ing_types || orig_egr_types)) {
1180 err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
1181 if (err) {
1182 netdev_err(mlxsw_sp_port->dev, "Failed to increase parsing depth");
1183 return err;
1184 }
1185 }
1186 if (!(ing_types || egr_types) && (orig_ing_types || orig_egr_types))
1187 mlxsw_sp_parsing_depth_dec(mlxsw_sp);
1188
1189 return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp,
1190 ing_types, egr_types);
1191 }
1192
static bool mlxsw_sp1_ptp_hwtstamp_enabled(struct mlxsw_sp_port *mlxsw_sp_port)
1194 {
1195 return mlxsw_sp_port->ptp.ing_types || mlxsw_sp_port->ptp.egr_types;
1196 }
1197
1198 static int
mlxsw_sp1_ptp_port_shaper_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
1200 {
1201 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1202 char qeec_pl[MLXSW_REG_QEEC_LEN];
1203
1204 mlxsw_reg_qeec_ptps_pack(qeec_pl, mlxsw_sp_port->local_port, enable);
1205 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
1206 }
1207
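/* Enable the PTP shaper only when timestamping is enabled on the port and the
 * current speed has shaper parameters defined; disable it otherwise.
 */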
static int mlxsw_sp1_ptp_port_shaper_check(struct mlxsw_sp_port *mlxsw_sp_port)
1209 {
1210 bool ptps = false;
1211 int err, i;
1212 u32 speed;
1213
1214 if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
1215 return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, false);
1216
1217 err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
1218 if (err)
1219 return err;
1220
1221 for (i = 0; i < MLXSW_SP1_PTP_SHAPER_PARAMS_LEN; i++) {
1222 if (mlxsw_sp1_ptp_shaper_params[i].ethtool_speed == speed) {
1223 ptps = true;
1224 break;
1225 }
1226 }
1227
1228 return mlxsw_sp1_ptp_port_shaper_set(mlxsw_sp_port, ptps);
1229 }
1230
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
1232 {
1233 struct delayed_work *dwork = to_delayed_work(work);
1234 struct mlxsw_sp_port *mlxsw_sp_port;
1235 int err;
1236
1237 mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
1238 ptp.shaper_dw);
1239
1240 if (!mlxsw_sp1_ptp_hwtstamp_enabled(mlxsw_sp_port))
1241 return;
1242
1243 err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
1244 if (err)
1245 netdev_err(mlxsw_sp_port->dev, "Failed to set up PTP shaper\n");
1246 }
1247
int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1249 struct hwtstamp_config *config)
1250 {
1251 enum hwtstamp_rx_filters rx_filter;
1252 u16 ing_types;
1253 u16 egr_types;
1254 int err;
1255
1256 err = mlxsw_sp1_ptp_get_message_types(config, &ing_types, &egr_types,
1257 &rx_filter);
1258 if (err)
1259 return err;
1260
1261 err = mlxsw_sp1_ptp_mtpppc_update(mlxsw_sp_port, ing_types, egr_types);
1262 if (err)
1263 return err;
1264
1265 mlxsw_sp_port->ptp.hwtstamp_config = *config;
1266 mlxsw_sp_port->ptp.ing_types = ing_types;
1267 mlxsw_sp_port->ptp.egr_types = egr_types;
1268
1269 err = mlxsw_sp1_ptp_port_shaper_check(mlxsw_sp_port);
1270 if (err)
1271 return err;
1272
1273 /* Notify the ioctl caller what we are actually timestamping. */
1274 config->rx_filter = rx_filter;
1275
1276 return 0;
1277 }
1278
int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
1280 struct ethtool_ts_info *info)
1281 {
1282 info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
1283
1284 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1285 SOF_TIMESTAMPING_RX_HARDWARE |
1286 SOF_TIMESTAMPING_RAW_HARDWARE;
1287
1288 info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1289 BIT(HWTSTAMP_TX_ON);
1290
1291 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1292 BIT(HWTSTAMP_FILTER_ALL);
1293
1294 return 0;
1295 }
1296
1297 struct mlxsw_sp_ptp_port_stat {
1298 char str[ETH_GSTRING_LEN];
1299 ptrdiff_t offset;
1300 };
1301
1302 #define MLXSW_SP_PTP_PORT_STAT(NAME, FIELD) \
1303 { \
1304 .str = NAME, \
1305 .offset = offsetof(struct mlxsw_sp_ptp_port_stats, \
1306 FIELD), \
1307 }
1308
1309 static const struct mlxsw_sp_ptp_port_stat mlxsw_sp_ptp_port_stats[] = {
1310 MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_packets", rx_gcd.packets),
1311 MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_timestamps", rx_gcd.timestamps),
1312 MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_packets", tx_gcd.packets),
1313 MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_timestamps", tx_gcd.timestamps),
1314 };
1315
1316 #undef MLXSW_SP_PTP_PORT_STAT
1317
1318 #define MLXSW_SP_PTP_PORT_STATS_LEN \
1319 ARRAY_SIZE(mlxsw_sp_ptp_port_stats)
1320
int mlxsw_sp1_get_stats_count(void)
1322 {
1323 return MLXSW_SP_PTP_PORT_STATS_LEN;
1324 }
1325
void mlxsw_sp1_get_stats_strings(u8 **p)
1327 {
1328 int i;
1329
1330 for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
1331 memcpy(*p, mlxsw_sp_ptp_port_stats[i].str,
1332 ETH_GSTRING_LEN);
1333 *p += ETH_GSTRING_LEN;
1334 }
1335 }
1336
void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1338 u64 *data, int data_index)
1339 {
1340 void *stats = &mlxsw_sp_port->ptp.stats;
1341 ptrdiff_t offset;
1342 int i;
1343
1344 data += data_index;
1345 for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
1346 offset = mlxsw_sp_ptp_port_stats[i].offset;
1347 *data++ = *(u64 *)(stats + offset);
1348 }
1349 }
1350
struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
1352 {
1353 struct mlxsw_sp2_ptp_state *ptp_state;
1354 int err;
1355
1356 ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
1357 if (!ptp_state)
1358 return ERR_PTR(-ENOMEM);
1359
1360 ptp_state->common.mlxsw_sp = mlxsw_sp;
1361
1362 err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
1363 if (err)
1364 goto err_ptp_traps_set;
1365
1366 refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
1367 mutex_init(&ptp_state->lock);
1368 return &ptp_state->common;
1369
1370 err_ptp_traps_set:
1371 kfree(ptp_state);
1372 return ERR_PTR(err);
1373 }
1374
void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
1376 {
1377 struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
1378 struct mlxsw_sp2_ptp_state *ptp_state;
1379
1380 ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
1381
1382 mutex_destroy(&ptp_state->lock);
1383 mlxsw_sp_ptp_traps_unset(mlxsw_sp);
1384 kfree(ptp_state);
1385 }
1386
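/* Reconstruct the full UTC seconds value from the 8-bit seconds field carried
 * in the CQE, accounting for a possible wrap of the low byte since the time
 * stamp was taken.
 */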
static u32 mlxsw_ptp_utc_time_stamp_sec_get(struct mlxsw_core *mlxsw_core,
1388 u8 cqe_ts_sec)
1389 {
1390 u32 utc_sec = mlxsw_core_read_utc_sec(mlxsw_core);
1391
1392 if (cqe_ts_sec > (utc_sec & 0xff))
/* A time stamp larger than the low byte of UTC (UTC & 0xff) means the
 * latter has wrapped after the time stamp was collected.
 */
1396 utc_sec -= 256;
1397
1398 utc_sec &= ~0xff;
1399 utc_sec |= cqe_ts_sec;
1400
1401 return utc_sec;
1402 }
1403
static void mlxsw_sp2_ptp_hwtstamp_fill(struct mlxsw_core *mlxsw_core,
1405 const struct mlxsw_skb_cb *cb,
1406 struct skb_shared_hwtstamps *hwtstamps)
1407 {
1408 u64 ts_sec, ts_nsec, nsec;
1409
1410 WARN_ON_ONCE(!cb->cqe_ts.sec && !cb->cqe_ts.nsec);
1411
/* The time stamp in the CQE is represented by 38 bits, which is a short
 * representation of UTC time. Software should create the full time stamp
 * using the global UTC clock. The seconds field has only 8 bits in the CQE,
 * so to create the full time stamp, use the current UTC time and fix the
 * seconds according to the relation between the UTC seconds and the CQE
 * seconds.
 */
1419 ts_sec = mlxsw_ptp_utc_time_stamp_sec_get(mlxsw_core, cb->cqe_ts.sec);
1420 ts_nsec = cb->cqe_ts.nsec;
1421
1422 nsec = ts_sec * NSEC_PER_SEC + ts_nsec;
1423
1424 hwtstamps->hwtstamp = ns_to_ktime(nsec);
1425 }
1426
void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
1428 u16 local_port)
1429 {
1430 struct skb_shared_hwtstamps hwtstamps;
1431
1432 mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
1433 &hwtstamps);
1434 *skb_hwtstamps(skb) = hwtstamps;
1435 mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
1436 }
1437
void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
1439 struct sk_buff *skb, u16 local_port)
1440 {
1441 struct skb_shared_hwtstamps hwtstamps;
1442
1443 mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
1444 &hwtstamps);
1445 skb_tstamp_tx(skb, &hwtstamps);
1446 dev_kfree_skb_any(skb);
1447 }
1448
int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
1450 struct hwtstamp_config *config)
1451 {
1452 struct mlxsw_sp2_ptp_state *ptp_state;
1453
1454 ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
1455
1456 mutex_lock(&ptp_state->lock);
1457 *config = ptp_state->config;
1458 mutex_unlock(&ptp_state->lock);
1459
1460 return 0;
1461 }
1462
1463 static int
mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
1465 u16 *p_ing_types, u16 *p_egr_types,
1466 enum hwtstamp_rx_filters *p_rx_filter)
1467 {
1468 enum hwtstamp_rx_filters rx_filter = config->rx_filter;
1469 enum hwtstamp_tx_types tx_type = config->tx_type;
1470 u16 ing_types = 0x00;
1471 u16 egr_types = 0x00;
1472
1473 *p_rx_filter = rx_filter;
1474
1475 switch (rx_filter) {
1476 case HWTSTAMP_FILTER_NONE:
1477 ing_types = 0x00;
1478 break;
1479 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1480 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1481 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1482 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1483 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1484 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1485 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1486 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1487 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1488 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1489 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1490 case HWTSTAMP_FILTER_PTP_V2_EVENT:
/* In Spectrum-2 and above, all packets are time stamped by
 * default and the driver fills in the time stamp only for event
 * packets. Return all event types even if only specific types
 * were requested.
 */
1496 ing_types = 0x0f;
1497 *p_rx_filter = HWTSTAMP_FILTER_SOME;
1498 break;
1499 case HWTSTAMP_FILTER_ALL:
1500 case HWTSTAMP_FILTER_SOME:
1501 case HWTSTAMP_FILTER_NTP_ALL:
1502 return -ERANGE;
1503 default:
1504 return -EINVAL;
1505 }
1506
1507 switch (tx_type) {
1508 case HWTSTAMP_TX_OFF:
1509 egr_types = 0x00;
1510 break;
1511 case HWTSTAMP_TX_ON:
1512 egr_types = 0x0f;
1513 break;
1514 case HWTSTAMP_TX_ONESTEP_SYNC:
1515 case HWTSTAMP_TX_ONESTEP_P2P:
1516 return -ERANGE;
1517 default:
1518 return -EINVAL;
1519 }
1520
1521 if ((ing_types && !egr_types) || (!ing_types && egr_types))
1522 return -EINVAL;
1523
1524 *p_ing_types = ing_types;
1525 *p_egr_types = egr_types;
1526 return 0;
1527 }
1528
static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
1530 u16 ing_types, u16 egr_types)
1531 {
1532 char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
1533
1534 mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, ptp_trap_en, ing_types,
1535 egr_types);
1536 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpcpc), mtpcpc_pl);
1537 }
1538
static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
1540 u16 egr_types,
1541 struct hwtstamp_config new_config)
1542 {
1543 struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
1544 int err;
1545
1546 err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, true, ing_types, egr_types);
1547 if (err)
1548 return err;
1549
1550 ptp_state->config = new_config;
1551 return 0;
1552 }
1553
static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
1555 struct hwtstamp_config new_config)
1556 {
1557 struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
1558 int err;
1559
1560 err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, false, 0, 0);
1561 if (err)
1562 return err;
1563
1564 ptp_state->config = new_config;
1565 return 0;
1566 }
1567
static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
1569 u16 ing_types, u16 egr_types,
1570 struct hwtstamp_config new_config)
1571 {
1572 struct mlxsw_sp2_ptp_state *ptp_state;
1573 int err;
1574
1575 ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
1576
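/* Only the first port that enables timestamping configures the hardware;
 * subsequent ports just take a reference on the existing configuration.
 */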
1577 if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
1578 return 0;
1579
1580 err = mlxsw_sp2_ptp_enable(mlxsw_sp_port->mlxsw_sp, ing_types,
1581 egr_types, new_config);
1582 if (err)
1583 return err;
1584
1585 refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
1586
1587 return 0;
1588 }
1589
static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
1591 struct hwtstamp_config new_config)
1592 {
1593 struct mlxsw_sp2_ptp_state *ptp_state;
1594 int err;
1595
1596 ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
1597
1598 if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
1599 return 0;
1600
1601 err = mlxsw_sp2_ptp_disable(mlxsw_sp_port->mlxsw_sp, new_config);
1602 if (err)
1603 goto err_ptp_disable;
1604
1605 return 0;
1606
1607 err_ptp_disable:
1608 refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
1609 return err;
1610 }
1611
int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
1613 struct hwtstamp_config *config)
1614 {
1615 struct mlxsw_sp2_ptp_state *ptp_state;
1616 enum hwtstamp_rx_filters rx_filter;
1617 struct hwtstamp_config new_config;
1618 u16 new_ing_types, new_egr_types;
1619 bool ptp_enabled;
1620 int err;
1621
1622 ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
1623 mutex_lock(&ptp_state->lock);
1624
1625 err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
1626 &new_egr_types, &rx_filter);
1627 if (err)
1628 goto err_get_message_types;
1629
1630 new_config.flags = config->flags;
1631 new_config.tx_type = config->tx_type;
1632 new_config.rx_filter = rx_filter;
1633
1634 ptp_enabled = mlxsw_sp_port->ptp.ing_types ||
1635 mlxsw_sp_port->ptp.egr_types;
1636
1637 if ((new_ing_types || new_egr_types) && !ptp_enabled) {
1638 err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
1639 new_egr_types, new_config);
1640 if (err)
1641 goto err_configure_port;
1642 } else if (!new_ing_types && !new_egr_types && ptp_enabled) {
1643 err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
1644 if (err)
1645 goto err_deconfigure_port;
1646 }
1647
1648 mlxsw_sp_port->ptp.ing_types = new_ing_types;
1649 mlxsw_sp_port->ptp.egr_types = new_egr_types;
1650
1651 /* Notify the ioctl caller what we are actually timestamping. */
1652 config->rx_filter = rx_filter;
1653 mutex_unlock(&ptp_state->lock);
1654
1655 return 0;
1656
1657 err_deconfigure_port:
1658 err_configure_port:
1659 err_get_message_types:
1660 mutex_unlock(&ptp_state->lock);
1661 return err;
1662 }
1663
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
1665 struct ethtool_ts_info *info)
1666 {
1667 info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
1668
1669 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1670 SOF_TIMESTAMPING_RX_HARDWARE |
1671 SOF_TIMESTAMPING_RAW_HARDWARE;
1672
1673 info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1674 BIT(HWTSTAMP_TX_ON);
1675
1676 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1677 BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1678 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
1679
1680 return 0;
1681 }
1682
int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
1684 struct mlxsw_sp_port *mlxsw_sp_port,
1685 struct sk_buff *skb,
1686 const struct mlxsw_tx_info *tx_info)
1687 {
1688 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
1689 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1690 dev_kfree_skb_any(skb);
1691 return -ENOMEM;
1692 }
1693
1694 mlxsw_sp_txhdr_construct(skb, tx_info);
1695 return 0;
1696 }
1697
int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
1699 struct mlxsw_sp_port *mlxsw_sp_port,
1700 struct sk_buff *skb,
1701 const struct mlxsw_tx_info *tx_info)
1702 {
1703 /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
1704 * their correction field correctly set on the egress port they must be
1705 * transmitted as data packets. Such packets ingress the ASIC via the
1706 * CPU port and must have a VLAN tag, as the CPU port is not configured
1707 * with a PVID. Push the default VLAN (4095), which is configured as
1708 * egress untagged on all the ports.
1709 */
1710 if (!skb_vlan_tagged(skb)) {
1711 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1712 MLXSW_SP_DEFAULT_VID);
1713 if (!skb) {
1714 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1715 return -ENOMEM;
1716 }
1717 }
1718
1719 return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
1720 tx_info);
1721 }
1722