// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include "qede_ptp.h"
#define QEDE_PTP_TX_TIMEOUT (2 * HZ)
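/* QEDE_PTP_TX_TIMEOUT bounds how long qede_ptp_task() keeps polling for a Tx
 * timestamp; since HZ is the number of jiffies per second, 2 * HZ is roughly
 * a two-second budget before the pending skb is dropped.
 */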

struct qede_ptp {
	const struct qed_eth_ptp_ops *ops;
	struct ptp_clock_info clock_info;
	struct cyclecounter cc;
	struct timecounter tc;
	struct ptp_clock *clock;
	struct work_struct work;
	unsigned long ptp_tx_start;
	struct qede_dev *edev;
	struct sk_buff *tx_skb;

	/* ptp spinlock is used for protecting the cycle/time counter fields
	 * and also for serializing the qed PTP API invocations.
	 */
	spinlock_t lock;
	bool hw_ts_ioctl_called;
	u16 tx_type;
	u16 rx_filter;
};

/**
 * qede_ptp_adjfine() - Adjust the frequency of the PTP cycle counter.
 *
 * @info: The PTP clock info structure.
 * @scaled_ppm: Scaled parts per million adjustment from base.
 *
 * Scaled parts per million is ppm with a 16-bit binary fractional field.
 *
 * Return: Zero on success, negative errno otherwise.
 */
static int qede_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	struct qede_dev *edev = ptp->edev;
	int rc;

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_OPEN) {
		spin_lock_bh(&ptp->lock);
		rc = ptp->ops->adjfreq(edev->cdev, ppb);
		spin_unlock_bh(&ptp->lock);
	} else {
		DP_ERR(edev, "PTP adjfine called while interface is down\n");
		rc = -EFAULT;
	}
	__qede_unlock(edev);

	return rc;
}
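/* A worked example of the scaled_ppm encoding used above: the lower 16 bits
 * are a binary fraction of one ppm, so scaled_ppm == 65536 requests a +1 ppm
 * (+1000 ppb) speed-up and scaled_ppm == 32768 requests +0.5 ppm (+500 ppb).
 * scaled_ppm_to_ppb() computes roughly ppb = scaled_ppm * 1000 / 65536 before
 * the value is handed to the qed adjfreq op.
 */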

static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
		   delta);

	spin_lock_bh(&ptp->lock);
	timecounter_adjtime(&ptp->tc, delta);
	spin_unlock_bh(&ptp->lock);

	return 0;
}

static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 ns;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	spin_lock_bh(&ptp->lock);
	ns = timecounter_read(&ptp->tc);
	spin_unlock_bh(&ptp->lock);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int qede_ptp_settime(struct ptp_clock_info *info,
			    const struct timespec64 *ts)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 ns;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	ns = timespec64_to_ns(ts);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);

	/* Re-init the timecounter */
	spin_lock_bh(&ptp->lock);
	timecounter_init(&ptp->tc, &ptp->cc, ns);
	spin_unlock_bh(&ptp->lock);

	return 0;
}
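/* For illustration only (not part of the driver): once the clock is
 * registered, the adjfine/adjtime/gettime64/settime64 callbacks above can be
 * exercised from user space with the linuxptp phc_ctl tool, e.g.
 *
 *   phc_ctl /dev/ptp0 get          # qede_ptp_gettime()
 *   phc_ctl /dev/ptp0 set          # qede_ptp_settime() from CLOCK_REALTIME
 *   phc_ctl /dev/ptp0 adj 1.5      # qede_ptp_adjtime()
 *   phc_ctl /dev/ptp0 freq 1000    # frequency adjustment, reaching adjfine
 *
 * where /dev/ptp0 stands in for whatever index ptp_clock_register() assigns
 * to this device.
 */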

/* Enable (or disable) ancillary features of the phc subsystem */
static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
					     struct ptp_clock_request *rq,
					     int on)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	DP_ERR(edev, "PHC ancillary features are not supported\n");

	return -ENOTSUPP;
}

static void qede_ptp_task(struct work_struct *work)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	bool timedout;
	int rc;

	ptp = container_of(work, struct qede_ptp, work);
	edev = ptp->edev;
	timedout = time_is_before_jiffies(ptp->ptp_tx_start +
					  QEDE_PTP_TX_TIMEOUT);

	/* Read Tx timestamp registers */
	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
	spin_unlock_bh(&ptp->lock);
	if (rc) {
		if (unlikely(timedout)) {
			DP_INFO(edev, "Tx timestamp is not recorded\n");
			dev_kfree_skb_any(ptp->tx_skb);
			ptp->tx_skb = NULL;
			clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
					 &edev->flags);
			edev->ptp_skip_txts++;
		} else {
			/* Reschedule to keep checking for a valid TS value */
			schedule_work(&ptp->work);
		}
		return;
	}

	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
	dev_kfree_skb_any(ptp->tx_skb);
	ptp->tx_skb = NULL;
	clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}

/* Read the PHC. This API is invoked with ptp_lock held. */
static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 phc_cycles;
	int rc;

	ptp = container_of(cc, struct qede_ptp, cc);
	edev = ptp->edev;
	rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
	if (rc)
		WARN_ONCE(1, "PHC read err %d\n", rc);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);

	return phc_cycles;
}

static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
	enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
	enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp)
		return -EIO;

	if (!ptp->hw_ts_ioctl_called) {
		DP_INFO(edev, "TS IOCTL not called\n");
		return 0;
	}

	switch (ptp->tx_type) {
	case HWTSTAMP_TX_ON:
		set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
		tx_type = QED_PTP_HWTSTAMP_TX_ON;
		break;

	case HWTSTAMP_TX_OFF:
		clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
		tx_type = QED_PTP_HWTSTAMP_TX_OFF;
		break;

	case HWTSTAMP_TX_ONESTEP_SYNC:
	case HWTSTAMP_TX_ONESTEP_P2P:
		DP_ERR(edev, "One-step timestamping is not supported\n");
		return -ERANGE;
	}

	spin_lock_bh(&ptp->lock);
	switch (ptp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		rx_filter = QED_PTP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		ptp->rx_filter = HWTSTAMP_FILTER_NONE;
		rx_filter = QED_PTP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 events */
		rx_filter = QED_PTP_FILTER_V1_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		/* Initialize PTP detection for L2 events */
		rx_filter = QED_PTP_FILTER_V2_L2_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		/* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_GEN;
		break;
	}

	ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);

	spin_unlock_bh(&ptp->lock);

	return 0;
}

int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EIO;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
		   config.tx_type, config.rx_filter);

	ptp->hw_ts_ioctl_called = 1;
	ptp->tx_type = config.tx_type;
	ptp->rx_filter = config.rx_filter;

	rc = qede_ptp_cfg_filters(edev);
	if (rc)
		return rc;

	config.rx_filter = ptp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
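/* A minimal user-space sketch (illustrative only, not part of the driver) of
 * the ioctl path that ends up in qede_ptp_hw_ts(); the interface name "eth0"
 * and the socket fd are assumptions:
 *
 *   #include <linux/net_tstamp.h>   // struct hwtstamp_config
 *   #include <linux/sockios.h>      // SIOCSHWTSTAMP
 *   #include <net/if.h>
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr;
 *
 *   memset(&ifr, 0, sizeof(ifr));
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(sockfd, SIOCSHWTSTAMP, &ifr);  // sockfd: any AF_INET datagram socket
 *
 * On return, cfg.rx_filter holds the filter the driver actually programmed;
 * as qede_ptp_cfg_filters() shows, SYNC/DELAY_REQ-only requests are widened
 * to the matching EVENT filter.
 */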

int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp) {
		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;

		return 0;
	}

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (ptp->clock)
		info->phc_index = ptp_clock_index(ptp->clock);
	else
		info->phc_index = -1;

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

	return 0;
}
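/* The values filled in above are what user space sees from
 * `ethtool -T <ifname>`: the SOF_TIMESTAMPING capabilities, the
 * "PTP Hardware Clock" index (phc_index) and the supported
 * tx_types/rx_filters.
 */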

void qede_ptp_disable(struct qede_dev *edev)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	if (ptp->clock) {
		ptp_clock_unregister(ptp->clock);
		ptp->clock = NULL;
	}

	/* Cancel PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling.
	 */
	cancel_work_sync(&ptp->work);
	if (ptp->tx_skb) {
		dev_kfree_skb_any(ptp->tx_skb);
		ptp->tx_skb = NULL;
		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
	}

	/* Disable PTP in HW */
	spin_lock_bh(&ptp->lock);
	ptp->ops->disable(edev->cdev);
	spin_unlock_bh(&ptp->lock);

	kfree(ptp);
	edev->ptp = NULL;
}

static int qede_ptp_init(struct qede_dev *edev)
{
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EINVAL;

	spin_lock_init(&ptp->lock);

	/* Configure PTP in HW */
	rc = ptp->ops->enable(edev->cdev);
	if (rc) {
		DP_INFO(edev, "PTP HW enable failed\n");
		return rc;
	}

	/* Init work queue for Tx timestamping */
	INIT_WORK(&ptp->work, qede_ptp_task);

	/* Init cyclecounter and timecounter */
	memset(&ptp->cc, 0, sizeof(ptp->cc));
	ptp->cc.read = qede_ptp_read_cc;
	ptp->cc.mask = CYCLECOUNTER_MASK(64);
	ptp->cc.shift = 0;
	ptp->cc.mult = 1;
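	/* With mult = 1 and shift = 0, the cyclecounter conversion
	 * (ns = cycles * mult >> shift) returns the raw counter value
	 * unchanged, i.e. the qed PHC counter is treated as already counting
	 * nanoseconds; frequency trimming is done through the adjfreq op
	 * rather than by scaling here.
	 */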

	timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));

	return 0;
}

int qede_ptp_enable(struct qede_dev *edev)
{
	struct qede_ptp *ptp;
	int rc;

	ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
	if (!ptp) {
		DP_INFO(edev, "Failed to allocate struct for PTP\n");
		return -ENOMEM;
	}

	ptp->edev = edev;
	ptp->ops = edev->ops->ptp;
	if (!ptp->ops) {
		DP_INFO(edev, "PTP enable failed\n");
		rc = -EIO;
		goto err1;
	}

	edev->ptp = ptp;

	rc = qede_ptp_init(edev);
	if (rc)
		goto err1;

	qede_ptp_cfg_filters(edev);

	/* Fill the ptp_clock_info struct and register PTP clock */
	ptp->clock_info.owner = THIS_MODULE;
	snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
	ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
	ptp->clock_info.n_alarm = 0;
	ptp->clock_info.n_ext_ts = 0;
	ptp->clock_info.n_per_out = 0;
	ptp->clock_info.pps = 0;
	ptp->clock_info.adjfine = qede_ptp_adjfine;
	ptp->clock_info.adjtime = qede_ptp_adjtime;
	ptp->clock_info.gettime64 = qede_ptp_gettime;
	ptp->clock_info.settime64 = qede_ptp_settime;
	ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;

	ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
	if (IS_ERR(ptp->clock)) {
		DP_ERR(edev, "PTP clock registration failed\n");
		qede_ptp_disable(edev);
		rc = -EINVAL;
		goto err2;
	}

	return 0;

err1:
	kfree(ptp);
err2:
	edev->ptp = NULL;

	return rc;
}

void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
				  &edev->flags)) {
		DP_VERBOSE(edev, QED_MSG_DEBUG, "Timestamping in progress\n");
		edev->ptp_skip_txts++;
		return;
	}

	if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Tx timestamping was not enabled, this pkt will not be timestamped\n");
		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
		edev->ptp_skip_txts++;
	} else if (unlikely(ptp->tx_skb)) {
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Device supports a single outstanding pkt to timestamp, this one will not be timestamped\n");
		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
		edev->ptp_skip_txts++;
	} else {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		/* schedule check for Tx timestamp */
		ptp->tx_skb = skb_get(skb);
		ptp->ptp_tx_start = jiffies;
		schedule_work(&ptp->work);
	}
}

void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return;

	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
	if (rc) {
		spin_unlock_bh(&ptp->lock);
		DP_INFO(edev, "Invalid Rx timestamp\n");
		return;
	}

	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	spin_unlock_bh(&ptp->lock);
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}