1 /* QLogic qede NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include "qede_ptp.h"
33 
/* Per-device PTP context. Allocated by qede_ptp_enable(), stored in
 * edev->ptp and torn down/freed by qede_ptp_disable().
 */
struct qede_ptp {
	const struct qed_eth_ptp_ops	*ops;		/* qed HW access callbacks */
	struct ptp_clock_info		clock_info;	/* PHC capabilities and ops */
	struct cyclecounter		cc;		/* reads PHC via qede_ptp_read_cc() */
	struct timecounter		tc;		/* cycles -> nanoseconds state */
	struct ptp_clock		*clock;		/* registered PHC, NULL if none */
	struct work_struct		work;		/* deferred Tx timestamp retrieval */
	struct qede_dev			*edev;		/* back-pointer to owning device */
	struct sk_buff			*tx_skb;	/* single outstanding Tx TS skb */

	/* ptp spinlock is used for protecting the cycle/time counter fields
	 * and, also for serializing the qed PTP API invocations.
	 */
	spinlock_t			lock;
	bool				hw_ts_ioctl_called;	/* SIOCSHWTSTAMP seen */
	u16				tx_type;		/* HWTSTAMP_TX_* requested */
	u16				rx_filter;		/* HWTSTAMP_FILTER_* in effect */
};
52 
53 /**
54  * qede_ptp_adjfreq
55  * @ptp: the ptp clock structure
56  * @ppb: parts per billion adjustment from base
57  *
58  * Adjust the frequency of the ptp cycle counter by the
59  * indicated ppb from the base frequency.
60  */
61 static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
62 {
63 	struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
64 	struct qede_dev *edev = ptp->edev;
65 	int rc;
66 
67 	__qede_lock(edev);
68 	if (edev->state == QEDE_STATE_OPEN) {
69 		spin_lock_bh(&ptp->lock);
70 		rc = ptp->ops->adjfreq(edev->cdev, ppb);
71 		spin_unlock_bh(&ptp->lock);
72 	} else {
73 		DP_ERR(edev, "PTP adjfreq called while interface is down\n");
74 		rc = -EFAULT;
75 	}
76 	__qede_unlock(edev);
77 
78 	return rc;
79 }
80 
81 static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
82 {
83 	struct qede_dev *edev;
84 	struct qede_ptp *ptp;
85 
86 	ptp = container_of(info, struct qede_ptp, clock_info);
87 	edev = ptp->edev;
88 
89 	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
90 		   delta);
91 
92 	spin_lock_bh(&ptp->lock);
93 	timecounter_adjtime(&ptp->tc, delta);
94 	spin_unlock_bh(&ptp->lock);
95 
96 	return 0;
97 }
98 
99 static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
100 {
101 	struct qede_dev *edev;
102 	struct qede_ptp *ptp;
103 	u64 ns;
104 
105 	ptp = container_of(info, struct qede_ptp, clock_info);
106 	edev = ptp->edev;
107 
108 	spin_lock_bh(&ptp->lock);
109 	ns = timecounter_read(&ptp->tc);
110 	spin_unlock_bh(&ptp->lock);
111 
112 	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);
113 
114 	*ts = ns_to_timespec64(ns);
115 
116 	return 0;
117 }
118 
119 static int qede_ptp_settime(struct ptp_clock_info *info,
120 			    const struct timespec64 *ts)
121 {
122 	struct qede_dev *edev;
123 	struct qede_ptp *ptp;
124 	u64 ns;
125 
126 	ptp = container_of(info, struct qede_ptp, clock_info);
127 	edev = ptp->edev;
128 
129 	ns = timespec64_to_ns(ts);
130 
131 	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);
132 
133 	/* Re-init the timecounter */
134 	spin_lock_bh(&ptp->lock);
135 	timecounter_init(&ptp->tc, &ptp->cc, ns);
136 	spin_unlock_bh(&ptp->lock);
137 
138 	return 0;
139 }
140 
141 /* Enable (or disable) ancillary features of the phc subsystem */
142 static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
143 					     struct ptp_clock_request *rq,
144 					     int on)
145 {
146 	struct qede_dev *edev;
147 	struct qede_ptp *ptp;
148 
149 	ptp = container_of(info, struct qede_ptp, clock_info);
150 	edev = ptp->edev;
151 
152 	DP_ERR(edev, "PHC ancillary features are not supported\n");
153 
154 	return -ENOTSUPP;
155 }
156 
/* Deferred work for Tx timestamping: poll the HW for the latched Tx
 * timestamp of ptp->tx_skb, deliver it to the socket error queue, then
 * release the single-outstanding-timestamp slot.
 */
static void qede_ptp_task(struct work_struct *work)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	int rc;

	ptp = container_of(work, struct qede_ptp, work);
	edev = ptp->edev;

	/* Read Tx timestamp registers */
	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
	spin_unlock_bh(&ptp->lock);
	if (rc) {
		/* Reschedule to keep checking for a valid timestamp value */
		/* NOTE(review): there is no retry limit here - if the HW never
		 * latches a valid Tx timestamp this work re-arms itself
		 * indefinitely and ptp->tx_skb stays held. Confirm a timeout
		 * is acceptable to add upstream.
		 */
		schedule_work(&ptp->work);
		return;
	}

	/* NOTE(review): timecounter_cyc2time() runs here without ptp->lock,
	 * although the struct comment says the lock protects the counter
	 * fields - verify this cannot race with settime/adjtime.
	 */
	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
	/* Drop the reference taken via skb_get() in qede_ptp_tx_ts() */
	dev_kfree_skb_any(ptp->tx_skb);
	ptp->tx_skb = NULL;
	/* Allow the next Tx packet to claim the timestamping slot */
	clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}
190 
191 /* Read the PHC. This API is invoked with ptp_lock held. */
192 static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
193 {
194 	struct qede_dev *edev;
195 	struct qede_ptp *ptp;
196 	u64 phc_cycles;
197 	int rc;
198 
199 	ptp = container_of(cc, struct qede_ptp, cc);
200 	edev = ptp->edev;
201 	rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
202 	if (rc)
203 		WARN_ONCE(1, "PHC read err %d\n", rc);
204 
205 	DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);
206 
207 	return phc_cycles;
208 }
209 
/* Program the HW Tx timestamping mode and Rx packet filter according to
 * the values last requested through the SIOCSHWTSTAMP ioctl (stored in
 * ptp->tx_type / ptp->rx_filter). ptp->rx_filter may be widened to the
 * closest filter the HW actually supports, so callers can read it back
 * to report the effective filter to userspace.
 *
 * Returns 0 on success, -EIO if PTP is not initialized, -ERANGE for an
 * unsupported Tx timestamping mode.
 */
static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
	enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
	enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp)
		return -EIO;

	/* Nothing to program until userspace has configured timestamping */
	if (!ptp->hw_ts_ioctl_called) {
		DP_INFO(edev, "TS IOCTL not called\n");
		return 0;
	}

	/* NOTE(review): no default case - an unrecognized tx_type silently
	 * keeps the initial QED_PTP_HWTSTAMP_TX_ON value. Confirm callers
	 * can never pass a value outside this set.
	 */
	switch (ptp->tx_type) {
	case HWTSTAMP_TX_ON:
		edev->flags |= QEDE_TX_TIMESTAMPING_EN;
		tx_type = QED_PTP_HWTSTAMP_TX_ON;
		break;

	case HWTSTAMP_TX_OFF:
		edev->flags &= ~QEDE_TX_TIMESTAMPING_EN;
		tx_type = QED_PTP_HWTSTAMP_TX_OFF;
		break;

	case HWTSTAMP_TX_ONESTEP_SYNC:
		DP_ERR(edev, "One-step timestamping is not supported\n");
		return -ERANGE;
	}

	/* Map the requested HWTSTAMP_FILTER_* onto the HW filter classes;
	 * SYNC/DELAY_REQ requests are widened to the matching EVENT filter
	 * and ptp->rx_filter is updated to reflect what was programmed.
	 */
	spin_lock_bh(&ptp->lock);
	switch (ptp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		rx_filter = QED_PTP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
		ptp->rx_filter = HWTSTAMP_FILTER_NONE;
		rx_filter = QED_PTP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 events */
		rx_filter = QED_PTP_FILTER_V1_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		/* Initialize PTP detection L2 events */
		rx_filter = QED_PTP_FILTER_V2_L2_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_GEN;
		break;
	}

	ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);

	spin_unlock_bh(&ptp->lock);

	return 0;
}
298 
299 int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
300 {
301 	struct hwtstamp_config config;
302 	struct qede_ptp *ptp;
303 	int rc;
304 
305 	ptp = edev->ptp;
306 	if (!ptp)
307 		return -EIO;
308 
309 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
310 		return -EFAULT;
311 
312 	DP_VERBOSE(edev, QED_MSG_DEBUG,
313 		   "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
314 		   config.tx_type, config.rx_filter);
315 
316 	if (config.flags) {
317 		DP_ERR(edev, "config.flags is reserved for future use\n");
318 		return -EINVAL;
319 	}
320 
321 	ptp->hw_ts_ioctl_called = 1;
322 	ptp->tx_type = config.tx_type;
323 	ptp->rx_filter = config.rx_filter;
324 
325 	rc = qede_ptp_cfg_filters(edev);
326 	if (rc)
327 		return rc;
328 
329 	config.rx_filter = ptp->rx_filter;
330 
331 	return copy_to_user(ifr->ifr_data, &config,
332 			    sizeof(config)) ? -EFAULT : 0;
333 }
334 
335 int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
336 {
337 	struct qede_ptp *ptp = edev->ptp;
338 
339 	if (!ptp)
340 		return -EIO;
341 
342 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
343 				SOF_TIMESTAMPING_RX_SOFTWARE |
344 				SOF_TIMESTAMPING_SOFTWARE |
345 				SOF_TIMESTAMPING_TX_HARDWARE |
346 				SOF_TIMESTAMPING_RX_HARDWARE |
347 				SOF_TIMESTAMPING_RAW_HARDWARE;
348 
349 	if (ptp->clock)
350 		info->phc_index = ptp_clock_index(ptp->clock);
351 	else
352 		info->phc_index = -1;
353 
354 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
355 			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
356 			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
357 			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
358 			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
359 			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
360 			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
361 			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
362 			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
363 			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
364 			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
365 			   BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
366 			   BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
367 
368 	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
369 
370 	return 0;
371 }
372 
/* Tear down PTP support: unregister the PHC, stop the Tx timestamp
 * worker, release any pending Tx skb, disable PTP in HW and free the
 * context. Safe to call when PTP was never enabled (edev->ptp == NULL).
 */
void qede_ptp_disable(struct qede_dev *edev)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	/* Unregister the clock first so no new PHC callbacks arrive */
	if (ptp->clock) {
		ptp_clock_unregister(ptp->clock);
		ptp->clock = NULL;
	}

	/* Cancel PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling.
	 */
	cancel_work_sync(&ptp->work);
	if (ptp->tx_skb) {
		/* Drop the reference skb_get() took in qede_ptp_tx_ts() */
		dev_kfree_skb_any(ptp->tx_skb);
		ptp->tx_skb = NULL;
	}

	/* Disable PTP in HW */
	spin_lock_bh(&ptp->lock);
	ptp->ops->disable(edev->cdev);
	spin_unlock_bh(&ptp->lock);

	kfree(ptp);
	edev->ptp = NULL;
}
403 
404 static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
405 {
406 	struct qede_ptp *ptp;
407 	int rc;
408 
409 	ptp = edev->ptp;
410 	if (!ptp)
411 		return -EINVAL;
412 
413 	spin_lock_init(&ptp->lock);
414 
415 	/* Configure PTP in HW */
416 	rc = ptp->ops->enable(edev->cdev);
417 	if (rc) {
418 		DP_INFO(edev, "PTP HW enable failed\n");
419 		return rc;
420 	}
421 
422 	/* Init work queue for Tx timestamping */
423 	INIT_WORK(&ptp->work, qede_ptp_task);
424 
425 	/* Init cyclecounter and timecounter. This is done only in the first
426 	 * load. If done in every load, PTP application will fail when doing
427 	 * unload / load (e.g. MTU change) while it is running.
428 	 */
429 	if (init_tc) {
430 		memset(&ptp->cc, 0, sizeof(ptp->cc));
431 		ptp->cc.read = qede_ptp_read_cc;
432 		ptp->cc.mask = CYCLECOUNTER_MASK(64);
433 		ptp->cc.shift = 0;
434 		ptp->cc.mult = 1;
435 
436 		timecounter_init(&ptp->tc, &ptp->cc,
437 				 ktime_to_ns(ktime_get_real()));
438 	}
439 
440 	return rc;
441 }
442 
443 int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
444 {
445 	struct qede_ptp *ptp;
446 	int rc;
447 
448 	ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
449 	if (!ptp) {
450 		DP_INFO(edev, "Failed to allocate struct for PTP\n");
451 		return -ENOMEM;
452 	}
453 
454 	ptp->edev = edev;
455 	ptp->ops = edev->ops->ptp;
456 	if (!ptp->ops) {
457 		DP_INFO(edev, "PTP enable failed\n");
458 		rc = -EIO;
459 		goto err1;
460 	}
461 
462 	edev->ptp = ptp;
463 
464 	rc = qede_ptp_init(edev, init_tc);
465 	if (rc)
466 		goto err1;
467 
468 	qede_ptp_cfg_filters(edev);
469 
470 	/* Fill the ptp_clock_info struct and register PTP clock */
471 	ptp->clock_info.owner = THIS_MODULE;
472 	snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
473 	ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
474 	ptp->clock_info.n_alarm = 0;
475 	ptp->clock_info.n_ext_ts = 0;
476 	ptp->clock_info.n_per_out = 0;
477 	ptp->clock_info.pps = 0;
478 	ptp->clock_info.adjfreq = qede_ptp_adjfreq;
479 	ptp->clock_info.adjtime = qede_ptp_adjtime;
480 	ptp->clock_info.gettime64 = qede_ptp_gettime;
481 	ptp->clock_info.settime64 = qede_ptp_settime;
482 	ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;
483 
484 	ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
485 	if (IS_ERR(ptp->clock)) {
486 		rc = -EINVAL;
487 		DP_ERR(edev, "PTP clock registeration failed\n");
488 		goto err2;
489 	}
490 
491 	return 0;
492 
493 err2:
494 	qede_ptp_disable(edev);
495 	ptp->clock = NULL;
496 err1:
497 	kfree(ptp);
498 	edev->ptp = NULL;
499 
500 	return rc;
501 }
502 
503 void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
504 {
505 	struct qede_ptp *ptp;
506 
507 	ptp = edev->ptp;
508 	if (!ptp)
509 		return;
510 
511 	if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
512 		return;
513 
514 	if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
515 		DP_NOTICE(edev,
516 			  "Tx timestamping was not enabled, this packet will not be timestamped\n");
517 	} else if (unlikely(ptp->tx_skb)) {
518 		DP_NOTICE(edev,
519 			  "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
520 	} else {
521 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
522 		/* schedule check for Tx timestamp */
523 		ptp->tx_skb = skb_get(skb);
524 		schedule_work(&ptp->work);
525 	}
526 }
527 
528 void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
529 {
530 	struct qede_ptp *ptp;
531 	u64 timestamp, ns;
532 	int rc;
533 
534 	ptp = edev->ptp;
535 	if (!ptp)
536 		return;
537 
538 	spin_lock_bh(&ptp->lock);
539 	rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
540 	if (rc) {
541 		spin_unlock_bh(&ptp->lock);
542 		DP_INFO(edev, "Invalid Rx timestamp\n");
543 		return;
544 	}
545 
546 	ns = timecounter_cyc2time(&ptp->tc, timestamp);
547 	spin_unlock_bh(&ptp->lock);
548 	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
549 	DP_VERBOSE(edev, QED_MSG_DEBUG,
550 		   "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
551 		   timestamp, ns);
552 }
553