/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "qede_ptp.h"

struct qede_ptp {
	const struct qed_eth_ptp_ops	*ops;
	struct ptp_clock_info		clock_info;
	struct cyclecounter		cc;
	struct timecounter		tc;
	struct ptp_clock		*clock;
	struct work_struct		work;
	struct qede_dev			*edev;
	struct sk_buff			*tx_skb;

	/* The ptp spinlock is used to protect the cycle/time counter fields
	 * and to serialize the qed PTP API invocations.
	 */
	spinlock_t			lock;
	bool				hw_ts_ioctl_called;
	u16				tx_type;
	u16				rx_filter;
};

/**
 * qede_ptp_adjfreq() - Adjust the frequency of the PTP cycle counter
 * @info: The PTP clock info structure
 * @ppb: Parts per billion adjustment from the base frequency
 *
 * Return: Zero on success, negative error code otherwise.
 */
static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
{
	struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
	struct qede_dev *edev = ptp->edev;
	int rc;

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_OPEN) {
		spin_lock_bh(&ptp->lock);
		rc = ptp->ops->adjfreq(edev->cdev, ppb);
		spin_unlock_bh(&ptp->lock);
	} else {
		DP_ERR(edev, "PTP adjfreq called while interface is down\n");
		rc = -EFAULT;
	}
	__qede_unlock(edev);

	return rc;
}

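/* Shift the software clock by @delta nanoseconds by adjusting the
 * timecounter offset; the hardware cycle counter itself is not touched.
 */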
static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
		   delta);

	spin_lock_bh(&ptp->lock);
	timecounter_adjtime(&ptp->tc, delta);
	spin_unlock_bh(&ptp->lock);

	return 0;
}

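/* Report the current time of the software clock: read the free-running PHC
 * cycle counter and convert it to nanoseconds through the timecounter.
 */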
static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 ns;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	spin_lock_bh(&ptp->lock);
	ns = timecounter_read(&ptp->tc);
	spin_unlock_bh(&ptp->lock);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);

	*ts = ns_to_timespec64(ns);

	return 0;
}

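/* Set the software clock to the requested time by re-initializing the
 * timecounter with the new nanosecond value.
 */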
static int qede_ptp_settime(struct ptp_clock_info *info,
			    const struct timespec64 *ts)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 ns;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	ns = timespec64_to_ns(ts);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);

	/* Re-init the timecounter */
	spin_lock_bh(&ptp->lock);
	timecounter_init(&ptp->tc, &ptp->cc, ns);
	spin_unlock_bh(&ptp->lock);

	return 0;
}

/* Enable (or disable) ancillary features of the PHC subsystem */
static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
					     struct ptp_clock_request *rq,
					     int on)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	DP_ERR(edev, "PHC ancillary features are not supported\n");

	return -EOPNOTSUPP;
}

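/* Deferred work for Tx timestamping: poll the device for a valid Tx
 * timestamp, attach it to the pending skb and release the in-progress flag.
 */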
static void qede_ptp_task(struct work_struct *work)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	int rc;

	ptp = container_of(work, struct qede_ptp, work);
	edev = ptp->edev;

	/* Read Tx timestamp registers */
	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
	spin_unlock_bh(&ptp->lock);
	if (rc) {
		/* Reschedule to keep checking for a valid timestamp value */
		schedule_work(&ptp->work);
		return;
	}

	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
	dev_kfree_skb_any(ptp->tx_skb);
	ptp->tx_skb = NULL;
	clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}

/* Read the PHC. This API is invoked with ptp_lock held. */
static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 phc_cycles;
	int rc;

	ptp = container_of(cc, struct qede_ptp, cc);
	edev = ptp->edev;
	rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
	if (rc)
		WARN_ONCE(1, "PHC read err %d\n", rc);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);

	return phc_cycles;
}

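/* Program the device Tx timestamping mode and Rx packet filters according to
 * the configuration last requested through the hwtstamp ioctl.
 */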
static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
	enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
	enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp)
		return -EIO;

	if (!ptp->hw_ts_ioctl_called) {
		DP_INFO(edev, "TS IOCTL not called\n");
		return 0;
	}

	switch (ptp->tx_type) {
	case HWTSTAMP_TX_ON:
		edev->flags |= QEDE_TX_TIMESTAMPING_EN;
		tx_type = QED_PTP_HWTSTAMP_TX_ON;
		break;

	case HWTSTAMP_TX_OFF:
		edev->flags &= ~QEDE_TX_TIMESTAMPING_EN;
		tx_type = QED_PTP_HWTSTAMP_TX_OFF;
		break;

	case HWTSTAMP_TX_ONESTEP_SYNC:
		DP_ERR(edev, "One-step timestamping is not supported\n");
		return -ERANGE;
	}

	spin_lock_bh(&ptp->lock);
	switch (ptp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		rx_filter = QED_PTP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		ptp->rx_filter = HWTSTAMP_FILTER_NONE;
		rx_filter = QED_PTP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 events */
		rx_filter = QED_PTP_FILTER_V1_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		/* Initialize PTP detection for L2 events */
		rx_filter = QED_PTP_FILTER_V2_L2_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		/* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_GEN;
		break;
	}

	ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);

	spin_unlock_bh(&ptp->lock);

	return 0;
}

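/* Handle a hardware timestamping (SIOCSHWTSTAMP) request: copy in the user
 * configuration, apply it to the device and report back the rx_filter that
 * is actually in effect.
 */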
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EIO;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
		   config.tx_type, config.rx_filter);

	if (config.flags) {
		DP_ERR(edev, "config.flags is reserved for future use\n");
		return -EINVAL;
	}

	ptp->hw_ts_ioctl_called = true;
	ptp->tx_type = config.tx_type;
	ptp->rx_filter = config.rx_filter;

	rc = qede_ptp_cfg_filters(edev);
	if (rc)
		return rc;

	config.rx_filter = ptp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

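/* Report the driver's timestamping and PHC capabilities for ethtool's
 * get_ts_info operation.
 */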
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp)
		return -EIO;

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (ptp->clock)
		info->phc_index = ptp_clock_index(ptp->clock);
	else
		info->phc_index = -1;

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

	return 0;
}

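/* Tear down PTP support: unregister the PTP clock, flush the Tx timestamp
 * work, disable timestamping in the device and free the context.
 */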
void qede_ptp_disable(struct qede_dev *edev)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	if (ptp->clock) {
		ptp_clock_unregister(ptp->clock);
		ptp->clock = NULL;
	}

	/* Cancel PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling.
	 */
	cancel_work_sync(&ptp->work);
	if (ptp->tx_skb) {
		dev_kfree_skb_any(ptp->tx_skb);
		ptp->tx_skb = NULL;
	}

	/* Disable PTP in HW */
	spin_lock_bh(&ptp->lock);
	ptp->ops->disable(edev->cdev);
	spin_unlock_bh(&ptp->lock);

	kfree(ptp);
	edev->ptp = NULL;
}

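/* Initialize an already-allocated PTP context: enable timestamping in the
 * device and, on the first load only, set up the cycle and time counters.
 */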
static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
{
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EINVAL;

	spin_lock_init(&ptp->lock);

	/* Configure PTP in HW */
	rc = ptp->ops->enable(edev->cdev);
	if (rc) {
		DP_INFO(edev, "PTP HW enable failed\n");
		return rc;
	}

	/* Init the Tx timestamping work item */
	INIT_WORK(&ptp->work, qede_ptp_task);

	/* Init cyclecounter and timecounter. This is done only on the first
	 * load. If it were redone on every load, a running PTP application
	 * would fail across an unload/load cycle (e.g. an MTU change).
	 */
	if (init_tc) {
		memset(&ptp->cc, 0, sizeof(ptp->cc));
		ptp->cc.read = qede_ptp_read_cc;
		ptp->cc.mask = CYCLECOUNTER_MASK(64);
		ptp->cc.shift = 0;
		ptp->cc.mult = 1;

		timecounter_init(&ptp->tc, &ptp->cc,
				 ktime_to_ns(ktime_get_real()));
	}

	return rc;
}

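/* Allocate and set up the PTP context, configure the device and register the
 * PTP clock with the PHC subsystem.
 */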
int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
{
	struct qede_ptp *ptp;
	int rc;

	ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
	if (!ptp) {
		DP_INFO(edev, "Failed to allocate struct for PTP\n");
		return -ENOMEM;
	}

	ptp->edev = edev;
	ptp->ops = edev->ops->ptp;
	if (!ptp->ops) {
		DP_INFO(edev, "PTP enable failed\n");
		rc = -EIO;
		goto err1;
	}

	edev->ptp = ptp;

	rc = qede_ptp_init(edev, init_tc);
	if (rc)
		goto err1;

	qede_ptp_cfg_filters(edev);

	/* Fill the ptp_clock_info struct and register PTP clock */
	ptp->clock_info.owner = THIS_MODULE;
	snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
	ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
	ptp->clock_info.n_alarm = 0;
	ptp->clock_info.n_ext_ts = 0;
	ptp->clock_info.n_per_out = 0;
	ptp->clock_info.pps = 0;
	ptp->clock_info.adjfreq = qede_ptp_adjfreq;
	ptp->clock_info.adjtime = qede_ptp_adjtime;
	ptp->clock_info.gettime64 = qede_ptp_gettime;
	ptp->clock_info.settime64 = qede_ptp_settime;
	ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;

	ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
	if (IS_ERR(ptp->clock)) {
		DP_ERR(edev, "PTP clock registration failed\n");
		ptp->clock = NULL;
		rc = -EINVAL;
		goto err2;
	}

	return 0;

err2:
	/* qede_ptp_disable() frees the context and clears edev->ptp, so make
	 * sure the freed pointer is not touched again below.
	 */
	qede_ptp_disable(edev);
	ptp = NULL;
err1:
	kfree(ptp);
	edev->ptp = NULL;

	return rc;
}

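/* Called on transmit of a packet that requested a hardware timestamp: stash
 * the skb and schedule the worker that polls for the Tx timestamp.
 */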
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
		return;

	if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
		DP_NOTICE(edev,
			  "Tx timestamping was not enabled, this packet will not be timestamped\n");
		/* No timestamp work will be scheduled, so release the flag */
		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
	} else if (unlikely(ptp->tx_skb)) {
		DP_NOTICE(edev,
			  "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
	} else {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		/* schedule check for Tx timestamp */
		ptp->tx_skb = skb_get(skb);
		schedule_work(&ptp->work);
	}
}

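/* Retrieve the Rx timestamp latched by the device for this packet and store
 * it in the skb's hardware timestamp field.
 */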
void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return;

	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
	if (rc) {
		spin_unlock_bh(&ptp->lock);
		DP_INFO(edev, "Invalid Rx timestamp\n");
		return;
	}

	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	spin_unlock_bh(&ptp->lock);
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}