1 /* QLogic qede NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 #include "qede_ptp.h" 33 34 struct qede_ptp { 35 const struct qed_eth_ptp_ops *ops; 36 struct ptp_clock_info clock_info; 37 struct cyclecounter cc; 38 struct timecounter tc; 39 struct ptp_clock *clock; 40 struct work_struct work; 41 struct qede_dev *edev; 42 struct sk_buff *tx_skb; 43 44 /* ptp spinlock is used for protecting the cycle/time counter fields 45 * and, also for serializing the qed PTP API invocations. 
46 */ 47 spinlock_t lock; 48 bool hw_ts_ioctl_called; 49 u16 tx_type; 50 u16 rx_filter; 51 }; 52 53 /** 54 * qede_ptp_adjfreq 55 * @ptp: the ptp clock structure 56 * @ppb: parts per billion adjustment from base 57 * 58 * Adjust the frequency of the ptp cycle counter by the 59 * indicated ppb from the base frequency. 60 */ 61 static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb) 62 { 63 struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info); 64 struct qede_dev *edev = ptp->edev; 65 int rc; 66 67 __qede_lock(edev); 68 if (edev->state == QEDE_STATE_OPEN) { 69 spin_lock_bh(&ptp->lock); 70 rc = ptp->ops->adjfreq(edev->cdev, ppb); 71 spin_unlock_bh(&ptp->lock); 72 } else { 73 DP_ERR(edev, "PTP adjfreq called while interface is down\n"); 74 rc = -EFAULT; 75 } 76 __qede_unlock(edev); 77 78 return rc; 79 } 80 81 static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta) 82 { 83 struct qede_dev *edev; 84 struct qede_ptp *ptp; 85 86 ptp = container_of(info, struct qede_ptp, clock_info); 87 edev = ptp->edev; 88 89 DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n", 90 delta); 91 92 spin_lock_bh(&ptp->lock); 93 timecounter_adjtime(&ptp->tc, delta); 94 spin_unlock_bh(&ptp->lock); 95 96 return 0; 97 } 98 99 static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts) 100 { 101 struct qede_dev *edev; 102 struct qede_ptp *ptp; 103 u64 ns; 104 105 ptp = container_of(info, struct qede_ptp, clock_info); 106 edev = ptp->edev; 107 108 spin_lock_bh(&ptp->lock); 109 ns = timecounter_read(&ptp->tc); 110 spin_unlock_bh(&ptp->lock); 111 112 DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns); 113 114 *ts = ns_to_timespec64(ns); 115 116 return 0; 117 } 118 119 static int qede_ptp_settime(struct ptp_clock_info *info, 120 const struct timespec64 *ts) 121 { 122 struct qede_dev *edev; 123 struct qede_ptp *ptp; 124 u64 ns; 125 126 ptp = container_of(info, struct qede_ptp, clock_info); 127 edev = ptp->edev; 
128 129 ns = timespec64_to_ns(ts); 130 131 DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns); 132 133 /* Re-init the timecounter */ 134 spin_lock_bh(&ptp->lock); 135 timecounter_init(&ptp->tc, &ptp->cc, ns); 136 spin_unlock_bh(&ptp->lock); 137 138 return 0; 139 } 140 141 /* Enable (or disable) ancillary features of the phc subsystem */ 142 static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info, 143 struct ptp_clock_request *rq, 144 int on) 145 { 146 struct qede_dev *edev; 147 struct qede_ptp *ptp; 148 149 ptp = container_of(info, struct qede_ptp, clock_info); 150 edev = ptp->edev; 151 152 DP_ERR(edev, "PHC ancillary features are not supported\n"); 153 154 return -ENOTSUPP; 155 } 156 157 static void qede_ptp_task(struct work_struct *work) 158 { 159 struct skb_shared_hwtstamps shhwtstamps; 160 struct qede_dev *edev; 161 struct qede_ptp *ptp; 162 u64 timestamp, ns; 163 int rc; 164 165 ptp = container_of(work, struct qede_ptp, work); 166 edev = ptp->edev; 167 168 /* Read Tx timestamp registers */ 169 spin_lock_bh(&ptp->lock); 170 rc = ptp->ops->read_tx_ts(edev->cdev, ×tamp); 171 spin_unlock_bh(&ptp->lock); 172 if (rc) { 173 /* Reschedule to keep checking for a valid timestamp value */ 174 schedule_work(&ptp->work); 175 return; 176 } 177 178 ns = timecounter_cyc2time(&ptp->tc, timestamp); 179 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 180 shhwtstamps.hwtstamp = ns_to_ktime(ns); 181 skb_tstamp_tx(ptp->tx_skb, &shhwtstamps); 182 dev_kfree_skb_any(ptp->tx_skb); 183 ptp->tx_skb = NULL; 184 clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); 185 186 DP_VERBOSE(edev, QED_MSG_DEBUG, 187 "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", 188 timestamp, ns); 189 } 190 191 /* Read the PHC. This API is invoked with ptp_lock held. 
/* Read the PHC. This API is invoked with ptp_lock held. */
static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 phc_cycles;
	int rc;

	ptp = container_of(cc, struct qede_ptp, cc);
	edev = ptp->edev;
	rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
	if (rc)
		/* NOTE(review): on failure phc_cycles may hold a stale value
		 * from the qed callee; we warn once and return it anyway.
		 */
		WARN_ONCE(1, "PHC read err %d\n", rc);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);

	return phc_cycles;
}

/* Translate the user-requested tx_type/rx_filter (cached by
 * qede_ptp_hw_ts()) into qed enum values and program them into the device.
 * May narrow ptp->rx_filter to the filter class actually applied, which is
 * then reported back to user space. Returns 0 on success, negative errno
 * on failure or unsupported mode.
 */
static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
	enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
	enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp)
		return -EIO;

	/* Nothing to program until the SIOCSHWTSTAMP ioctl has supplied a
	 * configuration.
	 */
	if (!ptp->hw_ts_ioctl_called) {
		DP_INFO(edev, "TS IOCTL not called\n");
		return 0;
	}

	switch (ptp->tx_type) {
	case HWTSTAMP_TX_ON:
		edev->flags |= QEDE_TX_TIMESTAMPING_EN;
		tx_type = QED_PTP_HWTSTAMP_TX_ON;
		break;

	case HWTSTAMP_TX_OFF:
		edev->flags &= ~QEDE_TX_TIMESTAMPING_EN;
		tx_type = QED_PTP_HWTSTAMP_TX_OFF;
		break;

	case HWTSTAMP_TX_ONESTEP_SYNC:
		DP_ERR(edev, "One-step timestamping is not supported\n");
		return -ERANGE;
	}

	/* ptp->lock serializes the qed cfg_filters() call and protects the
	 * rewrite of ptp->rx_filter below.
	 */
	spin_lock_bh(&ptp->lock);
	switch (ptp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		rx_filter = QED_PTP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		ptp->rx_filter = HWTSTAMP_FILTER_NONE;
		rx_filter = QED_PTP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 events */
		rx_filter = QED_PTP_FILTER_V1_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		/* Initialize PTP detection L2 events */
		rx_filter = QED_PTP_FILTER_V2_L2_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_GEN;
		break;
	}

	ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);

	spin_unlock_bh(&ptp->lock);

	return 0;
}

/* SIOCSHWTSTAMP ioctl handler: copy the hwtstamp_config from user space,
 * cache it, program the device filters, and copy the (possibly narrowed)
 * configuration back to user space. Returns 0 or negative errno.
 */
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EIO;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
		   config.tx_type, config.rx_filter);

	if (config.flags) {
		DP_ERR(edev, "config.flags is reserved for future use\n");
		return -EINVAL;
	}

	ptp->hw_ts_ioctl_called = 1;
	ptp->tx_type = config.tx_type;
	ptp->rx_filter = config.rx_filter;

	rc = qede_ptp_cfg_filters(edev);
	if (rc)
		return rc;

	/* Report the filter class that was actually applied. */
	config.rx_filter = ptp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
qede_ptp_cfg_filters(edev); 327 if (rc) 328 return rc; 329 330 config.rx_filter = ptp->rx_filter; 331 332 return copy_to_user(ifr->ifr_data, &config, 333 sizeof(config)) ? -EFAULT : 0; 334 } 335 336 int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) 337 { 338 struct qede_ptp *ptp = edev->ptp; 339 340 if (!ptp) { 341 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 342 SOF_TIMESTAMPING_RX_SOFTWARE | 343 SOF_TIMESTAMPING_SOFTWARE; 344 info->phc_index = -1; 345 346 return 0; 347 } 348 349 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 350 SOF_TIMESTAMPING_RX_SOFTWARE | 351 SOF_TIMESTAMPING_SOFTWARE | 352 SOF_TIMESTAMPING_TX_HARDWARE | 353 SOF_TIMESTAMPING_RX_HARDWARE | 354 SOF_TIMESTAMPING_RAW_HARDWARE; 355 356 if (ptp->clock) 357 info->phc_index = ptp_clock_index(ptp->clock); 358 else 359 info->phc_index = -1; 360 361 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | 362 BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 363 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | 364 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 365 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | 366 BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | 367 BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | 368 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 369 BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | 370 BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | 371 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | 372 BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | 373 BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); 374 375 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); 376 377 return 0; 378 } 379 380 void qede_ptp_disable(struct qede_dev *edev) 381 { 382 struct qede_ptp *ptp; 383 384 ptp = edev->ptp; 385 if (!ptp) 386 return; 387 388 if (ptp->clock) { 389 ptp_clock_unregister(ptp->clock); 390 ptp->clock = NULL; 391 } 392 393 /* Cancel PTP work queue. Should be done after the Tx queues are 394 * drained to prevent additional scheduling. 
395 */ 396 cancel_work_sync(&ptp->work); 397 if (ptp->tx_skb) { 398 dev_kfree_skb_any(ptp->tx_skb); 399 ptp->tx_skb = NULL; 400 } 401 402 /* Disable PTP in HW */ 403 spin_lock_bh(&ptp->lock); 404 ptp->ops->disable(edev->cdev); 405 spin_unlock_bh(&ptp->lock); 406 407 kfree(ptp); 408 edev->ptp = NULL; 409 } 410 411 static int qede_ptp_init(struct qede_dev *edev, bool init_tc) 412 { 413 struct qede_ptp *ptp; 414 int rc; 415 416 ptp = edev->ptp; 417 if (!ptp) 418 return -EINVAL; 419 420 spin_lock_init(&ptp->lock); 421 422 /* Configure PTP in HW */ 423 rc = ptp->ops->enable(edev->cdev); 424 if (rc) { 425 DP_INFO(edev, "PTP HW enable failed\n"); 426 return rc; 427 } 428 429 /* Init work queue for Tx timestamping */ 430 INIT_WORK(&ptp->work, qede_ptp_task); 431 432 /* Init cyclecounter and timecounter. This is done only in the first 433 * load. If done in every load, PTP application will fail when doing 434 * unload / load (e.g. MTU change) while it is running. 435 */ 436 if (init_tc) { 437 memset(&ptp->cc, 0, sizeof(ptp->cc)); 438 ptp->cc.read = qede_ptp_read_cc; 439 ptp->cc.mask = CYCLECOUNTER_MASK(64); 440 ptp->cc.shift = 0; 441 ptp->cc.mult = 1; 442 443 timecounter_init(&ptp->tc, &ptp->cc, 444 ktime_to_ns(ktime_get_real())); 445 } 446 447 return rc; 448 } 449 450 int qede_ptp_enable(struct qede_dev *edev, bool init_tc) 451 { 452 struct qede_ptp *ptp; 453 int rc; 454 455 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); 456 if (!ptp) { 457 DP_INFO(edev, "Failed to allocate struct for PTP\n"); 458 return -ENOMEM; 459 } 460 461 ptp->edev = edev; 462 ptp->ops = edev->ops->ptp; 463 if (!ptp->ops) { 464 DP_INFO(edev, "PTP enable failed\n"); 465 rc = -EIO; 466 goto err1; 467 } 468 469 edev->ptp = ptp; 470 471 rc = qede_ptp_init(edev, init_tc); 472 if (rc) 473 goto err1; 474 475 qede_ptp_cfg_filters(edev); 476 477 /* Fill the ptp_clock_info struct and register PTP clock */ 478 ptp->clock_info.owner = THIS_MODULE; 479 snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name); 480 
ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB; 481 ptp->clock_info.n_alarm = 0; 482 ptp->clock_info.n_ext_ts = 0; 483 ptp->clock_info.n_per_out = 0; 484 ptp->clock_info.pps = 0; 485 ptp->clock_info.adjfreq = qede_ptp_adjfreq; 486 ptp->clock_info.adjtime = qede_ptp_adjtime; 487 ptp->clock_info.gettime64 = qede_ptp_gettime; 488 ptp->clock_info.settime64 = qede_ptp_settime; 489 ptp->clock_info.enable = qede_ptp_ancillary_feature_enable; 490 491 ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); 492 if (IS_ERR(ptp->clock)) { 493 rc = -EINVAL; 494 DP_ERR(edev, "PTP clock registration failed\n"); 495 goto err2; 496 } 497 498 return 0; 499 500 err2: 501 qede_ptp_disable(edev); 502 ptp->clock = NULL; 503 err1: 504 kfree(ptp); 505 edev->ptp = NULL; 506 507 return rc; 508 } 509 510 void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb) 511 { 512 struct qede_ptp *ptp; 513 514 ptp = edev->ptp; 515 if (!ptp) 516 return; 517 518 if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags)) 519 return; 520 521 if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) { 522 DP_NOTICE(edev, 523 "Tx timestamping was not enabled, this packet will not be timestamped\n"); 524 } else if (unlikely(ptp->tx_skb)) { 525 DP_NOTICE(edev, 526 "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); 527 } else { 528 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 529 /* schedule check for Tx timestamp */ 530 ptp->tx_skb = skb_get(skb); 531 schedule_work(&ptp->work); 532 } 533 } 534 535 void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb) 536 { 537 struct qede_ptp *ptp; 538 u64 timestamp, ns; 539 int rc; 540 541 ptp = edev->ptp; 542 if (!ptp) 543 return; 544 545 spin_lock_bh(&ptp->lock); 546 rc = ptp->ops->read_rx_ts(edev->cdev, ×tamp); 547 if (rc) { 548 spin_unlock_bh(&ptp->lock); 549 DP_INFO(edev, "Invalid Rx timestamp\n"); 550 return; 551 } 552 553 ns = timecounter_cyc2time(&ptp->tc, 
timestamp); 554 spin_unlock_bh(&ptp->lock); 555 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); 556 DP_VERBOSE(edev, QED_MSG_DEBUG, 557 "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", 558 timestamp, ns); 559 } 560