/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "qede_ptp.h"

struct qede_ptp {
	const struct qed_eth_ptp_ops *ops;
	struct ptp_clock_info clock_info;
	struct cyclecounter cc;
	struct timecounter tc;
	struct ptp_clock *clock;
	struct work_struct work;
	struct qede_dev *edev;
	struct sk_buff *tx_skb;

	/* ptp spinlock is used for protecting the cycle/time counter fields
	 * and also for serializing the qed PTP API invocations.
	 */
	spinlock_t lock;
	bool hw_ts_ioctl_called;
	u16 tx_type;
	u16 rx_filter;
};

/**
 * qede_ptp_adjfreq
 * @info: the ptp clock structure
 * @ppb: parts per billion adjustment from base
 *
 * Adjust the frequency of the ptp cycle counter by the
 * indicated ppb from the base frequency.
 */
static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
{
	struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
	struct qede_dev *edev = ptp->edev;
	int rc;

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_OPEN) {
		spin_lock_bh(&ptp->lock);
		rc = ptp->ops->adjfreq(edev->cdev, ppb);
		spin_unlock_bh(&ptp->lock);
	} else {
		DP_ERR(edev, "PTP adjfreq called while interface is down\n");
		rc = -EFAULT;
	}
	__qede_unlock(edev);

	return rc;
}

static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
		   delta);

	spin_lock_bh(&ptp->lock);
	timecounter_adjtime(&ptp->tc, delta);
	spin_unlock_bh(&ptp->lock);

	return 0;
}

static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 ns;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	spin_lock_bh(&ptp->lock);
	ns = timecounter_read(&ptp->tc);
	spin_unlock_bh(&ptp->lock);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int qede_ptp_settime(struct ptp_clock_info *info,
			    const struct timespec64 *ts)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 ns;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	ns = timespec64_to_ns(ts);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);

	/* Re-init the timecounter */
	spin_lock_bh(&ptp->lock);
	timecounter_init(&ptp->tc, &ptp->cc, ns);
	spin_unlock_bh(&ptp->lock);

	return 0;
}

/* Enable (or disable) ancillary features of the phc subsystem */
static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
					     struct ptp_clock_request *rq,
					     int on)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	DP_ERR(edev, "PHC ancillary features are not supported\n");

	return -ENOTSUPP;
}

static void qede_ptp_task(struct work_struct *work)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	int rc;

	ptp = container_of(work, struct qede_ptp, work);
	edev = ptp->edev;

	/* Read Tx timestamp registers */
	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
	spin_unlock_bh(&ptp->lock);
	if (rc) {
		/* Reschedule to keep checking for a valid timestamp value */
		schedule_work(&ptp->work);
		return;
	}

	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
	dev_kfree_skb_any(ptp->tx_skb);
	ptp->tx_skb = NULL;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}

/* Read the PHC. This API is invoked with ptp_lock held.
 */
static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 phc_cycles;
	int rc;

	ptp = container_of(cc, struct qede_ptp, cc);
	edev = ptp->edev;
	rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
	if (rc)
		WARN_ONCE(1, "PHC read err %d\n", rc);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);

	return phc_cycles;
}

static void qede_ptp_init_cc(struct qede_dev *edev)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	memset(&ptp->cc, 0, sizeof(ptp->cc));
	ptp->cc.read = qede_ptp_read_cc;
	ptp->cc.mask = CYCLECOUNTER_MASK(64);
	ptp->cc.shift = 0;
	ptp->cc.mult = 1;
}

static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp)
		return -EIO;

	if (!ptp->hw_ts_ioctl_called) {
		DP_INFO(edev, "TS IOCTL not called\n");
		return 0;
	}

	switch (ptp->tx_type) {
	case HWTSTAMP_TX_ON:
		edev->flags |= QEDE_TX_TIMESTAMPING_EN;
		ptp->ops->hwtstamp_tx_on(edev->cdev);
		break;

	case HWTSTAMP_TX_ONESTEP_SYNC:
		DP_ERR(edev, "One-step timestamping is not supported\n");
		return -ERANGE;
	}

	spin_lock_bh(&ptp->lock);
	switch (ptp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
		ptp->rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 events */
		ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
		ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6);
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		/* Initialize PTP detection for L2 events */
		ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_L2);
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		/* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 events */
		ptp->ops->cfg_rx_filters(edev->cdev,
					 QED_PTP_FILTER_L2_IPV4_IPV6);
		break;
	}

	spin_unlock_bh(&ptp->lock);

	return 0;
}

int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EIO;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
		   config.tx_type, config.rx_filter);

	if (config.flags) {
		DP_ERR(edev, "config.flags is reserved for future use\n");
		return -EINVAL;
	}

	ptp->hw_ts_ioctl_called = 1;
	ptp->tx_type = config.tx_type;
	ptp->rx_filter = config.rx_filter;

	rc = qede_ptp_cfg_filters(edev);
	if (rc)
		return rc;

	config.rx_filter = ptp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/* Called during load, to initialize PTP-related stuff */
static void qede_ptp_init(struct qede_dev *edev, bool init_tc)
{
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return;

	spin_lock_init(&ptp->lock);

	/* Configure PTP in HW */
	rc = ptp->ops->enable(edev->cdev);
	if (rc) {
		DP_ERR(edev, "Stopping PTP initialization\n");
		return;
	}

	/* Init work queue for Tx timestamping */
	INIT_WORK(&ptp->work, qede_ptp_task);

	/* Init cyclecounter and timecounter. This is done only in the first
	 * load. If done in every load, PTP application will fail when doing
	 * unload / load (e.g. MTU change) while it is running.
	 */
	if (init_tc) {
		qede_ptp_init_cc(edev);
		timecounter_init(&ptp->tc, &ptp->cc,
				 ktime_to_ns(ktime_get_real()));
	}

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n");
}

void qede_ptp_start(struct qede_dev *edev, bool init_tc)
{
	qede_ptp_init(edev, init_tc);
	qede_ptp_cfg_filters(edev);
}

void qede_ptp_remove(struct qede_dev *edev)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (ptp && ptp->clock) {
		ptp_clock_unregister(ptp->clock);
		ptp->clock = NULL;
	}

	kfree(ptp);
	edev->ptp = NULL;
}

int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp)
		return -EIO;

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (ptp->clock)
		info->phc_index = ptp_clock_index(ptp->clock);
	else
		info->phc_index = -1;

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

	return 0;
}

/* Called during unload, to stop PTP-related stuff */
void qede_ptp_stop(struct qede_dev *edev)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	/* Cancel PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling.
	 */
	cancel_work_sync(&ptp->work);
	if (ptp->tx_skb) {
		dev_kfree_skb_any(ptp->tx_skb);
		ptp->tx_skb = NULL;
	}

	/* Disable PTP in HW */
	spin_lock_bh(&ptp->lock);
	ptp->ops->disable(edev->cdev);
	spin_unlock_bh(&ptp->lock);
}

int qede_ptp_register_phc(struct qede_dev *edev)
{
	struct qede_ptp *ptp;

	ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
	if (!ptp) {
		DP_INFO(edev, "Failed to allocate struct for PTP\n");
		return -ENOMEM;
	}

	ptp->edev = edev;
	ptp->ops = edev->ops->ptp;
	if (!ptp->ops) {
		kfree(ptp);
		edev->ptp = NULL;
		DP_ERR(edev, "PTP clock registration failed\n");
		return -EIO;
	}

	edev->ptp = ptp;

	/* Fill the ptp_clock_info struct and register PTP clock */
	ptp->clock_info.owner = THIS_MODULE;
	snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
	ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
	ptp->clock_info.n_alarm = 0;
	ptp->clock_info.n_ext_ts = 0;
	ptp->clock_info.n_per_out = 0;
	ptp->clock_info.pps = 0;
	ptp->clock_info.adjfreq = qede_ptp_adjfreq;
	ptp->clock_info.adjtime = qede_ptp_adjtime;
	ptp->clock_info.gettime64 = qede_ptp_gettime;
	ptp->clock_info.settime64 = qede_ptp_settime;
	ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;

	ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
	if (IS_ERR(ptp->clock)) {
		ptp->clock = NULL;
		kfree(ptp);
		edev->ptp = NULL;
		DP_ERR(edev, "PTP clock registration failed\n");
	}

	return 0;
}

void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
		DP_NOTICE(edev,
			  "Tx timestamping was not enabled, this packet will not be timestamped\n");
	} else if (unlikely(ptp->tx_skb)) {
		DP_NOTICE(edev,
			  "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
	} else {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		/* schedule check for Tx timestamp */
		ptp->tx_skb = skb_get(skb);
		schedule_work(&ptp->work);
	}
}

void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return;

	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
	if (rc) {
		spin_unlock_bh(&ptp->lock);
		DP_INFO(edev, "Invalid Rx timestamp\n");
		return;
	}

	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	spin_unlock_bh(&ptp->lock);
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}