// SPDX-License-Identifier: GPL-2.0+
/*
 * TI Common Platform Time Sync
 *
 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
 *
 */
#include <linux/err.h>
#include <linux/if.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include "cpts.h"

#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */

struct cpts_skb_cb_data {
	unsigned long tmo;
};

#define cpts_read32(c, r)	readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r)	writel_relaxed(v, &c->reg->r)

static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
		      u16 ts_seqid, u8 ts_msgtype);

static int event_expired(struct cpts_event *event)
{
	return time_after(jiffies, event->tmo);
}

static int event_type(struct cpts_event *event)
{
	return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}

static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
{
	u32 r = cpts_read32(cpts, intstat_raw);

	if (r & TS_PEND_RAW) {
		*high = cpts_read32(cpts, event_high);
		*low = cpts_read32(cpts, event_low);
		cpts_write32(cpts, EVENT_POP, event_pop);
		return 0;
	}
	return -1;
}

static int cpts_purge_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	int removed = 0;

	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			++removed;
		}
	}

	if (removed)
		pr_debug("cpts: event pool cleaned up %d\n", removed);
	return removed ? 0 : -1;
}
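/* Free any skbs still queued for a TX timestamp whose deadline has
 * passed. Runs under cpts->lock, from the overflow worker.
 */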
static void cpts_purge_txq(struct cpts *cpts)
{
	struct cpts_skb_cb_data *skb_cb;
	struct sk_buff *skb, *tmp;
	int removed = 0;

	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
		skb_cb = (struct cpts_skb_cb_data *)skb->cb;
		if (time_after(jiffies, skb_cb->tmo)) {
			__skb_unlink(skb, &cpts->txq);
			dev_consume_skb_any(skb);
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
}

static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
	struct sk_buff *skb, *tmp;
	u16 seqid;
	u8 mtype;
	bool found = false;

	mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
	seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;

	/* no need to grab txq.lock as access is always done under cpts->lock */
	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
		struct skb_shared_hwtstamps ssh;
		unsigned int class = ptp_classify_raw(skb);
		struct cpts_skb_cb_data *skb_cb =
					(struct cpts_skb_cb_data *)skb->cb;

		if (cpts_match(skb, class, seqid, mtype)) {
			u64 ns = timecounter_cyc2time(&cpts->tc, event->low);

			memset(&ssh, 0, sizeof(ssh));
			ssh.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ssh);
			found = true;
			__skb_unlink(skb, &cpts->txq);
			dev_consume_skb_any(skb);
			dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
				mtype, seqid);
			break;
		}

		if (time_after(jiffies, skb_cb->tmo)) {
			/* drop any skb whose timestamp deadline has expired */
			dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
			__skb_unlink(skb, &cpts->txq);
			dev_consume_skb_any(skb);
		}
	}

	return found;
}

/*
 * Returns zero if a matching event type was found.
 */
static int cpts_fifo_read(struct cpts *cpts, int match)
{
	int i, type = -1;
	u32 hi, lo;
	struct cpts_event *event;

	for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
		if (cpts_fifo_pop(cpts, &hi, &lo))
			break;

		if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
			pr_err("cpts: event pool empty\n");
			return -1;
		}

		event = list_first_entry(&cpts->pool, struct cpts_event, list);
		event->tmo = jiffies + 2;
		event->high = hi;
		event->low = lo;
		type = event_type(event);
		switch (type) {
		case CPTS_EV_TX:
			if (cpts_match_tx_ts(cpts, event)) {
				/* if the new event matches an existing skb,
				 * then don't queue it
				 */
				break;
			}
			/* fall through */
		case CPTS_EV_PUSH:
		case CPTS_EV_RX:
			list_del_init(&event->list);
			list_add_tail(&event->list, &cpts->events);
			break;
		case CPTS_EV_ROLL:
		case CPTS_EV_HALF:
		case CPTS_EV_HW:
			break;
		default:
			pr_err("cpts: unknown event type\n");
			break;
		}
		if (type == match)
			break;
	}
	return type == match ? 0 : -1;
}
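/* Cyclecounter read callback: trigger a time stamp push via TS_PUSH
 * and return the raw 32-bit counter value carried by the resulting
 * CPTS_EV_PUSH event, or 0 if no such event could be obtained.
 */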
static u64 cpts_systim_read(const struct cyclecounter *cc)
{
	u64 val = 0;
	struct cpts_event *event;
	struct list_head *this, *next;
	struct cpts *cpts = container_of(cc, struct cpts, cc);

	cpts_write32(cpts, TS_PUSH, ts_push);
	if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
		pr_err("cpts: unable to obtain a time stamp\n");

	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_type(event) == CPTS_EV_PUSH) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			val = event->low;
			break;
		}
	}

	return val;
}

/* PTP clock operations */

static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	u64 adj;
	u32 diff, mult;
	int neg_adj = 0;
	unsigned long flags;
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	mult = cpts->cc_mult;
	adj = mult;
	adj *= ppb;
	diff = div_u64(adj, 1000000000ULL);

	spin_lock_irqsave(&cpts->lock, flags);

	timecounter_read(&cpts->tc);

	cpts->cc.mult = neg_adj ? mult - diff : mult + diff;

	spin_unlock_irqrestore(&cpts->lock, flags);

	return 0;
}

static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	spin_lock_irqsave(&cpts->lock, flags);
	timecounter_adjtime(&cpts->tc, delta);
	spin_unlock_irqrestore(&cpts->lock, flags);

	return 0;
}

static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	spin_lock_irqsave(&cpts->lock, flags);
	ns = timecounter_read(&cpts->tc);
	spin_unlock_irqrestore(&cpts->lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int cpts_ptp_settime(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&cpts->lock, flags);
	timecounter_init(&cpts->tc, &cpts->cc, ns);
	spin_unlock_irqrestore(&cpts->lock, flags);

	return 0;
}

static int cpts_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static long cpts_overflow_check(struct ptp_clock_info *ptp)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	unsigned long delay = cpts->ov_check_period;
	struct timespec64 ts;
	unsigned long flags;

	spin_lock_irqsave(&cpts->lock, flags);
	ts = ns_to_timespec64(timecounter_read(&cpts->tc));

	if (!skb_queue_empty(&cpts->txq)) {
		cpts_purge_txq(cpts);
		if (!skb_queue_empty(&cpts->txq))
			delay = CPTS_SKB_TX_WORK_TIMEOUT;
	}
	spin_unlock_irqrestore(&cpts->lock, flags);

	pr_debug("cpts overflow check at %lld.%09ld\n",
		 (long long)ts.tv_sec, ts.tv_nsec);
	return (long)delay;
}

static const struct ptp_clock_info cpts_info = {
	.owner		= THIS_MODULE,
	.name		= "CPTS timer",
	.max_adj	= 1000000,
	.n_ext_ts	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= cpts_ptp_adjfreq,
	.adjtime	= cpts_ptp_adjtime,
	.gettime64	= cpts_ptp_gettime,
	.settime64	= cpts_ptp_settime,
	.enable		= cpts_ptp_enable,
	.do_aux_work	= cpts_overflow_check,
};
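/* Compare an skb against a hardware event: skip past any VLAN tag and
 * the L2/IPv4/IPv6 + UDP encapsulation to reach the PTP header, then
 * match on the 4-bit message type and the 16-bit sequence ID.
 */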
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
		      u16 ts_seqid, u8 ts_msgtype)
{
	u16 *seqid;
	unsigned int offset = 0;
	u8 *msgtype, *data = skb->data;

	if (ptp_class & PTP_CLASS_VLAN)
		offset += VLAN_HLEN;

	switch (ptp_class & PTP_CLASS_PMASK) {
	case PTP_CLASS_IPV4:
		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
		break;
	case PTP_CLASS_IPV6:
		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
		break;
	case PTP_CLASS_L2:
		offset += ETH_HLEN;
		break;
	default:
		return 0;
	}

	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
		return 0;

	if (unlikely(ptp_class & PTP_CLASS_V1))
		msgtype = data + offset + OFF_PTP_CONTROL;
	else
		msgtype = data + offset;

	seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
}

static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
{
	u64 ns = 0;
	struct cpts_event *event;
	struct list_head *this, *next;
	unsigned int class = ptp_classify_raw(skb);
	unsigned long flags;
	u16 seqid;
	u8 mtype;

	if (class == PTP_CLASS_NONE)
		return 0;

	spin_lock_irqsave(&cpts->lock, flags);
	cpts_fifo_read(cpts, -1);
	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			continue;
		}
		mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
		seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
		if (ev_type == event_type(event) &&
		    cpts_match(skb, class, seqid, mtype)) {
			ns = timecounter_cyc2time(&cpts->tc, event->low);
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			break;
		}
	}

	if (ev_type == CPTS_EV_TX && !ns) {
		struct cpts_skb_cb_data *skb_cb =
				(struct cpts_skb_cb_data *)skb->cb;
		/* Not found, add frame to queue for processing later.
		 * The periodic FIFO check will handle this.
		 */
		skb_get(skb);
		/* set the timeout deadline used by the periodic check */
		skb_cb->tmo = jiffies + msecs_to_jiffies(100);
		__skb_queue_tail(&cpts->txq, skb);
		ptp_schedule_worker(cpts->clock, 0);
	}
	spin_unlock_irqrestore(&cpts->lock, flags);

	return ns;
}
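/* Attach a hardware receive timestamp to an skb. If a matching
 * CPTS_EV_RX event is found, the converted nanosecond value is stored
 * in the skb's shared hwtstamps; otherwise the skb is left untouched.
 */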
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	u64 ns;
	struct skb_shared_hwtstamps *ssh;

	ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
	if (!ns)
		return;
	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL_GPL(cpts_rx_timestamp);

void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	u64 ns;
	struct skb_shared_hwtstamps ssh;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;
	ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
	if (!ns)
		return;
	memset(&ssh, 0, sizeof(ssh));
	ssh.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &ssh);
}
EXPORT_SYMBOL_GPL(cpts_tx_timestamp);

int cpts_register(struct cpts *cpts)
{
	int err, i;

	skb_queue_head_init(&cpts->txq);
	INIT_LIST_HEAD(&cpts->events);
	INIT_LIST_HEAD(&cpts->pool);
	for (i = 0; i < CPTS_MAX_EVENTS; i++)
		list_add(&cpts->pool_data[i].list, &cpts->pool);

	clk_enable(cpts->refclk);

	cpts_write32(cpts, CPTS_EN, control);
	cpts_write32(cpts, TS_PEND_EN, int_enable);

	timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));

	cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
	if (IS_ERR(cpts->clock)) {
		err = PTR_ERR(cpts->clock);
		cpts->clock = NULL;
		goto err_ptp;
	}
	cpts->phc_index = ptp_clock_index(cpts->clock);

	ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
	return 0;

err_ptp:
	clk_disable(cpts->refclk);
	return err;
}
EXPORT_SYMBOL_GPL(cpts_register);

void cpts_unregister(struct cpts *cpts)
{
	if (WARN_ON(!cpts->clock))
		return;

	ptp_clock_unregister(cpts->clock);
	cpts->clock = NULL;

	cpts_write32(cpts, 0, int_enable);
	cpts_write32(cpts, 0, control);

	/* Drop all packets still waiting for a TX timestamp */
	skb_queue_purge(&cpts->txq);

	clk_disable(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_unregister);
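/* Derive the cyclecounter mult/shift pair from the refclk rate unless
 * the device tree already supplied one. Worked example with a
 * hypothetical 250 MHz refclk (rate not taken from this source): the
 * 32-bit counter wraps after 2^32 / 250000000 ~= 17 s, clamped below
 * to 10 s, so ov_check_period = (HZ * 10) / 2 = 5 * HZ jiffies.
 */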
static void cpts_calc_mult_shift(struct cpts *cpts)
{
	u64 frac, maxsec, ns;
	u32 freq;

	freq = clk_get_rate(cpts->refclk);

	/* Calc the maximum number of seconds which we can run before
	 * wrapping around.
	 */
	maxsec = cpts->cc.mask;
	do_div(maxsec, freq);
	/* limit the conversion range to 10 sec as higher values will
	 * produce too small mult factors and so reduce the conversion
	 * accuracy
	 */
	if (maxsec > 10)
		maxsec = 10;

	/* Calc overflow check period (maxsec / 2) */
	cpts->ov_check_period = (HZ * maxsec) / 2;
	dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
		 cpts->ov_check_period);

	if (cpts->cc.mult || cpts->cc.shift)
		return;

	clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
			       freq, NSEC_PER_SEC, maxsec);

	frac = 0;
	ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);

	dev_info(cpts->dev,
		 "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
		 freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}

static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
{
	int ret = -EINVAL;
	u32 prop;

	if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
		cpts->cc.mult = prop;

	if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
		cpts->cc.shift = prop;

	if ((cpts->cc.mult && !cpts->cc.shift) ||
	    (!cpts->cc.mult && cpts->cc.shift))
		goto of_error;

	return 0;

of_error:
	dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
	return ret;
}

struct cpts *cpts_create(struct device *dev, void __iomem *regs,
			 struct device_node *node)
{
	struct cpts *cpts;
	int ret;

	cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
	if (!cpts)
		return ERR_PTR(-ENOMEM);

	cpts->dev = dev;
	cpts->reg = (struct cpsw_cpts __iomem *)regs;
	spin_lock_init(&cpts->lock);

	ret = cpts_of_parse(cpts, node);
	if (ret)
		return ERR_PTR(ret);

	cpts->refclk = devm_clk_get(dev, "cpts");
	if (IS_ERR(cpts->refclk)) {
		dev_err(dev, "Failed to get cpts refclk\n");
		return ERR_CAST(cpts->refclk);
	}

	ret = clk_prepare(cpts->refclk);
	if (ret)
		return ERR_PTR(ret);

	cpts->cc.read = cpts_systim_read;
	cpts->cc.mask = CLOCKSOURCE_MASK(32);
	cpts->info = cpts_info;

	cpts_calc_mult_shift(cpts);
	/* save cc.mult original value as it can be modified
	 * by cpts_ptp_adjfreq().
	 */
	cpts->cc_mult = cpts->cc.mult;

	return cpts;
}
EXPORT_SYMBOL_GPL(cpts_create);

void cpts_release(struct cpts *cpts)
{
	if (!cpts)
		return;

	if (WARN_ON(!cpts->refclk))
		return;

	clk_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_release);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI CPTS driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");