// SPDX-License-Identifier: GPL-2.0-only
/*
 * 1588 PTP support for Cadence GEM device.
 *
 * Copyright (C) 2017 Cadence Design Systems - https://www.cadence.com
 *
 * Authors: Rafal Ozieblo <rafalo@cadence.com>
 *          Bartosz Folta <bfolta@cadence.com>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/time64.h>
#include <linux/ptp_classify.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
#include <linux/circ_buf.h>
#include <linux/spinlock.h>

#include "macb.h"

#define GEM_PTP_TIMER_NAME "gem-ptp-timer"

static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
					       struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap == HW_DMA_CAP_PTP)
		return (struct macb_dma_desc_ptp *)
				((u8 *)desc + sizeof(struct macb_dma_desc));
	if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP)
		return (struct macb_dma_desc_ptp *)
				((u8 *)desc + sizeof(struct macb_dma_desc)
				+ sizeof(struct macb_dma_desc_64));
	return NULL;
}

static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts,
			    struct ptp_system_timestamp *sts)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	unsigned long flags;
	long first, second;
	u32 secl, sech;

	spin_lock_irqsave(&bp->tsu_clk_lock, flags);
	ptp_read_system_prets(sts);
	first = gem_readl(bp, TN);
	ptp_read_system_postts(sts);
	secl = gem_readl(bp, TSL);
	sech = gem_readl(bp, TSH);
	second = gem_readl(bp, TN);

	/* test for nsec rollover */
	if (first > second) {
		/* if so, use later read & re-read seconds
		 * (assume all done within 1s)
		 */
		ptp_read_system_prets(sts);
		ts->tv_nsec = gem_readl(bp, TN);
		ptp_read_system_postts(sts);
		secl = gem_readl(bp, TSL);
		sech = gem_readl(bp, TSH);
	} else {
		ts->tv_nsec = first;
	}

	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
	ts->tv_sec = (((u64)sech << GEM_TSL_SIZE) | secl)
			& TSU_SEC_MAX_VAL;
	return 0;
}

static int gem_tsu_set_time(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	unsigned long flags;
	u32 ns, sech, secl;

	secl = (u32)ts->tv_sec;
	sech = (ts->tv_sec >> GEM_TSL_SIZE) & ((1 << GEM_TSH_SIZE) - 1);
	ns = ts->tv_nsec;

	spin_lock_irqsave(&bp->tsu_clk_lock, flags);

	/* TSH doesn't latch the time and no atomicity! */
	gem_writel(bp, TN, 0); /* clear to avoid overflow */
	gem_writel(bp, TSH, sech);
	/* write lower bits 2nd, for synchronized secs update */
	gem_writel(bp, TSL, secl);
	gem_writel(bp, TN, ns);

	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);

	return 0;
}
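
/* Illustrative example (added commentary, not part of the original driver):
 * assuming the usual macb.h widths of GEM_TSL_SIZE = 32 and GEM_TSH_SIZE = 16,
 * gem_tsu_set_time() splits tv_sec = 0x123456789A into TSH = 0x12 (upper bits)
 * and TSL = 0x3456789A (lower 32 bits). TN is cleared before and restored
 * after the seconds writes because, as noted above, the seconds registers do
 * not latch atomically.
 */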

static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec)
{
	unsigned long flags;

	/* tsu_timer_incr register must be written after
	 * the tsu_timer_incr_sub_ns register and the write operation
	 * will cause the value written to the tsu_timer_incr_sub_ns register
	 * to take effect.
	 */
	spin_lock_irqsave(&bp->tsu_clk_lock, flags);
	/* RegBit[15:0] = Subns[23:8]; RegBit[31:24] = Subns[7:0] */
	gem_writel(bp, TISUBN, GEM_BF(SUBNSINCRL, incr_spec->sub_ns) |
		   GEM_BF(SUBNSINCRH, (incr_spec->sub_ns >>
			  GEM_SUBNSINCRL_SIZE)));
	gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);

	return 0;
}

static int gem_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	struct tsu_incr incr_spec;
	bool neg_adj = false;
	u32 word;
	u64 adj;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	/* Adjustment is relative to base frequency */
	incr_spec.sub_ns = bp->tsu_incr.sub_ns;
	incr_spec.ns = bp->tsu_incr.ns;

	/* scaling: unused(8bit) | ns(8bit) | fractions(16bit) */
	word = ((u64)incr_spec.ns << GEM_SUBNSINCR_SIZE) + incr_spec.sub_ns;
	adj = (u64)scaled_ppm * word;
	/* Divide with rounding, equivalent to floating dividing:
	 * (temp / USEC_PER_SEC) + 0.5
	 */
	adj += (USEC_PER_SEC >> 1);
	adj >>= PPM_FRACTION; /* remove fractions */
	adj = div_u64(adj, USEC_PER_SEC);
	adj = neg_adj ? (word - adj) : (word + adj);

	incr_spec.ns = (adj >> GEM_SUBNSINCR_SIZE)
			& ((1 << GEM_NSINCR_SIZE) - 1);
	incr_spec.sub_ns = adj & ((1 << GEM_SUBNSINCR_SIZE) - 1);
	gem_tsu_incr_set(bp, &incr_spec);
	return 0;
}

static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	struct timespec64 now, then = ns_to_timespec64(delta);
	u32 adj, sign = 0;

	if (delta < 0) {
		sign = 1;
		delta = -delta;
	}

	if (delta > TSU_NSEC_MAX_VAL) {
		gem_tsu_get_time(&bp->ptp_clock_info, &now, NULL);
		now = timespec64_add(now, then);

		gem_tsu_set_time(&bp->ptp_clock_info,
				 (const struct timespec64 *)&now);
	} else {
		adj = (sign << GEM_ADDSUB_OFFSET) | delta;

		gem_writel(bp, TA, adj);
	}

	return 0;
}

static int gem_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static const struct ptp_clock_info gem_ptp_caps_template = {
	.owner		= THIS_MODULE,
	.name		= GEM_PTP_TIMER_NAME,
	.max_adj	= 0,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 1,
	.adjfine	= gem_ptp_adjfine,
	.adjtime	= gem_ptp_adjtime,
	.gettimex64	= gem_tsu_get_time,
	.settime64	= gem_tsu_set_time,
	.enable		= gem_ptp_enable,
};

static void gem_ptp_init_timer(struct macb *bp)
{
	u32 rem = 0;
	u64 adj;

	bp->tsu_incr.ns = div_u64_rem(NSEC_PER_SEC, bp->tsu_rate, &rem);
	if (rem) {
		adj = rem;
		adj <<= GEM_SUBNSINCR_SIZE;
		bp->tsu_incr.sub_ns = div_u64(adj, bp->tsu_rate);
	} else {
		bp->tsu_incr.sub_ns = 0;
	}
}
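
/* Worked example (added commentary, not part of the original driver):
 * assuming GEM_SUBNSINCR_SIZE is 24 and a 150 MHz TSU clock
 * (bp->tsu_rate = 150000000), gem_ptp_init_timer() computes
 *
 *	ns     = 1000000000 / 150000000        = 6
 *	rem    = 1000000000 - 6 * 150000000    = 100000000
 *	sub_ns = (100000000 << 24) / 150000000 = 11184810 (~2/3 in 24-bit
 *						 fixed point)
 *
 * so the 1588 timer advances by 6 + 2/3 ns on every TSU clock cycle;
 * gem_ptp_adjfine() then scales this base increment by scaled_ppm.
 */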

static void gem_ptp_init_tsu(struct macb *bp)
{
	struct timespec64 ts;

	/* 1. get current system time */
	ts = ns_to_timespec64(ktime_to_ns(ktime_get_real()));

	/* 2. set ptp timer */
	gem_tsu_set_time(&bp->ptp_clock_info, &ts);

	/* 3. set PTP timer increment value to BASE_INCREMENT */
	gem_tsu_incr_set(bp, &bp->tsu_incr);

	gem_writel(bp, TA, 0);
}

static void gem_ptp_clear_timer(struct macb *bp)
{
	bp->tsu_incr.sub_ns = 0;
	bp->tsu_incr.ns = 0;

	gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, 0));
	gem_writel(bp, TI, GEM_BF(NSINCR, 0));
	gem_writel(bp, TA, 0);
}

static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
			    u32 dma_desc_ts_2, struct timespec64 *ts)
{
	struct timespec64 tsu;

	ts->tv_sec = (GEM_BFEXT(DMA_SECH, dma_desc_ts_2) << GEM_DMA_SECL_SIZE) |
			GEM_BFEXT(DMA_SECL, dma_desc_ts_1);
	ts->tv_nsec = GEM_BFEXT(DMA_NSEC, dma_desc_ts_1);

	/* TSU overlapping workaround
	 * The timestamp only contains lower few bits of seconds,
	 * so add value from 1588 timer
	 */
	gem_tsu_get_time(&bp->ptp_clock_info, &tsu, NULL);

	/* If the top bit is set in the timestamp,
	 * but not in 1588 timer, it has rolled over,
	 * so subtract max size
	 */
	if ((ts->tv_sec & (GEM_DMA_SEC_TOP >> 1)) &&
	    !(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
		ts->tv_sec -= GEM_DMA_SEC_TOP;

	ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);

	return 0;
}

void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
		     struct macb_dma_desc *desc)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	struct macb_dma_desc_ptp *desc_ptp;
	struct timespec64 ts;

	if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
		desc_ptp = macb_ptp_desc(bp, desc);
		/* Unlikely but check */
		if (!desc_ptp) {
			dev_warn_ratelimited(&bp->pdev->dev,
					     "Timestamp not supported in BD\n");
			return;
		}
		gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
		memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
	}
}

static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
			  struct macb_dma_desc_ptp *desc_ptp)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;

	gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
	skb_tstamp_tx(skb, &shhwtstamps);
}

int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
		    struct macb_dma_desc *desc)
{
	unsigned long tail = READ_ONCE(queue->tx_ts_tail);
	unsigned long head = queue->tx_ts_head;
	struct macb_dma_desc_ptp *desc_ptp;
	struct gem_tx_ts *tx_timestamp;

	if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
		return -EINVAL;

	if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
		return -ENOMEM;

	desc_ptp = macb_ptp_desc(queue->bp, desc);
	/* Unlikely but check */
	if (!desc_ptp)
		return -EINVAL;
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	tx_timestamp = &queue->tx_timestamps[head];
	tx_timestamp->skb = skb;
	/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
	dma_rmb();
	tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
	tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
	/* move head */
	smp_store_release(&queue->tx_ts_head,
			  (head + 1) & (PTP_TS_BUFFER_SIZE - 1));

	schedule_work(&queue->tx_ts_task);
	return 0;
}
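
/* Note on the TX timestamp path (added commentary, not part of the original
 * driver): gem_ptp_txstamp() above is the producer side of a per-queue
 * single-producer/single-consumer ring of PTP_TS_BUFFER_SIZE entries
 * (a power of two, so indices wrap with "& (PTP_TS_BUFFER_SIZE - 1)").
 * It snapshots the descriptor timestamp words and publishes them by
 * advancing tx_ts_head with smp_store_release(); gem_tx_timestamp_flush()
 * below is the consumer running from a workqueue, pairing
 * smp_load_acquire() on tx_ts_head with its own release store on
 * tx_ts_tail, which is why no spinlock is taken between the TX completion
 * path and the worker.
 */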

static void gem_tx_timestamp_flush(struct work_struct *work)
{
	struct macb_queue *queue =
			container_of(work, struct macb_queue, tx_ts_task);
	unsigned long head, tail;
	struct gem_tx_ts *tx_ts;

	/* take current head */
	head = smp_load_acquire(&queue->tx_ts_head);
	tail = queue->tx_ts_tail;

	while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
		tx_ts = &queue->tx_timestamps[tail];
		gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
		/* cleanup */
		dev_kfree_skb_any(tx_ts->skb);
		/* remove old tail */
		smp_store_release(&queue->tx_ts_tail,
				  (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
		tail = queue->tx_ts_tail;
	}
}

void gem_ptp_init(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int q;

	bp->ptp_clock_info = gem_ptp_caps_template;

	/* nominal frequency and maximum adjustment in ppb */
	bp->tsu_rate = bp->ptp_info->get_tsu_rate(bp);
	bp->ptp_clock_info.max_adj = bp->ptp_info->get_ptp_max_adj();
	gem_ptp_init_timer(bp);
	bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &dev->dev);
	if (IS_ERR(bp->ptp_clock)) {
		pr_err("ptp clock register failed: %ld\n",
			PTR_ERR(bp->ptp_clock));
		bp->ptp_clock = NULL;
		return;
	} else if (bp->ptp_clock == NULL) {
		pr_err("ptp clock register failed\n");
		return;
	}

	spin_lock_init(&bp->tsu_clk_lock);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue->tx_ts_head = 0;
		queue->tx_ts_tail = 0;
		INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
	}

	gem_ptp_init_tsu(bp);

	dev_info(&bp->pdev->dev, "%s ptp clock registered.\n",
		 GEM_PTP_TIMER_NAME);
}

void gem_ptp_remove(struct net_device *ndev)
{
	struct macb *bp = netdev_priv(ndev);

	if (bp->ptp_clock)
		ptp_clock_unregister(bp->ptp_clock);

	gem_ptp_clear_timer(bp);

	dev_info(&bp->pdev->dev, "%s ptp clock unregistered.\n",
		 GEM_PTP_TIMER_NAME);
}

static int gem_ptp_set_ts_mode(struct macb *bp,
			       enum macb_bd_control tx_bd_control,
			       enum macb_bd_control rx_bd_control)
{
	gem_writel(bp, TXBDCTRL, GEM_BF(TXTSMODE, tx_bd_control));
	gem_writel(bp, RXBDCTRL, GEM_BF(RXTSMODE, rx_bd_control));

	return 0;
}

int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
{
	struct hwtstamp_config *tstamp_config;
	struct macb *bp = netdev_priv(dev);

	tstamp_config = &bp->tstamp_config;
	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
		return -EOPNOTSUPP;

	if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
		return -EFAULT;
	else
		return 0;
}

static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
{
	u32 reg_val;

	reg_val = macb_readl(bp, NCR);

	if (enable)
		macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
	else
		macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));

	return 0;
}
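
/* Usage sketch (illustrative only, not part of the original driver):
 * userspace enables hardware timestamping through the standard
 * SIOCSHWTSTAMP ioctl, which the macb core routes to gem_set_hwtst()
 * below, e.g.:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * The interface name and socket descriptor here are placeholders.
 */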

int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	enum macb_bd_control tx_bd_control = TSTAMP_DISABLED;
	enum macb_bd_control rx_bd_control = TSTAMP_DISABLED;
	struct hwtstamp_config *tstamp_config;
	struct macb *bp = netdev_priv(dev);
	u32 regval;

	tstamp_config = &bp->tstamp_config;
	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
		return -EOPNOTSUPP;

	if (copy_from_user(tstamp_config, ifr->ifr_data,
			   sizeof(*tstamp_config)))
		return -EFAULT;

	switch (tstamp_config->tx_type) {
	case HWTSTAMP_TX_OFF:
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (gem_ptp_set_one_step_sync(bp, 1) != 0)
			return -ERANGE;
		fallthrough;
	case HWTSTAMP_TX_ON:
		tx_bd_control = TSTAMP_ALL_FRAMES;
		break;
	default:
		return -ERANGE;
	}

	switch (tstamp_config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		rx_bd_control = TSTAMP_ALL_PTP_FRAMES;
		tstamp_config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		regval = macb_readl(bp, NCR);
		macb_writel(bp, NCR, (regval | MACB_BIT(SRTSM)));
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_ALL:
		rx_bd_control = TSTAMP_ALL_FRAMES;
		tstamp_config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		tstamp_config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	if (gem_ptp_set_ts_mode(bp, tx_bd_control, rx_bd_control) != 0)
		return -ERANGE;

	if (copy_to_user(ifr->ifr_data, tstamp_config, sizeof(*tstamp_config)))
		return -EFAULT;
	else
		return 0;
}