/**
 * 1588 PTP support for Cadence GEM device.
 *
 * Copyright (C) 2017 Cadence Design Systems - http://www.cadence.com
 *
 * Authors: Rafal Ozieblo <rafalo@cadence.com>
 *          Bartosz Folta <bfolta@cadence.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/time64.h>
#include <linux/ptp_classify.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
#include <linux/circ_buf.h>
#include <linux/spinlock.h>

#include "macb.h"

#define GEM_PTP_TIMER_NAME "gem-ptp-timer"

static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
					       struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap == HW_DMA_CAP_PTP)
		return (struct macb_dma_desc_ptp *)
				((u8 *)desc + sizeof(struct macb_dma_desc));
	if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP)
		return (struct macb_dma_desc_ptp *)
				((u8 *)desc + sizeof(struct macb_dma_desc)
				+ sizeof(struct macb_dma_desc_64));
	return NULL;
}

static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	unsigned long flags;
	long first, second;
	u32 secl, sech;

	spin_lock_irqsave(&bp->tsu_clk_lock, flags);
	first = gem_readl(bp, TN);
	secl = gem_readl(bp, TSL);
	sech = gem_readl(bp, TSH);
	second = gem_readl(bp, TN);

	/* test for nsec rollover */
	if (first > second) {
		/* if so, use later read & re-read seconds
		 * (assume all done within 1s)
		 */
		ts->tv_nsec = gem_readl(bp, TN);
		secl = gem_readl(bp, TSL);
		sech = gem_readl(bp, TSH);
	} else {
		ts->tv_nsec = first;
	}

	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
	ts->tv_sec = (((u64)sech << GEM_TSL_SIZE) | secl)
			& TSU_SEC_MAX_VAL;
	return 0;
}

static int gem_tsu_set_time(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	unsigned long flags;
	u32 ns, sech, secl;

	secl = (u32)ts->tv_sec;
	sech = (ts->tv_sec >> GEM_TSL_SIZE) & ((1 << GEM_TSH_SIZE) - 1);
	ns = ts->tv_nsec;

	spin_lock_irqsave(&bp->tsu_clk_lock, flags);

	/* TSH doesn't latch the time and no atomicity! */
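	/* Assumed rationale for the register write order below (an inference
	 * from the code and the surrounding comments, not taken from the GEM
	 * datasheet): TN is zeroed first so a nanosecond rollover cannot
	 * carry into the seconds value while it is being rewritten, TSH is
	 * written before TSL so the full seconds value takes effect together
	 * on the TSL write, and the real nanoseconds are restored last.
	 */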
	gem_writel(bp, TN, 0); /* clear to avoid overflow */
	gem_writel(bp, TSH, sech);
	/* write lower bits 2nd, for synchronized secs update */
	gem_writel(bp, TSL, secl);
	gem_writel(bp, TN, ns);

	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);

	return 0;
}

static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec)
{
	unsigned long flags;

	/* tsu_timer_incr register must be written after
	 * the tsu_timer_incr_sub_ns register and the write operation
	 * will cause the value written to the tsu_timer_incr_sub_ns register
	 * to take effect.
	 */
	spin_lock_irqsave(&bp->tsu_clk_lock, flags);
	gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, incr_spec->sub_ns));
	gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
	spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);

	return 0;
}

static int gem_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	struct tsu_incr incr_spec;
	bool neg_adj = false;
	u32 word;
	u64 adj;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	/* Adjustment is relative to base frequency */
	incr_spec.sub_ns = bp->tsu_incr.sub_ns;
	incr_spec.ns = bp->tsu_incr.ns;

	/* scaling: unused(8bit) | ns(8bit) | fractions(16bit) */
	word = ((u64)incr_spec.ns << GEM_SUBNSINCR_SIZE) + incr_spec.sub_ns;
	adj = (u64)scaled_ppm * word;
	/* Divide with rounding, i.e. the fixed-point equivalent of
	 * round(adj / USEC_PER_SEC)
	 */
	adj += (USEC_PER_SEC >> 1);
	adj >>= GEM_SUBNSINCR_SIZE; /* remove fractions */
	adj = div_u64(adj, USEC_PER_SEC);
	adj = neg_adj ? (word - adj) : (word + adj);

	incr_spec.ns = (adj >> GEM_SUBNSINCR_SIZE)
			& ((1 << GEM_NSINCR_SIZE) - 1);
	incr_spec.sub_ns = adj & ((1 << GEM_SUBNSINCR_SIZE) - 1);
	gem_tsu_incr_set(bp, &incr_spec);
	return 0;
}

static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
	struct timespec64 now, then = ns_to_timespec64(delta);
	u32 adj, sign = 0;

	if (delta < 0) {
		sign = 1;
		delta = -delta;
	}

	if (delta > TSU_NSEC_MAX_VAL) {
		gem_tsu_get_time(&bp->ptp_clock_info, &now);
		now = timespec64_add(now, then);

		gem_tsu_set_time(&bp->ptp_clock_info,
				 (const struct timespec64 *)&now);
	} else {
		adj = (sign << GEM_ADDSUB_OFFSET) | delta;

		gem_writel(bp, TA, adj);
	}

	return 0;
}

static int gem_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static const struct ptp_clock_info gem_ptp_caps_template = {
	.owner		= THIS_MODULE,
	.name		= GEM_PTP_TIMER_NAME,
	.max_adj	= 0,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 1,
	.adjfine	= gem_ptp_adjfine,
	.adjtime	= gem_ptp_adjtime,
	.gettime64	= gem_tsu_get_time,
	.settime64	= gem_tsu_set_time,
	.enable		= gem_ptp_enable,
};

static void gem_ptp_init_timer(struct macb *bp)
{
	u32 rem = 0;
	u64 adj;

	bp->tsu_incr.ns = div_u64_rem(NSEC_PER_SEC, bp->tsu_rate, &rem);
	if (rem) {
		adj = rem;
		adj <<= GEM_SUBNSINCR_SIZE;
		bp->tsu_incr.sub_ns = div_u64(adj, bp->tsu_rate);
	} else {
		bp->tsu_incr.sub_ns = 0;
	}
}

static void gem_ptp_init_tsu(struct macb *bp)
{
	struct timespec64 ts;

	/* 1. get current system time */
	ts = ns_to_timespec64(ktime_to_ns(ktime_get_real()));

	/* 2. set ptp timer */
	gem_tsu_set_time(&bp->ptp_clock_info, &ts);

	/* 3. set PTP timer increment value to BASE_INCREMENT */
	gem_tsu_incr_set(bp, &bp->tsu_incr);

	gem_writel(bp, TA, 0);
}

static void gem_ptp_clear_timer(struct macb *bp)
{
	bp->tsu_incr.sub_ns = 0;
	bp->tsu_incr.ns = 0;

	gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, 0));
	gem_writel(bp, TI, GEM_BF(NSINCR, 0));
	gem_writel(bp, TA, 0);
}

static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
			    u32 dma_desc_ts_2, struct timespec64 *ts)
{
	struct timespec64 tsu;

	ts->tv_sec = (GEM_BFEXT(DMA_SECH, dma_desc_ts_2) << GEM_DMA_SECL_SIZE) |
			GEM_BFEXT(DMA_SECL, dma_desc_ts_1);
	ts->tv_nsec = GEM_BFEXT(DMA_NSEC, dma_desc_ts_1);

	/* TSU overlapping workaround
	 * The timestamp only contains lower few bits of seconds,
	 * so add value from 1588 timer
	 */
	gem_tsu_get_time(&bp->ptp_clock_info, &tsu);

	/* If the top bit is set in the timestamp,
	 * but not in 1588 timer, it has rolled over,
	 * so subtract max size
	 */
	if ((ts->tv_sec & (GEM_DMA_SEC_TOP >> 1)) &&
	    !(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
		ts->tv_sec -= GEM_DMA_SEC_TOP;

	ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);

	return 0;
}

void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
		     struct macb_dma_desc *desc)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	struct macb_dma_desc_ptp *desc_ptp;
	struct timespec64 ts;

	if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
		desc_ptp = macb_ptp_desc(bp, desc);
		gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
		memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
	}
}

static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
			  struct macb_dma_desc_ptp *desc_ptp)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;

	gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
	skb_tstamp_tx(skb, &shhwtstamps);
}

int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
		    struct macb_dma_desc *desc)
{
	unsigned long tail = READ_ONCE(queue->tx_ts_tail);
	unsigned long head = queue->tx_ts_head;
	struct macb_dma_desc_ptp *desc_ptp;
	struct gem_tx_ts *tx_timestamp;

	if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
		return -EINVAL;

	if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
		return -ENOMEM;

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	desc_ptp = macb_ptp_desc(queue->bp, desc);
	tx_timestamp = &queue->tx_timestamps[head];
	tx_timestamp->skb = skb;
	/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
	dma_rmb();
	tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
	tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
	/* move head */
	smp_store_release(&queue->tx_ts_head,
			  (head + 1) & (PTP_TS_BUFFER_SIZE - 1));

	schedule_work(&queue->tx_ts_task);
	return 0;
}

static void gem_tx_timestamp_flush(struct work_struct *work)
{
	struct macb_queue *queue =
			container_of(work, struct macb_queue, tx_ts_task);
	unsigned long head, tail;
	struct gem_tx_ts *tx_ts;

	/* take current head */
	head = smp_load_acquire(&queue->tx_ts_head);
	tail = queue->tx_ts_tail;

	while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
		tx_ts = &queue->tx_timestamps[tail];
		gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
		/* cleanup */
		dev_kfree_skb_any(tx_ts->skb);
		/* remove old tail */
		smp_store_release(&queue->tx_ts_tail,
				  (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
		tail = queue->tx_ts_tail;
	}
}

void gem_ptp_init(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int q;

	bp->ptp_clock_info = gem_ptp_caps_template;

	/* nominal frequency and maximum adjustment in ppb */
	bp->tsu_rate = bp->ptp_info->get_tsu_rate(bp);
	bp->ptp_clock_info.max_adj = bp->ptp_info->get_ptp_max_adj();
	gem_ptp_init_timer(bp);
	bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &dev->dev);
	if (IS_ERR(bp->ptp_clock)) {
		pr_err("ptp clock register failed: %ld\n",
			PTR_ERR(bp->ptp_clock));
		bp->ptp_clock = NULL;
		return;
	} else if (bp->ptp_clock == NULL) {
		pr_err("ptp clock register failed\n");
		return;
	}

	spin_lock_init(&bp->tsu_clk_lock);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue->tx_ts_head = 0;
		queue->tx_ts_tail = 0;
		INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
	}

	gem_ptp_init_tsu(bp);

	dev_info(&bp->pdev->dev, "%s ptp clock registered.\n",
		 GEM_PTP_TIMER_NAME);
}

void gem_ptp_remove(struct net_device *ndev)
{
	struct macb *bp = netdev_priv(ndev);

	if (bp->ptp_clock)
		ptp_clock_unregister(bp->ptp_clock);

	gem_ptp_clear_timer(bp);

	dev_info(&bp->pdev->dev, "%s ptp clock unregistered.\n",
		 GEM_PTP_TIMER_NAME);
}

static int gem_ptp_set_ts_mode(struct macb *bp,
			       enum macb_bd_control tx_bd_control,
			       enum macb_bd_control rx_bd_control)
{
	gem_writel(bp, TXBDCTRL, GEM_BF(TXTSMODE, tx_bd_control));
	gem_writel(bp, RXBDCTRL, GEM_BF(RXTSMODE, rx_bd_control));

	return 0;
}

int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
{
	struct hwtstamp_config *tstamp_config;
	struct macb *bp = netdev_priv(dev);

	tstamp_config = &bp->tstamp_config;
	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
		return -EOPNOTSUPP;

	if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
		return -EFAULT;
	else
		return 0;
}

static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
{
	u32 reg_val;

	reg_val = macb_readl(bp, NCR);

	if (enable)
		macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
	else
		macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));

	return 0;
}

int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	enum macb_bd_control tx_bd_control = TSTAMP_DISABLED;
	enum macb_bd_control rx_bd_control = TSTAMP_DISABLED;
	struct hwtstamp_config *tstamp_config;
	struct macb *bp = netdev_priv(dev);
	u32 regval;

	tstamp_config = &bp->tstamp_config;
	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
		return -EOPNOTSUPP;

	if (copy_from_user(tstamp_config, ifr->ifr_data,
			   sizeof(*tstamp_config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (tstamp_config->flags)
		return -EINVAL;

	switch (tstamp_config->tx_type) {
	case HWTSTAMP_TX_OFF:
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		if (gem_ptp_set_one_step_sync(bp, 1) != 0)
			return -ERANGE;
		/* fall through */
	case HWTSTAMP_TX_ON:
		tx_bd_control = TSTAMP_ALL_FRAMES;
		break;
	default:
		return -ERANGE;
	}

	switch (tstamp_config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		rx_bd_control = TSTAMP_ALL_PTP_FRAMES;
		tstamp_config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		regval = macb_readl(bp, NCR);
		macb_writel(bp, NCR, (regval | MACB_BIT(SRTSM)));
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_ALL:
		rx_bd_control = TSTAMP_ALL_FRAMES;
		tstamp_config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		tstamp_config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	if (gem_ptp_set_ts_mode(bp, tx_bd_control, rx_bd_control) != 0)
		return -ERANGE;

	if (copy_to_user(ifr->ifr_data, tstamp_config, sizeof(*tstamp_config)))
		return -EFAULT;
	else
		return 0;
}
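
/* Illustrative sketch (not part of the driver): user space typically reaches
 * gem_set_hwtst()/gem_get_hwtst() through the standard SIOCSHWTSTAMP and
 * SIOCGHWTSTAMP ioctls. The socket fd and the "eth0" interface name below are
 * assumptions made only for this example; it needs <string.h>, <sys/ioctl.h>,
 * <sys/socket.h>, <net/if.h>, <linux/sockios.h> and <linux/net_tstamp.h>.
 *
 *	struct hwtstamp_config cfg;
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
 *		perror("SIOCSHWTSTAMP");
 *
 * On success the driver copies the effective configuration back into cfg;
 * for the PTPv2 filters above it reports HWTSTAMP_FILTER_PTP_V2_EVENT.
 */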