/**
 * 1588 PTP support for Cadence GEM device.
 *
 * Copyright (C) 2017 Cadence Design Systems - http://www.cadence.com
 *
 * Authors: Rafal Ozieblo <rafalo@cadence.com>
 *          Bartosz Folta <bfolta@cadence.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/time64.h>
#include <linux/ptp_classify.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
#include <linux/circ_buf.h>
#include <linux/spinlock.h>

#include "macb.h"

#define GEM_PTP_TIMER_NAME "gem-ptp-timer"

static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
                                               struct macb_dma_desc *desc)
{
        if (bp->hw_dma_cap == HW_DMA_CAP_PTP)
                return (struct macb_dma_desc_ptp *)
                        ((u8 *)desc + sizeof(struct macb_dma_desc));
        if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP)
                return (struct macb_dma_desc_ptp *)
                        ((u8 *)desc + sizeof(struct macb_dma_desc)
                        + sizeof(struct macb_dma_desc_64));
        return NULL;
}

static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
        struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
        unsigned long flags;
        long first, second;
        u32 secl, sech;

        spin_lock_irqsave(&bp->tsu_clk_lock, flags);
        first = gem_readl(bp, TN);
        secl = gem_readl(bp, TSL);
        sech = gem_readl(bp, TSH);
        second = gem_readl(bp, TN);

        /* test for nsec rollover */
        if (first > second) {
                /* if so, use later read & re-read seconds
                 * (assume all done within 1s)
                 */
                ts->tv_nsec = gem_readl(bp, TN);
                secl = gem_readl(bp, TSL);
                sech = gem_readl(bp, TSH);
        } else {
                ts->tv_nsec = first;
        }

        spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
        ts->tv_sec = (((u64)sech << GEM_TSL_SIZE) | secl)
                        & TSU_SEC_MAX_VAL;
        return 0;
}
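
/* Worked example of the rollover handling above (the numbers are
 * illustrative, not taken from hardware): if the first TN read returns
 * 999999900 ns and the second returns 100 ns, the nanosecond counter
 * wrapped between the two reads, so the TSL/TSH values sampled in between
 * may belong to either the old or the new second. The code therefore
 * discards the first sample and re-reads TN, TSL and TSH so that the
 * returned seconds and nanoseconds come from one consistent pass.
 */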

static int gem_tsu_set_time(struct ptp_clock_info *ptp,
                            const struct timespec64 *ts)
{
        struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
        unsigned long flags;
        u32 ns, sech, secl;

        secl = (u32)ts->tv_sec;
        sech = (ts->tv_sec >> GEM_TSL_SIZE) & ((1 << GEM_TSH_SIZE) - 1);
        ns = ts->tv_nsec;

        spin_lock_irqsave(&bp->tsu_clk_lock, flags);

        /* TSH doesn't latch the time and no atomicity! */
        gem_writel(bp, TN, 0); /* clear to avoid overflow */
        gem_writel(bp, TSH, sech);
        /* write lower bits 2nd, for synchronized secs update */
        gem_writel(bp, TSL, secl);
        gem_writel(bp, TN, ns);

        spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);

        return 0;
}

static int gem_tsu_incr_set(struct macb *bp, struct tsu_incr *incr_spec)
{
        unsigned long flags;

        /* tsu_timer_incr register must be written after
         * the tsu_timer_incr_sub_ns register and the write operation
         * will cause the value written to the tsu_timer_incr_sub_ns register
         * to take effect.
         */
        spin_lock_irqsave(&bp->tsu_clk_lock, flags);
        gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, incr_spec->sub_ns));
        gem_writel(bp, TI, GEM_BF(NSINCR, incr_spec->ns));
        spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);

        return 0;
}

static int gem_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
        struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
        struct tsu_incr incr_spec;
        bool neg_adj = false;
        u32 word;
        u64 adj;

        if (scaled_ppm < 0) {
                neg_adj = true;
                scaled_ppm = -scaled_ppm;
        }

        /* Adjustment is relative to base frequency */
        incr_spec.sub_ns = bp->tsu_incr.sub_ns;
        incr_spec.ns = bp->tsu_incr.ns;

        /* scaling: unused(8bit) | ns(8bit) | fractions(16bit) */
        word = ((u64)incr_spec.ns << GEM_SUBNSINCR_SIZE) + incr_spec.sub_ns;
        adj = (u64)scaled_ppm * word;
        /* Divide with rounding, equivalent to floating point division:
         * (temp / USEC_PER_SEC) + 0.5
         */
        adj += (USEC_PER_SEC >> 1);
        adj >>= GEM_SUBNSINCR_SIZE; /* remove fractions */
        adj = div_u64(adj, USEC_PER_SEC);
        adj = neg_adj ? (word - adj) : (word + adj);

        incr_spec.ns = (adj >> GEM_SUBNSINCR_SIZE)
                        & ((1 << GEM_NSINCR_SIZE) - 1);
        incr_spec.sub_ns = adj & ((1 << GEM_SUBNSINCR_SIZE) - 1);
        gem_tsu_incr_set(bp, &incr_spec);
        return 0;
}
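
/* Worked example for the conversion above, assuming an illustrative 250 MHz
 * TSU clock (base increment ns = 4, sub_ns = 0) and the 16 fraction bits
 * named in the scaling comment. scaled_ppm carries ppm with a 16-bit binary
 * fraction, so a requested +100 ppm arrives as 100 * 65536 = 6553600:
 *
 *   word     = 4 << 16                       = 262144
 *   adj      = 6553600 * 262144              = 1717986918400
 *   adj      = (adj + 500000) >> 16          = 26214407
 *   adj      = 26214407 / 1000000            = 26
 *   new word = 262144 + 26                   = 262170  ->  ns = 4, sub_ns = 26
 *
 * i.e. the nominal 4 ns increment grows by about 0.0004 ns per TSU cycle,
 * which is the requested +100 ppm frequency offset.
 */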

static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
        struct timespec64 now, then = ns_to_timespec64(delta);
        u32 adj, sign = 0;

        if (delta < 0) {
                sign = 1;
                delta = -delta;
        }

        if (delta > TSU_NSEC_MAX_VAL) {
                gem_tsu_get_time(&bp->ptp_clock_info, &now);
                if (sign)
                        now = timespec64_sub(now, then);
                else
                        now = timespec64_add(now, then);

                gem_tsu_set_time(&bp->ptp_clock_info,
                                 (const struct timespec64 *)&now);
        } else {
                adj = (sign << GEM_ADDSUB_OFFSET) | delta;

                gem_writel(bp, TA, adj);
        }

        return 0;
}

static int gem_ptp_enable(struct ptp_clock_info *ptp,
                          struct ptp_clock_request *rq, int on)
{
        return -EOPNOTSUPP;
}

static const struct ptp_clock_info gem_ptp_caps_template = {
        .owner          = THIS_MODULE,
        .name           = GEM_PTP_TIMER_NAME,
        .max_adj        = 0,
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 0,
        .n_pins         = 0,
        .pps            = 1,
        .adjfine        = gem_ptp_adjfine,
        .adjtime        = gem_ptp_adjtime,
        .gettime64      = gem_tsu_get_time,
        .settime64      = gem_tsu_set_time,
        .enable         = gem_ptp_enable,
};

static void gem_ptp_init_timer(struct macb *bp)
{
        u32 rem = 0;
        u64 adj;

        bp->tsu_incr.ns = div_u64_rem(NSEC_PER_SEC, bp->tsu_rate, &rem);
        if (rem) {
                adj = rem;
                adj <<= GEM_SUBNSINCR_SIZE;
                bp->tsu_incr.sub_ns = div_u64(adj, bp->tsu_rate);
        } else {
                bp->tsu_incr.sub_ns = 0;
        }
}
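
/* Worked example for gem_ptp_init_timer() above, assuming an illustrative
 * TSU clock rate of 300 MHz (the real rate is whatever
 * bp->ptp_info->get_tsu_rate() reports):
 *
 *   ns     = 1000000000 / 300000000        = 3, remainder 100000000
 *   sub_ns = (100000000 << 16) / 300000000 = 21845
 *
 * so the timer advances by 3 + 21845/65536 ~= 3.333 ns per TSU cycle,
 * matching the 3.333 ns period of a 300 MHz clock.
 */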

static void gem_ptp_init_tsu(struct macb *bp)
{
        struct timespec64 ts;

        /* 1. get current system time */
        ts = ns_to_timespec64(ktime_to_ns(ktime_get_real()));

        /* 2. set ptp timer */
        gem_tsu_set_time(&bp->ptp_clock_info, &ts);

        /* 3. set PTP timer increment value to BASE_INCREMENT */
        gem_tsu_incr_set(bp, &bp->tsu_incr);

        gem_writel(bp, TA, 0);
}

static void gem_ptp_clear_timer(struct macb *bp)
{
        bp->tsu_incr.sub_ns = 0;
        bp->tsu_incr.ns = 0;

        gem_writel(bp, TISUBN, GEM_BF(SUBNSINCR, 0));
        gem_writel(bp, TI, GEM_BF(NSINCR, 0));
        gem_writel(bp, TA, 0);
}

static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
                            u32 dma_desc_ts_2, struct timespec64 *ts)
{
        struct timespec64 tsu;

        ts->tv_sec = (GEM_BFEXT(DMA_SECH, dma_desc_ts_2) << GEM_DMA_SECL_SIZE) |
                        GEM_BFEXT(DMA_SECL, dma_desc_ts_1);
        ts->tv_nsec = GEM_BFEXT(DMA_NSEC, dma_desc_ts_1);

        /* TSU overlapping workaround
         * The timestamp only contains lower few bits of seconds,
         * so add value from 1588 timer
         */
        gem_tsu_get_time(&bp->ptp_clock_info, &tsu);

        /* If the top bit is set in the timestamp,
         * but not in 1588 timer, it has rolled over,
         * so subtract max size
         */
        if ((ts->tv_sec & (GEM_DMA_SEC_TOP >> 1)) &&
            !(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
                ts->tv_sec -= GEM_DMA_SEC_TOP;

        ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);

        return 0;
}
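
/* Worked example for the seconds merge above, assuming a 6-bit descriptor
 * seconds field (GEM_DMA_SEC_TOP == 64, GEM_DMA_SEC_MASK == 63; the field
 * width is only an illustrative assumption): if the descriptor holds
 * seconds value 63 but the TSU has meanwhile advanced to 128, the top-bit
 * test detects the wrap and subtracts 64 (giving -1) before the TSU's
 * upper seconds bits (128) are added back, yielding 127, the second in
 * which the frame was actually stamped, rather than the naive
 * 63 + 128 = 191.
 */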

void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
                     struct macb_dma_desc *desc)
{
        struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
        struct macb_dma_desc_ptp *desc_ptp;
        struct timespec64 ts;

        if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
                desc_ptp = macb_ptp_desc(bp, desc);
                gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
                memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
        }
}

static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
                          struct macb_dma_desc_ptp *desc_ptp)
{
        struct skb_shared_hwtstamps shhwtstamps;
        struct timespec64 ts;

        gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
        skb_tstamp_tx(skb, &shhwtstamps);
}

int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
                    struct macb_dma_desc *desc)
{
        unsigned long tail = READ_ONCE(queue->tx_ts_tail);
        unsigned long head = queue->tx_ts_head;
        struct macb_dma_desc_ptp *desc_ptp;
        struct gem_tx_ts *tx_timestamp;

        if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
                return -EINVAL;

        if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
                return -ENOMEM;

        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        desc_ptp = macb_ptp_desc(queue->bp, desc);
        tx_timestamp = &queue->tx_timestamps[head];
        tx_timestamp->skb = skb;
        tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
        tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
        /* move head */
        smp_store_release(&queue->tx_ts_head,
                          (head + 1) & (PTP_TS_BUFFER_SIZE - 1));

        schedule_work(&queue->tx_ts_task);
        return 0;
}

static void gem_tx_timestamp_flush(struct work_struct *work)
{
        struct macb_queue *queue =
                        container_of(work, struct macb_queue, tx_ts_task);
        unsigned long head, tail;
        struct gem_tx_ts *tx_ts;

        /* take current head */
        head = smp_load_acquire(&queue->tx_ts_head);
        tail = queue->tx_ts_tail;

        while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
                tx_ts = &queue->tx_timestamps[tail];
                gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
                /* cleanup */
                dev_kfree_skb_any(tx_ts->skb);
                /* remove old tail */
                smp_store_release(&queue->tx_ts_tail,
                                  (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
                tail = queue->tx_ts_tail;
        }
}

void gem_ptp_init(struct net_device *dev)
{
        struct macb *bp = netdev_priv(dev);
        struct macb_queue *queue;
        unsigned int q;

        bp->ptp_clock_info = gem_ptp_caps_template;

        /* nominal frequency and maximum adjustment in ppb */
        bp->tsu_rate = bp->ptp_info->get_tsu_rate(bp);
        bp->ptp_clock_info.max_adj = bp->ptp_info->get_ptp_max_adj();
        gem_ptp_init_timer(bp);
        bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &dev->dev);
        if (IS_ERR(bp->ptp_clock)) {
                pr_err("ptp clock register failed: %ld\n",
                        PTR_ERR(bp->ptp_clock));
                bp->ptp_clock = NULL;
                return;
        } else if (bp->ptp_clock == NULL) {
                pr_err("ptp clock register failed\n");
                return;
        }

        spin_lock_init(&bp->tsu_clk_lock);
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
                queue->tx_ts_head = 0;
                queue->tx_ts_tail = 0;
                INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
        }

        gem_ptp_init_tsu(bp);

        dev_info(&bp->pdev->dev, "%s ptp clock registered.\n",
                 GEM_PTP_TIMER_NAME);
}

void gem_ptp_remove(struct net_device *ndev)
{
        struct macb *bp = netdev_priv(ndev);

        if (bp->ptp_clock)
                ptp_clock_unregister(bp->ptp_clock);

        gem_ptp_clear_timer(bp);

        dev_info(&bp->pdev->dev, "%s ptp clock unregistered.\n",
                 GEM_PTP_TIMER_NAME);
}

static int gem_ptp_set_ts_mode(struct macb *bp,
                               enum macb_bd_control tx_bd_control,
                               enum macb_bd_control rx_bd_control)
{
        gem_writel(bp, TXBDCTRL, GEM_BF(TXTSMODE, tx_bd_control));
        gem_writel(bp, RXBDCTRL, GEM_BF(RXTSMODE, rx_bd_control));

        return 0;
}

int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
{
        struct hwtstamp_config *tstamp_config;
        struct macb *bp = netdev_priv(dev);

        tstamp_config = &bp->tstamp_config;
        if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
                return -EOPNOTSUPP;

        if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
                return -EFAULT;
        else
                return 0;
}

static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
{
        u32 reg_val;

        reg_val = macb_readl(bp, NCR);

        if (enable)
                macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
        else
                macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));

        return 0;
}

int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        enum macb_bd_control tx_bd_control = TSTAMP_DISABLED;
        enum macb_bd_control rx_bd_control = TSTAMP_DISABLED;
        struct hwtstamp_config *tstamp_config;
        struct macb *bp = netdev_priv(dev);
        u32 regval;

        tstamp_config = &bp->tstamp_config;
        if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0)
                return -EOPNOTSUPP;

        if (copy_from_user(tstamp_config, ifr->ifr_data,
                           sizeof(*tstamp_config)))
                return -EFAULT;

        /* reserved for future extensions */
        if (tstamp_config->flags)
                return -EINVAL;

        switch (tstamp_config->tx_type) {
        case HWTSTAMP_TX_OFF:
                break;
        case HWTSTAMP_TX_ONESTEP_SYNC:
                if (gem_ptp_set_one_step_sync(bp, 1) != 0)
                        return -ERANGE;
                /* fall through */
        case HWTSTAMP_TX_ON:
                tx_bd_control = TSTAMP_ALL_FRAMES;
                break;
        default:
                return -ERANGE;
        }

        switch (tstamp_config->rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                rx_bd_control = TSTAMP_ALL_PTP_FRAMES;
                tstamp_config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                regval = macb_readl(bp, NCR);
                macb_writel(bp, NCR, (regval | MACB_BIT(SRTSM)));
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_ALL:
                rx_bd_control = TSTAMP_ALL_FRAMES;
                tstamp_config->rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        default:
                tstamp_config->rx_filter = HWTSTAMP_FILTER_NONE;
                return -ERANGE;
        }

        if (gem_ptp_set_ts_mode(bp, tx_bd_control, rx_bd_control) != 0)
                return -ERANGE;

        if (copy_to_user(ifr->ifr_data, tstamp_config, sizeof(*tstamp_config)))
                return -EFAULT;
        else
                return 0;
}
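
/* Usage sketch (illustrative, not part of the driver): the hwtstamp
 * handlers above are reached from user space via the SIOCSHWTSTAMP ioctl
 * (headers: linux/net_tstamp.h, linux/sockios.h, net/if.h, sys/ioctl.h,
 * sys/socket.h), roughly as follows; "eth0" is an assumed interface name:
 *
 *      struct hwtstamp_config cfg = { 0 };
 *      struct ifreq ifr = { 0 };
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      cfg.tx_type = HWTSTAMP_TX_ON;
 *      cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (char *)&cfg;
 *      ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter reflects the filter actually programmed; for
 * the PTP v2 event filters this driver reports back
 * HWTSTAMP_FILTER_PTP_V2_EVENT.
 */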