// SPDX-License-Identifier: GPL-2.0-only
/* Aquantia Corporation Network Driver
 * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
 */

/* File aq_ptp.c:
 * Definition of functions for Linux PTP support.
 */

#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>

#include "aq_nic.h"
#include "aq_ptp.h"
#include "aq_ring.h"
#include "aq_phy.h"
#include "aq_filters.h"

#define AQ_PTP_TX_TIMEOUT        (HZ *  10)

#define POLL_SYNC_TIMER_MS 15

enum ptp_speed_offsets {
	ptp_offset_idx_10 = 0,
	ptp_offset_idx_100,
	ptp_offset_idx_1000,
	ptp_offset_idx_2500,
	ptp_offset_idx_5000,
	ptp_offset_idx_10000,
};

struct ptp_skb_ring {
	struct sk_buff **buff;
	spinlock_t lock;
	unsigned int size;
	unsigned int head;
	unsigned int tail;
};

struct ptp_tx_timeout {
	spinlock_t lock;
	bool active;
	unsigned long tx_start;
};

struct aq_ptp_s {
	struct aq_nic_s *aq_nic;
	struct hwtstamp_config hwtstamp_config;
	spinlock_t ptp_lock;
	spinlock_t ptp_ring_lock;
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_info;

	atomic_t offset_egress;
	atomic_t offset_ingress;

	struct aq_ring_param_s ptp_ring_param;

	struct ptp_tx_timeout ptp_tx_timeout;

	unsigned int idx_vector;
	struct napi_struct napi;

	struct aq_ring_s ptp_tx;
	struct aq_ring_s ptp_rx;
	struct aq_ring_s hwts_rx;

	struct ptp_skb_ring skb_ring;

	struct aq_rx_filter_l3l4 udp_filter;
	struct aq_rx_filter_l2 eth_type_filter;

	struct delayed_work poll_sync;
	u32 poll_timeout_ms;

	bool extts_pin_enabled;
	u64 last_sync1588_ts;
};

struct ptp_tm_offset {
	unsigned int mbps;
	int egress;
	int ingress;
};

static struct ptp_tm_offset ptp_offset[6];

void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int i, egress, ingress;

	if (!aq_ptp)
		return;

	egress = 0;
	ingress = 0;

	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
		if (mbps == ptp_offset[i].mbps) {
			egress = ptp_offset[i].egress;
			ingress = ptp_offset[i].ingress;
			break;
		}
	}

	atomic_set(&aq_ptp->offset_egress, egress);
	atomic_set(&aq_ptp->offset_ingress, ingress);
}
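
/* The skb ring parks an extra reference to each outgoing PTP packet
 * until its egress timestamp arrives from hardware: aq_ptp_xmit()
 * pushes the skb here and aq_ptp_tx_hwtstamp() later pops it in FIFO
 * order to attach the timestamp via skb_tstamp_tx().  One slot is kept
 * unused so that head == tail always means "empty" and a full ring can
 * be detected without a separate counter.
 */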

static int __aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
{
	unsigned int next_head = (ring->head + 1) % ring->size;

	if (next_head == ring->tail)
		return -ENOMEM;

	ring->buff[ring->head] = skb_get(skb);
	ring->head = next_head;

	return 0;
}

static int aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ring->lock, flags);
	ret = __aq_ptp_skb_put(ring, skb);
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}

static struct sk_buff *__aq_ptp_skb_get(struct ptp_skb_ring *ring)
{
	struct sk_buff *skb;

	if (ring->tail == ring->head)
		return NULL;

	skb = ring->buff[ring->tail];
	ring->tail = (ring->tail + 1) % ring->size;

	return skb;
}

static struct sk_buff *aq_ptp_skb_get(struct ptp_skb_ring *ring)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&ring->lock, flags);
	skb = __aq_ptp_skb_get(ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return skb;
}

static unsigned int aq_ptp_skb_buf_len(struct ptp_skb_ring *ring)
{
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&ring->lock, flags);
	len = (ring->head >= ring->tail) ?
	       ring->head - ring->tail :
	       ring->size - ring->tail + ring->head;
	spin_unlock_irqrestore(&ring->lock, flags);

	return len;
}

static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
{
	struct sk_buff **buff = kmalloc(sizeof(*buff) * size, GFP_KERNEL);

	if (!buff)
		return -ENOMEM;

	spin_lock_init(&ring->lock);

	ring->buff = buff;
	ring->size = size;
	ring->head = 0;
	ring->tail = 0;

	return 0;
}

static void aq_ptp_skb_ring_clean(struct ptp_skb_ring *ring)
{
	struct sk_buff *skb;

	while ((skb = aq_ptp_skb_get(ring)) != NULL)
		dev_kfree_skb_any(skb);
}

static void aq_ptp_skb_ring_release(struct ptp_skb_ring *ring)
{
	if (ring->buff) {
		aq_ptp_skb_ring_clean(ring);
		kfree(ring->buff);
		ring->buff = NULL;
	}
}

static void aq_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout)
{
	spin_lock_init(&timeout->lock);
	timeout->active = false;
}

static void aq_ptp_tx_timeout_start(struct aq_ptp_s *aq_ptp)
{
	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
	unsigned long flags;

	spin_lock_irqsave(&timeout->lock, flags);
	timeout->active = true;
	timeout->tx_start = jiffies;
	spin_unlock_irqrestore(&timeout->lock, flags);
}

static void aq_ptp_tx_timeout_update(struct aq_ptp_s *aq_ptp)
{
	if (!aq_ptp_skb_buf_len(&aq_ptp->skb_ring)) {
		struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
		unsigned long flags;

		spin_lock_irqsave(&timeout->lock, flags);
		timeout->active = false;
		spin_unlock_irqrestore(&timeout->lock, flags);
	}
}

static void aq_ptp_tx_timeout_check(struct aq_ptp_s *aq_ptp)
{
	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
	unsigned long flags;
	bool timeout_flag;

	timeout_flag = false;

	spin_lock_irqsave(&timeout->lock, flags);
	if (timeout->active) {
		timeout_flag = time_is_before_jiffies(timeout->tx_start +
						      AQ_PTP_TX_TIMEOUT);
		/* reset active flag if timeout detected */
		if (timeout_flag)
			timeout->active = false;
	}
	spin_unlock_irqrestore(&timeout->lock, flags);

	if (timeout_flag) {
		aq_ptp_skb_ring_clean(&aq_ptp->skb_ring);
		netdev_err(aq_ptp->aq_nic->ndev,
			   "PTP Timeout. Clearing Tx Timestamp SKBs\n");
	}
}

/* aq_ptp_adjfine
 * @ptp: the ptp clock structure
 * @scaled_ppm: parts per million adjustment from base, as a 16.16
 *              fixed-point value
 *
 * Adjust the frequency of the hardware clock by the indicated amount
 * from the base frequency.
 */
static int aq_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;

	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_hw_ops->hw_adj_clock_freq(aq_nic->aq_hw,
					     scaled_ppm_to_ppb(scaled_ppm));
	mutex_unlock(&aq_nic->fwreq_mutex);

	return 0;
}
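
/* Note: the adjfine callback receives scaled_ppm, i.e. parts per
 * million as a 16.16 fixed-point value.  For example, a request of
 * 65536 scaled_ppm is +1 ppm, which scaled_ppm_to_ppb() converts to
 * 1000 ppb before it is handed to the firmware call above.
 */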

/* aq_ptp_adjtime
 * @ptp: the ptp clock structure
 * @delta: offset in ns to adjust the hardware clock by
 *
 * Adjust the hardware clock by the requested delta.
 */
static int aq_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	unsigned long flags;

	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
	aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, delta);
	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);

	return 0;
}

/* aq_ptp_gettime
 * @ptp: the ptp clock structure
 * @ts: timespec64 structure to hold the current time value
 *
 * Read the hardware clock and return the value in ns, after converting
 * it into a struct timespec64.
 */
static int aq_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &ns);
	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/* aq_ptp_settime
 * @ptp: the ptp clock structure
 * @ts: the timespec64 containing the new time for the hardware clock
 *
 * Set the hardware clock to the requested time; the delta between the
 * requested time and the current hardware time is applied as a clock
 * adjustment.
 */
static int aq_ptp_settime(struct ptp_clock_info *ptp,
			  const struct timespec64 *ts)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	unsigned long flags;
	u64 ns = timespec64_to_ns(ts);
	u64 now;

	spin_lock_irqsave(&aq_ptp->ptp_lock, flags);
	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &now);
	aq_nic->aq_hw_ops->hw_adj_sys_clock(aq_nic->aq_hw, (s64)ns - (s64)now);

	spin_unlock_irqrestore(&aq_ptp->ptp_lock, flags);

	return 0;
}

static void aq_ptp_convert_to_hwtstamp(struct aq_ptp_s *aq_ptp,
				       struct skb_shared_hwtstamps *hwtstamp,
				       u64 timestamp)
{
	memset(hwtstamp, 0, sizeof(*hwtstamp));
	hwtstamp->hwtstamp = ns_to_ktime(timestamp);
}

static int aq_ptp_hw_pin_conf(struct aq_nic_s *aq_nic, u32 pin_index, u64 start,
			      u64 period)
{
	if (period)
		netdev_dbg(aq_nic->ndev,
			   "Enable GPIO %d pulsing, start time %llu, period %u\n",
			   pin_index, start, (u32)period);
	else
		netdev_dbg(aq_nic->ndev,
			   "Disable GPIO %d pulsing, start time %llu, period %u\n",
			   pin_index, start, (u32)period);

	/* Notify hardware of request to begin sending pulses.
	 * If period is ZERO then pulses are disabled.
	 */
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_hw_ops->hw_gpio_pulse(aq_nic->aq_hw, pin_index,
					 start, (u32)period);
	mutex_unlock(&aq_nic->fwreq_mutex);

	return 0;
}

static int aq_ptp_perout_pin_configure(struct ptp_clock_info *ptp,
				       struct ptp_clock_request *rq, int on)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct ptp_clock_time *t = &rq->perout.period;
	struct ptp_clock_time *s = &rq->perout.start;
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 start, period;
	u32 pin_index = rq->perout.index;

	/* verify the request channel is there */
	if (pin_index >= ptp->n_per_out)
		return -EINVAL;

	/* we cannot support periods greater
	 * than 4 seconds due to reg limit
	 */
	if (t->sec > 4 || t->sec < 0)
		return -ERANGE;

	/* convert to unsigned 64b ns,
	 * verify we can put it in a 32b register
	 */
	period = on ? t->sec * NSEC_PER_SEC + t->nsec : 0;

	/* verify the value is in range supported by hardware */
	if (period > U32_MAX)
		return -ERANGE;
	/* convert to unsigned 64b ns */
	/* TODO convert to AQ time */
	start = on ? s->sec * NSEC_PER_SEC + s->nsec : 0;

	aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);

	return 0;
}

static int aq_ptp_pps_pin_configure(struct ptp_clock_info *ptp,
				    struct ptp_clock_request *rq, int on)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 start, period;
	u32 pin_index = 0;
	u32 rest = 0;

	/* verify the request channel is there */
	if (pin_index >= ptp->n_per_out)
		return -EINVAL;

	aq_nic->aq_hw_ops->hw_get_ptp_ts(aq_nic->aq_hw, &start);
	div_u64_rem(start, NSEC_PER_SEC, &rest);
	period = on ? NSEC_PER_SEC : 0; /* PPS - pulse per second */
	start = on ? start - rest + NSEC_PER_SEC *
		(rest > 990000000LL ? 2 : 1) : 0;

	aq_ptp_hw_pin_conf(aq_nic, pin_index, start, period);

	return 0;
}
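
/* A worked example of the PPS start alignment above (values are for
 * illustration only): if the PTP clock currently reads 12.503000000 s,
 * the remainder is 503 ms and the first pulse is scheduled for 13 s;
 * at 12.995000000 s (remainder above 990 ms) the start is pushed out
 * to 14 s, presumably to leave the hardware enough lead time before
 * the first edge.
 */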

static void aq_ptp_extts_pin_ctrl(struct aq_ptp_s *aq_ptp)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u32 enable = aq_ptp->extts_pin_enabled;

	if (aq_nic->aq_hw_ops->hw_extts_gpio_enable)
		aq_nic->aq_hw_ops->hw_extts_gpio_enable(aq_nic->aq_hw, 0,
							enable);
}

static int aq_ptp_extts_pin_configure(struct ptp_clock_info *ptp,
				      struct ptp_clock_request *rq, int on)
{
	struct aq_ptp_s *aq_ptp = container_of(ptp, struct aq_ptp_s, ptp_info);

	u32 pin_index = rq->extts.index;

	if (pin_index >= ptp->n_ext_ts)
		return -EINVAL;

	aq_ptp->extts_pin_enabled = !!on;
	if (on) {
		aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
		cancel_delayed_work_sync(&aq_ptp->poll_sync);
		schedule_delayed_work(&aq_ptp->poll_sync,
				      msecs_to_jiffies(aq_ptp->poll_timeout_ms));
	}

	aq_ptp_extts_pin_ctrl(aq_ptp);
	return 0;
}

/* aq_ptp_gpio_feature_enable
 * @ptp: the ptp clock structure
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 */
static int aq_ptp_gpio_feature_enable(struct ptp_clock_info *ptp,
				      struct ptp_clock_request *rq, int on)
{
	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return aq_ptp_extts_pin_configure(ptp, rq, on);
	case PTP_CLK_REQ_PEROUT:
		return aq_ptp_perout_pin_configure(ptp, rq, on);
	case PTP_CLK_REQ_PPS:
		return aq_ptp_pps_pin_configure(ptp, rq, on);
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* aq_ptp_verify
 * @ptp: the ptp clock structure
 * @pin: index of the pin in question
 * @func: the desired function to use
 * @chan: the function channel index to use
 */
static int aq_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			 enum ptp_pin_function func, unsigned int chan)
{
	/* verify the requested pin is there */
	if (!ptp->pin_config || pin >= ptp->n_pins)
		return -EINVAL;

	/* enforce locked channels, no changing them */
	if (chan != ptp->pin_config[pin].chan)
		return -EINVAL;

	/* we want to keep the functions locked as well */
	if (func != ptp->pin_config[pin].func)
		return -EINVAL;

	return 0;
}

/* aq_ptp_tx_hwtstamp - utility function which handles the TX time stamp
 * @aq_nic: the private NIC struct
 * @timestamp: the egress timestamp reported by hardware, in ns
 *
 * The timestamp is adjusted by the egress offset for the current link
 * speed, converted into an skb_shared_hwtstamps structure and passed up
 * the network stack on the oldest pending PTP skb.
 */
void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct sk_buff *skb = aq_ptp_skb_get(&aq_ptp->skb_ring);
	struct skb_shared_hwtstamps hwtstamp;

	if (!skb) {
		netdev_err(aq_nic->ndev, "have timestamp but tx_queues empty\n");
		return;
	}

	timestamp += atomic_read(&aq_ptp->offset_egress);
	aq_ptp_convert_to_hwtstamp(aq_ptp, &hwtstamp, timestamp);
	skb_tstamp_tx(skb, &hwtstamp);
	dev_kfree_skb_any(skb);

	aq_ptp_tx_timeout_update(aq_ptp);
}

/* aq_ptp_rx_hwtstamp - utility function which handles the RX time stamp
 * @aq_ptp: the private PTP struct
 * @skb: particular skb to send timestamp with
 * @timestamp: the ingress timestamp reported by hardware, in ns
 *
 * The timestamp is adjusted by the ingress offset for the current link
 * speed and stored in the hwtstamps structure of the skb, which is
 * passed up the network stack.
 */
static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
			       u64 timestamp)
{
	timestamp -= atomic_read(&aq_ptp->offset_ingress);
	aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
}

void aq_ptp_hwtstamp_config_get(struct aq_ptp_s *aq_ptp,
				struct hwtstamp_config *config)
{
	*config = aq_ptp->hwtstamp_config;
}
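
/* Two receive filters steer PTP traffic into the dedicated PTP Rx ring:
 * an L3/L4 filter matching UDP destination port PTP_EV_PORT (319, the
 * PTP event port used by the IPv4/UDP transport) and an L2 filter
 * matching ethertype ETH_P_1588 (0x88F7, PTP over IEEE 802.3).  Both
 * are programmed or cleared by aq_ptp_hwtstamp_config_set() below.
 */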

static void aq_ptp_prepare_filters(struct aq_ptp_s *aq_ptp)
{
	aq_ptp->udp_filter.cmd = HW_ATL_RX_ENABLE_FLTR_L3L4 |
			       HW_ATL_RX_ENABLE_CMP_PROT_L4 |
			       HW_ATL_RX_UDP |
			       HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
			       HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT |
			       HW_ATL_RX_ENABLE_QUEUE_L3L4 |
			       aq_ptp->ptp_rx.idx << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
	aq_ptp->udp_filter.p_dst = PTP_EV_PORT;

	aq_ptp->eth_type_filter.ethertype = ETH_P_1588;
	aq_ptp->eth_type_filter.queue = aq_ptp->ptp_rx.idx;
}

int aq_ptp_hwtstamp_config_set(struct aq_ptp_s *aq_ptp,
			       struct hwtstamp_config *config)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	const struct aq_hw_ops *hw_ops;
	int err = 0;

	hw_ops = aq_nic->aq_hw_ops;
	if (config->tx_type == HWTSTAMP_TX_ON ||
	    config->rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT) {
		aq_ptp_prepare_filters(aq_ptp);
		if (hw_ops->hw_filter_l3l4_set) {
			err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
							 &aq_ptp->udp_filter);
		}
		if (!err && hw_ops->hw_filter_l2_set) {
			err = hw_ops->hw_filter_l2_set(aq_nic->aq_hw,
						       &aq_ptp->eth_type_filter);
		}
		aq_utils_obj_set(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
	} else {
		aq_ptp->udp_filter.cmd &= ~HW_ATL_RX_ENABLE_FLTR_L3L4;
		if (hw_ops->hw_filter_l3l4_set) {
			err = hw_ops->hw_filter_l3l4_set(aq_nic->aq_hw,
							 &aq_ptp->udp_filter);
		}
		if (!err && hw_ops->hw_filter_l2_clear) {
			err = hw_ops->hw_filter_l2_clear(aq_nic->aq_hw,
							 &aq_ptp->eth_type_filter);
		}
		aq_utils_obj_clear(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP);
	}

	if (err)
		return -EREMOTEIO;

	aq_ptp->hwtstamp_config = *config;

	return 0;
}

bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return false;

	return &aq_ptp->ptp_tx == ring ||
	       &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
}

u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
		      unsigned int len)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	u64 timestamp = 0;
	u16 ret = aq_nic->aq_hw_ops->rx_extract_ts(aq_nic->aq_hw,
						   p, len, &timestamp);

	if (ret > 0)
		aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);

	return ret;
}
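
/* NAPI poll routine for the PTP vector.  It services the three
 * dedicated rings in turn: completed descriptors on the PTP Tx ring,
 * hardware timestamps delivered on the HWTS Rx ring, and PTP packets
 * received on the PTP Rx ring.  If any Tx or HWTS work was done, the
 * full budget is reported so that polling continues.
 */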

static int aq_ptp_poll(struct napi_struct *napi, int budget)
{
	struct aq_ptp_s *aq_ptp = container_of(napi, struct aq_ptp_s, napi);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	bool was_cleaned = false;
	int work_done = 0;
	int err;

	/* Processing PTP TX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_tx_head_update(aq_nic->aq_hw,
							&aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->ptp_tx.sw_head != aq_ptp->ptp_tx.hw_head) {
		aq_ring_tx_clean(&aq_ptp->ptp_tx);

		was_cleaned = true;
	}

	/* Processing HW_TIMESTAMP RX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_receive(aq_nic->aq_hw,
							 &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->hwts_rx.sw_head != aq_ptp->hwts_rx.hw_head) {
		aq_ring_hwts_rx_clean(&aq_ptp->hwts_rx, aq_nic);

		err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
							      &aq_ptp->hwts_rx);
		if (err < 0)
			goto err_exit;

		was_cleaned = true;
	}

	/* Processing PTP RX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_rx_receive(aq_nic->aq_hw,
						    &aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->ptp_rx.sw_head != aq_ptp->ptp_rx.hw_head) {
		unsigned int sw_tail_old;

		err = aq_ring_rx_clean(&aq_ptp->ptp_rx, napi, &work_done, budget);
		if (err < 0)
			goto err_exit;

		sw_tail_old = aq_ptp->ptp_rx.sw_tail;
		err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
		if (err < 0)
			goto err_exit;

		err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
							 &aq_ptp->ptp_rx,
							 sw_tail_old);
		if (err < 0)
			goto err_exit;
	}

	if (was_cleaned)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
					BIT_ULL(aq_ptp->ptp_ring_param.vec_idx));
	}

err_exit:
	return work_done;
}

static irqreturn_t aq_ptp_isr(int irq, void *private)
{
	struct aq_ptp_s *aq_ptp = private;
	int err = 0;

	if (!aq_ptp) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&aq_ptp->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}
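
/* Transmit path for PTP packets: the skb is parked in the timestamp
 * skb ring with SKBTX_IN_PROGRESS set, then mapped and handed to the
 * dedicated PTP Tx ring.  The egress timestamp arrives later on the
 * HWTS ring and is matched to the skb in aq_ptp_tx_hwtstamp().
 */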

int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct aq_ring_s *ring = &aq_ptp->ptp_tx;
	unsigned long irq_flags;
	int err = NETDEV_TX_OK;
	unsigned int frags;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;
	/* Frags cannot be bigger than 16KB
	 * because PTP usually works without
	 * jumbo frames anyway
	 */
	if (frags > AQ_CFG_SKB_FRAGS_MAX || frags > aq_ring_avail_dx(ring)) {
		/* Drop packet because it doesn't make sense to delay it */
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	err = aq_ptp_skb_put(&aq_ptp->skb_ring, skb);
	if (err) {
		netdev_err(aq_nic->ndev, "SKB ring overflow (%u)!\n",
			   ring->size);
		return NETDEV_TX_BUSY;
	}
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	aq_ptp_tx_timeout_start(aq_ptp);
	skb_tx_timestamp(skb);

	spin_lock_irqsave(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
	frags = aq_nic_map_skb(aq_nic, skb, ring);

	if (likely(frags)) {
		err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
							 ring, frags);
		if (err >= 0) {
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		err = NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);

err_exit:
	return err;
}

void aq_ptp_service_task(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_ptp_tx_timeout_check(aq_ptp);
}

int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
{
	struct pci_dev *pdev = aq_nic->pdev;
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	if (pdev->msix_enabled || pdev->msi_enabled) {
		err = request_irq(pci_irq_vector(pdev, aq_ptp->idx_vector),
				  aq_ptp_isr, 0, aq_nic->ndev->name, aq_ptp);
	} else {
		err = -EINVAL;
		goto err_exit;
	}

err_exit:
	return err;
}

void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct pci_dev *pdev = aq_nic->pdev;

	if (!aq_ptp)
		return;

	free_irq(pci_irq_vector(pdev, aq_ptp->idx_vector), aq_ptp);
}

int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	err = aq_ring_init(&aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
						 &aq_ptp->ptp_tx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;

	err = aq_ring_init(&aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
						 &aq_ptp->ptp_rx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;

	err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
	if (err < 0)
		goto err_rx_free;
	err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
						 &aq_ptp->ptp_rx,
						 0U);
	if (err < 0)
		goto err_rx_free;

	err = aq_ring_init(&aq_ptp->hwts_rx);
	if (err < 0)
		goto err_rx_free;
	err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
						 &aq_ptp->hwts_rx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
						      &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	return err;

err_rx_free:
	aq_ring_rx_deinit(&aq_ptp->ptp_rx);
err_exit:
	return err;
}

int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	err = aq_nic->aq_hw_ops->hw_ring_tx_start(aq_nic->aq_hw, &aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;

	err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw, &aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;

	err = aq_nic->aq_hw_ops->hw_ring_rx_start(aq_nic->aq_hw,
						  &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	napi_enable(&aq_ptp->napi);

err_exit:
	return err;
}

void aq_ptp_ring_stop(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_nic->aq_hw_ops->hw_ring_tx_stop(aq_nic->aq_hw, &aq_ptp->ptp_tx);
	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->ptp_rx);

	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->hwts_rx);

	napi_disable(&aq_ptp->napi);
}

void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp || !aq_ptp->ptp_tx.aq_nic || !aq_ptp->ptp_rx.aq_nic)
		return;

	aq_ring_tx_clean(&aq_ptp->ptp_tx);
	aq_ring_rx_deinit(&aq_ptp->ptp_rx);
}

#define PTP_8TC_RING_IDX            8
#define PTP_4TC_RING_IDX           16
#define PTP_HWST_RING_IDX          31

int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	unsigned int tx_ring_idx, rx_ring_idx;
	struct aq_ring_s *hwts;
	u32 tx_tc_mode, rx_tc_mode;
	struct aq_ring_s *ring;
	int err;

	if (!aq_ptp)
		return 0;

	/* Index must be 8 (for 8 TCs) or 16 (for 4 TCs).
	 * It depends on the Traffic Class mode.
	 */
	aq_nic->aq_hw_ops->hw_tx_tc_mode_get(aq_nic->aq_hw, &tx_tc_mode);
	if (tx_tc_mode == 0)
		tx_ring_idx = PTP_8TC_RING_IDX;
	else
		tx_ring_idx = PTP_4TC_RING_IDX;

	ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
				tx_ring_idx, &aq_nic->aq_nic_cfg);
	if (!ring) {
		err = -ENOMEM;
		goto err_exit;
	}

	aq_nic->aq_hw_ops->hw_rx_tc_mode_get(aq_nic->aq_hw, &rx_tc_mode);
	if (rx_tc_mode == 0)
		rx_ring_idx = PTP_8TC_RING_IDX;
	else
		rx_ring_idx = PTP_4TC_RING_IDX;

	ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
				rx_ring_idx, &aq_nic->aq_nic_cfg);
	if (!ring) {
		err = -ENOMEM;
		goto err_exit_ptp_tx;
	}

	hwts = aq_ring_hwts_rx_alloc(&aq_ptp->hwts_rx, aq_nic, PTP_HWST_RING_IDX,
				     aq_nic->aq_nic_cfg.rxds,
				     aq_nic->aq_nic_cfg.aq_hw_caps->rxd_size);
	if (!hwts) {
		err = -ENOMEM;
		goto err_exit_ptp_rx;
	}

	err = aq_ptp_skb_ring_init(&aq_ptp->skb_ring, aq_nic->aq_nic_cfg.rxds);
	if (err != 0) {
		err = -ENOMEM;
		goto err_exit_hwts_rx;
	}

	aq_ptp->ptp_ring_param.vec_idx = aq_ptp->idx_vector;
	aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
			aq_nic_get_cfg(aq_nic)->aq_rss.base_cpu_number;
	cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
			&aq_ptp->ptp_ring_param.affinity_mask);

	return 0;

err_exit_hwts_rx:
	aq_ring_free(&aq_ptp->hwts_rx);
err_exit_ptp_rx:
	aq_ring_free(&aq_ptp->ptp_rx);
err_exit_ptp_tx:
	aq_ring_free(&aq_ptp->ptp_tx);
err_exit:
	return err;
}

void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_ring_free(&aq_ptp->ptp_tx);
	aq_ring_free(&aq_ptp->ptp_rx);
	aq_ring_free(&aq_ptp->hwts_rx);

	aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
}

#define MAX_PTP_GPIO_COUNT 4

static struct ptp_clock_info aq_ptp_clock = {
	.owner		= THIS_MODULE,
	.name		= "atlantic ptp",
	.max_adj	= 999999999,
	.n_ext_ts	= 0,
	.pps		= 0,
	.adjfine	= aq_ptp_adjfine,
	.adjtime	= aq_ptp_adjtime,
	.gettime64	= aq_ptp_gettime,
	.settime64	= aq_ptp_settime,
	.n_per_out	= 0,
	.enable		= aq_ptp_gpio_feature_enable,
	.n_pins		= 0,
	.verify		= aq_ptp_verify,
	.pin_config	= NULL,
};
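
/* n_ext_ts, n_per_out and n_pins above intentionally start at zero;
 * they are filled in per device by aq_ptp_gpio_init() from the GPIO
 * and PHY capabilities reported in the firmware mailbox.
 */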

#define ptp_offset_init(__idx, __mbps, __egress, __ingress) do { \
		ptp_offset[__idx].mbps = (__mbps); \
		ptp_offset[__idx].egress = (__egress); \
		ptp_offset[__idx].ingress = (__ingress); } \
		while (0)

static void aq_ptp_offset_init_from_fw(const struct hw_atl_ptp_offset *offsets)
{
	int i;

	/* Load offsets for PTP */
	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
		switch (i) {
		/* 100M */
		case ptp_offset_idx_100:
			ptp_offset_init(i, 100,
					offsets->egress_100,
					offsets->ingress_100);
			break;
		/* 1G */
		case ptp_offset_idx_1000:
			ptp_offset_init(i, 1000,
					offsets->egress_1000,
					offsets->ingress_1000);
			break;
		/* 2.5G */
		case ptp_offset_idx_2500:
			ptp_offset_init(i, 2500,
					offsets->egress_2500,
					offsets->ingress_2500);
			break;
		/* 5G */
		case ptp_offset_idx_5000:
			ptp_offset_init(i, 5000,
					offsets->egress_5000,
					offsets->ingress_5000);
			break;
		/* 10G */
		case ptp_offset_idx_10000:
			ptp_offset_init(i, 10000,
					offsets->egress_10000,
					offsets->ingress_10000);
			break;
		}
	}
}

static void aq_ptp_offset_init(const struct hw_atl_ptp_offset *offsets)
{
	memset(ptp_offset, 0, sizeof(ptp_offset));

	aq_ptp_offset_init_from_fw(offsets);
}

static void aq_ptp_gpio_init(struct ptp_clock_info *info,
			     struct hw_atl_info *hw_info)
{
	struct ptp_pin_desc pin_desc[MAX_PTP_GPIO_COUNT];
	u32 extts_pin_cnt = 0;
	u32 out_pin_cnt = 0;
	u32 i;

	memset(pin_desc, 0, sizeof(pin_desc));

	for (i = 0; i < MAX_PTP_GPIO_COUNT - 1; i++) {
		if (hw_info->gpio_pin[i] ==
		    (GPIO_PIN_FUNCTION_PTP0 + out_pin_cnt)) {
			snprintf(pin_desc[out_pin_cnt].name,
				 sizeof(pin_desc[out_pin_cnt].name),
				 "AQ_GPIO%d", i);
			pin_desc[out_pin_cnt].index = out_pin_cnt;
			pin_desc[out_pin_cnt].chan = out_pin_cnt;
			pin_desc[out_pin_cnt++].func = PTP_PF_PEROUT;
		}
	}

	info->n_per_out = out_pin_cnt;

	if (hw_info->caps_ex & BIT(CAPS_EX_PHY_CTRL_TS_PIN)) {
		extts_pin_cnt += 1;

		snprintf(pin_desc[out_pin_cnt].name,
			 sizeof(pin_desc[out_pin_cnt].name),
			 "AQ_GPIO%d", out_pin_cnt);
		pin_desc[out_pin_cnt].index = out_pin_cnt;
		pin_desc[out_pin_cnt].chan = 0;
		pin_desc[out_pin_cnt].func = PTP_PF_EXTTS;
	}

	info->n_pins = out_pin_cnt + extts_pin_cnt;
	info->n_ext_ts = extts_pin_cnt;

	if (!info->n_pins)
		return;

	info->pin_config = kcalloc(info->n_pins, sizeof(struct ptp_pin_desc),
				   GFP_KERNEL);

	if (!info->pin_config)
		return;

	memcpy(info->pin_config, &pin_desc,
	       sizeof(struct ptp_pin_desc) * info->n_pins);
}

void aq_ptp_clock_init(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);
	aq_ptp_settime(&aq_ptp->ptp_info, &ts);
}

static void aq_ptp_poll_sync_work_cb(struct work_struct *w);
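
/* aq_ptp_init() creates the PTP instance only when the hardware ops
 * and firmware expose PTP support and the CAPS_EX_PHY_PTP_EN firmware
 * capability bit is set.  It then registers the POSIX clock, sets up
 * the dedicated NAPI context and interrupt vector, enables the PTP
 * counter in firmware and seeds the hardware clock with the current
 * system time.
 */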

int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
{
	struct hw_atl_utils_mbox mbox;
	struct ptp_clock *clock;
	struct aq_ptp_s *aq_ptp;
	int err = 0;

	if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	if (!aq_nic->aq_fw_ops->enable_ptp) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	hw_atl_utils_mpi_read_stats(aq_nic->aq_hw, &mbox);

	if (!(mbox.info.caps_ex & BIT(CAPS_EX_PHY_PTP_EN))) {
		aq_nic->aq_ptp = NULL;
		return 0;
	}

	aq_ptp_offset_init(&mbox.info.ptp_offset);

	aq_ptp = kzalloc(sizeof(*aq_ptp), GFP_KERNEL);
	if (!aq_ptp) {
		err = -ENOMEM;
		goto err_exit;
	}

	aq_ptp->aq_nic = aq_nic;

	spin_lock_init(&aq_ptp->ptp_lock);
	spin_lock_init(&aq_ptp->ptp_ring_lock);

	aq_ptp->ptp_info = aq_ptp_clock;
	aq_ptp_gpio_init(&aq_ptp->ptp_info, &mbox.info);
	clock = ptp_clock_register(&aq_ptp->ptp_info, &aq_nic->ndev->dev);
	if (IS_ERR(clock)) {
		netdev_err(aq_nic->ndev, "ptp_clock_register failed\n");
		err = PTR_ERR(clock);
		goto err_exit;
	}
	aq_ptp->ptp_clock = clock;
	aq_ptp_tx_timeout_init(&aq_ptp->ptp_tx_timeout);

	atomic_set(&aq_ptp->offset_egress, 0);
	atomic_set(&aq_ptp->offset_ingress, 0);

	netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
		       aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);

	aq_ptp->idx_vector = idx_vec;

	aq_nic->aq_ptp = aq_ptp;

	/* enable ptp counter */
	aq_utils_obj_set(&aq_nic->aq_hw->flags, AQ_HW_PTP_AVAILABLE);
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 1);
	aq_ptp_clock_init(aq_nic);
	mutex_unlock(&aq_nic->fwreq_mutex);

	INIT_DELAYED_WORK(&aq_ptp->poll_sync, &aq_ptp_poll_sync_work_cb);
	aq_ptp->eth_type_filter.location =
			aq_nic_reserve_filter(aq_nic, aq_rx_filter_ethertype);
	aq_ptp->udp_filter.location =
			aq_nic_reserve_filter(aq_nic, aq_rx_filter_l3l4);

	return 0;

err_exit:
	if (aq_ptp)
		kfree(aq_ptp->ptp_info.pin_config);
	kfree(aq_ptp);
	aq_nic->aq_ptp = NULL;
	return err;
}

void aq_ptp_unregister(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	ptp_clock_unregister(aq_ptp->ptp_clock);
}

void aq_ptp_free(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_nic_release_filter(aq_nic, aq_rx_filter_ethertype,
			      aq_ptp->eth_type_filter.location);
	aq_nic_release_filter(aq_nic, aq_rx_filter_l3l4,
			      aq_ptp->udp_filter.location);
	cancel_delayed_work_sync(&aq_ptp->poll_sync);
	/* disable ptp */
	mutex_lock(&aq_nic->fwreq_mutex);
	aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 0);
	mutex_unlock(&aq_nic->fwreq_mutex);

	kfree(aq_ptp->ptp_info.pin_config);

	netif_napi_del(&aq_ptp->napi);
	kfree(aq_ptp);
	aq_nic->aq_ptp = NULL;
}

struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
{
	return aq_ptp->ptp_clock;
}

/* PTP external GPIO nanoseconds count */
static uint64_t aq_ptp_get_sync1588_ts(struct aq_nic_s *aq_nic)
{
	u64 ts = 0;

	if (aq_nic->aq_hw_ops->hw_get_sync_ts)
		aq_nic->aq_hw_ops->hw_get_sync_ts(aq_nic->aq_hw, &ts);

	return ts;
}

static void aq_ptp_start_work(struct aq_ptp_s *aq_ptp)
{
	if (aq_ptp->extts_pin_enabled) {
		aq_ptp->poll_timeout_ms = POLL_SYNC_TIMER_MS;
		aq_ptp->last_sync1588_ts =
			aq_ptp_get_sync1588_ts(aq_ptp->aq_nic);
		schedule_delayed_work(&aq_ptp->poll_sync,
				      msecs_to_jiffies(aq_ptp->poll_timeout_ms));
	}
}

int aq_ptp_link_change(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return 0;

	if (aq_nic->aq_hw->aq_link_status.mbps)
		aq_ptp_start_work(aq_ptp);
	else
		cancel_delayed_work_sync(&aq_ptp->poll_sync);

	return 0;
}
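
/* The SYNC1588 external timestamp pin is serviced by polling rather
 * than by an interrupt: aq_ptp_poll_sync_work_cb() re-reads the latched
 * GPIO timestamp every poll_timeout_ms and, when the value changes,
 * aq_ptp_check_sync1588() converts it to PTP clock time and reports a
 * PTP_CLOCK_EXTTS event to user space.
 */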

static bool aq_ptp_sync_ts_updated(struct aq_ptp_s *aq_ptp, u64 *new_ts)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 sync_ts2;
	u64 sync_ts;

	sync_ts = aq_ptp_get_sync1588_ts(aq_nic);

	if (sync_ts != aq_ptp->last_sync1588_ts) {
		sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
		if (sync_ts != sync_ts2) {
			sync_ts = sync_ts2;
			sync_ts2 = aq_ptp_get_sync1588_ts(aq_nic);
			if (sync_ts != sync_ts2) {
				netdev_err(aq_nic->ndev,
					   "%s: Unable to get correct GPIO TS",
					   __func__);
				sync_ts = 0;
			}
		}

		*new_ts = sync_ts;
		return true;
	}
	return false;
}

static int aq_ptp_check_sync1588(struct aq_ptp_s *aq_ptp)
{
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	u64 sync_ts;

	/* Sync1588 pin was triggered */
	if (aq_ptp_sync_ts_updated(aq_ptp, &sync_ts)) {
		if (aq_ptp->extts_pin_enabled) {
			struct ptp_clock_event ptp_event;
			u64 time = 0;

			aq_nic->aq_hw_ops->hw_ts_to_sys_clock(aq_nic->aq_hw,
							      sync_ts, &time);
			ptp_event.index = aq_ptp->ptp_info.n_pins - 1;
			ptp_event.timestamp = time;

			ptp_event.type = PTP_CLOCK_EXTTS;
			ptp_clock_event(aq_ptp->ptp_clock, &ptp_event);
		}

		aq_ptp->last_sync1588_ts = sync_ts;
	}

	return 0;
}

static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	struct aq_ptp_s *aq_ptp = container_of(dw, struct aq_ptp_s, poll_sync);

	aq_ptp_check_sync1588(aq_ptp);

	if (aq_ptp->extts_pin_enabled) {
		unsigned long timeout = msecs_to_jiffies(aq_ptp->poll_timeout_ms);

		schedule_delayed_work(&aq_ptp->poll_sync, timeout);
	}
}