// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"

#define E810_OUT_PROP_DELAY_NS 1

/**
 * ice_set_tx_tstamp - Enable or disable Tx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u32 val;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp enable flag for all the Tx rings */
	ice_for_each_txq(vsi, i) {
		if (!vsi->tx_rings[i])
			continue;
		vsi->tx_rings[i]->ptp_tx = on;
	}

	/* Configure the Tx timestamp interrupt */
	val = rd32(&pf->hw, PFINT_OICR_ENA);
	if (on)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(&pf->hw, PFINT_OICR_ENA, val);
}

/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp flag for all the Rx rings */
	ice_for_each_rxq(vsi, i) {
		if (!vsi->rx_rings[i])
			continue;
		vsi->rx_rings[i]->ptp_rx = on;
	}
}

/**
 * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
 * @pf: Board private structure
 * @ena: bool value to enable or disable time stamp
 *
 * This function will configure timestamping during PTP initialization
 * and deinitialization
 */
static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
	ice_set_tx_tstamp(pf, ena);
	ice_set_rx_tstamp(pf, ena);

	if (ena) {
		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
	} else {
		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
	}
}

/**
 * ice_get_ptp_clock_index - Get the PTP clock index
 * @pf: the PF pointer
 *
 * Determine the clock index of the PTP clock associated with this device. If
 * this is the PF controlling the clock, just use the local access to the
 * clock device pointer.
 *
 * Otherwise, read from the driver shared parameters to determine the clock
 * index value.
 *
 * Returns: the index of the PTP clock associated with this device, or -1 if
 * there is no associated clock.
 */
int ice_get_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	/* Use the ptp_clock structure if we're the main PF */
	if (pf->ptp.clock)
		return ptp_clock_index(pf->ptp.clock);

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
	if (err) {
		dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		return -1;
	}

	/* The PTP clock index is an integer, and will be between 0 and
	 * INT_MAX. The highest bit of the driver shared parameter is used to
	 * indicate whether or not the currently stored clock index is valid.
	 */
	if (!(value & PTP_SHARED_CLK_IDX_VALID))
		return -1;

	return value & ~PTP_SHARED_CLK_IDX_VALID;
}
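
/* Illustration (not part of the driver): how the shared clock index encoding
 * above behaves, assuming PTP_SHARED_CLK_IDX_VALID is the top bit (BIT(31))
 * of the 32-bit driver shared parameter:
 *
 *	value = 0x80000003;			stored by the owning PF
 *	value & PTP_SHARED_CLK_IDX_VALID	-> nonzero, index is valid
 *	value & ~PTP_SHARED_CLK_IDX_VALID	-> 3, the ptp_clock_index()
 *
 *	value = 0x00000003;			valid bit clear
 *	ice_get_ptp_clock_index()		-> returns -1
 */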

/**
 * ice_set_ptp_clock_index - Set the PTP clock index
 * @pf: the PF pointer
 *
 * Set the PTP clock index for this device into the shared driver parameters,
 * so that other PFs associated with this device can read it.
 *
 * If the PF is unable to store the clock index, it will log an error, but
 * will continue operating PTP.
 */
static void ice_set_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	if (!pf->ptp.clock)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	value = (u32)ptp_clock_index(pf->ptp.clock);
	if (value > INT_MAX) {
		dev_err(dev, "PTP Clock index is too large to store\n");
		return;
	}
	value |= PTP_SHARED_CLK_IDX_VALID;

	err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
	if (err) {
		dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}

/**
 * ice_clear_ptp_clock_index - Clear the PTP clock index
 * @pf: the PF pointer
 *
 * Clear the PTP clock index for this device. Must be called when
 * unregistering the PTP clock, in order to ensure other PFs stop reporting
 * a clock object that no longer exists.
 */
static void ice_clear_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	int err;

	/* Do not clear the index if we don't own the timer */
	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
	if (err) {
		dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}

/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}

/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old. It must also be called whenever the PHC
 * time has been changed.
 */
static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	u64 systime;
	int i;

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
}
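
/* Worked example (illustration only) of the rollover handling in
 * ice_ptp_read_src_clk_reg() above. Suppose the PHC is just about to carry
 * into the upper 32 bits, and the three reads observe:
 *
 *	lo  = 0xFFFFFFF8	first read of GLTSYN_TIME_L
 *	hi  = 0x00000005	read of GLTSYN_TIME_H
 *	lo2 = 0x00000002	second read of GLTSYN_TIME_L
 *
 * Since lo2 < lo, TIME_L wrapped between the two reads and hi may not match
 * the first lo, so lo and hi are re-read. The pair taken after the rollover
 * yields a consistent ((u64)hi << 32) | lo.
 */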

/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 * Note that the captured timestamp values may be 40 bits, but the lower
 * 8 bits are sub-nanoseconds and generally discarded.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
 *    timestamp was captured *before* the PHC time, i.e. because the PHC
 *    cache was updated after the timestamp was captured by hardware. In this
 *    case, the full timestamp is the cached time minus the inverse delta.
 *
 * This algorithm works even if the PHC time was updated after a Tx timestamp
 * was requested, but before the Tx timestamp event was reported from
 * hardware.
 *
 * This calculation primarily relies on keeping the cached PHC time up to
 * date. If the timestamp was captured more than 2^31 nanoseconds after the
 * PHC time, it is possible that the lower 32bits of PHC time have
 * overflowed more than once, and we might generate an incorrect timestamp.
 *
 * This is prevented by (a) periodically updating the cached PHC time once
 * a second, and (b) discarding any Tx timestamp packet if it has waited for
 * a timestamp for more than one second.
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 delta, phc_time_lo;
	u64 ns;

	/* Extract the lower 32 bits of the PHC time */
	phc_time_lo = (u32)cached_phc_time;

	/* Calculate the delta between the lower 32bits of the cached PHC
	 * time and the in_tstamp value
	 */
	delta = (in_tstamp - phc_time_lo);

	/* Do not assume that the in_tstamp is always more recent than the
	 * cached PHC time. If the delta is large, it indicates that the
	 * in_tstamp was taken in the past, and should be converted
	 * forward.
	 */
	if (delta > (U32_MAX / 2)) {
		/* reverse the delta calculation here */
		delta = (phc_time_lo - in_tstamp);
		ns = cached_phc_time - delta;
	} else {
		ns = cached_phc_time + delta;
	}

	return ns;
}

/**
 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
 * @pf: Board private structure
 * @in_tstamp: Ingress/egress 40b timestamp value
 *
 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
 *
 *  *--------------------------------------------------------------*
 *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
 *  *--------------------------------------------------------------*
 *
 * The low bit is an indicator of whether the timestamp is valid. The next
 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
 *
 * It is assumed that the caller verifies the timestamp is valid prior to
 * calling this function.
 *
 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
 * time stored in the device private PTP structure as the basis for timestamp
 * extension.
 *
 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
 * algorithm.
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);

	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}
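
/* Worked example (illustration only) for ice_ptp_extend_32b_ts(). Assume
 * cached_phc_time = 0xA80000000 and a captured in_tstamp of 0x7FFFFFF0:
 *
 *	phc_time_lo = 0x80000000
 *	delta = 0x7FFFFFF0 - 0x80000000 = 0xFFFFFFF0 (u32 wraparound)
 *
 * delta is larger than U32_MAX / 2, so the timestamp is treated as captured
 * before the cached time:
 *
 *	delta = 0x80000000 - 0x7FFFFFF0 = 0x10
 *	ns = 0xA80000000 - 0x10 = 0xA7FFFFFF0
 *
 * Had in_tstamp been 0x80000010 instead, delta would be 0x10 and the result
 * would be cached_phc_time + 0x10.
 */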

/**
 * ice_ptp_read_time - Read the time from the device
 * @pf: Board private structure
 * @ts: timespec structure to hold the current time value
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * This function reads the source clock registers and stores them in a timespec.
 * However, since the registers are 64 bits of nanoseconds, we must convert the
 * result to a timespec before we can return.
 */
static void
ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
		  struct ptp_system_timestamp *sts)
{
	u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);

	*ts = ns_to_timespec64(time_ns);
}

/**
 * ice_ptp_write_init - Set PHC time to provided value
 * @pf: Board private structure
 * @ts: timespec structure that holds the new time value
 *
 * Set the PHC time to the specified time provided in the timespec.
 */
static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
{
	u64 ns = timespec64_to_ns(ts);
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_init_time(hw, ns);
}

/**
 * ice_ptp_write_adj - Adjust PHC clock time atomically
 * @pf: Board private structure
 * @adj: Adjustment in nanoseconds
 *
 * Perform an atomic adjustment of the PHC time by the specified number of
 * nanoseconds.
 */
static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
{
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_adj_clock(hw, adj);
}

/**
 * ice_ptp_adjfine - Adjust clock increment rate
 * @info: the driver's PTP info structure
 * @scaled_ppm: Parts per million with 16-bit fractional field
 *
 * Adjust the frequency of the clock by the indicated scaled ppm from the
 * base frequency.
 */
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	u64 freq, divisor = 1000000ULL;
	struct ice_hw *hw = &pf->hw;
	s64 incval, diff;
	int neg_adj = 0;
	int err;

	incval = ICE_PTP_NOMINAL_INCVAL_E810;

	if (scaled_ppm < 0) {
		neg_adj = 1;
		scaled_ppm = -scaled_ppm;
	}

	while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) {
		/* handle overflow by scaling down the scaled_ppm and
		 * the divisor, losing some precision
		 */
		scaled_ppm >>= 2;
		divisor >>= 2;
	}

	freq = (incval * (u64)scaled_ppm) >> 16;
	diff = div_u64(freq, divisor);

	if (neg_adj)
		incval -= diff;
	else
		incval += diff;

	err = ice_ptp_write_incval_locked(hw, incval);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
			err);
		return -EIO;
	}

	return 0;
}
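
/* Worked example (illustration only) for ice_ptp_adjfine(). To speed the
 * clock up by 10 ppm, the PTP core passes scaled_ppm = 10 * 2^16 = 655360:
 *
 *	freq = (incval * 655360) >> 16 = incval * 10
 *	diff = div_u64(freq, 1000000) = incval * 10 / 10^6
 *
 * diff is therefore exactly 10 ppm of the nominal increment value, and the
 * programmed incval becomes incval * (1 + 10 * 10^-6). A negative
 * scaled_ppm takes the neg_adj path and subtracts the same magnitude.
 */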

/**
 * ice_ptp_extts_work - Workqueue task function
 * @work: external timestamp work structure
 *
 * Service for PTP external clock event
 */
static void ice_ptp_extts_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	struct ptp_clock_event event;
	struct ice_hw *hw = &pf->hw;
	u8 chan, tmr_idx;
	u32 hi, lo;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	/* Event time is captured by one of the two matched registers
	 *   GLTSYN_EVNT_L: 32 LSB of sampled time event
	 *   GLTSYN_EVNT_H: 32 MSB of sampled time event
	 * Event is defined in GLTSYN_EVNT_0 register
	 */
	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
		/* Check if channel is enabled */
		if (pf->ptp.ext_ts_irq & (1 << chan)) {
			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
			event.timestamp = (((u64)hi) << 32) | lo;
			event.type = PTP_CLOCK_EXTTS;
			event.index = chan;

			/* Fire event */
			ptp_clock_event(pf->ptp.clock, &event);
			pf->ptp.ext_ts_irq &= ~(1 << chan);
		}
	}
}

/**
 * ice_ptp_cfg_extts - Configure EXTTS pin and channel
 * @pf: Board private structure
 * @ena: true to enable; false to disable
 * @chan: GPIO channel (0-3)
 * @gpio_pin: GPIO pin
 * @extts_flags: request flags from the ptp_extts_request.flags
 */
static int
ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
		  unsigned int extts_flags)
{
	u32 func, aux_reg, gpio_reg, irq_reg;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;

	if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
		return -EINVAL;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	irq_reg = rd32(hw, PFINT_OICR_ENA);

	if (ena) {
		/* Enable the interrupt */
		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;

#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)

		/* set event level to requested edge */
		if (extts_flags & PTP_FALLING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
		if (extts_flags & PTP_RISING_EDGE)
			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;

		/* Write GPIO CTL reg.
		 * 0x1 is input sampled by EVENT register(channel)
		 * + num_in_channels * tmr_idx
		 */
		func = 1 + chan + (tmr_idx * 3);
		gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
			    GLGEN_GPIO_CTL_PIN_FUNC_M);
		pf->ptp.ext_ts_chan |= (1 << chan);
	} else {
		/* clear the values we set to reset defaults */
		aux_reg = 0;
		gpio_reg = 0;
		pf->ptp.ext_ts_chan &= ~(1 << chan);
		if (!pf->ptp.ext_ts_chan)
			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
	}

	wr32(hw, PFINT_OICR_ENA, irq_reg);
	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);

	return 0;
}
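
/* User space sketch (not driver code) of an external timestamp request that
 * ends up in ice_ptp_cfg_extts() above. The request structure and flags come
 * from <linux/ptp_clock.h>; channel 0 and ptp_fd are placeholders:
 *
 *	struct ptp_extts_request req = {
 *		.index = 0,
 *		.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE,
 *	};
 *
 *	ioctl(ptp_fd, PTP_EXTTS_REQUEST, &req);
 *
 * Each captured edge is then reported as a PTP_CLOCK_EXTTS event, which this
 * driver raises from ice_ptp_extts_work().
 */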

/**
 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @chan: GPIO channel (0-3)
 * @config: desired periodic clk configuration. NULL will disable channel
 * @store: If set to true the values will be stored
 *
 * Configure the internal clock generator modules to generate the clock wave of
 * specified period.
 */
static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
			      struct ice_perout_channel *config, bool store)
{
	u64 current_time, period, start_time, phase;
	struct ice_hw *hw = &pf->hw;
	u32 func, val, gpio_pin;
	u8 tmr_idx;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	/* If we're disabling the output, clear out CLKO and TGT and keep
	 * output level low
	 */
	if (!config || !config->ena) {
		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);

		val = GLGEN_GPIO_CTL_PIN_DIR_M;
		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

		/* Store the value if requested */
		if (store)
			memset(&pf->ptp.perout_channels[chan], 0,
			       sizeof(struct ice_perout_channel));

		return 0;
	}
	period = config->period;
	start_time = config->start_time;
	div64_u64_rem(start_time, period, &phase);
	gpio_pin = config->gpio_pin;

	/* 1. Write clkout with half of required period value */
	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		goto err;
	}

	period >>= 1;

	/* For proper operation, the GLTSYN_CLKO must be larger than clock tick
	 */
#define MIN_PULSE 3
	if (period <= MIN_PULSE || period > U32_MAX) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
			MIN_PULSE * 2);
		goto err;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* Allow time for programming before start_time is hit */
	current_time = ice_ptp_read_src_clk_reg(pf, NULL);

	/* if start time is in the past start the timer at the nearest second
	 * maintaining phase
	 */
	if (start_time < current_time)
		start_time = div64_u64(current_time + NSEC_PER_MSEC - 1,
				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;

	start_time -= E810_OUT_PROP_DELAY_NS;

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));

	/* 3. Write AUX_OUT register */
	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	func = 8 + chan + (tmr_idx * 4);
	val = GLGEN_GPIO_CTL_PIN_DIR_M |
	      ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

	/* Store the value if requested */
	if (store) {
		memcpy(&pf->ptp.perout_channels[chan], config,
		       sizeof(struct ice_perout_channel));
		pf->ptp.perout_channels[chan].start_time = phase;
	}

	return 0;
err:
	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
	return -EFAULT;
}
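
/* Worked example (illustration only) for ice_ptp_cfg_clkout(). A 1 Hz output
 * requested with period = 1000000000 ns and start_time = 1500000250 ns while
 * the PHC currently reads 5000000000 ns:
 *
 *	phase = start_time % period = 500000250 ns
 *	period / 2 = 500000000 ns is written to GLTSYN_CLKO
 *
 * Because the requested start_time is already in the past, it is moved to
 * the current whole second plus the same phase (5500000250 ns here), then
 * pulled in by E810_OUT_PROP_DELAY_NS before being written to GLTSYN_TGT_L/H.
 */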

/**
 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
 * @info: the driver's PTP info structure
 * @rq: The requested feature to change
 * @on: Enable/disable flag
 */
static int
ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
			 struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_perout_channel clk_cfg = {0};
	unsigned int chan;
	u32 gpio_pin;
	int err;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		chan = rq->perout.index;
		if (chan == PPS_CLK_GEN_CHAN)
			clk_cfg.gpio_pin = PPS_PIN_INDEX;
		else
			clk_cfg.gpio_pin = chan;

		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
				   rq->perout.period.nsec);
		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
				       rq->perout.start.nsec);
		clk_cfg.ena = !!on;

		err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
		break;
	case PTP_CLK_REQ_EXTTS:
		chan = rq->extts.index;
		gpio_pin = chan;

		err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
					rq->extts.flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

/**
 * ice_ptp_gettimex64 - Get the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure to hold the current time value
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Read the device clock and return the correct value in ns, after converting it
 * into a timespec struct.
 */
static int
ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
		   struct ptp_system_timestamp *sts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;

	if (!ice_ptp_lock(hw)) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
		return -EBUSY;
	}

	ice_ptp_read_time(pf, ts, sts);
	ice_ptp_unlock(hw);

	return 0;
}

/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto exit;
	}

	err = ice_ptp_write_init(pf, &ts64);
	ice_ptp_unlock(hw);

	if (!err)
		ice_ptp_update_cached_phctime(pf);

exit:
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
		return err;
	}

	return 0;
}

/**
 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
	struct timespec64 now, then;

	then = ns_to_timespec64(delta);
	ice_ptp_gettimex64(info, &now, NULL);
	now = timespec64_add(now, then);

	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
}

/**
 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* Hardware only supports atomic adjustments using signed 32-bit
	 * integers. For any adjustment outside this range, perform
	 * a non-atomic get->adjust->set flow.
	 */
	if (delta > S32_MAX || delta < S32_MIN) {
		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
		return ice_ptp_adjtime_nonatomic(info, delta);
	}

	if (!ice_ptp_lock(hw)) {
		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
		return -EBUSY;
	}

	err = ice_ptp_write_adj(pf, delta);

	ice_ptp_unlock(hw);

	if (err) {
		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
		return err;
	}

	ice_ptp_update_cached_phctime(pf);

	return 0;
}

/**
 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Copy the timestamping config to user buffer
 */
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config *config;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EIO;

	config = &pf->ptp.tstamp_config;

	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
		-EFAULT : 0;
}

/**
 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
 * @pf: Board private structure
 * @config: hwtstamp settings requested or saved
 */
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
	/* Reserved for future extensions. */
	if (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		ice_set_tx_tstamp(pf, false);
		break;
	case HWTSTAMP_TX_ON:
		ice_set_tx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		ice_set_rx_tstamp(pf, false);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		ice_set_rx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	return 0;
}

/**
 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Get the user config and store it
 */
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EAGAIN;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = ice_ptp_set_timestamp_mode(pf, &config);
	if (err)
		return err;

	/* Save these settings for future reference */
	pf->ptp.tstamp_config = config;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
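
/* User space sketch (not driver code) of how the ioctl path above is
 * typically exercised. The structure and constants come from
 * <linux/net_tstamp.h> and <linux/sockios.h>; "eth0" and sock_fd are
 * placeholders:
 *
 *	struct hwtstamp_config cfg = {
 *		.flags = 0,
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return cfg.rx_filter reads back as HWTSTAMP_FILTER_ALL, since the
 * hardware timestamps all ingress packets rather than filtering by PTP
 * message type.
 */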

/**
 * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
 * @rx_ring: Ring to get the VSI info
 * @rx_desc: Receive descriptor
 * @skb: Particular skb to send timestamp with
 *
 * The driver receives a notification in the receive descriptor with timestamp.
 * The timestamp is in ns, so we must convert the result first.
 */
void
ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
		    union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
	u32 ts_high;
	u64 ts_ns;

	/* Populate timesync data into skb */
	if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
		struct skb_shared_hwtstamps *hwtstamps;

		/* Use ice_ptp_extend_32b_ts directly, using the ring-specific
		 * cached PHC value, rather than accessing the PF. This also
		 * allows us to simply pass the upper 32bits of nanoseconds
		 * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
		 * it would just discard these bits itself.
		 */
		ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
		ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);

		hwtstamps = skb_hwtstamps(skb);
		memset(hwtstamps, 0, sizeof(*hwtstamps));
		hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
	}
}

/**
 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
 * @info: PTP clock capabilities
 */
static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
{
	info->n_per_out = E810_N_PER_OUT;
	info->n_ext_ts = E810_N_EXT_TS;
}

/**
 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E810 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for e810
 * devices.
 */
static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->enable = ice_ptp_gpio_enable_e810;

	ice_ptp_setup_pins_e810(info);
}

/**
 * ice_ptp_set_caps - Set PTP capabilities
 * @pf: Board private structure
 */
static void ice_ptp_set_caps(struct ice_pf *pf)
{
	struct ptp_clock_info *info = &pf->ptp.info;
	struct device *dev = ice_pf_to_dev(pf);

	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
		 dev_driver_string(dev), dev_name(dev));
	info->owner = THIS_MODULE;
	info->max_adj = 999999999;
	info->adjtime = ice_ptp_adjtime;
	info->adjfine = ice_ptp_adjfine;
	info->gettimex64 = ice_ptp_gettimex64;
	info->settime64 = ice_ptp_settime64;

	ice_ptp_set_funcs_e810(pf, info);
}

/**
 * ice_ptp_create_clock - Create PTP clock device for userspace
 * @pf: Board private structure
 *
 * This function creates a new PTP clock device. It only creates one if we
 * don't already have one. Will return error if it can't create one, but success
 * if we already have a device. Should be used by ice_ptp_init to create clock
 * initially, and prevent global resets from creating new clock devices.
 */
static long ice_ptp_create_clock(struct ice_pf *pf)
{
	struct ptp_clock_info *info;
	struct ptp_clock *clock;
	struct device *dev;

	/* No need to create a clock device if we already have one */
	if (pf->ptp.clock)
		return 0;

	ice_ptp_set_caps(pf);

	info = &pf->ptp.info;
	dev = ice_pf_to_dev(pf);

	/* Allocate memory for kernel pins interface */
	if (info->n_pins) {
		info->pin_config = devm_kcalloc(dev, info->n_pins,
						sizeof(*info->pin_config),
						GFP_KERNEL);
		if (!info->pin_config) {
			info->n_pins = 0;
			return -ENOMEM;
		}
	}

	/* Attempt to register the clock before enabling the hardware. */
	clock = ptp_clock_register(info, dev);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	pf->ptp.clock = clock;

	return 0;
}
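
/* Once registered, the clock appears to user space as /dev/ptp<N>, where N
 * matches the value reported by ice_get_ptp_clock_index(). A rough user
 * space sketch (not driver code), using the standard dynamic posix clock
 * convention to derive a clockid from the file descriptor:
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	clockid_t clkid = ((~(clockid_t)fd) << 3) | 3;
 *	struct timespec ts;
 *
 *	clock_gettime(clkid, &ts);
 *
 * Tools such as ptp4l and phc2sys use the same device node to discipline
 * and read this PHC.
 */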

/**
 * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
 * @work: pointer to the kthread_work struct
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) copy the timestamp out of the PHY register
 * 2) clear the timestamp valid bit in the PHY register
 * 3) unlock the index by clearing the associated in_use bit
 * 4) extend the 40b timestamp value to get a 64bit timestamp
 * 5) send that timestamp to the stack
 *
 * After looping, if we still have waiting SKBs, then re-queue the work. This
 * may cause us to effectively poll even when not strictly necessary. We do
 * this because it's possible a new timestamp was requested around the same
 * time as the interrupt. In some cases hardware might not interrupt us again
 * when the timestamp is captured.
 *
 * Note that we only take the tracking lock when clearing the bit and when
 * checking if we need to re-queue this task. The only place where bits can be
 * set is the hard xmit routine where an SKB has a request flag set. The only
 * places where we clear bits are this work function, or the periodic cleanup
 * thread. If the cleanup thread clears a bit we're processing we catch it
 * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
 * starts a new timestamp, we might not begin processing it right away but we
 * will notice it at the end when we re-queue the work item. If a Tx thread
 * starts a new timestamp just after this function exits without re-queuing,
 * the interrupt when the timestamp finishes should trigger. Avoiding holding
 * the lock for the entire function is important in order to ensure that Tx
 * threads do not get blocked while waiting for the lock.
 */
static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
{
	struct ice_ptp_port *ptp_port;
	struct ice_ptp_tx *tx;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u8 idx;

	tx = container_of(work, struct ice_ptp_tx, work);
	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->quad_offset;
		u64 raw_tstamp, tstamp;
		struct sk_buff *skb;
		int err;

		err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
					  &raw_tstamp);
		if (err)
			continue;

		/* Check if the timestamp is valid */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			continue;

		/* clear the timestamp register, so that it won't show valid
		 * again when re-used.
		 */
		ice_clear_phy_tstamp(hw, tx->quad, phy_idx);

		/* The timestamp is valid, so we'll go ahead and clear this
		 * index and then send the timestamp up to the stack.
		 */
		spin_lock(&tx->lock);
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		spin_unlock(&tx->lock);

		/* it's (unlikely but) possible we raced with the cleanup
		 * thread for discarding old timestamp requests.
		 */
		if (!skb)
			continue;

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}

	/* Check if we still have work to do. If so, re-queue this task to
	 * poll for remaining timestamps.
	 */
	spin_lock(&tx->lock);
	if (!bitmap_empty(tx->in_use, tx->len))
		kthread_queue_work(pf->ptp.kworker, &tx->work);
	spin_unlock(&tx->lock);
}

/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	u8 idx;

	/* Check if this tracker is initialized */
	if (!tx->init)
		return -1;

	spin_lock(&tx->lock);
	/* Find and set the first available index */
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests.
		 */
		set_bit(idx, tx->in_use);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	}

	spin_unlock(&tx->lock);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->quad_offset;
}

/**
 * ice_ptp_process_ts - Spawn kthread work to handle timestamps
 * @pf: Board private structure
 *
 * Queue work required to process the PTP Tx timestamps outside of interrupt
 * context.
 */
void ice_ptp_process_ts(struct ice_pf *pf)
{
	if (pf->ptp.port.tx.init)
		kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
}
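
/* Hypothetical usage sketch (not the actual transmit path of this driver):
 * a hard xmit routine would reserve a slot for an skb that has requested a
 * hardware timestamp and carry the returned PHY index so the completed
 * timestamp can later be matched by ice_ptp_tx_tstamp_work():
 *
 *	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
 *		s8 idx = ice_ptp_request_ts(&pf->ptp.port.tx, skb);
 *
 *		if (idx >= 0)
 *			tstamp_idx = idx;	// hypothetical descriptor field
 *		// idx < 0 means no free slot; no timestamp will be reported
 *	}
 */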

/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
	if (!tx->tstamps)
		return -ENOMEM;

	tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	if (!tx->in_use) {
		kfree(tx->tstamps);
		tx->tstamps = NULL;
		return -ENOMEM;
	}

	spin_lock_init(&tx->lock);
	kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);

	tx->init = 1;

	return 0;
}

/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	u8 idx;

	for (idx = 0; idx < tx->len; idx++) {
		u8 phy_idx = idx + tx->quad_offset;

		/* Clear any potential residual timestamp in the PHY block */
		if (!pf->hw.reset_ongoing)
			ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);

		if (tx->tstamps[idx].skb) {
			dev_kfree_skb_any(tx->tstamps[idx].skb);
			tx->tstamps[idx].skb = NULL;
		}
	}
}

/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->init = 0;

	kthread_cancel_work_sync(&tx->work);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	tx->len = 0;
}

/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 */
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->quad = pf->hw.port_info->lport;
	tx->quad_offset = 0;
	tx->len = INDEX_PER_QUAD;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
 * @tx: PTP Tx tracker to clean up
 *
 * Loop through the Tx timestamp requests and see if any of them have been
 * waiting for a long time. Discard any SKBs that have been waiting for more
 * than 2 seconds. This is long enough to be reasonably sure that the
 * timestamp will never be captured. This might happen if the packet gets
 * discarded before it reaches the PHY timestamping block.
 */
static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
{
	u8 idx;

	if (!tx->init)
		return;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct sk_buff *skb;

		/* Check if this SKB has been waiting for too long */
		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
			continue;

		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	ice_ptp_update_cached_phctime(pf);

	ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);

	/* Run twice a second */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(500));
}

/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device hardware
 * clock. Save the clock index for other functions connected to the same
 * hardware resource.
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	u8 src_idx;
	int err;

	wr32(hw, GLTSYN_SYNC_DLAY, 0);

	/* Clear some HW residue and enable source clock */
	src_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* Enable source clocks */
	wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);

	/* Enable PHY time sync */
	err = ice_ptp_init_phy_e810(hw);
	if (err)
		goto err_exit;

	/* Clear event status indications for auxiliary pins */
	(void)rd32(hw, GLTSYN_STAT(src_idx));

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	/* Store the PTP clock index for other PFs */
	ice_set_ptp_clock_index(pf);

	return 0;

err_clk:
	pf->ptp.clock = NULL;
err_exit:
	dev_err(dev, "PTP failed to register clock, err %d\n", err);

	return err;
}

/**
 * ice_ptp_init - Initialize the PTP support after device probe or reset
 * @pf: Board private structure
 *
 * This function sets device up for PTP support. The first time it is run, it
 * will create a clock device. It does not create a clock device if one
 * already exists. It also reconfigures the device after a reset.
 */
void ice_ptp_init(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct kthread_worker *kworker;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* PTP is currently only supported on E810 devices */
	if (!ice_is_e810(hw))
		return;

	/* Check if this PF owns the source timer */
	if (hw->func_caps.ts_func_info.src_tmr_owned) {
		err = ice_ptp_init_owner(pf);
		if (err)
			return;
	}

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	/* Initialize the PTP port Tx timestamp tracker */
	ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx);

	/* Initialize work functions */
	kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work);
	kthread_init_work(&pf->ptp.extts_work, ice_ptp_extts_work);

	/* Allocate a kworker for handling work required for the ports
	 * connected to the PTP hardware clock.
	 */
	kworker = kthread_create_worker(0, "ice-ptp-%s", dev_name(dev));
	if (IS_ERR(kworker)) {
		err = PTR_ERR(kworker);
		goto err_kworker;
	}
	pf->ptp.kworker = kworker;

	set_bit(ICE_FLAG_PTP, pf->flags);

	/* Start periodic work going */
	kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);

	dev_info(dev, "PTP init successful\n");
	return;

err_kworker:
	/* If we registered a PTP clock, release it */
	if (pf->ptp.clock) {
		ptp_clock_unregister(pf->ptp.clock);
		pf->ptp.clock = NULL;
	}
	dev_err(dev, "PTP failed %d\n", err);
}

/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock
 */
void ice_ptp_release(struct ice_pf *pf)
{
	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	clear_bit(ICE_FLAG_PTP, pf->flags);

	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	if (!pf->ptp.clock)
		return;

	ice_clear_ptp_clock_index(pf);
	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}