// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"

/* Propagation delay applied to E810 periodic outputs */
#define E810_OUT_PROP_DELAY_NS 1

/* Sentinel increment value used when the E822 clock rate is unknown */
#define UNKNOWN_INCVAL_E822 0x100000000ULL

/* Default pin layout for the E810-T SMA/U.FL connectors. SMA1/U.FL1 share
 * channel 1 and SMA2/U.FL2 share channel 2 of the SMA control logic.
 */
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
	/* name    idx   func          chan */
	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
	{ "SMA1",  SMA1, PTP_PF_NONE,  1, { 0, } },
	{ "U.FL1", UFL1, PTP_PF_NONE,  1, { 0, } },
	{ "SMA2",  SMA2, PTP_PF_NONE,  2, { 0, } },
	{ "U.FL2", UFL2, PTP_PF_NONE,  2, { 0, } },
};

/**
 * ice_get_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Read the configuration of the SMA control logic and put it into the
 * ptp_pin_desc structure
 */
static int
ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
{
	u8 data, i;
	int status;

	/* Read initial pin state */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* initialize with defaults */
	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
		snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
			 "%s", ice_pin_desc_e810t[i].name);
		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
	}

	/* Parse SMA1/UFL1: the DIR_EN/TX_EN bit pair selects which of the
	 * two connectors on channel 1 is active and in which direction.
	 */
	switch (data & ICE_SMA1_MASK_E810T) {
	case ICE_SMA1_MASK_E810T:
	default:
		ptp_pins[SMA1].func = PTP_PF_NONE;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_DIR_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_PEROUT;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_TX_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case 0:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_PEROUT;
		break;
	}

	/* Parse SMA2/UFL2 */
	switch (data & ICE_SMA2_MASK_E810T) {
	case ICE_SMA2_MASK_E810T:
	default:
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_EXTTS;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	case ICE_SMA2_DIR_EN_E810T:
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	}

	return 0;
}

/**
 * ice_ptp_set_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Set the configuration of the SMA control logic based on the configuration in
 * num_pins parameter
 */
static int
ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
			     const struct ptp_pin_desc *ptp_pins)
{
	int status;
	u8 data;

	/* SMA1 and UFL1 cannot be set to TX (PEROUT) at the same time */
	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
		return -EINVAL;

	/* SMA2 and UFL2 cannot be set to RX (EXTTS) at the same time */
	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
		return -EINVAL;

	/* Read initial pin state value */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* Set the right state based on the desired configuration */
	data &= ~ICE_SMA1_MASK_E810T;
	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
	    ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
		data |= ICE_SMA1_MASK_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
		data |= ICE_SMA1_TX_EN_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		/* U.FL 1 TX will always enable SMA 1 RX */
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
		data |= ICE_SMA1_DIR_EN_E810T;
	}

	data &= ~ICE_SMA2_MASK_E810T;
	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
	    ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
		data |= ICE_SMA2_MASK_E810T;
	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
		data |= (ICE_SMA2_TX_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
		data |= (ICE_SMA2_DIR_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
		data |= ICE_SMA2_DIR_EN_E810T;
	}

	return ice_write_sma_ctrl_e810t(hw, data);
}

/**
 * ice_ptp_set_sma_e810t
 * @info: the driver's PTP info structure
 * @pin: pin index in kernel structure
 * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
 *
 * Set the configuration of a single SMA pin
 */
static int
ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
		      enum ptp_pin_function func)
{
	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	int err;

	/* GNSS (index 0) is fixed; only SMA/U.FL pins may be reconfigured */
	if (pin < SMA1 || func > PTP_PF_PEROUT)
		return -EOPNOTSUPP;

	err = ice_get_sma_config_e810t(hw, ptp_pins);
	if (err)
		return err;

	/* Disable the same function on the other pin sharing the channel */
	if (pin == SMA1 && ptp_pins[UFL1].func == func)
		ptp_pins[UFL1].func = PTP_PF_NONE;
	if (pin == UFL1 && ptp_pins[SMA1].func == func)
		ptp_pins[SMA1].func = PTP_PF_NONE;

	if (pin == SMA2 && ptp_pins[UFL2].func == func)
		ptp_pins[UFL2].func = PTP_PF_NONE;
	if (pin == UFL2 && ptp_pins[SMA2].func == func)
		ptp_pins[SMA2].func = PTP_PF_NONE;

	/* Set up new pin function in the temp table */
	ptp_pins[pin].func = func;

	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
}

/**
 * ice_verify_pin_e810t
 * @info: the driver's PTP info structure
 * @pin: Pin index
 * @func: Assigned function
 * @chan: Assigned channel
 *
 * Verify if the pin supports the requested pin function and check pin
 * consistency.
 * Reconfigure the SMA logic attached to the given pin to enable its
 * desired functionality
 */
static int
ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
		     enum ptp_pin_function func, unsigned int chan)
{
	/* Don't allow channel reassignment */
	if (chan != ice_pin_desc_e810t[pin].chan)
		return -EOPNOTSUPP;

	/* Check if functions are properly assigned: U.FL1 is TX-only,
	 * U.FL2 is RX-only and GNSS is input-only.
	 */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		if (pin == UFL1)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PEROUT:
		if (pin == UFL2 || pin == GNSS)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PHYSYNC:
		return -EOPNOTSUPP;
	}

	return ice_ptp_set_sma_e810t(info, pin, func);
}

/**
 * ice_set_tx_tstamp - Enable or disable Tx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u32 val;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp enable flag for all the Tx rings */
	ice_for_each_txq(vsi, i) {
		if (!vsi->tx_rings[i])
			continue;
		vsi->tx_rings[i]->ptp_tx = on;
	}

	/* Configure the Tx timestamp interrupt */
	val = rd32(&pf->hw, PFINT_OICR_ENA);
	if (on)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(&pf->hw, PFINT_OICR_ENA, val);

	/* Keep the reported hwtstamp configuration in sync */
	pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
}

/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp flag for all the Rx rings */
	ice_for_each_rxq(vsi, i) {
		if (!vsi->rx_rings[i])
			continue;
		vsi->rx_rings[i]->ptp_rx = on;
	}

	/* Keep the reported hwtstamp configuration in sync */
	pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
					       HWTSTAMP_FILTER_NONE;
}

/**
 * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
 * @pf: Board private structure
 * @ena: bool value to enable or disable time stamp
 *
 * This function will configure timestamping during PTP initialization
 * and deinitialization
 */
void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
	ice_set_tx_tstamp(pf, ena);
	ice_set_rx_tstamp(pf, ena);
}

/**
 * ice_get_ptp_clock_index - Get the PTP clock index
 * @pf: the PF pointer
 *
 * Determine the clock index of the PTP clock associated with this device. If
 * this is the PF controlling the clock, just use the local access to the
 * clock device pointer.
 *
 * Otherwise, read from the driver shared parameters to determine the clock
 * index value.
 *
 * Returns: the index of the PTP clock associated with this device, or -1 if
 * there is no associated clock.
 */
int ice_get_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	/* Use the ptp_clock structure if we're the main PF */
	if (pf->ptp.clock)
		return ptp_clock_index(pf->ptp.clock);

	/* Pick the shared parameter slot matching our associated timer */
	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
	if (err) {
		dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		return -1;
	}

	/* The PTP clock index is an integer, and will be between 0 and
	 * INT_MAX. The highest bit of the driver shared parameter is used to
	 * indicate whether or not the currently stored clock index is valid.
	 */
	if (!(value & PTP_SHARED_CLK_IDX_VALID))
		return -1;

	return value & ~PTP_SHARED_CLK_IDX_VALID;
}

/**
 * ice_set_ptp_clock_index - Set the PTP clock index
 * @pf: the PF pointer
 *
 * Set the PTP clock index for this device into the shared driver parameters,
 * so that other PFs associated with this device can read it.
 *
 * If the PF is unable to store the clock index, it will log an error, but
 * will continue operating PTP.
 */
static void ice_set_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	if (!pf->ptp.clock)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	value = (u32)ptp_clock_index(pf->ptp.clock);
	if (value > INT_MAX) {
		/* The valid-flag bit would collide with the index itself */
		dev_err(dev, "PTP Clock index is too large to store\n");
		return;
	}
	value |= PTP_SHARED_CLK_IDX_VALID;

	err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
	if (err) {
		dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}

/**
 * ice_clear_ptp_clock_index - Clear the PTP clock index
 * @pf: the PF pointer
 *
 * Clear the PTP clock index for this device. Must be called when
 * unregistering the PTP clock, in order to ensure other PFs stop reporting
 * a clock object that no longer exists.
 */
static void ice_clear_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	int err;

	/* Do not clear the index if we don't own the timer */
	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	/* Writing 0 leaves the valid bit clear, marking the index invalid */
	err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
	if (err) {
		dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}

/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	/* Read TIME_H, then TIME_L again to detect a rollover of the low
	 * word between the two reads.
	 */
	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}

/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 * Note that the captured timestamp values may be 40 bits, but the lower
 * 8 bits are sub-nanoseconds and generally discarded.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
513 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the 514 * timestamp was captured *before* the PHC time, i.e. because the PHC 515 * cache was updated after the timestamp was captured by hardware. In this 516 * case, the full timestamp is the cached time minus the inverse delta. 517 * 518 * This algorithm works even if the PHC time was updated after a Tx timestamp 519 * was requested, but before the Tx timestamp event was reported from 520 * hardware. 521 * 522 * This calculation primarily relies on keeping the cached PHC time up to 523 * date. If the timestamp was captured more than 2^31 nanoseconds after the 524 * PHC time, it is possible that the lower 32bits of PHC time have 525 * overflowed more than once, and we might generate an incorrect timestamp. 526 * 527 * This is prevented by (a) periodically updating the cached PHC time once 528 * a second, and (b) discarding any Tx timestamp packet if it has waited for 529 * a timestamp for more than one second. 530 */ 531 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp) 532 { 533 u32 delta, phc_time_lo; 534 u64 ns; 535 536 /* Extract the lower 32 bits of the PHC time */ 537 phc_time_lo = (u32)cached_phc_time; 538 539 /* Calculate the delta between the lower 32bits of the cached PHC 540 * time and the in_tstamp value 541 */ 542 delta = (in_tstamp - phc_time_lo); 543 544 /* Do not assume that the in_tstamp is always more recent than the 545 * cached PHC time. If the delta is large, it indicates that the 546 * in_tstamp was taken in the past, and should be converted 547 * forward. 
548 */ 549 if (delta > (U32_MAX / 2)) { 550 /* reverse the delta calculation here */ 551 delta = (phc_time_lo - in_tstamp); 552 ns = cached_phc_time - delta; 553 } else { 554 ns = cached_phc_time + delta; 555 } 556 557 return ns; 558 } 559 560 /** 561 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds 562 * @pf: Board private structure 563 * @in_tstamp: Ingress/egress 40b timestamp value 564 * 565 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal 566 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit. 567 * 568 * *--------------------------------------------------------------* 569 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v | 570 * *--------------------------------------------------------------* 571 * 572 * The low bit is an indicator of whether the timestamp is valid. The next 573 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow, 574 * and the remaining 32 bits are the lower 32 bits of the PHC timer. 575 * 576 * It is assumed that the caller verifies the timestamp is valid prior to 577 * calling this function. 578 * 579 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC 580 * time stored in the device private PTP structure as the basis for timestamp 581 * extension. 582 * 583 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension 584 * algorithm. 
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);
	unsigned long discard_time;

	/* Discard the hardware timestamp if the cached PHC time is too old */
	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (time_is_before_jiffies(discard_time)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	/* Strip the 8 sub-nanosecond/valid bits, keep the 32 nominal ns */
	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}

/**
 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
 * @tx: the PTP Tx timestamp tracker to check
 *
 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
 * to accept new timestamp requests.
 *
 * Assumes the tx->lock spinlock is already held.
 */
static bool
ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
{
	lockdep_assert_held(&tx->lock);

	return tx->init && !tx->calibrating;
}

/**
 * ice_ptp_tx_tstamp - Process Tx timestamps for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) check that the timestamp request is not stale
 * 2) check that a timestamp is ready and available in the PHY memory bank
 * 3) read and copy the timestamp out of the PHY register
 * 4) unlock the index by clearing the associated in_use bit
 * 5) check if the timestamp is stale, and discard if so
 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
 * 7) send this 64 bit timestamp to the stack
 *
 * Returns true if all timestamps were handled, and false if any slots remain
 * without a timestamp.
 *
 * After looping, if we still have waiting SKBs, return false. This may cause
 * us to effectively poll even when not strictly necessary. We do this because
 * it's possible a new timestamp was requested around the same time as the
 * interrupt. In some cases hardware might not interrupt us again when the
 * timestamp is captured.
 *
 * Note that we do not hold the tracking lock while reading the Tx timestamp.
 * This is because reading the timestamp requires taking a mutex that might
 * sleep.
 *
 * The only place where we set in_use is when a new timestamp is initiated
 * with a slot index. This is only called in the hard xmit routine where an
 * SKB has a request flag set. The only places where we clear this bit is this
 * function, or during teardown when the Tx timestamp tracker is being
 * removed. A timestamp index will never be re-used until the in_use bit for
 * that index is cleared.
 *
 * If a Tx thread starts a new timestamp, we might not begin processing it
 * right away but we will notice it at the end when we re-queue the task.
 *
 * If a Tx thread starts a new timestamp just after this function exits, the
 * interrupt for that timestamp should re-trigger this function once
 * a timestamp is ready.
 *
 * In cases where the PTP hardware clock was directly adjusted, some
 * timestamps may not be able to safely use the timestamp extension math. In
 * this case, software will set the stale bit for any outstanding Tx
 * timestamps when the clock is adjusted. Then this function will discard
 * those captured timestamps instead of sending them to the stack.
 *
 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
 * to correctly extend the timestamp using the cached PHC time. It is
 * extremely unlikely that a packet will ever take this long to timestamp. If
 * we detect a Tx timestamp request that has waited for this long we assume
 * the packet will never be sent by hardware and discard it without reading
 * the timestamp register.
 */
static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
{
	struct ice_ptp_port *ptp_port;
	bool more_timestamps;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 tstamp_ready;
	bool link_up;
	int err;
	u8 idx;

	if (!tx->init)
		return true;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	/* Read the Tx ready status first */
	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err)
		return false;

	/* Drop packets if the link went down */
	link_up = ptp_port->link_up;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->offset;
		u64 raw_tstamp = 0, tstamp;
		bool drop_ts = !link_up;
		struct sk_buff *skb;

		/* Drop packets which have waited for more than 2 seconds */
		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
			drop_ts = true;

			/* Count the number of Tx timestamps that timed out */
			pf->ptp.tx_hwtstamp_timeouts++;
		}

		/* Only read a timestamp from the PHY if its marked as ready
		 * by the tstamp_ready register. This avoids unnecessary
		 * reading of timestamps which are not yet valid. This is
		 * important as we must read all timestamps which are valid
		 * and only timestamps which are valid during each interrupt.
		 * If we do not, the hardware logic for generating a new
		 * interrupt can get stuck on some devices.
		 */
		if (!(tstamp_ready & BIT_ULL(phy_idx))) {
			if (drop_ts)
				goto skip_ts_read;

			continue;
		}

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
		if (err && !drop_ts)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* For PHYs which don't implement a proper timestamp ready
		 * bitmap, verify that the timestamp value is different
		 * from the last cached timestamp. If it is not, skip this for
		 * now assuming it hasn't yet been captured by hardware.
		 */
		if (!drop_ts && tx->verify_cached &&
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* Discard any timestamp value without the valid bit set */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			drop_ts = true;

skip_ts_read:
		spin_lock(&tx->lock);
		if (tx->verify_cached && raw_tstamp)
			tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		if (test_and_clear_bit(idx, tx->stale))
			drop_ts = true;
		spin_unlock(&tx->lock);

		/* It is unlikely but possible that the SKB will have been
		 * flushed at this point due to link change or teardown.
		 */
		if (!skb)
			continue;

		if (drop_ts) {
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}

	/* Check if we still have work to do. If so, re-queue this task to
	 * poll for remaining timestamps.
	 */
	spin_lock(&tx->lock);
	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
	spin_unlock(&tx->lock);

	return !more_timestamps;
}

/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use the ice_ptp_init_tx_* instead.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	unsigned long *in_use, *stale;
	struct ice_tx_tstamp *tstamps;

	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	stale = bitmap_zalloc(tx->len, GFP_KERNEL);

	/* All-or-nothing: if any allocation failed, free the others.
	 * kfree/bitmap_free accept NULL, so no individual checks needed.
	 */
	if (!tstamps || !in_use || !stale) {
		kfree(tstamps);
		bitmap_free(in_use);
		bitmap_free(stale);

		return -ENOMEM;
	}

	tx->tstamps = tstamps;
	tx->in_use = in_use;
	tx->stale = stale;
	tx->init = 1;

	spin_lock_init(&tx->lock);

	return 0;
}

/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Called during teardown when a Tx tracker is being removed.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	u64 tstamp_ready;
	int err;
	u8 idx;

	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err) {
		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
			tx->block, err);

		/* If we fail to read the Tx timestamp ready bitmap just
		 * skip clearing the PHY timestamps.
		 */
		tstamp_ready = 0;
	}

	for_each_set_bit(idx, tx->in_use, tx->len) {
		u8 phy_idx = idx + tx->offset;
		struct sk_buff *skb;

		/* In case this timestamp is ready, we need to clear it.
		 */
		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
			ice_clear_phy_tstamp(hw, tx->block, phy_idx);

		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		spin_unlock(&tx->lock);

		/* Count the number of Tx timestamps flushed */
		pf->ptp.tx_hwtstamp_flushed++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
 * @tx: the tracker to mark
 *
 * Mark currently outstanding Tx timestamps as stale. This prevents sending
 * their timestamp value to the stack. This is required to prevent extending
 * the 40bit hardware timestamp incorrectly.
 *
 * This should be called when the PTP clock is modified such as after a set
 * time request.
 */
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
	spin_lock(&tx->lock);
	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
	spin_unlock(&tx->lock);
}

/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	/* Mark the tracker down first so no new timestamps are initiated */
	spin_lock(&tx->lock);
	tx->init = 0;
	spin_unlock(&tx->lock);

	/* wait for potentially outstanding interrupt to complete */
	synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	bitmap_free(tx->stale);
	tx->stale = NULL;

	tx->len = 0;
}

/**
 * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
 * the timestamp block is shared for all ports in the same quad. To avoid
 * ports using the same timestamp index, logically break the block of
 * registers into chunks based on the port number.
 */
static int
ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	tx->block = port / ICE_PORTS_PER_QUAD;
	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822;
	tx->len = INDEX_PER_PORT_E822;
	tx->verify_cached = 0;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 */
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->block = pf->hw.port_info->lport;
	tx->offset = 0;
	tx->len = INDEX_PER_PORT_E810;
	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
	 * verify new timestamps against cached copy of the last read
	 * timestamp.
	 */
	tx->verify_cached = 1;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	/* Warn (and count) if the previous update was more than 2s ago */
	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}

/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
 * @pf: Board specific private structure
 *
 * This function must be called when the cached PHC time is no longer valid,
 * such as after a time adjustment. It marks any currently outstanding Tx
 * timestamps as stale and updates the cached PHC time for both the PF and Rx
 * rings.
 *
 * If updating the PHC time cannot be done immediately, a warning message is
 * logged and the work item is scheduled immediately to minimize the window
 * with a wrong cached timestamp.
 */
static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Update the cached PHC time immediately if possible, otherwise
	 * schedule the work item to execute soon.
	 */
	err = ice_ptp_update_cached_phctime(pf);
	if (err) {
		/* If another thread is updating the Rx rings, we won't
		 * properly reset them here. This could lead to reporting of
		 * invalid timestamps, but there isn't much we can do.
		 */
		dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
			 __func__);

		/* Queue the work item to update the Rx rings when possible */
		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
					   msecs_to_jiffies(10));
	}

	/* Mark any outstanding timestamps as stale, since they might have
	 * been captured in hardware before the time update. This could lead
	 * to us extending them with the wrong cached value resulting in
	 * incorrect timestamp values.
	 */
	ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
}

/**
 * ice_ptp_read_time - Read the time from the device
 * @pf: Board private structure
 * @ts: timespec structure to hold the current time value
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * This function reads the source clock registers and stores them in a timespec.
 * However, since the registers are 64 bits of nanoseconds, we must convert the
 * result to a timespec before we can return.
 */
static void
ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
		  struct ptp_system_timestamp *sts)
{
	u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);

	*ts = ns_to_timespec64(time_ns);
}

/**
 * ice_ptp_write_init - Set PHC time to provided value
 * @pf: Board private structure
 * @ts: timespec structure that holds the new time value
 *
 * Set the PHC time to the specified time provided in the timespec.
 */
static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
{
	u64 ns = timespec64_to_ns(ts);
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_init_time(hw, ns);
}

/**
 * ice_ptp_write_adj - Adjust PHC clock time atomically
 * @pf: Board private structure
 * @adj: Adjustment in nanoseconds
 *
 * Perform an atomic adjustment of the PHC time by the specified number of
 * nanoseconds.
 */
static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
{
	struct ice_hw *hw = &pf->hw;

	return ice_ptp_adj_clock(hw, adj);
}

/**
 * ice_base_incval - Get base timer increment value
 * @pf: Board private structure
 *
 * Look up the base timer increment value for this device. The base increment
 * value is used to define the nominal clock tick rate.
This increment value 1139 * is programmed during device initialization. It is also used as the basis 1140 * for calculating adjustments using scaled_ppm. 1141 */ 1142 static u64 ice_base_incval(struct ice_pf *pf) 1143 { 1144 struct ice_hw *hw = &pf->hw; 1145 u64 incval; 1146 1147 if (ice_is_e810(hw)) 1148 incval = ICE_PTP_NOMINAL_INCVAL_E810; 1149 else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) 1150 incval = ice_e822_nominal_incval(ice_e822_time_ref(hw)); 1151 else 1152 incval = UNKNOWN_INCVAL_E822; 1153 1154 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", 1155 incval); 1156 1157 return incval; 1158 } 1159 1160 /** 1161 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state 1162 * @port: PTP port for which Tx FIFO is checked 1163 */ 1164 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) 1165 { 1166 int quad = port->port_num / ICE_PORTS_PER_QUAD; 1167 int offs = port->port_num % ICE_PORTS_PER_QUAD; 1168 struct ice_pf *pf; 1169 struct ice_hw *hw; 1170 u32 val, phy_sts; 1171 int err; 1172 1173 pf = ptp_port_to_pf(port); 1174 hw = &pf->hw; 1175 1176 if (port->tx_fifo_busy_cnt == FIFO_OK) 1177 return 0; 1178 1179 /* need to read FIFO state */ 1180 if (offs == 0 || offs == 1) 1181 err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS, 1182 &val); 1183 else 1184 err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS, 1185 &val); 1186 1187 if (err) { 1188 dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n", 1189 port->port_num, err); 1190 return err; 1191 } 1192 1193 if (offs & 0x1) 1194 phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S; 1195 else 1196 phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S; 1197 1198 if (phy_sts & FIFO_EMPTY) { 1199 port->tx_fifo_busy_cnt = FIFO_OK; 1200 return 0; 1201 } 1202 1203 port->tx_fifo_busy_cnt++; 1204 1205 dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n", 1206 port->tx_fifo_busy_cnt, port->port_num); 1207 1208 if 
(port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) { 1209 dev_dbg(ice_pf_to_dev(pf), 1210 "Port %d Tx FIFO still not empty; resetting quad %d\n", 1211 port->port_num, quad); 1212 ice_ptp_reset_ts_memory_quad_e822(hw, quad); 1213 port->tx_fifo_busy_cnt = FIFO_OK; 1214 return 0; 1215 } 1216 1217 return -EAGAIN; 1218 } 1219 1220 /** 1221 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets 1222 * @work: Pointer to the kthread_work structure for this task 1223 * 1224 * Check whether hardware has completed measuring the Tx and Rx offset values 1225 * used to configure and enable vernier timestamp calibration. 1226 * 1227 * Once the offset in either direction is measured, configure the associated 1228 * registers with the calibrated offset values and enable timestamping. The Tx 1229 * and Rx directions are configured independently as soon as their associated 1230 * offsets are known. 1231 * 1232 * This function reschedules itself until both Tx and Rx calibration have 1233 * completed. 
1234 */ 1235 static void ice_ptp_wait_for_offsets(struct kthread_work *work) 1236 { 1237 struct ice_ptp_port *port; 1238 struct ice_pf *pf; 1239 struct ice_hw *hw; 1240 int tx_err; 1241 int rx_err; 1242 1243 port = container_of(work, struct ice_ptp_port, ov_work.work); 1244 pf = ptp_port_to_pf(port); 1245 hw = &pf->hw; 1246 1247 if (ice_is_reset_in_progress(pf->state)) { 1248 /* wait for device driver to complete reset */ 1249 kthread_queue_delayed_work(pf->ptp.kworker, 1250 &port->ov_work, 1251 msecs_to_jiffies(100)); 1252 return; 1253 } 1254 1255 tx_err = ice_ptp_check_tx_fifo(port); 1256 if (!tx_err) 1257 tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num); 1258 rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num); 1259 if (tx_err || rx_err) { 1260 /* Tx and/or Rx offset not yet configured, try again later */ 1261 kthread_queue_delayed_work(pf->ptp.kworker, 1262 &port->ov_work, 1263 msecs_to_jiffies(100)); 1264 return; 1265 } 1266 } 1267 1268 /** 1269 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port 1270 * @ptp_port: PTP port to stop 1271 */ 1272 static int 1273 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) 1274 { 1275 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1276 u8 port = ptp_port->port_num; 1277 struct ice_hw *hw = &pf->hw; 1278 int err; 1279 1280 if (ice_is_e810(hw)) 1281 return 0; 1282 1283 mutex_lock(&ptp_port->ps_lock); 1284 1285 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1286 1287 err = ice_stop_phy_timer_e822(hw, port, true); 1288 if (err) 1289 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", 1290 port, err); 1291 1292 mutex_unlock(&ptp_port->ps_lock); 1293 1294 return err; 1295 } 1296 1297 /** 1298 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping 1299 * @ptp_port: PTP port for which the PHY start is set 1300 * 1301 * Start the PHY timestamping block, and initiate Vernier timestamping 1302 * calibration. 
If timestamping cannot be calibrated (such as if link is down) 1303 * then disable the timestamping block instead. 1304 */ 1305 static int 1306 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) 1307 { 1308 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1309 u8 port = ptp_port->port_num; 1310 struct ice_hw *hw = &pf->hw; 1311 int err; 1312 1313 if (ice_is_e810(hw)) 1314 return 0; 1315 1316 if (!ptp_port->link_up) 1317 return ice_ptp_port_phy_stop(ptp_port); 1318 1319 mutex_lock(&ptp_port->ps_lock); 1320 1321 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1322 1323 /* temporarily disable Tx timestamps while calibrating PHY offset */ 1324 spin_lock(&ptp_port->tx.lock); 1325 ptp_port->tx.calibrating = true; 1326 spin_unlock(&ptp_port->tx.lock); 1327 ptp_port->tx_fifo_busy_cnt = 0; 1328 1329 /* Start the PHY timer in Vernier mode */ 1330 err = ice_start_phy_timer_e822(hw, port); 1331 if (err) 1332 goto out_unlock; 1333 1334 /* Enable Tx timestamps right away */ 1335 spin_lock(&ptp_port->tx.lock); 1336 ptp_port->tx.calibrating = false; 1337 spin_unlock(&ptp_port->tx.lock); 1338 1339 kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); 1340 1341 out_unlock: 1342 if (err) 1343 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n", 1344 port, err); 1345 1346 mutex_unlock(&ptp_port->ps_lock); 1347 1348 return err; 1349 } 1350 1351 /** 1352 * ice_ptp_link_change - Reconfigure PTP after link status change 1353 * @pf: Board private structure 1354 * @port: Port for which the PHY start is set 1355 * @linkup: Link is up or down 1356 */ 1357 void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) 1358 { 1359 struct ice_ptp_port *ptp_port; 1360 1361 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 1362 return; 1363 1364 if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS)) 1365 return; 1366 1367 ptp_port = &pf->ptp.port; 1368 if (WARN_ON_ONCE(ptp_port->port_num != port)) 1369 return; 1370 1371 /* Update cached link status for this port 
immediately */ 1372 ptp_port->link_up = linkup; 1373 1374 /* E810 devices do not need to reconfigure the PHY */ 1375 if (ice_is_e810(&pf->hw)) 1376 return; 1377 1378 ice_ptp_port_phy_restart(ptp_port); 1379 } 1380 1381 /** 1382 * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt 1383 * @pf: PF private structure 1384 * @ena: bool value to enable or disable interrupt 1385 * @threshold: Minimum number of packets at which intr is triggered 1386 * 1387 * Utility function to enable or disable Tx timestamp interrupt and threshold 1388 */ 1389 static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) 1390 { 1391 struct ice_hw *hw = &pf->hw; 1392 int err = 0; 1393 int quad; 1394 u32 val; 1395 1396 ice_ptp_reset_ts_memory(hw); 1397 1398 for (quad = 0; quad < ICE_MAX_QUAD; quad++) { 1399 err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, 1400 &val); 1401 if (err) 1402 break; 1403 1404 if (ena) { 1405 val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; 1406 val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; 1407 val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) & 1408 Q_REG_TX_MEM_GBL_CFG_INTR_THR_M); 1409 } else { 1410 val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; 1411 } 1412 1413 err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, 1414 val); 1415 if (err) 1416 break; 1417 } 1418 1419 if (err) 1420 dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n", 1421 err); 1422 return err; 1423 } 1424 1425 /** 1426 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block 1427 * @pf: Board private structure 1428 */ 1429 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf) 1430 { 1431 ice_ptp_port_phy_restart(&pf->ptp.port); 1432 } 1433 1434 /** 1435 * ice_ptp_adjfine - Adjust clock increment rate 1436 * @info: the driver's PTP info structure 1437 * @scaled_ppm: Parts per million with 16-bit fractional field 1438 * 1439 * Adjust the frequency of the clock by the indicated scaled ppm from the 1440 * base frequency. 
1441 */ 1442 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) 1443 { 1444 struct ice_pf *pf = ptp_info_to_pf(info); 1445 struct ice_hw *hw = &pf->hw; 1446 u64 incval; 1447 int err; 1448 1449 incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm); 1450 err = ice_ptp_write_incval_locked(hw, incval); 1451 if (err) { 1452 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n", 1453 err); 1454 return -EIO; 1455 } 1456 1457 return 0; 1458 } 1459 1460 /** 1461 * ice_ptp_extts_work - Workqueue task function 1462 * @work: external timestamp work structure 1463 * 1464 * Service for PTP external clock event 1465 */ 1466 static void ice_ptp_extts_work(struct kthread_work *work) 1467 { 1468 struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work); 1469 struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); 1470 struct ptp_clock_event event; 1471 struct ice_hw *hw = &pf->hw; 1472 u8 chan, tmr_idx; 1473 u32 hi, lo; 1474 1475 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1476 /* Event time is captured by one of the two matched registers 1477 * GLTSYN_EVNT_L: 32 LSB of sampled time event 1478 * GLTSYN_EVNT_H: 32 MSB of sampled time event 1479 * Event is defined in GLTSYN_EVNT_0 register 1480 */ 1481 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) { 1482 /* Check if channel is enabled */ 1483 if (pf->ptp.ext_ts_irq & (1 << chan)) { 1484 lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); 1485 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); 1486 event.timestamp = (((u64)hi) << 32) | lo; 1487 event.type = PTP_CLOCK_EXTTS; 1488 event.index = chan; 1489 1490 /* Fire event */ 1491 ptp_clock_event(pf->ptp.clock, &event); 1492 pf->ptp.ext_ts_irq &= ~(1 << chan); 1493 } 1494 } 1495 } 1496 1497 /** 1498 * ice_ptp_cfg_extts - Configure EXTTS pin and channel 1499 * @pf: Board private structure 1500 * @ena: true to enable; false to disable 1501 * @chan: GPIO channel (0-3) 1502 * @gpio_pin: GPIO pin 1503 * @extts_flags: request flags 
from the ptp_extts_request.flags 1504 */ 1505 static int 1506 ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, 1507 unsigned int extts_flags) 1508 { 1509 u32 func, aux_reg, gpio_reg, irq_reg; 1510 struct ice_hw *hw = &pf->hw; 1511 u8 tmr_idx; 1512 1513 if (chan > (unsigned int)pf->ptp.info.n_ext_ts) 1514 return -EINVAL; 1515 1516 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1517 1518 irq_reg = rd32(hw, PFINT_OICR_ENA); 1519 1520 if (ena) { 1521 /* Enable the interrupt */ 1522 irq_reg |= PFINT_OICR_TSYN_EVNT_M; 1523 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M; 1524 1525 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0) 1526 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1) 1527 1528 /* set event level to requested edge */ 1529 if (extts_flags & PTP_FALLING_EDGE) 1530 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE; 1531 if (extts_flags & PTP_RISING_EDGE) 1532 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE; 1533 1534 /* Write GPIO CTL reg. 1535 * 0x1 is input sampled by EVENT register(channel) 1536 * + num_in_channels * tmr_idx 1537 */ 1538 func = 1 + chan + (tmr_idx * 3); 1539 gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & 1540 GLGEN_GPIO_CTL_PIN_FUNC_M); 1541 pf->ptp.ext_ts_chan |= (1 << chan); 1542 } else { 1543 /* clear the values we set to reset defaults */ 1544 aux_reg = 0; 1545 gpio_reg = 0; 1546 pf->ptp.ext_ts_chan &= ~(1 << chan); 1547 if (!pf->ptp.ext_ts_chan) 1548 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M; 1549 } 1550 1551 wr32(hw, PFINT_OICR_ENA, irq_reg); 1552 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg); 1553 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg); 1554 1555 return 0; 1556 } 1557 1558 /** 1559 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave 1560 * @pf: Board private structure 1561 * @chan: GPIO channel (0-3) 1562 * @config: desired periodic clk configuration. 
NULL will disable channel 1563 * @store: If set to true the values will be stored 1564 * 1565 * Configure the internal clock generator modules to generate the clock wave of 1566 * specified period. 1567 */ 1568 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, 1569 struct ice_perout_channel *config, bool store) 1570 { 1571 u64 current_time, period, start_time, phase; 1572 struct ice_hw *hw = &pf->hw; 1573 u32 func, val, gpio_pin; 1574 u8 tmr_idx; 1575 1576 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1577 1578 /* 0. Reset mode & out_en in AUX_OUT */ 1579 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0); 1580 1581 /* If we're disabling the output, clear out CLKO and TGT and keep 1582 * output level low 1583 */ 1584 if (!config || !config->ena) { 1585 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0); 1586 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0); 1587 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0); 1588 1589 val = GLGEN_GPIO_CTL_PIN_DIR_M; 1590 gpio_pin = pf->ptp.perout_channels[chan].gpio_pin; 1591 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); 1592 1593 /* Store the value if requested */ 1594 if (store) 1595 memset(&pf->ptp.perout_channels[chan], 0, 1596 sizeof(struct ice_perout_channel)); 1597 1598 return 0; 1599 } 1600 period = config->period; 1601 start_time = config->start_time; 1602 div64_u64_rem(start_time, period, &phase); 1603 gpio_pin = config->gpio_pin; 1604 1605 /* 1. 
Write clkout with half of required period value */ 1606 if (period & 0x1) { 1607 dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); 1608 goto err; 1609 } 1610 1611 period >>= 1; 1612 1613 /* For proper operation, the GLTSYN_CLKO must be larger than clock tick 1614 */ 1615 #define MIN_PULSE 3 1616 if (period <= MIN_PULSE || period > U32_MAX) { 1617 dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33", 1618 MIN_PULSE * 2); 1619 goto err; 1620 } 1621 1622 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period)); 1623 1624 /* Allow time for programming before start_time is hit */ 1625 current_time = ice_ptp_read_src_clk_reg(pf, NULL); 1626 1627 /* if start time is in the past start the timer at the nearest second 1628 * maintaining phase 1629 */ 1630 if (start_time < current_time) 1631 start_time = div64_u64(current_time + NSEC_PER_SEC - 1, 1632 NSEC_PER_SEC) * NSEC_PER_SEC + phase; 1633 1634 if (ice_is_e810(hw)) 1635 start_time -= E810_OUT_PROP_DELAY_NS; 1636 else 1637 start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw)); 1638 1639 /* 2. Write TARGET time */ 1640 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time)); 1641 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time)); 1642 1643 /* 3. Write AUX_OUT register */ 1644 val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M; 1645 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val); 1646 1647 /* 4. 
write GPIO CTL reg */ 1648 func = 8 + chan + (tmr_idx * 4); 1649 val = GLGEN_GPIO_CTL_PIN_DIR_M | 1650 ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M); 1651 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); 1652 1653 /* Store the value if requested */ 1654 if (store) { 1655 memcpy(&pf->ptp.perout_channels[chan], config, 1656 sizeof(struct ice_perout_channel)); 1657 pf->ptp.perout_channels[chan].start_time = phase; 1658 } 1659 1660 return 0; 1661 err: 1662 dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n"); 1663 return -EFAULT; 1664 } 1665 1666 /** 1667 * ice_ptp_disable_all_clkout - Disable all currently configured outputs 1668 * @pf: pointer to the PF structure 1669 * 1670 * Disable all currently configured clock outputs. This is necessary before 1671 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to 1672 * re-enable the clocks again. 1673 */ 1674 static void ice_ptp_disable_all_clkout(struct ice_pf *pf) 1675 { 1676 uint i; 1677 1678 for (i = 0; i < pf->ptp.info.n_per_out; i++) 1679 if (pf->ptp.perout_channels[i].ena) 1680 ice_ptp_cfg_clkout(pf, i, NULL, false); 1681 } 1682 1683 /** 1684 * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs 1685 * @pf: pointer to the PF structure 1686 * 1687 * Enable all currently configured clock outputs. Use this after 1688 * ice_ptp_disable_all_clkout to reconfigure the output signals according to 1689 * their configuration. 
1690 */ 1691 static void ice_ptp_enable_all_clkout(struct ice_pf *pf) 1692 { 1693 uint i; 1694 1695 for (i = 0; i < pf->ptp.info.n_per_out; i++) 1696 if (pf->ptp.perout_channels[i].ena) 1697 ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], 1698 false); 1699 } 1700 1701 /** 1702 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC 1703 * @info: the driver's PTP info structure 1704 * @rq: The requested feature to change 1705 * @on: Enable/disable flag 1706 */ 1707 static int 1708 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, 1709 struct ptp_clock_request *rq, int on) 1710 { 1711 struct ice_pf *pf = ptp_info_to_pf(info); 1712 struct ice_perout_channel clk_cfg = {0}; 1713 bool sma_pres = false; 1714 unsigned int chan; 1715 u32 gpio_pin; 1716 int err; 1717 1718 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 1719 sma_pres = true; 1720 1721 switch (rq->type) { 1722 case PTP_CLK_REQ_PEROUT: 1723 chan = rq->perout.index; 1724 if (sma_pres) { 1725 if (chan == ice_pin_desc_e810t[SMA1].chan) 1726 clk_cfg.gpio_pin = GPIO_20; 1727 else if (chan == ice_pin_desc_e810t[SMA2].chan) 1728 clk_cfg.gpio_pin = GPIO_22; 1729 else 1730 return -1; 1731 } else if (ice_is_e810t(&pf->hw)) { 1732 if (chan == 0) 1733 clk_cfg.gpio_pin = GPIO_20; 1734 else 1735 clk_cfg.gpio_pin = GPIO_22; 1736 } else if (chan == PPS_CLK_GEN_CHAN) { 1737 clk_cfg.gpio_pin = PPS_PIN_INDEX; 1738 } else { 1739 clk_cfg.gpio_pin = chan; 1740 } 1741 1742 clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + 1743 rq->perout.period.nsec); 1744 clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) + 1745 rq->perout.start.nsec); 1746 clk_cfg.ena = !!on; 1747 1748 err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true); 1749 break; 1750 case PTP_CLK_REQ_EXTTS: 1751 chan = rq->extts.index; 1752 if (sma_pres) { 1753 if (chan < ice_pin_desc_e810t[SMA2].chan) 1754 gpio_pin = GPIO_21; 1755 else 1756 gpio_pin = GPIO_23; 1757 } else if (ice_is_e810t(&pf->hw)) { 1758 if (chan == 0) 1759 gpio_pin 
= GPIO_21; 1760 else 1761 gpio_pin = GPIO_23; 1762 } else { 1763 gpio_pin = chan; 1764 } 1765 1766 err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, 1767 rq->extts.flags); 1768 break; 1769 default: 1770 return -EOPNOTSUPP; 1771 } 1772 1773 return err; 1774 } 1775 1776 /** 1777 * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC 1778 * @info: the driver's PTP info structure 1779 * @rq: The requested feature to change 1780 * @on: Enable/disable flag 1781 */ 1782 static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info, 1783 struct ptp_clock_request *rq, int on) 1784 { 1785 struct ice_pf *pf = ptp_info_to_pf(info); 1786 struct ice_perout_channel clk_cfg = {0}; 1787 int err; 1788 1789 switch (rq->type) { 1790 case PTP_CLK_REQ_PPS: 1791 clk_cfg.gpio_pin = PPS_PIN_INDEX; 1792 clk_cfg.period = NSEC_PER_SEC; 1793 clk_cfg.ena = !!on; 1794 1795 err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true); 1796 break; 1797 case PTP_CLK_REQ_EXTTS: 1798 err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index, 1799 TIME_SYNC_PIN_INDEX, rq->extts.flags); 1800 break; 1801 default: 1802 return -EOPNOTSUPP; 1803 } 1804 1805 return err; 1806 } 1807 1808 /** 1809 * ice_ptp_gettimex64 - Get the time of the clock 1810 * @info: the driver's PTP info structure 1811 * @ts: timespec64 structure to hold the current time value 1812 * @sts: Optional parameter for holding a pair of system timestamps from 1813 * the system clock. Will be ignored if NULL is given. 1814 * 1815 * Read the device clock and return the correct value on ns, after converting it 1816 * into a timespec struct. 
1817 */ 1818 static int 1819 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts, 1820 struct ptp_system_timestamp *sts) 1821 { 1822 struct ice_pf *pf = ptp_info_to_pf(info); 1823 struct ice_hw *hw = &pf->hw; 1824 1825 if (!ice_ptp_lock(hw)) { 1826 dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n"); 1827 return -EBUSY; 1828 } 1829 1830 ice_ptp_read_time(pf, ts, sts); 1831 ice_ptp_unlock(hw); 1832 1833 return 0; 1834 } 1835 1836 /** 1837 * ice_ptp_settime64 - Set the time of the clock 1838 * @info: the driver's PTP info structure 1839 * @ts: timespec64 structure that holds the new time value 1840 * 1841 * Set the device clock to the user input value. The conversion from timespec 1842 * to ns happens in the write function. 1843 */ 1844 static int 1845 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) 1846 { 1847 struct ice_pf *pf = ptp_info_to_pf(info); 1848 struct timespec64 ts64 = *ts; 1849 struct ice_hw *hw = &pf->hw; 1850 int err; 1851 1852 /* For Vernier mode, we need to recalibrate after new settime 1853 * Start with disabling timestamp block 1854 */ 1855 if (pf->ptp.port.link_up) 1856 ice_ptp_port_phy_stop(&pf->ptp.port); 1857 1858 if (!ice_ptp_lock(hw)) { 1859 err = -EBUSY; 1860 goto exit; 1861 } 1862 1863 /* Disable periodic outputs */ 1864 ice_ptp_disable_all_clkout(pf); 1865 1866 err = ice_ptp_write_init(pf, &ts64); 1867 ice_ptp_unlock(hw); 1868 1869 if (!err) 1870 ice_ptp_reset_cached_phctime(pf); 1871 1872 /* Reenable periodic outputs */ 1873 ice_ptp_enable_all_clkout(pf); 1874 1875 /* Recalibrate and re-enable timestamp block */ 1876 if (pf->ptp.port.link_up) 1877 ice_ptp_port_phy_restart(&pf->ptp.port); 1878 exit: 1879 if (err) { 1880 dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); 1881 return err; 1882 } 1883 1884 return 0; 1885 } 1886 1887 /** 1888 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment 1889 * @info: the driver's PTP info structure 1890 * @delta: Offset in 
nanoseconds to adjust the time by 1891 */ 1892 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) 1893 { 1894 struct timespec64 now, then; 1895 int ret; 1896 1897 then = ns_to_timespec64(delta); 1898 ret = ice_ptp_gettimex64(info, &now, NULL); 1899 if (ret) 1900 return ret; 1901 now = timespec64_add(now, then); 1902 1903 return ice_ptp_settime64(info, (const struct timespec64 *)&now); 1904 } 1905 1906 /** 1907 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta 1908 * @info: the driver's PTP info structure 1909 * @delta: Offset in nanoseconds to adjust the time by 1910 */ 1911 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) 1912 { 1913 struct ice_pf *pf = ptp_info_to_pf(info); 1914 struct ice_hw *hw = &pf->hw; 1915 struct device *dev; 1916 int err; 1917 1918 dev = ice_pf_to_dev(pf); 1919 1920 /* Hardware only supports atomic adjustments using signed 32-bit 1921 * integers. For any adjustment outside this range, perform 1922 * a non-atomic get->adjust->set flow. 
1923 */ 1924 if (delta > S32_MAX || delta < S32_MIN) { 1925 dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta); 1926 return ice_ptp_adjtime_nonatomic(info, delta); 1927 } 1928 1929 if (!ice_ptp_lock(hw)) { 1930 dev_err(dev, "PTP failed to acquire semaphore in adjtime\n"); 1931 return -EBUSY; 1932 } 1933 1934 /* Disable periodic outputs */ 1935 ice_ptp_disable_all_clkout(pf); 1936 1937 err = ice_ptp_write_adj(pf, delta); 1938 1939 /* Reenable periodic outputs */ 1940 ice_ptp_enable_all_clkout(pf); 1941 1942 ice_ptp_unlock(hw); 1943 1944 if (err) { 1945 dev_err(dev, "PTP failed to adjust time, err %d\n", err); 1946 return err; 1947 } 1948 1949 ice_ptp_reset_cached_phctime(pf); 1950 1951 return 0; 1952 } 1953 1954 #ifdef CONFIG_ICE_HWTS 1955 /** 1956 * ice_ptp_get_syncdevicetime - Get the cross time stamp info 1957 * @device: Current device time 1958 * @system: System counter value read synchronously with device time 1959 * @ctx: Context provided by timekeeping code 1960 * 1961 * Read device and system (ART) clock simultaneously and return the corrected 1962 * clock values in ns. 
1963 */ 1964 static int 1965 ice_ptp_get_syncdevicetime(ktime_t *device, 1966 struct system_counterval_t *system, 1967 void *ctx) 1968 { 1969 struct ice_pf *pf = (struct ice_pf *)ctx; 1970 struct ice_hw *hw = &pf->hw; 1971 u32 hh_lock, hh_art_ctl; 1972 int i; 1973 1974 /* Get the HW lock */ 1975 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 1976 if (hh_lock & PFHH_SEM_BUSY_M) { 1977 dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); 1978 return -EFAULT; 1979 } 1980 1981 /* Start the ART and device clock sync sequence */ 1982 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 1983 hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; 1984 wr32(hw, GLHH_ART_CTL, hh_art_ctl); 1985 1986 #define MAX_HH_LOCK_TRIES 100 1987 1988 for (i = 0; i < MAX_HH_LOCK_TRIES; i++) { 1989 /* Wait for sync to complete */ 1990 hh_art_ctl = rd32(hw, GLHH_ART_CTL); 1991 if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { 1992 udelay(1); 1993 continue; 1994 } else { 1995 u32 hh_ts_lo, hh_ts_hi, tmr_idx; 1996 u64 hh_ts; 1997 1998 tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; 1999 /* Read ART time */ 2000 hh_ts_lo = rd32(hw, GLHH_ART_TIME_L); 2001 hh_ts_hi = rd32(hw, GLHH_ART_TIME_H); 2002 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 2003 *system = convert_art_ns_to_tsc(hh_ts); 2004 /* Read Device source clock time */ 2005 hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx)); 2006 hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx)); 2007 hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; 2008 *device = ns_to_ktime(hh_ts); 2009 break; 2010 } 2011 } 2012 /* Release HW lock */ 2013 hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 2014 hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; 2015 wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); 2016 2017 if (i == MAX_HH_LOCK_TRIES) 2018 return -ETIMEDOUT; 2019 2020 return 0; 2021 } 2022 2023 /** 2024 * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp 2025 * @info: the driver's PTP info structure 2026 * @cts: The memory to fill the cross 
timestamp info
 *
 * Capture a cross timestamp between the ART and the device PTP hardware
 * clock. Fill the cross timestamp information and report it back to the
 * caller.
 *
 * This is only valid for E822 devices which have support for generating the
 * cross timestamp via PCIe PTM.
 *
 * In order to correctly correlate the ART timestamp back to the TSC time, the
 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
 */
static int
ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
			    struct system_device_crosststamp *cts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);

	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
					     pf, NULL, cts);
}
#endif /* CONFIG_ICE_HWTS */

/**
 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Copy the timestamping config to user buffer.
 *
 * Returns 0 on success, -EIO if PTP support is not enabled on this function,
 * or -EFAULT if the config could not be copied to userspace.
 */
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config *config;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EIO;

	config = &pf->ptp.tstamp_config;

	/* copy_to_user returns the number of bytes NOT copied; any nonzero
	 * result is mapped to -EFAULT.
	 */
	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
		-EFAULT : 0;
}

/**
 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
 * @pf: Board private structure
 * @config: hwtstamp settings requested or saved
 *
 * Program the Tx and Rx timestamping enables from the requested
 * hwtstamp_config.
 *
 * Returns 0 on success, or -ERANGE if the requested tx_type or rx_filter is
 * not supported by this driver.
 */
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		ice_set_tx_tstamp(pf, false);
		break;
	case HWTSTAMP_TX_ON:
		ice_set_tx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		ice_set_rx_tstamp(pf, false);
		break;
	/* Every supported PTP/NTP filter enables Rx timestamping the same
	 * way; no per-filter hardware distinction is made here.
	 */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		ice_set_rx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	return 0;
}

/**
 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Get the user config and store it.
 *
 * Returns 0 on success, -EAGAIN if PTP is not yet ready, -EFAULT on a failed
 * user copy, or the error from applying the timestamp mode.
 */
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EAGAIN;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = ice_ptp_set_timestamp_mode(pf, &config);
	if (err)
		return err;

	/* Return the actual configuration set */
	config = pf->ptp.tstamp_config;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/**
 * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
 * @rx_ring: Ring to get the VSI info
 * @rx_desc: Receive descriptor
 * @skb: Particular skb to send timestamp with
 *
 * The driver receives a notification in the receive descriptor with timestamp.
 * The timestamp is in ns, so we must convert the result first.
 *
 * Silently returns without attaching a timestamp if the descriptor does not
 * carry a valid timestamp or if no cached PHC time is available to extend it.
 */
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
		    union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *hwtstamps;
	u64 ts_ns, cached_time;
	u32 ts_high;

	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
		return;

	cached_time = READ_ONCE(rx_ring->cached_phctime);

	/* Do not report a timestamp if we don't have a cached PHC time */
	if (!cached_time)
		return;

	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
	 * PHC value, rather than accessing the PF. This also allows us to
	 * simply pass the upper 32bits of nanoseconds directly. Calling
	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
	 * bits itself.
	 */
	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);

	hwtstamps = skb_hwtstamps(skb);
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
}

/**
 * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
 * @pf: pointer to the PF structure
 * @info: PTP clock info structure
 *
 * Disable the OS access to the SMA pins. Called to clear out the OS
 * indications of pin support when we fail to setup the E810-T SMA control
 * register.
 */
static void
ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
{
	struct device *dev = ice_pf_to_dev(pf);

	dev_warn(dev, "Failed to configure E810-T SMA pin control\n");

	/* Clear the pin callbacks and counts so the PTP core exposes no
	 * configurable pins, EXTTS channels, or periodic outputs to userspace.
	 */
	info->enable = NULL;
	info->verify = NULL;
	info->n_pins = 0;
	info->n_ext_ts = 0;
	info->n_per_out = 0;
}

/**
 * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
 * @pf: pointer to the PF structure
 * @info: PTP clock info structure
 *
 * Finish setting up the SMA pins by allocating pin_config, and setting it up
 * according to the current status of the SMA. On failure, disable all of the
 * extended SMA pin support.
 */
static void
ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	/* Allocate memory for kernel pins interface; devm-managed, so it is
	 * released automatically with the device.
	 */
	info->pin_config = devm_kcalloc(dev, info->n_pins,
					sizeof(*info->pin_config), GFP_KERNEL);
	if (!info->pin_config) {
		ice_ptp_disable_sma_pins_e810t(pf, info);
		return;
	}

	/* Read current SMA status */
	err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
	if (err)
		ice_ptp_disable_sma_pins_e810t(pf, info);
}

/**
 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
 * @pf: pointer to the PF instance
 * @info: PTP clock capabilities
 *
 * Advertise the periodic output and (when supported) external timestamp
 * capabilities. Boards with SMA control additionally expose the SMA pins.
 */
static void
ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->n_per_out = N_PER_OUT_E810;

	if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
		info->n_ext_ts = N_EXT_TS_E810;

	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
		info->n_ext_ts = N_EXT_TS_E810;
		info->n_pins = NUM_PTP_PINS_E810T;
		info->verify = ice_verify_pin_e810t;

		/* Complete setup of the SMA pins */
		ice_ptp_setup_sma_pins_e810t(pf, info);
	}
}

/**
 * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
 * @pf: pointer to the PF instance
 * @info: PTP clock capabilities
 *
 * E823 exposes a PPS and a single external timestamp channel, but no
 * configurable pins or periodic outputs.
 */
static void
ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->pps = 1;
	info->n_per_out = 0;
	info->n_ext_ts = 1;
}

/**
 * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E822 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for E822
 * devices.
 */
static void
ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
{
#ifdef CONFIG_ICE_HWTS
	/* PCIe PTM cross timestamps require both ART and a TSC whose
	 * frequency is known, so the ART time can be correlated back.
	 */
	if (boot_cpu_has(X86_FEATURE_ART) &&
	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
		info->getcrosststamp = ice_ptp_getcrosststamp_e822;
#endif /* CONFIG_ICE_HWTS */
}

/**
 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E810 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for e810
 * devices.
 */
static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->enable = ice_ptp_gpio_enable_e810;
	ice_ptp_setup_pins_e810(pf, info);
}

/**
 * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
 * @pf: Board private structure
 * @info: PTP info to fill
 *
 * Assign functions to the PTP capabilities structure for E823 devices.
 * Functions which operate across all device families should be set directly
 * in ice_ptp_set_caps. Only add functions here which are distinct for e823
 * devices.
 */
static void
ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
{
	info->enable = ice_ptp_gpio_enable_e823;
	ice_ptp_setup_pins_e823(pf, info);
}

/**
 * ice_ptp_set_caps - Set PTP capabilities
 * @pf: Board private structure
 *
 * Fill in the family-independent fields of the ptp_clock_info, then dispatch
 * to the device-family-specific setup routine.
 */
static void ice_ptp_set_caps(struct ice_pf *pf)
{
	struct ptp_clock_info *info = &pf->ptp.info;
	struct device *dev = ice_pf_to_dev(pf);

	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
		 dev_driver_string(dev), dev_name(dev));
	info->owner = THIS_MODULE;
	/* max frequency adjustment, in parts per billion (ptp_clock_info) */
	info->max_adj = 100000000;
	info->adjtime = ice_ptp_adjtime;
	info->adjfine = ice_ptp_adjfine;
	info->gettimex64 = ice_ptp_gettimex64;
	info->settime64 = ice_ptp_settime64;

	if (ice_is_e810(&pf->hw))
		ice_ptp_set_funcs_e810(pf, info);
	else if (ice_is_e823(&pf->hw))
		ice_ptp_set_funcs_e823(pf, info);
	else
		ice_ptp_set_funcs_e822(pf, info);
}

/**
 * ice_ptp_create_clock - Create PTP clock device for userspace
 * @pf: Board private structure
 *
 * This function creates a new PTP clock device. It only creates one if we
 * don't already have one. Will return error if it can't create one, but success
 * if we already have a device. Should be used by ice_ptp_init to create clock
 * initially, and prevent global resets from creating new clock devices.
 */
static long ice_ptp_create_clock(struct ice_pf *pf)
{
	struct ptp_clock_info *info;
	struct ptp_clock *clock;
	struct device *dev;

	/* No need to create a clock device if we already have one */
	if (pf->ptp.clock)
		return 0;

	ice_ptp_set_caps(pf);

	info = &pf->ptp.info;
	dev = ice_pf_to_dev(pf);

	/* Attempt to register the clock before enabling the hardware. */
	clock = ptp_clock_register(info, dev);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	pf->ptp.clock = clock;

	return 0;
}

/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 *
 * Returns the PHY timestamp register index to use, or -1 if the tracker is
 * down or no index is currently free.
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	u8 idx;

	spin_lock(&tx->lock);

	/* Check that this tracker is accepting new timestamp requests */
	if (!ice_ptp_is_tx_tracker_up(tx)) {
		spin_unlock(&tx->lock);
		return -1;
	}

	/* Find and set the first available index; find_first_zero_bit
	 * returns tx->len when no bit is free.
	 */
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests.
		 */
		set_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock(&tx->lock);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->offset;
}

/**
 * ice_ptp_process_ts - Process the PTP Tx timestamps
 * @pf: Board private structure
 *
 * Returns true if timestamps are processed.
 */
bool ice_ptp_process_ts(struct ice_pf *pf)
{
	return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
}

/* Periodic kthread work: refresh the cached PHC time while PTP is enabled,
 * then re-queue itself.
 */
static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	err = ice_ptp_update_cached_phctime(pf);

	/* Run twice a second or reschedule if phc update failed */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(err ? 10 : 500));
}

/**
 * ice_ptp_reset - Initialize PTP hardware clock support after reset
 * @pf: Board private structure
 *
 * For a PF-only reset, the hardware clock state survives and only the Tx
 * tracker is re-initialized. Otherwise the clock source owner re-initializes
 * the PHC, re-programs the increment value, and restores the time from the
 * cached PHC value plus the elapsed wall time across the reset.
 */
void ice_ptp_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;
	u64 time_diff;

	/* PF reset does not disturb the clock hardware; skip re-init */
	if (test_bit(ICE_PFR_REQ, pf->state))
		goto pfr;

	/* Only the source timer owner re-programs the shared clock */
	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		goto reset_ts;

	err = ice_ptp_init_phc(hw);
	if (err)
		goto err;

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Write the initial Time value to PHY and LAN using the cached PHC
	 * time before the reset and time difference between stopping and
	 * starting the clock.
	 */
	if (ptp->cached_phc_time) {
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err;
	}

reset_ts:
	/* Restart the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

pfr:
	/* Init Tx structures */
	if (ice_is_e810(&pf->hw)) {
		err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
	} else {
		kthread_init_delayed_work(&ptp->port.ov_work,
					  ice_ptp_wait_for_offsets);
		err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
					   ptp->port.port_num);
	}
	if (err)
		goto err;

	set_bit(ICE_FLAG_PTP, pf->flags);

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
	return;

err:
	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}

/**
 * ice_ptp_prepare_for_reset - Prepare PTP for reset
 * @pf: Board private structure
 *
 * Quiesce PTP activity before a reset: stop timestamping, cancel outstanding
 * work, and — for resets that disturb the clock hardware — disable outputs
 * and the source clock, recording the wall time so ice_ptp_reset can restore
 * the PHC afterwards.
 */
void ice_ptp_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	u8 src_tmr;

	clear_bit(ICE_FLAG_PTP, pf->flags);

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	kthread_cancel_delayed_work_sync(&ptp->work);
	kthread_cancel_work_sync(&ptp->extts_work);

	/* A PF reset leaves the clock hardware intact; nothing more to do */
	if (test_bit(ICE_PFR_REQ, pf->state))
		return;

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Acquire PHC and system timer to restore after reset */
	ptp->reset_time = ktime_get_real_ns();
}

/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device hardware
 * clock. Save the clock index for other functions connected to the same
 * hardware resource.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;

	err = ice_ptp_init_phc(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
			err);
		return err;
	}

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err_exit;
	}

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	/* Store the PTP clock index for other PFs */
	ice_set_ptp_clock_index(pf);

	return 0;

err_clk:
	pf->ptp.clock = NULL;
err_exit:
	return err;
}

/**
 * ice_ptp_init_work - Initialize PTP work threads
 * @pf: Board private structure
 * @ptp: PF PTP structure
 *
 * Returns 0 on success, or the error from kthread_create_worker.
 */
static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
{
	struct kthread_worker *kworker;

	/* Initialize work functions */
	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
	kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);

	/* Allocate a kworker for handling work required for the ports
	 * connected to the PTP hardware clock.
	 */
	kworker = kthread_create_worker(0, "ice-ptp-%s",
					dev_name(ice_pf_to_dev(pf)));
	if (IS_ERR(kworker))
		return PTR_ERR(kworker);

	ptp->kworker = kworker;

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	return 0;
}

/**
 * ice_ptp_init_port - Initialize PTP port structure
 * @pf: Board private structure
 * @ptp_port: PTP port structure
 *
 * Initialize the port's state lock and its Tx timestamp tracker; non-E810
 * devices additionally prepare the offset-wait work used after link changes.
 *
 * Returns 0 on success or the error from the Tx tracker initialization.
 */
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
	mutex_init(&ptp_port->ps_lock);

	if (ice_is_e810(&pf->hw))
		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);

	kthread_init_delayed_work(&ptp_port->ov_work,
				  ice_ptp_wait_for_offsets);
	return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
}

/**
 * ice_ptp_init - Initialize PTP hardware clock support
 * @pf: Board private structure
 *
 * Set up the device for interacting with the PTP hardware clock for all
 * functions, both the function that owns the clock hardware, and the
 * functions connected to the clock hardware.
 *
 * The clock owner will allocate and register a ptp_clock with the
 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
 * items used for asynchronous work such as Tx timestamps and periodic work.
 */
void ice_ptp_init(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* If this function owns the clock hardware, it must allocate and
	 * configure the PTP clock device to represent it.
	 */
	if (hw->func_caps.ts_func_info.src_tmr_owned) {
		err = ice_ptp_init_owner(pf);
		if (err)
			goto err;
	}

	ptp->port.port_num = hw->pf_id;
	err = ice_ptp_init_port(pf, &ptp->port);
	if (err)
		goto err;

	/* Start the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

	set_bit(ICE_FLAG_PTP, pf->flags);
	err = ice_ptp_init_work(pf, ptp);
	if (err)
		goto err;

	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
	return;

err:
	/* If we registered a PTP clock, release it */
	if (pf->ptp.clock) {
		ptp_clock_unregister(ptp->clock);
		pf->ptp.clock = NULL;
	}
	clear_bit(ICE_FLAG_PTP, pf->flags);
	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}

/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock.
 *
 * Teardown order matters: timestamping is disabled and the tracker released
 * before the periodic work is cancelled and the kworker destroyed; the clock
 * device is unregistered last.
 */
void ice_ptp_release(struct ice_pf *pf)
{
	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	clear_bit(ICE_FLAG_PTP, pf->flags);

	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	/* Nothing else to clean up if no clock device was registered */
	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	ice_clear_ptp_clock_index(pf);
	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}