// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"

/* Propagation delay applied to E810 periodic outputs, in nanoseconds */
#define E810_OUT_PROP_DELAY_NS 1

/* Fallback increment value used on E822 when the real value is unknown */
#define UNKNOWN_INCVAL_E822 0x100000000ULL

static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
	/* name    idx   func         chan */
	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
	{ "SMA1",  SMA1, PTP_PF_NONE, 1, { 0, } },
	{ "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
	{ "SMA2",  SMA2, PTP_PF_NONE, 2, { 0, } },
	{ "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
};

/**
 * ice_get_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Read the configuration of the SMA control logic and put it into the
 * ptp_pin_desc structure
 */
static int
ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
{
	u8 data, i;
	int status;

	/* Read initial pin state */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* initialize with defaults */
	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
		snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
			 "%s", ice_pin_desc_e810t[i].name);
		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
	}

	/* Parse SMA1/UFL1 */
	switch (data & ICE_SMA1_MASK_E810T) {
	case ICE_SMA1_MASK_E810T:
	default:
		ptp_pins[SMA1].func = PTP_PF_NONE;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_DIR_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_PEROUT;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case ICE_SMA1_TX_EN_E810T:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_NONE;
		break;
	case 0:
		ptp_pins[SMA1].func = PTP_PF_EXTTS;
		ptp_pins[UFL1].func = PTP_PF_PEROUT;
		break;
	}

	/* Parse SMA2/UFL2 */
	switch (data & ICE_SMA2_MASK_E810T) {
	case ICE_SMA2_MASK_E810T:
	default:
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_EXTTS;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_NONE;
		break;
	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
		ptp_pins[SMA2].func = PTP_PF_NONE;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	case ICE_SMA2_DIR_EN_E810T:
		ptp_pins[SMA2].func = PTP_PF_PEROUT;
		ptp_pins[UFL2].func = PTP_PF_EXTTS;
		break;
	}

	return 0;
}

/**
 * ice_ptp_set_sma_config_e810t
 * @hw: pointer to the hw struct
 * @ptp_pins: pointer to the ptp_pin_desc structure
 *
 * Set the configuration of the SMA control logic based on the configuration
 * in the ptp_pins parameter
 */
static int
ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
			     const struct ptp_pin_desc *ptp_pins)
{
	int status;
	u8 data;

	/* SMA1 and UFL1 cannot be set to TX at the same time */
	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
		return -EINVAL;

	/* SMA2 and UFL2 cannot be set to RX at the same time */
	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
		return -EINVAL;

	/* Read initial pin state value */
	status = ice_read_sma_ctrl_e810t(hw, &data);
	if (status)
		return status;

	/* Set the right state based on the desired configuration */
	data &= ~ICE_SMA1_MASK_E810T;
	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
	    ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
		data |= ICE_SMA1_MASK_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
		data |= ICE_SMA1_TX_EN_E810T;
	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		/* U.FL 1 TX will always enable SMA 1 RX */
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL1].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
		data |= ICE_SMA1_DIR_EN_E810T;
	}

	data &= ~ICE_SMA2_MASK_E810T;
	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
	    ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
		data |= ICE_SMA2_MASK_E810T;
	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
		data |= (ICE_SMA2_TX_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_NONE) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
		data |= (ICE_SMA2_DIR_EN_E810T |
			 ICE_SMA2_UFL2_RX_DIS_E810T);
	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
		data |= ICE_SMA2_DIR_EN_E810T;
	}

	return ice_write_sma_ctrl_e810t(hw, data);
}

/**
 * ice_ptp_set_sma_e810t
 * @info: the driver's PTP info structure
 * @pin: pin index in kernel structure
 * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
 *
 * Set the configuration of a single SMA pin
 */
static int
ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
		      enum ptp_pin_function func)
{
	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	int err;

	/* GNSS is not configurable here; only SMA/U.FL pins past it are */
	if (pin < SMA1 || func > PTP_PF_PEROUT)
		return -EOPNOTSUPP;

	err = ice_get_sma_config_e810t(hw, ptp_pins);
	if (err)
		return err;

	/* Disable the same function on the other pin sharing the channel */
	if (pin == SMA1 && ptp_pins[UFL1].func == func)
		ptp_pins[UFL1].func = PTP_PF_NONE;
	if (pin == UFL1 && ptp_pins[SMA1].func == func)
		ptp_pins[SMA1].func = PTP_PF_NONE;

	if (pin == SMA2 && ptp_pins[UFL2].func == func)
		ptp_pins[UFL2].func = PTP_PF_NONE;
	if (pin == UFL2 && ptp_pins[SMA2].func == func)
		ptp_pins[SMA2].func = PTP_PF_NONE;

	/* Set up new pin function in the temp table */
	ptp_pins[pin].func = func;

	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
}

/**
 * ice_verify_pin_e810t
 * @info: the driver's PTP info structure
 * @pin: Pin index
 * @func: Assigned function
 * @chan: Assigned channel
 *
 * Verify that the requested pin function is supported, and check the pins
 * for consistency.
 * Reconfigure the SMA logic attached to the given pin to enable its
 * desired functionality
 */
static int
ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
		     enum ptp_pin_function func, unsigned int chan)
{
	/* Don't allow channel reassignment */
	if (chan != ice_pin_desc_e810t[pin].chan)
		return -EOPNOTSUPP;

	/* Check if functions are properly assigned */
	switch (func) {
	case PTP_PF_NONE:
		break;
	case PTP_PF_EXTTS:
		/* U.FL1 cannot be an input (external timestamp) pin */
		if (pin == UFL1)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PEROUT:
		/* U.FL2 and GNSS cannot be output (periodic output) pins */
		if (pin == UFL2 || pin == GNSS)
			return -EOPNOTSUPP;
		break;
	case PTP_PF_PHYSYNC:
		return -EOPNOTSUPP;
	}

	return ice_ptp_set_sma_e810t(info, pin, func);
}

/**
 * ice_set_tx_tstamp - Enable or disable Tx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u32 val;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp enable flag for all the Tx rings */
	ice_for_each_txq(vsi, i) {
		if (!vsi->tx_rings[i])
			continue;
		vsi->tx_rings[i]->ptp_tx = on;
	}

	/* Configure the Tx timestamp interrupt */
	val = rd32(&pf->hw, PFINT_OICR_ENA);
	if (on)
		val |= PFINT_OICR_TSYN_TX_M;
	else
		val &= ~PFINT_OICR_TSYN_TX_M;
	wr32(&pf->hw, PFINT_OICR_ENA, val);

	pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
}

/**
 * ice_set_rx_tstamp - Enable or disable Rx timestamping
 * @pf: The PF pointer to search in
 * @on: bool value for whether timestamps are enabled or disabled
 */
static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
{
	struct ice_vsi *vsi;
	u16 i;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return;

	/* Set the timestamp flag for all the Rx rings */
	ice_for_each_rxq(vsi, i) {
		if (!vsi->rx_rings[i])
			continue;
		vsi->rx_rings[i]->ptp_rx = on;
	}

	pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
					       HWTSTAMP_FILTER_NONE;
}

/**
 * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
 * @pf: Board private structure
 * @ena: bool value to enable or disable time stamp
 *
 * This function will configure timestamping during PTP initialization
 * and deinitialization
 */
void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
	ice_set_tx_tstamp(pf, ena);
	ice_set_rx_tstamp(pf, ena);
}

/**
 * ice_get_ptp_clock_index - Get the PTP clock index
 * @pf: the PF pointer
 *
 * Determine the clock index of the PTP clock associated with this device. If
 * this is the PF controlling the clock, just use the local access to the
 * clock device pointer.
 *
 * Otherwise, read from the driver shared parameters to determine the clock
 * index value.
 *
 * Returns: the index of the PTP clock associated with this device, or -1 if
 * there is no associated clock.
 */
int ice_get_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	/* Use the ptp_clock structure if we're the main PF */
	if (pf->ptp.clock)
		return ptp_clock_index(pf->ptp.clock);

	/* Pick the shared parameter slot matching our associated timer */
	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
	if (err) {
		dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		return -1;
	}

	/* The PTP clock index is an integer, and will be between 0 and
	 * INT_MAX. The highest bit of the driver shared parameter is used to
	 * indicate whether or not the currently stored clock index is valid.
	 */
	if (!(value & PTP_SHARED_CLK_IDX_VALID))
		return -1;

	return value & ~PTP_SHARED_CLK_IDX_VALID;
}

/**
 * ice_set_ptp_clock_index - Set the PTP clock index
 * @pf: the PF pointer
 *
 * Set the PTP clock index for this device into the shared driver parameters,
 * so that other PFs associated with this device can read it.
 *
 * If the PF is unable to store the clock index, it will log an error, but
 * will continue operating PTP.
 */
static void ice_set_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	if (!pf->ptp.clock)
		return;

	/* Pick the shared parameter slot matching our associated timer */
	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	value = (u32)ptp_clock_index(pf->ptp.clock);
	if (value > INT_MAX) {
		dev_err(dev, "PTP Clock index is too large to store\n");
		return;
	}
	/* The high bit flags the stored index as valid for readers */
	value |= PTP_SHARED_CLK_IDX_VALID;

	err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
	if (err) {
		dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}

/**
 * ice_clear_ptp_clock_index - Clear the PTP clock index
 * @pf: the PF pointer
 *
 * Clear the PTP clock index for this device. Must be called when
 * unregistering the PTP clock, in order to ensure other PFs stop reporting
 * a clock object that no longer exists.
 */
static void ice_clear_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	int err;

	/* Do not clear the index if we don't own the timer */
	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		return;

	/* Pick the shared parameter slot matching our associated timer */
	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	/* Writing 0 clears both the index and the valid bit */
	err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
	if (err) {
		dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}

/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);
	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	/* Re-read TIME_L to detect a rollover between the two reads */
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over read TIME_L again and update
		 * system timestamps
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}

/**
 * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
 * @cached_phc_time: recently cached copy of PHC time
 * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
 *
 * Hardware captures timestamps which contain only 32 bits of nominal
 * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
 * Note that the captured timestamp values may be 40 bits, but the lower
 * 8 bits are sub-nanoseconds and generally discarded.
 *
 * Extend the 32bit nanosecond timestamp using the following algorithm and
 * assumptions:
 *
 * 1) have a recently cached copy of the PHC time
 * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
 *    seconds) before or after the PHC time was captured.
 * 3) calculate the delta between the cached time and the timestamp
 * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
 *    captured after the PHC time. In this case, the full timestamp is just
 *    the cached PHC time plus the delta.
 * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
 *    timestamp was captured *before* the PHC time, i.e. because the PHC
 *    cache was updated after the timestamp was captured by hardware. In this
 *    case, the full timestamp is the cached time minus the inverse delta.
 *
 * This algorithm works even if the PHC time was updated after a Tx timestamp
 * was requested, but before the Tx timestamp event was reported from
 * hardware.
 *
 * This calculation primarily relies on keeping the cached PHC time up to
 * date. If the timestamp was captured more than 2^31 nanoseconds after the
 * PHC time, it is possible that the lower 32bits of PHC time have
 * overflowed more than once, and we might generate an incorrect timestamp.
 *
 * This is prevented by (a) periodically updating the cached PHC time once
 * a second, and (b) discarding any Tx timestamp packet if it has waited for
 * a timestamp for more than one second.
 */
static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 delta, phc_time_lo;
	u64 ns;

	/* Extract the lower 32 bits of the PHC time */
	phc_time_lo = (u32)cached_phc_time;

	/* Calculate the delta between the lower 32bits of the cached PHC
	 * time and the in_tstamp value. Unsigned wrap-around gives the
	 * correct modular distance.
	 */
	delta = (in_tstamp - phc_time_lo);

	/* Do not assume that the in_tstamp is always more recent than the
	 * cached PHC time. If the delta is large, it indicates that the
	 * in_tstamp was taken in the past, and should be converted
	 * forward.
	 */
	if (delta > (U32_MAX / 2)) {
		/* reverse the delta calculation here */
		delta = (phc_time_lo - in_tstamp);
		ns = cached_phc_time - delta;
	} else {
		ns = cached_phc_time + delta;
	}

	return ns;
}

/**
 * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
 * @pf: Board private structure
 * @in_tstamp: Ingress/egress 40b timestamp value
 *
 * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
 * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
 *
 * *--------------------------------------------------------------*
 * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
 * *--------------------------------------------------------------*
 *
 * The low bit is an indicator of whether the timestamp is valid. The next
 * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
 * and the remaining 32 bits are the lower 32 bits of the PHC timer.
 *
 * It is assumed that the caller verifies the timestamp is valid prior to
 * calling this function.
 *
 * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
 * time stored in the device private PTP structure as the basis for timestamp
 * extension.
 *
 * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
 * algorithm.
 */
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
	const u64 mask = GENMASK_ULL(31, 0);
	unsigned long discard_time;

	/* Discard the hardware timestamp if the cached PHC time is too old */
	discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (time_is_before_jiffies(discard_time)) {
		pf->ptp.tx_hwtstamp_discarded++;
		return 0;
	}

	/* Shift out the 7 sub-ns bits and the valid bit, keep 32 ns bits */
	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
				     (in_tstamp >> 8) & mask);
}

/**
 * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
 * @tx: the PTP Tx timestamp tracker to check
 *
 * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
 * to accept new timestamp requests.
 *
 * Assumes the tx->lock spinlock is already held.
 */
static bool
ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
{
	lockdep_assert_held(&tx->lock);

	return tx->init && !tx->calibrating;
}

/**
 * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port
 * @tx: the PTP Tx timestamp tracker
 *
 * Process timestamps captured by the PHY associated with this port. To do
 * this, loop over each index with a waiting skb.
 *
 * If a given index has a valid timestamp, perform the following steps:
 *
 * 1) check that the timestamp request is not stale
 * 2) check that a timestamp is ready and available in the PHY memory bank
 * 3) read and copy the timestamp out of the PHY register
 * 4) unlock the index by clearing the associated in_use bit
 * 5) check if the timestamp is stale, and discard if so
 * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
 * 7) send this 64 bit timestamp to the stack
 *
 * Note that we do not hold the tracking lock while reading the Tx timestamp.
 * This is because reading the timestamp requires taking a mutex that might
 * sleep.
 *
 * The only place where we set in_use is when a new timestamp is initiated
 * with a slot index. This is only called in the hard xmit routine where an
 * SKB has a request flag set. The only places where we clear this bit is this
 * function, or during teardown when the Tx timestamp tracker is being
 * removed. A timestamp index will never be re-used until the in_use bit for
 * that index is cleared.
 *
 * If a Tx thread starts a new timestamp, we might not begin processing it
 * right away but we will notice it at the end when we re-queue the task.
 *
 * If a Tx thread starts a new timestamp just after this function exits, the
 * interrupt for that timestamp should re-trigger this function once
 * a timestamp is ready.
 *
 * In cases where the PTP hardware clock was directly adjusted, some
 * timestamps may not be able to safely use the timestamp extension math. In
 * this case, software will set the stale bit for any outstanding Tx
 * timestamps when the clock is adjusted. Then this function will discard
 * those captured timestamps instead of sending them to the stack.
 *
 * If a Tx packet has been waiting for more than 2 seconds, it is not possible
 * to correctly extend the timestamp using the cached PHC time. It is
 * extremely unlikely that a packet will ever take this long to timestamp. If
 * we detect a Tx timestamp request that has waited for this long we assume
 * the packet will never be sent by hardware and discard it without reading
 * the timestamp register.
 */
static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
	struct ice_ptp_port *ptp_port;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u64 tstamp_ready;
	bool link_up;
	int err;
	u8 idx;

	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	/* Read the Tx ready status first */
	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err)
		return;

	/* Drop packets if the link went down */
	link_up = ptp_port->link_up;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->offset;
		u64 raw_tstamp = 0, tstamp;
		bool drop_ts = !link_up;
		struct sk_buff *skb;

		/* Drop packets which have waited for more than 2 seconds */
		if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
			drop_ts = true;

			/* Count the number of Tx timestamps that timed out */
			pf->ptp.tx_hwtstamp_timeouts++;
		}

		/* Only read a timestamp from the PHY if its marked as ready
		 * by the tstamp_ready register. This avoids unnecessary
		 * reading of timestamps which are not yet valid. This is
		 * important as we must read all timestamps which are valid
		 * and only timestamps which are valid during each interrupt.
		 * If we do not, the hardware logic for generating a new
		 * interrupt can get stuck on some devices.
		 */
		if (!(tstamp_ready & BIT_ULL(phy_idx))) {
			if (drop_ts)
				goto skip_ts_read;

			continue;
		}

		ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);

		err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
		if (err && !drop_ts)
			continue;

		ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);

		/* For PHYs which don't implement a proper timestamp ready
		 * bitmap, verify that the timestamp value is different
		 * from the last cached timestamp. If it is not, skip this for
		 * now assuming it hasn't yet been captured by hardware.
		 */
		if (!drop_ts && tx->verify_cached &&
		    raw_tstamp == tx->tstamps[idx].cached_tstamp)
			continue;

		/* Discard any timestamp value without the valid bit set */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			drop_ts = true;

skip_ts_read:
		spin_lock(&tx->lock);
		if (tx->verify_cached && raw_tstamp)
			tx->tstamps[idx].cached_tstamp = raw_tstamp;
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		if (test_and_clear_bit(idx, tx->stale))
			drop_ts = true;
		spin_unlock(&tx->lock);

		/* It is unlikely but possible that the SKB will have been
		 * flushed at this point due to link change or teardown.
		 */
		if (!skb)
			continue;

		if (drop_ts) {
			dev_kfree_skb_any(skb);
			continue;
		}

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		if (tstamp) {
			shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
			ice_trace(tx_tstamp_complete, skb, idx);
		}

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
 * @tx: Tx tracking structure to process
 *
 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
 * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
 */
static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
{
	bool more_timestamps;

	if (!tx->init)
		return ICE_TX_TSTAMP_WORK_DONE;

	/* Process the Tx timestamp tracker */
	ice_ptp_process_tx_tstamp(tx);

	/* Check if there are outstanding Tx timestamps */
	spin_lock(&tx->lock);
	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
	spin_unlock(&tx->lock);

	if (more_timestamps)
		return ICE_TX_TSTAMP_WORK_PENDING;

	return ICE_TX_TSTAMP_WORK_DONE;
}

/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call directly,
 * use the ice_ptp_init_tx_* instead.
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	unsigned long *in_use, *stale;
	struct ice_tx_tstamp *tstamps;

	tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
	in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	stale = bitmap_zalloc(tx->len, GFP_KERNEL);

	/* On any allocation failure, release whatever did succeed; both
	 * kfree() and bitmap_free() tolerate NULL.
	 */
	if (!tstamps || !in_use || !stale) {
		kfree(tstamps);
		bitmap_free(in_use);
		bitmap_free(stale);

		return -ENOMEM;
	}

	tx->tstamps = tstamps;
	tx->in_use = in_use;
	tx->stale = stale;
	tx->init = 1;

	spin_lock_init(&tx->lock);

	return 0;
}

/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Called during teardown when a Tx tracker is being removed.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	struct ice_hw *hw = &pf->hw;
	u64 tstamp_ready;
	int err;
	u8 idx;

	err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
	if (err) {
		dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
			tx->block, err);

		/* If we fail to read the Tx timestamp ready bitmap just
		 * skip clearing the PHY timestamps.
		 */
		tstamp_ready = 0;
	}

	for_each_set_bit(idx, tx->in_use, tx->len) {
		u8 phy_idx = idx + tx->offset;
		struct sk_buff *skb;

		/* In case this timestamp is ready, we need to clear it. */
		if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
			ice_clear_phy_tstamp(hw, tx->block, phy_idx);

		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		spin_unlock(&tx->lock);

		/* Count the number of Tx timestamps flushed */
		pf->ptp.tx_hwtstamp_flushed++;

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}

/**
 * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
 * @tx: the tracker to mark
 *
 * Mark currently outstanding Tx timestamps as stale. This prevents sending
 * their timestamp value to the stack. This is required to prevent extending
 * the 40bit hardware timestamp incorrectly.
 *
 * This should be called when the PTP clock is modified such as after a set
 * time request.
 */
static void
ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
{
	spin_lock(&tx->lock);
	/* Every index currently in use becomes stale */
	bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
	spin_unlock(&tx->lock);
}

/**
 * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
 * @pf: Board private structure
 * @tx: Tx tracking structure to release
 *
 * Free memory associated with the Tx timestamp tracker.
 */
static void
ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	/* Mark the tracker down before waiting on the interrupt handler */
	spin_lock(&tx->lock);
	tx->init = 0;
	spin_unlock(&tx->lock);

	/* wait for potentially outstanding interrupt to complete */
	synchronize_irq(pf->oicr_irq.virq);

	ice_ptp_flush_tx_tracker(pf, tx);

	kfree(tx->tstamps);
	tx->tstamps = NULL;

	bitmap_free(tx->in_use);
	tx->in_use = NULL;

	bitmap_free(tx->stale);
	tx->stale = NULL;

	tx->len = 0;
}

/**
 * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 * @port: the port this structure tracks
 *
 * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
 * the timestamp block is shared for all ports in the same quad. To avoid
 * ports using the same timestamp index, logically break the block of
 * registers into chunks based on the port number.
 */
static int
ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
	/* Each quad shares one timestamp block; each port in the quad gets
	 * its own chunk of INDEX_PER_PORT_E822 indices within it.
	 */
	tx->block = port / ICE_PORTS_PER_QUAD;
	tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822;
	tx->len = INDEX_PER_PORT_E822;
	tx->verify_cached = 0;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 */
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->block = pf->hw.port_info->lport;
	tx->offset = 0;
	tx->len = INDEX_PER_PORT_E810;
	/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
	 * verify new timestamps against cached copy of the last read
	 * timestamp.
	 */
	tx->verify_cached = 1;

	return ice_ptp_alloc_tx_tracker(tx);
}

/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings.
 *
 * This function must be called periodically to ensure that the cached value
 * is never more than 2 seconds old.
 *
 * Note that the cached copy in the PF PTP structure is always updated, even
 * if we can't update the copy in the Rx rings.
 *
 * Return:
 * * 0 - OK, successfully updated
 * * -EAGAIN - PF was busy, need to reschedule the update
 */
static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	unsigned long update_before;
	u64 systime;
	int i;

	/* Warn and count late updates: if more than 2 seconds elapsed since
	 * the last refresh, the cached value used to extend timestamps may
	 * already be too old.
	 */
	update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
	if (pf->ptp.cached_phc_time &&
	    time_is_before_jiffies(update_before)) {
		unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;

		dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
			 jiffies_to_msecs(time_taken));
		pf->ptp.late_cached_phc_updates++;
	}

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure.
	 * WRITE_ONCE pairs with the READ_ONCE readers of these fields
	 * (e.g. the Rx timestamp path).
	 */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
	WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);

	/* The PF copy above is always updated; only the Rx ring copies are
	 * skipped when another flow holds ICE_CFG_BUSY.
	 */
	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
		return -EAGAIN;

	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		/* Only PF VSIs carry Rx rings that cache the PHC time */
		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
	clear_bit(ICE_CFG_BUSY, pf->state);

	return 0;
}

/**
 * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
 * @pf: Board specific private structure
 *
 * This function must be called when the cached PHC time is no longer valid,
 * such as after a time adjustment. It marks any currently outstanding Tx
 * timestamps as stale and updates the cached PHC time for both the PF and Rx
 * rings.
1057 * 1058 * If updating the PHC time cannot be done immediately, a warning message is 1059 * logged and the work item is scheduled immediately to minimize the window 1060 * with a wrong cached timestamp. 1061 */ 1062 static void ice_ptp_reset_cached_phctime(struct ice_pf *pf) 1063 { 1064 struct device *dev = ice_pf_to_dev(pf); 1065 int err; 1066 1067 /* Update the cached PHC time immediately if possible, otherwise 1068 * schedule the work item to execute soon. 1069 */ 1070 err = ice_ptp_update_cached_phctime(pf); 1071 if (err) { 1072 /* If another thread is updating the Rx rings, we won't 1073 * properly reset them here. This could lead to reporting of 1074 * invalid timestamps, but there isn't much we can do. 1075 */ 1076 dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n", 1077 __func__); 1078 1079 /* Queue the work item to update the Rx rings when possible */ 1080 kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 1081 msecs_to_jiffies(10)); 1082 } 1083 1084 /* Mark any outstanding timestamps as stale, since they might have 1085 * been captured in hardware before the time update. This could lead 1086 * to us extending them with the wrong cached value resulting in 1087 * incorrect timestamp values. 1088 */ 1089 ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx); 1090 } 1091 1092 /** 1093 * ice_ptp_read_time - Read the time from the device 1094 * @pf: Board private structure 1095 * @ts: timespec structure to hold the current time value 1096 * @sts: Optional parameter for holding a pair of system timestamps from 1097 * the system clock. Will be ignored if NULL is given. 1098 * 1099 * This function reads the source clock registers and stores them in a timespec. 1100 * However, since the registers are 64 bits of nanoseconds, we must convert the 1101 * result to a timespec before we can return. 
1102 */ 1103 static void 1104 ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts, 1105 struct ptp_system_timestamp *sts) 1106 { 1107 u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts); 1108 1109 *ts = ns_to_timespec64(time_ns); 1110 } 1111 1112 /** 1113 * ice_ptp_write_init - Set PHC time to provided value 1114 * @pf: Board private structure 1115 * @ts: timespec structure that holds the new time value 1116 * 1117 * Set the PHC time to the specified time provided in the timespec. 1118 */ 1119 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts) 1120 { 1121 u64 ns = timespec64_to_ns(ts); 1122 struct ice_hw *hw = &pf->hw; 1123 1124 return ice_ptp_init_time(hw, ns); 1125 } 1126 1127 /** 1128 * ice_ptp_write_adj - Adjust PHC clock time atomically 1129 * @pf: Board private structure 1130 * @adj: Adjustment in nanoseconds 1131 * 1132 * Perform an atomic adjustment of the PHC time by the specified number of 1133 * nanoseconds. 1134 */ 1135 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj) 1136 { 1137 struct ice_hw *hw = &pf->hw; 1138 1139 return ice_ptp_adj_clock(hw, adj); 1140 } 1141 1142 /** 1143 * ice_base_incval - Get base timer increment value 1144 * @pf: Board private structure 1145 * 1146 * Look up the base timer increment value for this device. The base increment 1147 * value is used to define the nominal clock tick rate. This increment value 1148 * is programmed during device initialization. It is also used as the basis 1149 * for calculating adjustments using scaled_ppm. 
1150 */ 1151 static u64 ice_base_incval(struct ice_pf *pf) 1152 { 1153 struct ice_hw *hw = &pf->hw; 1154 u64 incval; 1155 1156 if (ice_is_e810(hw)) 1157 incval = ICE_PTP_NOMINAL_INCVAL_E810; 1158 else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) 1159 incval = ice_e822_nominal_incval(ice_e822_time_ref(hw)); 1160 else 1161 incval = UNKNOWN_INCVAL_E822; 1162 1163 dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", 1164 incval); 1165 1166 return incval; 1167 } 1168 1169 /** 1170 * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state 1171 * @port: PTP port for which Tx FIFO is checked 1172 */ 1173 static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) 1174 { 1175 int quad = port->port_num / ICE_PORTS_PER_QUAD; 1176 int offs = port->port_num % ICE_PORTS_PER_QUAD; 1177 struct ice_pf *pf; 1178 struct ice_hw *hw; 1179 u32 val, phy_sts; 1180 int err; 1181 1182 pf = ptp_port_to_pf(port); 1183 hw = &pf->hw; 1184 1185 if (port->tx_fifo_busy_cnt == FIFO_OK) 1186 return 0; 1187 1188 /* need to read FIFO state */ 1189 if (offs == 0 || offs == 1) 1190 err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS, 1191 &val); 1192 else 1193 err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS, 1194 &val); 1195 1196 if (err) { 1197 dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n", 1198 port->port_num, err); 1199 return err; 1200 } 1201 1202 if (offs & 0x1) 1203 phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S; 1204 else 1205 phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S; 1206 1207 if (phy_sts & FIFO_EMPTY) { 1208 port->tx_fifo_busy_cnt = FIFO_OK; 1209 return 0; 1210 } 1211 1212 port->tx_fifo_busy_cnt++; 1213 1214 dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n", 1215 port->tx_fifo_busy_cnt, port->port_num); 1216 1217 if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) { 1218 dev_dbg(ice_pf_to_dev(pf), 1219 "Port %d Tx FIFO still not empty; resetting quad %d\n", 1220 port->port_num, 
quad); 1221 ice_ptp_reset_ts_memory_quad_e822(hw, quad); 1222 port->tx_fifo_busy_cnt = FIFO_OK; 1223 return 0; 1224 } 1225 1226 return -EAGAIN; 1227 } 1228 1229 /** 1230 * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets 1231 * @work: Pointer to the kthread_work structure for this task 1232 * 1233 * Check whether hardware has completed measuring the Tx and Rx offset values 1234 * used to configure and enable vernier timestamp calibration. 1235 * 1236 * Once the offset in either direction is measured, configure the associated 1237 * registers with the calibrated offset values and enable timestamping. The Tx 1238 * and Rx directions are configured independently as soon as their associated 1239 * offsets are known. 1240 * 1241 * This function reschedules itself until both Tx and Rx calibration have 1242 * completed. 1243 */ 1244 static void ice_ptp_wait_for_offsets(struct kthread_work *work) 1245 { 1246 struct ice_ptp_port *port; 1247 struct ice_pf *pf; 1248 struct ice_hw *hw; 1249 int tx_err; 1250 int rx_err; 1251 1252 port = container_of(work, struct ice_ptp_port, ov_work.work); 1253 pf = ptp_port_to_pf(port); 1254 hw = &pf->hw; 1255 1256 if (ice_is_reset_in_progress(pf->state)) { 1257 /* wait for device driver to complete reset */ 1258 kthread_queue_delayed_work(pf->ptp.kworker, 1259 &port->ov_work, 1260 msecs_to_jiffies(100)); 1261 return; 1262 } 1263 1264 tx_err = ice_ptp_check_tx_fifo(port); 1265 if (!tx_err) 1266 tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num); 1267 rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num); 1268 if (tx_err || rx_err) { 1269 /* Tx and/or Rx offset not yet configured, try again later */ 1270 kthread_queue_delayed_work(pf->ptp.kworker, 1271 &port->ov_work, 1272 msecs_to_jiffies(100)); 1273 return; 1274 } 1275 } 1276 1277 /** 1278 * ice_ptp_port_phy_stop - Stop timestamping for a PHY port 1279 * @ptp_port: PTP port to stop 1280 */ 1281 static int 1282 ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) 1283 { 
1284 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1285 u8 port = ptp_port->port_num; 1286 struct ice_hw *hw = &pf->hw; 1287 int err; 1288 1289 if (ice_is_e810(hw)) 1290 return 0; 1291 1292 mutex_lock(&ptp_port->ps_lock); 1293 1294 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1295 1296 err = ice_stop_phy_timer_e822(hw, port, true); 1297 if (err) 1298 dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", 1299 port, err); 1300 1301 mutex_unlock(&ptp_port->ps_lock); 1302 1303 return err; 1304 } 1305 1306 /** 1307 * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping 1308 * @ptp_port: PTP port for which the PHY start is set 1309 * 1310 * Start the PHY timestamping block, and initiate Vernier timestamping 1311 * calibration. If timestamping cannot be calibrated (such as if link is down) 1312 * then disable the timestamping block instead. 1313 */ 1314 static int 1315 ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) 1316 { 1317 struct ice_pf *pf = ptp_port_to_pf(ptp_port); 1318 u8 port = ptp_port->port_num; 1319 struct ice_hw *hw = &pf->hw; 1320 int err; 1321 1322 if (ice_is_e810(hw)) 1323 return 0; 1324 1325 if (!ptp_port->link_up) 1326 return ice_ptp_port_phy_stop(ptp_port); 1327 1328 mutex_lock(&ptp_port->ps_lock); 1329 1330 kthread_cancel_delayed_work_sync(&ptp_port->ov_work); 1331 1332 /* temporarily disable Tx timestamps while calibrating PHY offset */ 1333 spin_lock(&ptp_port->tx.lock); 1334 ptp_port->tx.calibrating = true; 1335 spin_unlock(&ptp_port->tx.lock); 1336 ptp_port->tx_fifo_busy_cnt = 0; 1337 1338 /* Start the PHY timer in Vernier mode */ 1339 err = ice_start_phy_timer_e822(hw, port); 1340 if (err) 1341 goto out_unlock; 1342 1343 /* Enable Tx timestamps right away */ 1344 spin_lock(&ptp_port->tx.lock); 1345 ptp_port->tx.calibrating = false; 1346 spin_unlock(&ptp_port->tx.lock); 1347 1348 kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); 1349 1350 out_unlock: 1351 if (err) 1352 
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n", 1353 port, err); 1354 1355 mutex_unlock(&ptp_port->ps_lock); 1356 1357 return err; 1358 } 1359 1360 /** 1361 * ice_ptp_link_change - Reconfigure PTP after link status change 1362 * @pf: Board private structure 1363 * @port: Port for which the PHY start is set 1364 * @linkup: Link is up or down 1365 */ 1366 void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) 1367 { 1368 struct ice_ptp_port *ptp_port; 1369 1370 if (!test_bit(ICE_FLAG_PTP, pf->flags)) 1371 return; 1372 1373 if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS)) 1374 return; 1375 1376 ptp_port = &pf->ptp.port; 1377 if (WARN_ON_ONCE(ptp_port->port_num != port)) 1378 return; 1379 1380 /* Update cached link status for this port immediately */ 1381 ptp_port->link_up = linkup; 1382 1383 /* E810 devices do not need to reconfigure the PHY */ 1384 if (ice_is_e810(&pf->hw)) 1385 return; 1386 1387 ice_ptp_port_phy_restart(ptp_port); 1388 } 1389 1390 /** 1391 * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt 1392 * @pf: PF private structure 1393 * @ena: bool value to enable or disable interrupt 1394 * @threshold: Minimum number of packets at which intr is triggered 1395 * 1396 * Utility function to enable or disable Tx timestamp interrupt and threshold 1397 */ 1398 static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) 1399 { 1400 struct ice_hw *hw = &pf->hw; 1401 int err = 0; 1402 int quad; 1403 u32 val; 1404 1405 ice_ptp_reset_ts_memory(hw); 1406 1407 for (quad = 0; quad < ICE_MAX_QUAD; quad++) { 1408 err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, 1409 &val); 1410 if (err) 1411 break; 1412 1413 if (ena) { 1414 val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; 1415 val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; 1416 val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) & 1417 Q_REG_TX_MEM_GBL_CFG_INTR_THR_M); 1418 } else { 1419 val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; 1420 } 1421 1422 err = 
ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, 1423 val); 1424 if (err) 1425 break; 1426 } 1427 1428 if (err) 1429 dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n", 1430 err); 1431 return err; 1432 } 1433 1434 /** 1435 * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block 1436 * @pf: Board private structure 1437 */ 1438 static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf) 1439 { 1440 ice_ptp_port_phy_restart(&pf->ptp.port); 1441 } 1442 1443 /** 1444 * ice_ptp_adjfine - Adjust clock increment rate 1445 * @info: the driver's PTP info structure 1446 * @scaled_ppm: Parts per million with 16-bit fractional field 1447 * 1448 * Adjust the frequency of the clock by the indicated scaled ppm from the 1449 * base frequency. 1450 */ 1451 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) 1452 { 1453 struct ice_pf *pf = ptp_info_to_pf(info); 1454 struct ice_hw *hw = &pf->hw; 1455 u64 incval; 1456 int err; 1457 1458 incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm); 1459 err = ice_ptp_write_incval_locked(hw, incval); 1460 if (err) { 1461 dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n", 1462 err); 1463 return -EIO; 1464 } 1465 1466 return 0; 1467 } 1468 1469 /** 1470 * ice_ptp_extts_event - Process PTP external clock event 1471 * @pf: Board private structure 1472 */ 1473 void ice_ptp_extts_event(struct ice_pf *pf) 1474 { 1475 struct ptp_clock_event event; 1476 struct ice_hw *hw = &pf->hw; 1477 u8 chan, tmr_idx; 1478 u32 hi, lo; 1479 1480 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1481 /* Event time is captured by one of the two matched registers 1482 * GLTSYN_EVNT_L: 32 LSB of sampled time event 1483 * GLTSYN_EVNT_H: 32 MSB of sampled time event 1484 * Event is defined in GLTSYN_EVNT_0 register 1485 */ 1486 for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) { 1487 /* Check if channel is enabled */ 1488 if (pf->ptp.ext_ts_irq & (1 << chan)) { 1489 lo = rd32(hw, 
GLTSYN_EVNT_L(chan, tmr_idx)); 1490 hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); 1491 event.timestamp = (((u64)hi) << 32) | lo; 1492 event.type = PTP_CLOCK_EXTTS; 1493 event.index = chan; 1494 1495 /* Fire event */ 1496 ptp_clock_event(pf->ptp.clock, &event); 1497 pf->ptp.ext_ts_irq &= ~(1 << chan); 1498 } 1499 } 1500 } 1501 1502 /** 1503 * ice_ptp_cfg_extts - Configure EXTTS pin and channel 1504 * @pf: Board private structure 1505 * @ena: true to enable; false to disable 1506 * @chan: GPIO channel (0-3) 1507 * @gpio_pin: GPIO pin 1508 * @extts_flags: request flags from the ptp_extts_request.flags 1509 */ 1510 static int 1511 ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, 1512 unsigned int extts_flags) 1513 { 1514 u32 func, aux_reg, gpio_reg, irq_reg; 1515 struct ice_hw *hw = &pf->hw; 1516 u8 tmr_idx; 1517 1518 if (chan > (unsigned int)pf->ptp.info.n_ext_ts) 1519 return -EINVAL; 1520 1521 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1522 1523 irq_reg = rd32(hw, PFINT_OICR_ENA); 1524 1525 if (ena) { 1526 /* Enable the interrupt */ 1527 irq_reg |= PFINT_OICR_TSYN_EVNT_M; 1528 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M; 1529 1530 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0) 1531 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1) 1532 1533 /* set event level to requested edge */ 1534 if (extts_flags & PTP_FALLING_EDGE) 1535 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE; 1536 if (extts_flags & PTP_RISING_EDGE) 1537 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE; 1538 1539 /* Write GPIO CTL reg. 
1540 * 0x1 is input sampled by EVENT register(channel) 1541 * + num_in_channels * tmr_idx 1542 */ 1543 func = 1 + chan + (tmr_idx * 3); 1544 gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & 1545 GLGEN_GPIO_CTL_PIN_FUNC_M); 1546 pf->ptp.ext_ts_chan |= (1 << chan); 1547 } else { 1548 /* clear the values we set to reset defaults */ 1549 aux_reg = 0; 1550 gpio_reg = 0; 1551 pf->ptp.ext_ts_chan &= ~(1 << chan); 1552 if (!pf->ptp.ext_ts_chan) 1553 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M; 1554 } 1555 1556 wr32(hw, PFINT_OICR_ENA, irq_reg); 1557 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg); 1558 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg); 1559 1560 return 0; 1561 } 1562 1563 /** 1564 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave 1565 * @pf: Board private structure 1566 * @chan: GPIO channel (0-3) 1567 * @config: desired periodic clk configuration. NULL will disable channel 1568 * @store: If set to true the values will be stored 1569 * 1570 * Configure the internal clock generator modules to generate the clock wave of 1571 * specified period. 1572 */ 1573 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, 1574 struct ice_perout_channel *config, bool store) 1575 { 1576 u64 current_time, period, start_time, phase; 1577 struct ice_hw *hw = &pf->hw; 1578 u32 func, val, gpio_pin; 1579 u8 tmr_idx; 1580 1581 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 1582 1583 /* 0. 
Reset mode & out_en in AUX_OUT */ 1584 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0); 1585 1586 /* If we're disabling the output, clear out CLKO and TGT and keep 1587 * output level low 1588 */ 1589 if (!config || !config->ena) { 1590 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0); 1591 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0); 1592 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0); 1593 1594 val = GLGEN_GPIO_CTL_PIN_DIR_M; 1595 gpio_pin = pf->ptp.perout_channels[chan].gpio_pin; 1596 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); 1597 1598 /* Store the value if requested */ 1599 if (store) 1600 memset(&pf->ptp.perout_channels[chan], 0, 1601 sizeof(struct ice_perout_channel)); 1602 1603 return 0; 1604 } 1605 period = config->period; 1606 start_time = config->start_time; 1607 div64_u64_rem(start_time, period, &phase); 1608 gpio_pin = config->gpio_pin; 1609 1610 /* 1. Write clkout with half of required period value */ 1611 if (period & 0x1) { 1612 dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); 1613 goto err; 1614 } 1615 1616 period >>= 1; 1617 1618 /* For proper operation, the GLTSYN_CLKO must be larger than clock tick 1619 */ 1620 #define MIN_PULSE 3 1621 if (period <= MIN_PULSE || period > U32_MAX) { 1622 dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33", 1623 MIN_PULSE * 2); 1624 goto err; 1625 } 1626 1627 wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period)); 1628 1629 /* Allow time for programming before start_time is hit */ 1630 current_time = ice_ptp_read_src_clk_reg(pf, NULL); 1631 1632 /* if start time is in the past start the timer at the nearest second 1633 * maintaining phase 1634 */ 1635 if (start_time < current_time) 1636 start_time = div64_u64(current_time + NSEC_PER_SEC - 1, 1637 NSEC_PER_SEC) * NSEC_PER_SEC + phase; 1638 1639 if (ice_is_e810(hw)) 1640 start_time -= E810_OUT_PROP_DELAY_NS; 1641 else 1642 start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw)); 1643 1644 /* 2. 
Write TARGET time */ 1645 wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time)); 1646 wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time)); 1647 1648 /* 3. Write AUX_OUT register */ 1649 val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M; 1650 wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val); 1651 1652 /* 4. write GPIO CTL reg */ 1653 func = 8 + chan + (tmr_idx * 4); 1654 val = GLGEN_GPIO_CTL_PIN_DIR_M | 1655 ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M); 1656 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); 1657 1658 /* Store the value if requested */ 1659 if (store) { 1660 memcpy(&pf->ptp.perout_channels[chan], config, 1661 sizeof(struct ice_perout_channel)); 1662 pf->ptp.perout_channels[chan].start_time = phase; 1663 } 1664 1665 return 0; 1666 err: 1667 dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n"); 1668 return -EFAULT; 1669 } 1670 1671 /** 1672 * ice_ptp_disable_all_clkout - Disable all currently configured outputs 1673 * @pf: pointer to the PF structure 1674 * 1675 * Disable all currently configured clock outputs. This is necessary before 1676 * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to 1677 * re-enable the clocks again. 1678 */ 1679 static void ice_ptp_disable_all_clkout(struct ice_pf *pf) 1680 { 1681 uint i; 1682 1683 for (i = 0; i < pf->ptp.info.n_per_out; i++) 1684 if (pf->ptp.perout_channels[i].ena) 1685 ice_ptp_cfg_clkout(pf, i, NULL, false); 1686 } 1687 1688 /** 1689 * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs 1690 * @pf: pointer to the PF structure 1691 * 1692 * Enable all currently configured clock outputs. Use this after 1693 * ice_ptp_disable_all_clkout to reconfigure the output signals according to 1694 * their configuration. 
1695 */ 1696 static void ice_ptp_enable_all_clkout(struct ice_pf *pf) 1697 { 1698 uint i; 1699 1700 for (i = 0; i < pf->ptp.info.n_per_out; i++) 1701 if (pf->ptp.perout_channels[i].ena) 1702 ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], 1703 false); 1704 } 1705 1706 /** 1707 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC 1708 * @info: the driver's PTP info structure 1709 * @rq: The requested feature to change 1710 * @on: Enable/disable flag 1711 */ 1712 static int 1713 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, 1714 struct ptp_clock_request *rq, int on) 1715 { 1716 struct ice_pf *pf = ptp_info_to_pf(info); 1717 struct ice_perout_channel clk_cfg = {0}; 1718 bool sma_pres = false; 1719 unsigned int chan; 1720 u32 gpio_pin; 1721 int err; 1722 1723 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) 1724 sma_pres = true; 1725 1726 switch (rq->type) { 1727 case PTP_CLK_REQ_PEROUT: 1728 chan = rq->perout.index; 1729 if (sma_pres) { 1730 if (chan == ice_pin_desc_e810t[SMA1].chan) 1731 clk_cfg.gpio_pin = GPIO_20; 1732 else if (chan == ice_pin_desc_e810t[SMA2].chan) 1733 clk_cfg.gpio_pin = GPIO_22; 1734 else 1735 return -1; 1736 } else if (ice_is_e810t(&pf->hw)) { 1737 if (chan == 0) 1738 clk_cfg.gpio_pin = GPIO_20; 1739 else 1740 clk_cfg.gpio_pin = GPIO_22; 1741 } else if (chan == PPS_CLK_GEN_CHAN) { 1742 clk_cfg.gpio_pin = PPS_PIN_INDEX; 1743 } else { 1744 clk_cfg.gpio_pin = chan; 1745 } 1746 1747 clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + 1748 rq->perout.period.nsec); 1749 clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) + 1750 rq->perout.start.nsec); 1751 clk_cfg.ena = !!on; 1752 1753 err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true); 1754 break; 1755 case PTP_CLK_REQ_EXTTS: 1756 chan = rq->extts.index; 1757 if (sma_pres) { 1758 if (chan < ice_pin_desc_e810t[SMA2].chan) 1759 gpio_pin = GPIO_21; 1760 else 1761 gpio_pin = GPIO_23; 1762 } else if (ice_is_e810t(&pf->hw)) { 1763 if (chan == 0) 1764 gpio_pin 
= GPIO_21; 1765 else 1766 gpio_pin = GPIO_23; 1767 } else { 1768 gpio_pin = chan; 1769 } 1770 1771 err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, 1772 rq->extts.flags); 1773 break; 1774 default: 1775 return -EOPNOTSUPP; 1776 } 1777 1778 return err; 1779 } 1780 1781 /** 1782 * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC 1783 * @info: the driver's PTP info structure 1784 * @rq: The requested feature to change 1785 * @on: Enable/disable flag 1786 */ 1787 static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info, 1788 struct ptp_clock_request *rq, int on) 1789 { 1790 struct ice_pf *pf = ptp_info_to_pf(info); 1791 struct ice_perout_channel clk_cfg = {0}; 1792 int err; 1793 1794 switch (rq->type) { 1795 case PTP_CLK_REQ_PPS: 1796 clk_cfg.gpio_pin = PPS_PIN_INDEX; 1797 clk_cfg.period = NSEC_PER_SEC; 1798 clk_cfg.ena = !!on; 1799 1800 err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true); 1801 break; 1802 case PTP_CLK_REQ_EXTTS: 1803 err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index, 1804 TIME_SYNC_PIN_INDEX, rq->extts.flags); 1805 break; 1806 default: 1807 return -EOPNOTSUPP; 1808 } 1809 1810 return err; 1811 } 1812 1813 /** 1814 * ice_ptp_gettimex64 - Get the time of the clock 1815 * @info: the driver's PTP info structure 1816 * @ts: timespec64 structure to hold the current time value 1817 * @sts: Optional parameter for holding a pair of system timestamps from 1818 * the system clock. Will be ignored if NULL is given. 1819 * 1820 * Read the device clock and return the correct value on ns, after converting it 1821 * into a timespec struct. 
 */
static int
ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
		   struct ptp_system_timestamp *sts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;

	/* The PTP hardware semaphore serializes access to the PHC registers */
	if (!ice_ptp_lock(hw)) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
		return -EBUSY;
	}

	ice_ptp_read_time(pf, ts, sts);
	ice_ptp_unlock(hw);

	return 0;
}

/**
 * ice_ptp_settime64 - Set the time of the clock
 * @info: the driver's PTP info structure
 * @ts: timespec64 structure that holds the new time value
 *
 * Set the device clock to the user input value. The conversion from timespec
 * to ns happens in the write function.
 */
static int
ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct timespec64 ts64 = *ts;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* For Vernier mode, we need to recalibrate after new settime
	 * Start with disabling timestamp block
	 */
	if (pf->ptp.port.link_up)
		ice_ptp_port_phy_stop(&pf->ptp.port);

	if (!ice_ptp_lock(hw)) {
		/* Periodic outputs have not been disabled on this path; skip
		 * straight to the error report.
		 */
		err = -EBUSY;
		goto exit;
	}

	/* Disable periodic outputs so they restart relative to the new time */
	ice_ptp_disable_all_clkout(pf);

	err = ice_ptp_write_init(pf, &ts64);
	ice_ptp_unlock(hw);

	/* The cached PHC copies now reflect the old time base; refresh them */
	if (!err)
		ice_ptp_reset_cached_phctime(pf);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);

	/* Recalibrate and re-enable timestamp block */
	if (pf->ptp.port.link_up)
		ice_ptp_port_phy_restart(&pf->ptp.port);
exit:
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
		return err;
	}

	return 0;
}

/**
 * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
 * @info: the driver's PTP info structure
 * @delta: Offset in
nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
	struct timespec64 now, then;
	int ret;

	then = ns_to_timespec64(delta);
	ret = ice_ptp_gettimex64(info, &now, NULL);
	if (ret)
		return ret;
	now = timespec64_add(now, then);

	/* Not atomic: the clock keeps running between the get and the set,
	 * so a small extra offset may be introduced.
	 */
	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
}

/**
 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* Hardware only supports atomic adjustments using signed 32-bit
	 * integers. For any adjustment outside this range, perform
	 * a non-atomic get->adjust->set flow.
	 */
	if (delta > S32_MAX || delta < S32_MIN) {
		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
		return ice_ptp_adjtime_nonatomic(info, delta);
	}

	if (!ice_ptp_lock(hw)) {
		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
		return -EBUSY;
	}

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	err = ice_ptp_write_adj(pf, delta);

	/* Reenable periodic outputs */
	ice_ptp_enable_all_clkout(pf);

	ice_ptp_unlock(hw);

	if (err) {
		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
		return err;
	}

	/* Outstanding Tx timestamps and Rx ring caches now reference the
	 * old time base; refresh them.
	 */
	ice_ptp_reset_cached_phctime(pf);

	return 0;
}

#ifdef CONFIG_ICE_HWTS
/**
 * ice_ptp_get_syncdevicetime - Get the cross time stamp info
 * @device: Current device time
 * @system: System counter value read synchronously with device time
 * @ctx: Context provided by timekeeping code
 *
 * Read device and system (ART) clock simultaneously and return the corrected
 * clock values in ns.
 */
static int
ice_ptp_get_syncdevicetime(ktime_t *device,
			   struct system_counterval_t *system,
			   void *ctx)
{
	struct ice_pf *pf = (struct ice_pf *)ctx;
	struct ice_hw *hw = &pf->hw;
	u32 hh_lock, hh_art_ctl;
	int i;

	/* Get the HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	if (hh_lock & PFHH_SEM_BUSY_M) {
		/* NOTE(review): -EFAULT for a busy semaphore is unusual;
		 * -EBUSY may be more appropriate - confirm with callers.
		 */
		dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
		return -EFAULT;
	}

	/* Start the ART and device clock sync sequence */
	hh_art_ctl = rd32(hw, GLHH_ART_CTL);
	hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
	wr32(hw, GLHH_ART_CTL, hh_art_ctl);

#define MAX_HH_LOCK_TRIES 100

	for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
		/* Wait for sync to complete; ACTIVE clears when it is done */
		hh_art_ctl = rd32(hw, GLHH_ART_CTL);
		if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
			udelay(1);
			continue;
		} else {
			u32 hh_ts_lo, hh_ts_hi, tmr_idx;
			u64 hh_ts;

			tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
			/* Read ART time */
			hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
			hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*system = convert_art_ns_to_tsc(hh_ts);
			/* Read Device source clock time */
			hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
			hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
			hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
			*device = ns_to_ktime(hh_ts);
			break;
		}
	}
	/* Release HW lock */
	hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
	hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
	wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);

	/* Loop ran to completion without the sync finishing */
	if (i == MAX_HH_LOCK_TRIES)
		return -ETIMEDOUT;

	return 0;
}

/**
 * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
 * @info: the driver's PTP info structure
 * @cts: The memory to fill the cross
 * timestamp info
 *
 * Capture a cross timestamp between the ART and the device PTP hardware
 * clock. Fill the cross timestamp information and report it back to the
 * caller.
 *
 * This is only valid for E822 devices which have support for generating the
 * cross timestamp via PCIe PTM.
 *
 * In order to correctly correlate the ART timestamp back to the TSC time, the
 * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
 */
static int
ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
			    struct system_device_crosststamp *cts)
{
	struct ice_pf *pf = ptp_info_to_pf(info);

	/* Delegate the actual register sequence to the syncdevicetime
	 * callback, invoked by the core crosststamp helper.
	 */
	return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
					     pf, NULL, cts);
}
#endif /* CONFIG_ICE_HWTS */

/**
 * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Copy the timestamping config to user buffer
 *
 * Return: 0 on success, -EIO if PTP is not enabled on this function,
 * -EFAULT if the copy to user space fails.
 */
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config *config;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EIO;

	config = &pf->ptp.tstamp_config;

	/* copy_to_user returns the number of bytes NOT copied; any non-zero
	 * result is mapped to -EFAULT.
	 */
	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
		-EFAULT : 0;
}

/**
 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
 * @pf: Board private structure
 * @config: hwtstamp settings requested or saved
 *
 * Return: 0 on success, -ERANGE if the requested Tx type or Rx filter is not
 * supported.
 */
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		ice_set_tx_tstamp(pf, false);
		break;
	case HWTSTAMP_TX_ON:
		ice_set_tx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	/* Rx timestamping is all-or-nothing in hardware: any PTP filter
	 * request enables timestamps for all received packets.
	 */
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		ice_set_rx_tstamp(pf, false);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		ice_set_rx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	return 0;
}

/**
 * ice_ptp_set_ts_config - ioctl interface to control the timestamping
 * @pf: Board private structure
 * @ifr: ioctl data
 *
 * Get the user config and store it
 *
 * Return: 0 on success, -EAGAIN if PTP is not (yet) enabled, -EFAULT on a
 * failed user copy, or the error from ice_ptp_set_timestamp_mode().
 * NOTE(review): the "get" path returns -EIO when PTP is disabled while this
 * path returns -EAGAIN — presumably intentional (config may succeed later),
 * but worth confirming.
 */
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return -EAGAIN;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = ice_ptp_set_timestamp_mode(pf, &config);
	if (err)
		return err;

	/* Return the actual configuration set */
	config = pf->ptp.tstamp_config;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/**
 * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
 * @rx_ring: Ring to get the VSI info
 * @rx_desc: Receive descriptor
 * @skb: Particular skb to send timestamp with
 *
 * The driver receives a notification in the receive descriptor with timestamp.
 * The timestamp is in ns, so we must convert the result first.
 */
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
		    union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *hwtstamps;
	u64 ts_ns, cached_time;
	u32 ts_high;

	/* Skip descriptors whose timestamp field is not marked valid */
	if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
		return;

	cached_time = READ_ONCE(rx_ring->cached_phctime);

	/* Do not report a timestamp if we don't have a cached PHC time:
	 * without it the 32-bit value cannot be extended to 64 bits.
	 */
	if (!cached_time)
		return;

	/* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
	 * PHC value, rather than accessing the PF. This also allows us to
	 * simply pass the upper 32bits of nanoseconds directly. Calling
	 * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
	 * bits itself.
	 */
	ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
	ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);

	hwtstamps = skb_hwtstamps(skb);
	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
}

/**
 * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
 * @pf: pointer to the PF structure
 * @info: PTP clock info structure
 *
 * Disable the OS access to the SMA pins. Called to clear out the OS
 * indications of pin support when we fail to setup the E810-T SMA control
 * register.
2197 */ 2198 static void 2199 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) 2200 { 2201 struct device *dev = ice_pf_to_dev(pf); 2202 2203 dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); 2204 2205 info->enable = NULL; 2206 info->verify = NULL; 2207 info->n_pins = 0; 2208 info->n_ext_ts = 0; 2209 info->n_per_out = 0; 2210 } 2211 2212 /** 2213 * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins 2214 * @pf: pointer to the PF structure 2215 * @info: PTP clock info structure 2216 * 2217 * Finish setting up the SMA pins by allocating pin_config, and setting it up 2218 * according to the current status of the SMA. On failure, disable all of the 2219 * extended SMA pin support. 2220 */ 2221 static void 2222 ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) 2223 { 2224 struct device *dev = ice_pf_to_dev(pf); 2225 int err; 2226 2227 /* Allocate memory for kernel pins interface */ 2228 info->pin_config = devm_kcalloc(dev, info->n_pins, 2229 sizeof(*info->pin_config), GFP_KERNEL); 2230 if (!info->pin_config) { 2231 ice_ptp_disable_sma_pins_e810t(pf, info); 2232 return; 2233 } 2234 2235 /* Read current SMA status */ 2236 err = ice_get_sma_config_e810t(&pf->hw, info->pin_config); 2237 if (err) 2238 ice_ptp_disable_sma_pins_e810t(pf, info); 2239 } 2240 2241 /** 2242 * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs 2243 * @pf: pointer to the PF instance 2244 * @info: PTP clock capabilities 2245 */ 2246 static void 2247 ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info) 2248 { 2249 info->n_per_out = N_PER_OUT_E810; 2250 2251 if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) 2252 info->n_ext_ts = N_EXT_TS_E810; 2253 2254 if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { 2255 info->n_ext_ts = N_EXT_TS_E810; 2256 info->n_pins = NUM_PTP_PINS_E810T; 2257 info->verify = ice_verify_pin_e810t; 2258 2259 /* Complete setup of the SMA pins */ 2260 ice_ptp_setup_sma_pins_e810t(pf, info); 2261 } 
2262 } 2263 2264 /** 2265 * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs 2266 * @pf: pointer to the PF instance 2267 * @info: PTP clock capabilities 2268 */ 2269 static void 2270 ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info) 2271 { 2272 info->pps = 1; 2273 info->n_per_out = 0; 2274 info->n_ext_ts = 1; 2275 } 2276 2277 /** 2278 * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support 2279 * @pf: Board private structure 2280 * @info: PTP info to fill 2281 * 2282 * Assign functions to the PTP capabiltiies structure for E822 devices. 2283 * Functions which operate across all device families should be set directly 2284 * in ice_ptp_set_caps. Only add functions here which are distinct for E822 2285 * devices. 2286 */ 2287 static void 2288 ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info) 2289 { 2290 #ifdef CONFIG_ICE_HWTS 2291 if (boot_cpu_has(X86_FEATURE_ART) && 2292 boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) 2293 info->getcrosststamp = ice_ptp_getcrosststamp_e822; 2294 #endif /* CONFIG_ICE_HWTS */ 2295 } 2296 2297 /** 2298 * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support 2299 * @pf: Board private structure 2300 * @info: PTP info to fill 2301 * 2302 * Assign functions to the PTP capabiltiies structure for E810 devices. 2303 * Functions which operate across all device families should be set directly 2304 * in ice_ptp_set_caps. Only add functions here which are distinct for e810 2305 * devices. 2306 */ 2307 static void 2308 ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info) 2309 { 2310 info->enable = ice_ptp_gpio_enable_e810; 2311 ice_ptp_setup_pins_e810(pf, info); 2312 } 2313 2314 /** 2315 * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support 2316 * @pf: Board private structure 2317 * @info: PTP info to fill 2318 * 2319 * Assign functions to the PTP capabiltiies structure for E823 devices. 
2320 * Functions which operate across all device families should be set directly 2321 * in ice_ptp_set_caps. Only add functions here which are distinct for e823 2322 * devices. 2323 */ 2324 static void 2325 ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info) 2326 { 2327 info->enable = ice_ptp_gpio_enable_e823; 2328 ice_ptp_setup_pins_e823(pf, info); 2329 } 2330 2331 /** 2332 * ice_ptp_set_caps - Set PTP capabilities 2333 * @pf: Board private structure 2334 */ 2335 static void ice_ptp_set_caps(struct ice_pf *pf) 2336 { 2337 struct ptp_clock_info *info = &pf->ptp.info; 2338 struct device *dev = ice_pf_to_dev(pf); 2339 2340 snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk", 2341 dev_driver_string(dev), dev_name(dev)); 2342 info->owner = THIS_MODULE; 2343 info->max_adj = 100000000; 2344 info->adjtime = ice_ptp_adjtime; 2345 info->adjfine = ice_ptp_adjfine; 2346 info->gettimex64 = ice_ptp_gettimex64; 2347 info->settime64 = ice_ptp_settime64; 2348 2349 if (ice_is_e810(&pf->hw)) 2350 ice_ptp_set_funcs_e810(pf, info); 2351 else if (ice_is_e823(&pf->hw)) 2352 ice_ptp_set_funcs_e823(pf, info); 2353 else 2354 ice_ptp_set_funcs_e822(pf, info); 2355 } 2356 2357 /** 2358 * ice_ptp_create_clock - Create PTP clock device for userspace 2359 * @pf: Board private structure 2360 * 2361 * This function creates a new PTP clock device. It only creates one if we 2362 * don't already have one. Will return error if it can't create one, but success 2363 * if we already have a device. Should be used by ice_ptp_init to create clock 2364 * initially, and prevent global resets from creating new clock devices. 
 */
static long ice_ptp_create_clock(struct ice_pf *pf)
{
	struct ptp_clock_info *info;
	struct ptp_clock *clock;
	struct device *dev;

	/* No need to create a clock device if we already have one */
	if (pf->ptp.clock)
		return 0;

	ice_ptp_set_caps(pf);

	info = &pf->ptp.info;
	dev = ice_pf_to_dev(pf);

	/* Attempt to register the clock before enabling the hardware. */
	clock = ptp_clock_register(info, dev);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	pf->ptp.clock = clock;

	return 0;
}

/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 *
 * Return: the PHY timestamp register index to use, or -1 if the tracker is
 * down or all indexes are currently in use. Takes a reference on @skb which
 * is released when the timestamp completes or the request is discarded.
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	u8 idx;

	spin_lock(&tx->lock);

	/* Check that this tracker is accepting new timestamp requests */
	if (!ice_ptp_is_tx_tracker_up(tx)) {
		spin_unlock(&tx->lock);
		return -1;
	}

	/* Find and set the first available index */
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests.
		 */
		set_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		ice_trace(tx_tstamp_request, skb, idx);
	}

	spin_unlock(&tx->lock);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->offset;
}

/**
 * ice_ptp_process_ts - Process the PTP Tx timestamps
 * @pf: Board private structure
 *
 * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
 * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
 */
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
{
	return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
}

/* Periodic kthread work: refresh the cached PHC time so Rx timestamp
 * extension stays accurate; reschedules itself while PTP remains enabled.
 */
static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	int err;

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	err = ice_ptp_update_cached_phctime(pf);

	/* Run twice a second or reschedule if phc update failed */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(err ? 10 : 500));
}

/**
 * ice_ptp_reset - Initialize PTP hardware clock support after reset
 * @pf: Board private structure
 */
void ice_ptp_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;
	u64 time_diff;

	/* A PF reset does not disturb the shared clock owner state, so only
	 * the Tx structures need re-initialization.
	 */
	if (test_bit(ICE_PFR_REQ, pf->state))
		goto pfr;

	/* Only the source timer owner re-programs the PHC itself */
	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		goto reset_ts;

	err = ice_ptp_init_phc(hw);
	if (err)
		goto err;

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Write the initial Time value to PHY and LAN using the cached PHC
	 * time before the reset and time difference between stopping and
	 * starting the clock.
	 */
	if (ptp->cached_phc_time) {
		time_diff = ktime_get_real_ns() - ptp->reset_time;
		ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
	} else {
		ts = ktime_to_timespec64(ktime_get_real());
	}
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err;
	}

reset_ts:
	/* Restart the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

pfr:
	/* Init Tx structures */
	if (ice_is_e810(&pf->hw)) {
		err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
	} else {
		kthread_init_delayed_work(&ptp->port.ov_work,
					  ice_ptp_wait_for_offsets);
		err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
					   ptp->port.port_num);
	}
	if (err)
		goto err;

	set_bit(ICE_FLAG_PTP, pf->flags);

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
	return;

err:
	dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}

/**
 * ice_ptp_prepare_for_reset - Prepare PTP for reset
 * @pf: Board private structure
 */
void ice_ptp_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	u8 src_tmr;

	clear_bit(ICE_FLAG_PTP, pf->flags);

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	kthread_cancel_delayed_work_sync(&ptp->work);

	/* For a PF-only reset the clock state survives; nothing more to do */
	if (test_bit(ICE_PFR_REQ, pf->state))
		return;

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	src_tmr = ice_get_ptp_src_clock_index(&pf->hw);

	/* Disable source clock */
	wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);

	/* Acquire PHC and system timer to restore after reset */
	ptp->reset_time = ktime_get_real_ns();
}

/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device hardware
 * clock. Save the clock index for other functions connected to the same
 * hardware resource.
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	int err, itr = 1;

	err = ice_ptp_init_phc(hw);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
			err);
		return err;
	}

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ice_base_incval(pf));
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	if (!ice_is_e810(hw)) {
		/* Enable quad interrupts */
		err = ice_ptp_tx_ena_intr(pf, true, itr);
		if (err)
			goto err_exit;
	}

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	/* Store the PTP clock index for other PFs */
	ice_set_ptp_clock_index(pf);

	return 0;

err_clk:
	pf->ptp.clock = NULL;
err_exit:
	return err;
}

/**
 * ice_ptp_init_work - Initialize PTP work threads
 * @pf: Board
 * private structure
 * @ptp: PF PTP structure
 *
 * Return: 0 on success, or the error from kthread_create_worker() if the
 * dedicated PTP kworker could not be created.
 */
static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
{
	struct kthread_worker *kworker;

	/* Initialize work functions */
	kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);

	/* Allocate a kworker for handling work required for the ports
	 * connected to the PTP hardware clock.
	 */
	kworker = kthread_create_worker(0, "ice-ptp-%s",
					dev_name(ice_pf_to_dev(pf)));
	if (IS_ERR(kworker))
		return PTR_ERR(kworker);

	ptp->kworker = kworker;

	/* Start periodic work going */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);

	return 0;
}

/**
 * ice_ptp_init_port - Initialize PTP port structure
 * @pf: Board private structure
 * @ptp_port: PTP port structure
 *
 * Return: 0 on success, or the error from the device-specific Tx timestamp
 * tracker initialization.
 */
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
	mutex_init(&ptp_port->ps_lock);

	/* E810 does not need the offset-validation work used by E822 */
	if (ice_is_e810(&pf->hw))
		return ice_ptp_init_tx_e810(pf, &ptp_port->tx);

	kthread_init_delayed_work(&ptp_port->ov_work,
				  ice_ptp_wait_for_offsets);
	return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
}

/**
 * ice_ptp_init - Initialize PTP hardware clock support
 * @pf: Board private structure
 *
 * Set up the device for interacting with the PTP hardware clock for all
 * functions, both the function that owns the clock hardware, and the
 * functions connected to the clock hardware.
 *
 * The clock owner will allocate and register a ptp_clock with the
 * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
 * items used for asynchronous work such as Tx timestamps and periodic work.
 */
void ice_ptp_init(struct ice_pf *pf)
{
	struct ice_ptp *ptp = &pf->ptp;
	struct ice_hw *hw = &pf->hw;
	int err;

	/* If this function owns the clock hardware, it must allocate and
	 * configure the PTP clock device to represent it.
	 */
	if (hw->func_caps.ts_func_info.src_tmr_owned) {
		err = ice_ptp_init_owner(pf);
		if (err)
			goto err;
	}

	ptp->port.port_num = hw->pf_id;
	err = ice_ptp_init_port(pf, &ptp->port);
	if (err)
		goto err;

	/* Start the PHY timestamping block */
	ice_ptp_reset_phy_timestamping(pf);

	set_bit(ICE_FLAG_PTP, pf->flags);
	err = ice_ptp_init_work(pf, ptp);
	if (err)
		goto err;

	dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
	return;

err:
	/* If we registered a PTP clock, release it */
	if (pf->ptp.clock) {
		ptp_clock_unregister(ptp->clock);
		pf->ptp.clock = NULL;
	}
	clear_bit(ICE_FLAG_PTP, pf->flags);
	dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}

/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock
 */
void ice_ptp_release(struct ice_pf *pf)
{
	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	/* Drop any outstanding Tx timestamp requests before stopping work */
	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	clear_bit(ICE_FLAG_PTP, pf->flags);

	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	ice_ptp_port_phy_stop(&pf->ptp.port);
	mutex_destroy(&pf->ptp.port.ps_lock);
	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	/* Only the clock owner has a registered clock device to tear down */
	if (!pf->ptp.clock)
		return;

	/* Disable periodic outputs */
	ice_ptp_disable_all_clkout(pf);

	ice_clear_ptp_clock_index(pf);
	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}