1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (C) 2021, Intel Corporation. */ 3 4 #include <linux/delay.h> 5 #include "ice_common.h" 6 #include "ice_ptp_hw.h" 7 #include "ice_ptp_consts.h" 8 #include "ice_cgu_regs.h" 9 10 /* Low level functions for interacting with and managing the device clock used 11 * for the Precision Time Protocol. 12 * 13 * The ice hardware represents the current time using three registers: 14 * 15 * GLTSYN_TIME_H GLTSYN_TIME_L GLTSYN_TIME_R 16 * +---------------+ +---------------+ +---------------+ 17 * | 32 bits | | 32 bits | | 32 bits | 18 * +---------------+ +---------------+ +---------------+ 19 * 20 * The registers are incremented every clock tick using a 40bit increment 21 * value defined over two registers: 22 * 23 * GLTSYN_INCVAL_H GLTSYN_INCVAL_L 24 * +---------------+ +---------------+ 25 * | 8 bit s | | 32 bits | 26 * +---------------+ +---------------+ 27 * 28 * The increment value is added to the GLSTYN_TIME_R and GLSTYN_TIME_L 29 * registers every clock source tick. Depending on the specific device 30 * configuration, the clock source frequency could be one of a number of 31 * values. 32 * 33 * For E810 devices, the increment frequency is 812.5 MHz 34 * 35 * For E822 devices the clock can be derived from different sources, and the 36 * increment has an effective frequency of one of the following: 37 * - 823.4375 MHz 38 * - 783.36 MHz 39 * - 796.875 MHz 40 * - 816 MHz 41 * - 830.078125 MHz 42 * - 783.36 MHz 43 * 44 * The hardware captures timestamps in the PHY for incoming packets, and for 45 * outgoing packets on request. To support this, the PHY maintains a timer 46 * that matches the lower 64 bits of the global source timer. 47 * 48 * In order to ensure that the PHY timers and the source timer are equivalent, 49 * shadow registers are used to prepare the desired initial values. 
A special 50 * sync command is issued to trigger copying from the shadow registers into 51 * the appropriate source and PHY registers simultaneously. 52 * 53 * The driver supports devices which have different PHYs with subtly different 54 * mechanisms to program and control the timers. We divide the devices into 55 * families named after the first major device, E810 and similar devices, and 56 * E822 and similar devices. 57 * 58 * - E822 based devices have additional support for fine grained Vernier 59 * calibration which requires significant setup 60 * - The layout of timestamp data in the PHY register blocks is different 61 * - The way timer synchronization commands are issued is different. 62 * 63 * To support this, very low level functions have an e810 or e822 suffix 64 * indicating what type of device they work on. Higher level abstractions for 65 * tasks that can be done on both devices do not have the suffix and will 66 * correctly look up the appropriate low level function when running. 67 * 68 * Functions which only make sense on a single device family may not have 69 * a suitable generic implementation 70 */ 71 72 /** 73 * ice_get_ptp_src_clock_index - determine source clock index 74 * @hw: pointer to HW struct 75 * 76 * Determine the source clock index currently in use, based on device 77 * capabilities reported during initialization. 78 */ 79 u8 ice_get_ptp_src_clock_index(struct ice_hw *hw) 80 { 81 return hw->func_caps.ts_func_info.tmr_index_assoc; 82 } 83 84 /** 85 * ice_ptp_read_src_incval - Read source timer increment value 86 * @hw: pointer to HW struct 87 * 88 * Read the increment value of the source timer and return it. 
89 */ 90 static u64 ice_ptp_read_src_incval(struct ice_hw *hw) 91 { 92 u32 lo, hi; 93 u8 tmr_idx; 94 95 tmr_idx = ice_get_ptp_src_clock_index(hw); 96 97 lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); 98 hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); 99 100 return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo; 101 } 102 103 /** 104 * ice_ptp_src_cmd - Prepare source timer for a timer command 105 * @hw: pointer to HW structure 106 * @cmd: Timer command 107 * 108 * Prepare the source timer for an upcoming timer sync command. 109 */ 110 static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) 111 { 112 u32 cmd_val; 113 u8 tmr_idx; 114 115 tmr_idx = ice_get_ptp_src_clock_index(hw); 116 cmd_val = tmr_idx << SEL_CPK_SRC; 117 118 switch (cmd) { 119 case INIT_TIME: 120 cmd_val |= GLTSYN_CMD_INIT_TIME; 121 break; 122 case INIT_INCVAL: 123 cmd_val |= GLTSYN_CMD_INIT_INCVAL; 124 break; 125 case ADJ_TIME: 126 cmd_val |= GLTSYN_CMD_ADJ_TIME; 127 break; 128 case ADJ_TIME_AT_TIME: 129 cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; 130 break; 131 case READ_TIME: 132 cmd_val |= GLTSYN_CMD_READ_TIME; 133 break; 134 } 135 136 wr32(hw, GLTSYN_CMD, cmd_val); 137 } 138 139 /** 140 * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands 141 * @hw: pointer to HW struct 142 * 143 * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the 144 * write immediately. This triggers the hardware to begin executing all of the 145 * source and PHY timer commands synchronously. 146 */ 147 static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) 148 { 149 wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); 150 ice_flush(hw); 151 } 152 153 /* E822 family functions 154 * 155 * The following functions operate on the E822 family of devices. 
156 */ 157 158 /** 159 * ice_fill_phy_msg_e822 - Fill message data for a PHY register access 160 * @msg: the PHY message buffer to fill in 161 * @port: the port to access 162 * @offset: the register offset 163 */ 164 static void 165 ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset) 166 { 167 int phy_port, phy, quadtype; 168 169 phy_port = port % ICE_PORTS_PER_PHY; 170 phy = port / ICE_PORTS_PER_PHY; 171 quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE; 172 173 if (quadtype == 0) { 174 msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); 175 msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port); 176 } else { 177 msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port); 178 msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port); 179 } 180 181 if (phy == 0) 182 msg->dest_dev = rmn_0; 183 else if (phy == 1) 184 msg->dest_dev = rmn_1; 185 else 186 msg->dest_dev = rmn_2; 187 } 188 189 /** 190 * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register 191 * @low_addr: the low address to check 192 * @high_addr: on return, contains the high address of the 64bit register 193 * 194 * Checks if the provided low address is one of the known 64bit PHY values 195 * represented as two 32bit registers. If it is, return the appropriate high 196 * register offset to use. 
197 */ 198 static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr) 199 { 200 switch (low_addr) { 201 case P_REG_PAR_PCS_TX_OFFSET_L: 202 *high_addr = P_REG_PAR_PCS_TX_OFFSET_U; 203 return true; 204 case P_REG_PAR_PCS_RX_OFFSET_L: 205 *high_addr = P_REG_PAR_PCS_RX_OFFSET_U; 206 return true; 207 case P_REG_PAR_TX_TIME_L: 208 *high_addr = P_REG_PAR_TX_TIME_U; 209 return true; 210 case P_REG_PAR_RX_TIME_L: 211 *high_addr = P_REG_PAR_RX_TIME_U; 212 return true; 213 case P_REG_TOTAL_TX_OFFSET_L: 214 *high_addr = P_REG_TOTAL_TX_OFFSET_U; 215 return true; 216 case P_REG_TOTAL_RX_OFFSET_L: 217 *high_addr = P_REG_TOTAL_RX_OFFSET_U; 218 return true; 219 case P_REG_UIX66_10G_40G_L: 220 *high_addr = P_REG_UIX66_10G_40G_U; 221 return true; 222 case P_REG_UIX66_25G_100G_L: 223 *high_addr = P_REG_UIX66_25G_100G_U; 224 return true; 225 case P_REG_TX_CAPTURE_L: 226 *high_addr = P_REG_TX_CAPTURE_U; 227 return true; 228 case P_REG_RX_CAPTURE_L: 229 *high_addr = P_REG_RX_CAPTURE_U; 230 return true; 231 case P_REG_TX_TIMER_INC_PRE_L: 232 *high_addr = P_REG_TX_TIMER_INC_PRE_U; 233 return true; 234 case P_REG_RX_TIMER_INC_PRE_L: 235 *high_addr = P_REG_RX_TIMER_INC_PRE_U; 236 return true; 237 default: 238 return false; 239 } 240 } 241 242 /** 243 * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register 244 * @low_addr: the low address to check 245 * @high_addr: on return, contains the high address of the 40bit value 246 * 247 * Checks if the provided low address is one of the known 40bit PHY values 248 * split into two registers with the lower 8 bits in the low register and the 249 * upper 32 bits in the high register. If it is, return the appropriate high 250 * register offset to use. 
251 */ 252 static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr) 253 { 254 switch (low_addr) { 255 case P_REG_TIMETUS_L: 256 *high_addr = P_REG_TIMETUS_U; 257 return true; 258 case P_REG_PAR_RX_TUS_L: 259 *high_addr = P_REG_PAR_RX_TUS_U; 260 return true; 261 case P_REG_PAR_TX_TUS_L: 262 *high_addr = P_REG_PAR_TX_TUS_U; 263 return true; 264 case P_REG_PCS_RX_TUS_L: 265 *high_addr = P_REG_PCS_RX_TUS_U; 266 return true; 267 case P_REG_PCS_TX_TUS_L: 268 *high_addr = P_REG_PCS_TX_TUS_U; 269 return true; 270 case P_REG_DESK_PAR_RX_TUS_L: 271 *high_addr = P_REG_DESK_PAR_RX_TUS_U; 272 return true; 273 case P_REG_DESK_PAR_TX_TUS_L: 274 *high_addr = P_REG_DESK_PAR_TX_TUS_U; 275 return true; 276 case P_REG_DESK_PCS_RX_TUS_L: 277 *high_addr = P_REG_DESK_PCS_RX_TUS_U; 278 return true; 279 case P_REG_DESK_PCS_TX_TUS_L: 280 *high_addr = P_REG_DESK_PCS_TX_TUS_U; 281 return true; 282 default: 283 return false; 284 } 285 } 286 287 /** 288 * ice_read_phy_reg_e822 - Read a PHY register 289 * @hw: pointer to the HW struct 290 * @port: PHY port to read from 291 * @offset: PHY register offset to read 292 * @val: on return, the contents read from the PHY 293 * 294 * Read a PHY register for the given port over the device sideband queue. 
295 */ 296 int 297 ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val) 298 { 299 struct ice_sbq_msg_input msg = {0}; 300 int err; 301 302 ice_fill_phy_msg_e822(&msg, port, offset); 303 msg.opcode = ice_sbq_msg_rd; 304 305 err = ice_sbq_rw_reg(hw, &msg); 306 if (err) { 307 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", 308 err); 309 return err; 310 } 311 312 *val = msg.data; 313 314 return 0; 315 } 316 317 /** 318 * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers 319 * @hw: pointer to the HW struct 320 * @port: PHY port to read from 321 * @low_addr: offset of the lower register to read from 322 * @val: on return, the contents of the 64bit value from the PHY registers 323 * 324 * Reads the two registers associated with a 64bit value and returns it in the 325 * val pointer. The offset always specifies the lower register offset to use. 326 * The high offset is looked up. This function only operates on registers 327 * known to be two parts of a 64bit value. 328 */ 329 static int 330 ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val) 331 { 332 u32 low, high; 333 u16 high_addr; 334 int err; 335 336 /* Only operate on registers known to be split into two 32bit 337 * registers. 
338 */ 339 if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { 340 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", 341 low_addr); 342 return -EINVAL; 343 } 344 345 err = ice_read_phy_reg_e822(hw, port, low_addr, &low); 346 if (err) { 347 ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, err %d", 348 low_addr, err); 349 return err; 350 } 351 352 err = ice_read_phy_reg_e822(hw, port, high_addr, &high); 353 if (err) { 354 ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, err %d", 355 high_addr, err); 356 return err; 357 } 358 359 *val = (u64)high << 32 | low; 360 361 return 0; 362 } 363 364 /** 365 * ice_write_phy_reg_e822 - Write a PHY register 366 * @hw: pointer to the HW struct 367 * @port: PHY port to write to 368 * @offset: PHY register offset to write 369 * @val: The value to write to the register 370 * 371 * Write a PHY register for the given port over the device sideband queue. 372 */ 373 int 374 ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val) 375 { 376 struct ice_sbq_msg_input msg = {0}; 377 int err; 378 379 ice_fill_phy_msg_e822(&msg, port, offset); 380 msg.opcode = ice_sbq_msg_wr; 381 msg.data = val; 382 383 err = ice_sbq_rw_reg(hw, &msg); 384 if (err) { 385 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", 386 err); 387 return err; 388 } 389 390 return 0; 391 } 392 393 /** 394 * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY 395 * @hw: pointer to the HW struct 396 * @port: port to write to 397 * @low_addr: offset of the low register 398 * @val: 40b value to write 399 * 400 * Write the provided 40b value to the two associated registers by splitting 401 * it up into two chunks, the lower 8 bits and the upper 32 bits. 
402 */ 403 static int 404 ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) 405 { 406 u32 low, high; 407 u16 high_addr; 408 int err; 409 410 /* Only operate on registers known to be split into a lower 8 bit 411 * register and an upper 32 bit register. 412 */ 413 if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) { 414 ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n", 415 low_addr); 416 return -EINVAL; 417 } 418 419 low = (u32)(val & P_REG_40B_LOW_M); 420 high = (u32)(val >> P_REG_40B_HIGH_S); 421 422 err = ice_write_phy_reg_e822(hw, port, low_addr, low); 423 if (err) { 424 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", 425 low_addr, err); 426 return err; 427 } 428 429 err = ice_write_phy_reg_e822(hw, port, high_addr, high); 430 if (err) { 431 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", 432 high_addr, err); 433 return err; 434 } 435 436 return 0; 437 } 438 439 /** 440 * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers 441 * @hw: pointer to the HW struct 442 * @port: PHY port to read from 443 * @low_addr: offset of the lower register to read from 444 * @val: the contents of the 64bit value to write to PHY 445 * 446 * Write the 64bit value to the two associated 32bit PHY registers. The offset 447 * is always specified as the lower register, and the high address is looked 448 * up. This function only operates on registers known to be two parts of 449 * a 64bit value. 450 */ 451 static int 452 ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) 453 { 454 u32 low, high; 455 u16 high_addr; 456 int err; 457 458 /* Only operate on registers known to be split into two 32bit 459 * registers. 
460 */ 461 if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) { 462 ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n", 463 low_addr); 464 return -EINVAL; 465 } 466 467 low = lower_32_bits(val); 468 high = upper_32_bits(val); 469 470 err = ice_write_phy_reg_e822(hw, port, low_addr, low); 471 if (err) { 472 ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", 473 low_addr, err); 474 return err; 475 } 476 477 err = ice_write_phy_reg_e822(hw, port, high_addr, high); 478 if (err) { 479 ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", 480 high_addr, err); 481 return err; 482 } 483 484 return 0; 485 } 486 487 /** 488 * ice_fill_quad_msg_e822 - Fill message data for quad register access 489 * @msg: the PHY message buffer to fill in 490 * @quad: the quad to access 491 * @offset: the register offset 492 * 493 * Fill a message buffer for accessing a register in a quad shared between 494 * multiple PHYs. 495 */ 496 static void 497 ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) 498 { 499 u32 addr; 500 501 msg->dest_dev = rmn_0; 502 503 if ((quad % ICE_NUM_QUAD_TYPE) == 0) 504 addr = Q_0_BASE + offset; 505 else 506 addr = Q_1_BASE + offset; 507 508 msg->msg_addr_low = lower_16_bits(addr); 509 msg->msg_addr_high = upper_16_bits(addr); 510 } 511 512 /** 513 * ice_read_quad_reg_e822 - Read a PHY quad register 514 * @hw: pointer to the HW struct 515 * @quad: quad to read from 516 * @offset: quad register offset to read 517 * @val: on return, the contents read from the quad 518 * 519 * Read a quad register over the device sideband queue. Quad registers are 520 * shared between multiple PHYs. 
521 */ 522 int 523 ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) 524 { 525 struct ice_sbq_msg_input msg = {0}; 526 int err; 527 528 if (quad >= ICE_MAX_QUAD) 529 return -EINVAL; 530 531 ice_fill_quad_msg_e822(&msg, quad, offset); 532 msg.opcode = ice_sbq_msg_rd; 533 534 err = ice_sbq_rw_reg(hw, &msg); 535 if (err) { 536 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", 537 err); 538 return err; 539 } 540 541 *val = msg.data; 542 543 return 0; 544 } 545 546 /** 547 * ice_write_quad_reg_e822 - Write a PHY quad register 548 * @hw: pointer to the HW struct 549 * @quad: quad to write to 550 * @offset: quad register offset to write 551 * @val: The value to write to the register 552 * 553 * Write a quad register over the device sideband queue. Quad registers are 554 * shared between multiple PHYs. 555 */ 556 int 557 ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val) 558 { 559 struct ice_sbq_msg_input msg = {0}; 560 int err; 561 562 if (quad >= ICE_MAX_QUAD) 563 return -EINVAL; 564 565 ice_fill_quad_msg_e822(&msg, quad, offset); 566 msg.opcode = ice_sbq_msg_wr; 567 msg.data = val; 568 569 err = ice_sbq_rw_reg(hw, &msg); 570 if (err) { 571 ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", 572 err); 573 return err; 574 } 575 576 return 0; 577 } 578 579 /** 580 * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block 581 * @hw: pointer to the HW struct 582 * @quad: the quad to read from 583 * @idx: the timestamp index to read 584 * @tstamp: on return, the 40bit timestamp value 585 * 586 * Read a 40bit timestamp value out of the two associated registers in the 587 * quad memory block that is shared between the internal PHYs of the E822 588 * family of devices. 
589 */ 590 static int 591 ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) 592 { 593 u16 lo_addr, hi_addr; 594 u32 lo, hi; 595 int err; 596 597 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); 598 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx); 599 600 err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo); 601 if (err) { 602 ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n", 603 err); 604 return err; 605 } 606 607 err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi); 608 if (err) { 609 ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n", 610 err); 611 return err; 612 } 613 614 /* For E822 based internal PHYs, the timestamp is reported with the 615 * lower 8 bits in the low register, and the upper 32 bits in the high 616 * register. 617 */ 618 *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M); 619 620 return 0; 621 } 622 623 /** 624 * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block 625 * @hw: pointer to the HW struct 626 * @quad: the quad to read from 627 * @idx: the timestamp index to reset 628 * 629 * Clear a timestamp, resetting its valid bit, from the PHY quad block that is 630 * shared between the internal PHYs on the E822 devices. 
631 */ 632 static int 633 ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx) 634 { 635 u16 lo_addr, hi_addr; 636 int err; 637 638 lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx); 639 hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx); 640 641 err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0); 642 if (err) { 643 ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n", 644 err); 645 return err; 646 } 647 648 err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0); 649 if (err) { 650 ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n", 651 err); 652 return err; 653 } 654 655 return 0; 656 } 657 658 /** 659 * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block 660 * @hw: pointer to the HW struct 661 * @quad: the quad to read from 662 * 663 * Clear all timestamps from the PHY quad block that is shared between the 664 * internal PHYs on the E822 devices. 665 */ 666 void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad) 667 { 668 ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M); 669 ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M); 670 } 671 672 /** 673 * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks 674 * @hw: pointer to the HW struct 675 */ 676 static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw) 677 { 678 unsigned int quad; 679 680 for (quad = 0; quad < ICE_MAX_QUAD; quad++) 681 ice_ptp_reset_ts_memory_quad_e822(hw, quad); 682 } 683 684 /** 685 * ice_read_cgu_reg_e822 - Read a CGU register 686 * @hw: pointer to the HW struct 687 * @addr: Register address to read 688 * @val: storage for register value read 689 * 690 * Read the contents of a register of the Clock Generation Unit. Only 691 * applicable to E822 devices. 
692 */ 693 static int 694 ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val) 695 { 696 struct ice_sbq_msg_input cgu_msg; 697 int err; 698 699 cgu_msg.opcode = ice_sbq_msg_rd; 700 cgu_msg.dest_dev = cgu; 701 cgu_msg.msg_addr_low = addr; 702 cgu_msg.msg_addr_high = 0x0; 703 704 err = ice_sbq_rw_reg(hw, &cgu_msg); 705 if (err) { 706 ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n", 707 addr, err); 708 return err; 709 } 710 711 *val = cgu_msg.data; 712 713 return err; 714 } 715 716 /** 717 * ice_write_cgu_reg_e822 - Write a CGU register 718 * @hw: pointer to the HW struct 719 * @addr: Register address to write 720 * @val: value to write into the register 721 * 722 * Write the specified value to a register of the Clock Generation Unit. Only 723 * applicable to E822 devices. 724 */ 725 static int 726 ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val) 727 { 728 struct ice_sbq_msg_input cgu_msg; 729 int err; 730 731 cgu_msg.opcode = ice_sbq_msg_wr; 732 cgu_msg.dest_dev = cgu; 733 cgu_msg.msg_addr_low = addr; 734 cgu_msg.msg_addr_high = 0x0; 735 cgu_msg.data = val; 736 737 err = ice_sbq_rw_reg(hw, &cgu_msg); 738 if (err) { 739 ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n", 740 addr, err); 741 return err; 742 } 743 744 return err; 745 } 746 747 /** 748 * ice_clk_freq_str - Convert time_ref_freq to string 749 * @clk_freq: Clock frequency 750 * 751 * Convert the specified TIME_REF clock frequency to a string. 
 */
static const char *ice_clk_freq_str(u8 clk_freq)
{
	switch ((enum ice_time_ref_freq)clk_freq) {
	case ICE_TIME_REF_FREQ_25_000:
		return "25 MHz";
	case ICE_TIME_REF_FREQ_122_880:
		return "122.88 MHz";
	case ICE_TIME_REF_FREQ_125_000:
		return "125 MHz";
	case ICE_TIME_REF_FREQ_153_600:
		return "153.6 MHz";
	case ICE_TIME_REF_FREQ_156_250:
		return "156.25 MHz";
	case ICE_TIME_REF_FREQ_245_760:
		return "245.76 MHz";
	default:
		return "Unknown";
	}
}

/**
 * ice_clk_src_str - Convert time_ref_src to string
 * @clk_src: Clock source
 *
 * Convert the specified clock source to its string name.
 */
static const char *ice_clk_src_str(u8 clk_src)
{
	switch ((enum ice_clk_src)clk_src) {
	case ICE_CLK_SRC_TCX0:
		return "TCX0";
	case ICE_CLK_SRC_TIME_REF:
		return "TIME_REF";
	default:
		return "Unknown";
	}
}

/**
 * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
 * @hw: pointer to the HW struct
 * @clk_freq: Clock frequency to program
 * @clk_src: Clock source to select (TIME_REF, or TCX0)
 *
 * Configure the Clock Generation Unit with the desired clock frequency and
 * time reference, enabling the PLL which drives the PTP hardware clock.
 *
 * The sequence is: validate inputs, read the current configuration, disable
 * the PLL, program frequency and divisors from e822_cgu_params, re-enable
 * the PLL, then poll once (after a short sleep) for PLL lock. Returns 0 on
 * success, -EINVAL on bad input, -EBUSY if the PLL fails to lock, or the
 * error from any failed CGU register access.
 */
static int
ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
		     enum ice_clk_src clk_src)
{
	union tspll_ro_bwm_lf bwm_lf;
	union nac_cgu_dword19 dw19;
	union nac_cgu_dword22 dw22;
	union nac_cgu_dword24 dw24;
	union nac_cgu_dword9 dw9;
	int err;

	/* clk_freq also indexes e822_cgu_params below, so the range check is
	 * required for memory safety, not just validation.
	 */
	if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
		dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
			 clk_freq);
		return -EINVAL;
	}

	if (clk_src >= NUM_ICE_CLK_SRC) {
		dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
			 clk_src);
		return -EINVAL;
	}

	if (clk_src == ICE_CLK_SRC_TCX0 &&
	    clk_freq != ICE_TIME_REF_FREQ_25_000) {
		dev_warn(ice_hw_to_dev(hw),
			 "TCX0 only supports 25 MHz frequency\n");
		return -EINVAL;
	}

	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
	if (err)
		return err;

	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
	if (err)
		return err;

	err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
	if (err)
		return err;

	/* Log the current clock configuration */
	ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
		  ice_clk_src_str(dw24.field.time_ref_sel),
		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");

	/* Disable the PLL before changing the clock source or frequency */
	if (dw24.field.ts_pll_enable) {
		dw24.field.ts_pll_enable = 0;

		err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
		if (err)
			return err;
	}

	/* Set the frequency */
	dw9.field.time_ref_freq_sel = clk_freq;
	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
	if (err)
		return err;

	/* Configure the TS PLL feedback divisor */
	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
	if (err)
		return err;

	dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
	dw19.field.tspll_ndivratio = 1;

	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
	if (err)
		return err;

	/* Configure the TS PLL post divisor */
	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
	if (err)
		return err;

	dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
	dw22.field.time1588clk_sel_div2 = 0;

	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
	if (err)
		return err;

	/* Configure the TS PLL pre divisor and clock source */
	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
	if (err)
		return err;

	dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
	dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
	dw24.field.time_ref_sel = clk_src;

	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
	if (err)
		return err;

	/* Finally, enable the PLL */
	dw24.field.ts_pll_enable = 1;

	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
	if (err)
		return err;

	/* Wait to verify if the PLL locks */
	usleep_range(1000, 5000);

	err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
	if (err)
		return err;

	if (!bwm_lf.field.plllock_true_lock_cri) {
		dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
		return -EBUSY;
	}

	/* Log the current clock configuration */
	ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
		  ice_clk_src_str(dw24.field.time_ref_sel),
		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");

	return 0;
}

/**
 * ice_init_cgu_e822 - Initialize CGU with settings from firmware
 * @hw: pointer to the HW structure
 *
 * Initialize the Clock Generation Unit of the E822 device, using the
 * TIME_REF frequency and clock source reported in the function capabilities.
 */
static int ice_init_cgu_e822(struct ice_hw *hw)
{
	struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
	union tspll_cntr_bist_settings cntr_bist;
	int err;

	err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
				    &cntr_bist.val);
	if (err)
		return err;

	/* Disable sticky lock detection so lock err reported is accurate */
	cntr_bist.field.i_plllock_sel_0 = 0;
	cntr_bist.field.i_plllock_sel_1 = 0;

	err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
				     cntr_bist.val);
	if (err)
		return err;

	/* Configure the CGU PLL using the parameters from the function
	 * capabilities.
	 */
	err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
				   (enum ice_clk_src)ts_info->clk_src);
	if (err)
		return err;

	return 0;
}

/**
 * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
 * @hw: pointer to the HW struct
 *
 * Set the window length used for the vernier port calibration process.
972 */ 973 static int ice_ptp_set_vernier_wl(struct ice_hw *hw) 974 { 975 u8 port; 976 977 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { 978 int err; 979 980 err = ice_write_phy_reg_e822(hw, port, P_REG_WL, 981 PTP_VERNIER_WL); 982 if (err) { 983 ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n", 984 port, err); 985 return err; 986 } 987 } 988 989 return 0; 990 } 991 992 /** 993 * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization 994 * @hw: pointer to HW struct 995 * 996 * Perform PHC initialization steps specific to E822 devices. 997 */ 998 static int ice_ptp_init_phc_e822(struct ice_hw *hw) 999 { 1000 int err; 1001 u32 regval; 1002 1003 /* Enable reading switch and PHY registers over the sideband queue */ 1004 #define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1) 1005 #define PF_SB_REM_DEV_CTL_PHY0 BIT(2) 1006 regval = rd32(hw, PF_SB_REM_DEV_CTL); 1007 regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ | 1008 PF_SB_REM_DEV_CTL_PHY0); 1009 wr32(hw, PF_SB_REM_DEV_CTL, regval); 1010 1011 /* Initialize the Clock Generation Unit */ 1012 err = ice_init_cgu_e822(hw); 1013 if (err) 1014 return err; 1015 1016 /* Set window length for all the ports */ 1017 return ice_ptp_set_vernier_wl(hw); 1018 } 1019 1020 /** 1021 * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time 1022 * @hw: pointer to the HW struct 1023 * @time: Time to initialize the PHY port clocks to 1024 * 1025 * Program the PHY port registers with a new initial time value. The port 1026 * clock will be initialized once the driver issues an INIT_TIME sync 1027 * command. The time value is the upper 32 bits of the PHY timer, usually in 1028 * units of nominal nanoseconds. 1029 */ 1030 static int 1031 ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time) 1032 { 1033 u64 phy_time; 1034 u8 port; 1035 int err; 1036 1037 /* The time represents the upper 32 bits of the PHY timer, so we need 1038 * to shift to account for this when programming. 
1039 */ 1040 phy_time = (u64)time << 32; 1041 1042 for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { 1043 /* Tx case */ 1044 err = ice_write_64b_phy_reg_e822(hw, port, 1045 P_REG_TX_TIMER_INC_PRE_L, 1046 phy_time); 1047 if (err) 1048 goto exit_err; 1049 1050 /* Rx case */ 1051 err = ice_write_64b_phy_reg_e822(hw, port, 1052 P_REG_RX_TIMER_INC_PRE_L, 1053 phy_time); 1054 if (err) 1055 goto exit_err; 1056 } 1057 1058 return 0; 1059 1060 exit_err: 1061 ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n", 1062 port, err); 1063 1064 return err; 1065 } 1066 1067 /** 1068 * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust 1069 * @hw: pointer to HW struct 1070 * @port: Port number to be programmed 1071 * @time: time in cycles to adjust the port Tx and Rx clocks 1072 * 1073 * Program the port for an atomic adjustment by writing the Tx and Rx timer 1074 * registers. The atomic adjustment won't be completed until the driver issues 1075 * an ADJ_TIME command. 1076 * 1077 * Note that time is not in units of nanoseconds. It is in clock time 1078 * including the lower sub-nanosecond portion of the port timer. 1079 * 1080 * Negative adjustments are supported using 2s complement arithmetic. 
 */
int
ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
{
	u32 l_time, u_time;
	int err;

	l_time = lower_32_bits(time);
	u_time = upper_32_bits(time);

	/* Tx case */
	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
				     l_time);
	if (err)
		goto exit_err;

	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
				     u_time);
	if (err)
		goto exit_err;

	/* Rx case */
	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
				     l_time);
	if (err)
		goto exit_err;

	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
				     u_time);
	if (err)
		goto exit_err;

	return 0;

exit_err:
	ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
		  port, err);
	return err;
}

/**
 * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
 * @hw: pointer to HW struct
 * @adj: adjustment in nanoseconds
 *
 * Prepare the PHY ports for an atomic time adjustment by programming the PHY
 * Tx and Rx port registers. The actual adjustment is completed by issuing an
 * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
 */
static int
ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
{
	s64 cycles;
	u8 port;

	/* The port clock supports adjustment of the sub-nanosecond portion of
	 * the clock (the lower 32 bits of the port timer). We shift the
	 * provided adjustment in nanoseconds to calculate the appropriate
	 * adjustment to program into the PHY ports.
	 */
	if (adj > 0)
		cycles = (s64)adj << 32;
	else
		/* Negate before shifting to avoid left-shifting a negative
		 * value, then restore the sign.
		 */
		cycles = -(((s64)-adj) << 32);

	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
		int err;

		err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
		if (err)
			return err;
	}

	return 0;
}

/**
 * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
 * @hw: pointer to HW struct
 * @incval: new increment value to prepare
 *
 * Prepare each of the PHY ports for a new increment value by programming the
 * port's TIMETUS registers. The new increment value will be updated after
 * issuing an INIT_INCVAL command.
 */
static int
ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
{
	int err;
	u8 port;

	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
		err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
						 incval);
		if (err)
			goto exit_err;
	}

	return 0;

exit_err:
	ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
		  port, err);

	return err;
}

/**
 * ice_ptp_read_port_capture - Read a port's local time capture
 * @hw: pointer to HW struct
 * @port: Port number to read
 * @tx_ts: on return, the Tx port time capture
 * @rx_ts: on return, the Rx port time capture
 *
 * Read the port's Tx and Rx local time capture values.
 *
 * Note this has no equivalent for the E810 devices.
1197 */ 1198 static int 1199 ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) 1200 { 1201 int err; 1202 1203 /* Tx case */ 1204 err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts); 1205 if (err) { 1206 ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n", 1207 err); 1208 return err; 1209 } 1210 1211 ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n", 1212 (unsigned long long)*tx_ts); 1213 1214 /* Rx case */ 1215 err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts); 1216 if (err) { 1217 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n", 1218 err); 1219 return err; 1220 } 1221 1222 ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n", 1223 (unsigned long long)*rx_ts); 1224 1225 return 0; 1226 } 1227 1228 /** 1229 * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command 1230 * @hw: pointer to HW struct 1231 * @port: Port to which cmd has to be sent 1232 * @cmd: Command to be sent to the port 1233 * 1234 * Prepare the requested port for an upcoming timer sync command. 1235 * 1236 * Note there is no equivalent of this operation on E810, as that device 1237 * always handles all external PHYs internally. 
1238 */ 1239 static int 1240 ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) 1241 { 1242 u32 cmd_val, val; 1243 u8 tmr_idx; 1244 int err; 1245 1246 tmr_idx = ice_get_ptp_src_clock_index(hw); 1247 cmd_val = tmr_idx << SEL_PHY_SRC; 1248 switch (cmd) { 1249 case INIT_TIME: 1250 cmd_val |= PHY_CMD_INIT_TIME; 1251 break; 1252 case INIT_INCVAL: 1253 cmd_val |= PHY_CMD_INIT_INCVAL; 1254 break; 1255 case ADJ_TIME: 1256 cmd_val |= PHY_CMD_ADJ_TIME; 1257 break; 1258 case READ_TIME: 1259 cmd_val |= PHY_CMD_READ_TIME; 1260 break; 1261 case ADJ_TIME_AT_TIME: 1262 cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME; 1263 break; 1264 } 1265 1266 /* Tx case */ 1267 /* Read, modify, write */ 1268 err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val); 1269 if (err) { 1270 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n", 1271 err); 1272 return err; 1273 } 1274 1275 /* Modify necessary bits only and perform write */ 1276 val &= ~TS_CMD_MASK; 1277 val |= cmd_val; 1278 1279 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val); 1280 if (err) { 1281 ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", 1282 err); 1283 return err; 1284 } 1285 1286 /* Rx case */ 1287 /* Read, modify, write */ 1288 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val); 1289 if (err) { 1290 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n", 1291 err); 1292 return err; 1293 } 1294 1295 /* Modify necessary bits only and perform write */ 1296 val &= ~TS_CMD_MASK; 1297 val |= cmd_val; 1298 1299 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val); 1300 if (err) { 1301 ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", 1302 err); 1303 return err; 1304 } 1305 1306 return 0; 1307 } 1308 1309 /** 1310 * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command 1311 * @hw: pointer to the HW struct 1312 * @cmd: timer command to prepare 1313 * 1314 * Prepare all ports connected to this device for an 
upcoming timer sync
 * command.
 */
static int
ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
	u8 port;

	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
		int err;

		err = ice_ptp_one_port_cmd(hw, port, cmd);
		if (err)
			return err;
	}

	return 0;
}

/* E822 Vernier calibration functions
 *
 * The following functions are used as part of the vernier calibration of
 * a port. This calibration increases the precision of the timestamps on the
 * port.
 */

/**
 * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
 * @hw: pointer to HW struct
 * @port: the port to read from
 * @link_out: if non-NULL, holds link speed on success
 * @fec_out: if non-NULL, holds FEC algorithm on success
 *
 * Read the serdes data for the PHY port and extract the link speed and FEC
 * algorithm.
 */
static int
ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
			       enum ice_ptp_link_spd *link_out,
			       enum ice_ptp_fec_mode *fec_out)
{
	enum ice_ptp_link_spd link;
	enum ice_ptp_fec_mode fec;
	u32 serdes;
	int err;

	err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
		return err;
	}

	/* Determine the FEC algorithm */
	fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);

	serdes &= P_REG_LINK_SPEED_SERDES_M;

	/* Determine the link speed. RS-FEC serdes rates map to distinct link
	 * speed enumerations from the non-RS rates.
	 */
	if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
		switch (serdes) {
		case ICE_PTP_SERDES_25G:
			link = ICE_PTP_LNK_SPD_25G_RS;
			break;
		case ICE_PTP_SERDES_50G:
			link = ICE_PTP_LNK_SPD_50G_RS;
			break;
		case ICE_PTP_SERDES_100G:
			link = ICE_PTP_LNK_SPD_100G_RS;
			break;
		default:
			return -EIO;
		}
	} else {
		switch (serdes) {
		case ICE_PTP_SERDES_1G:
			link = ICE_PTP_LNK_SPD_1G;
			break;
		case ICE_PTP_SERDES_10G:
			link = ICE_PTP_LNK_SPD_10G;
			break;
		case ICE_PTP_SERDES_25G:
			link = ICE_PTP_LNK_SPD_25G;
			break;
		case ICE_PTP_SERDES_40G:
			link = ICE_PTP_LNK_SPD_40G;
			break;
		case ICE_PTP_SERDES_50G:
			link = ICE_PTP_LNK_SPD_50G;
			break;
		default:
			return -EIO;
		}
	}

	if (link_out)
		*link_out = link;
	if (fec_out)
		*fec_out = fec;

	return 0;
}

/**
 * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
 * @hw: pointer to HW struct
 * @port: to configure the quad for
 */
static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
{
	enum ice_ptp_link_spd link_spd;
	int err;
	u32 val;
	u8 quad;

	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
			  err);
		return;
	}

	quad = port / ICE_PORTS_PER_QUAD;

	err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
			  err);
		return;
	}

	/* 40G and above use multi-lane timestamping; clear the lane type bit
	 * for those speeds, set it for single-lane speeds.
	 */
	if (link_spd >= ICE_PTP_LNK_SPD_40G)
		val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
	else
		val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;

	err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
			  err);
		return;
	}
}

/**
 * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
 * @hw: pointer to the HW structure
 * @port: the port to configure
 *
 * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
 * hardware clock time units (TUs).
That is, determine the number of TUs per
 * serdes unit interval, and program the UIX registers with this conversion.
 *
 * This conversion is used as part of the calibration process when determining
 * the additional error of a timestamp vs the real time of transmission or
 * receipt of the packet.
 *
 * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
 * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
 *
 * To calculate the conversion ratio, we use the following facts:
 *
 * a) the clock frequency in Hz (cycles per second)
 * b) the number of TUs per cycle (the increment value of the clock)
 * c) 1 second per 1 billion nanoseconds
 * d) the duration of 66 UIs in nanoseconds
 *
 * Given these facts, we can use the following table to work out what ratios
 * to multiply in order to get the number of TUs per 66 UIs:
 *
 * cycles | 1 second     | incval (TUs) | nanoseconds
 * -------+--------------+--------------+-------------
 * second | 1 billion ns | cycle        | 66 UIs
 *
 * To perform the multiplication using integers without too much loss of
 * precision, we can take use the following equation:
 *
 * (freq * incval * 6600 LINE_UI ) / ( 100 * 1 billion)
 *
 * We scale up to using 6600 UI instead of 66 in order to avoid fractional
 * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
 *
 * The increment value has a maximum expected range of about 34 bits, while
 * the frequency value is about 29 bits. Multiplying these values shouldn't
 * overflow the 64 bits. However, we must then further multiply them again by
 * the Serdes unit interval duration. To avoid overflow here, we split the
 * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
 * a divide by 390,625,000. This does lose some precision, but avoids
 * miscalculation due to arithmetic overflow.
 */
static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
{
	u64 cur_freq, clk_incval, tu_per_sec, uix;
	int err;

	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
	clk_incval = ice_ptp_read_src_incval(hw);

	/* Calculate TUs per second divided by 256 */
	tu_per_sec = (cur_freq * clk_incval) >> 8;

#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */

	/* Program the 10Gb/40Gb conversion ratio */
	uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);

	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
					 uix);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
			  err);
		return err;
	}

	/* Program the 25Gb/100Gb conversion ratio */
	uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);

	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
					 uix);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
			  err);
		return err;
	}

	return 0;
}

/**
 * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
 * @hw: pointer to the HW struct
 * @port: port to configure
 *
 * Configure the number of TUs for the PAR and PCS clocks used as part of the
 * timestamp calibration process. This depends on the link speed, as the PHY
 * uses different markers depending on the speed.
 *
 * 1Gb/10Gb/25Gb:
 * - Tx/Rx PAR/PCS markers
 *
 * 25Gb RS:
 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
 *
 * 40Gb/50Gb:
 * - Tx/Rx PAR/PCS markers
 * - Rx Deskew PAR/PCS markers
 *
 * 50G RS and 100GB RS:
 * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
 * - Rx Deskew PAR/PCS markers
 * - Tx PAR/PCS markers
 *
 * To calculate the conversion, we use the PHC clock frequency (cycles per
 * second), the increment value (TUs per cycle), and the related PHY clock
 * frequency to calculate the TUs per unit of the PHY link clock. The
 * following table shows how the units convert:
 *
 * cycles | TUs   | second
 * -------+-------+--------
 * second | cycle | cycles
 *
 * For each conversion register, look up the appropriate frequency from the
 * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
 * this to the appropriate register, preparing hardware to perform timestamp
 * calibration to calculate the total Tx or Rx offset to adjust the timestamp
 * in order to calibrate for the internal PHY delays.
 *
 * Note that the increment value ranges up to ~34 bits, and the clock
 * frequency is ~29 bits, so multiplying them together should fit within the
 * 64 bit arithmetic.
 */
static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
{
	u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
	enum ice_ptp_link_spd link_spd;
	enum ice_ptp_fec_mode fec_mode;
	int err;

	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
	if (err)
		return err;

	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
	clk_incval = ice_ptp_read_src_incval(hw);

	/* Calculate TUs per cycle of the PHC clock */
	tu_per_sec = cur_freq * clk_incval;

	/* For each PHY conversion register, look up the appropriate link
	 * speed frequency and determine the TUs per that clock's cycle time.
	 * Split this into a high and low value and then program the
	 * appropriate register. If that link speed does not use the
	 * associated register, write zeros to clear it instead.
	 */

	/* P_REG_PAR_TX_TUS */
	if (e822_vernier[link_spd].tx_par_clk)
		phy_tus = div_u64(tu_per_sec,
				  e822_vernier[link_spd].tx_par_clk);
	else
		phy_tus = 0;

	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
					 phy_tus);
	if (err)
		return err;

	/* P_REG_PAR_RX_TUS */
	if (e822_vernier[link_spd].rx_par_clk)
		phy_tus = div_u64(tu_per_sec,
				  e822_vernier[link_spd].rx_par_clk);
	else
		phy_tus = 0;

	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
					 phy_tus);
	if (err)
		return err;

	/* P_REG_PCS_TX_TUS */
	if (e822_vernier[link_spd].tx_pcs_clk)
		phy_tus = div_u64(tu_per_sec,
				  e822_vernier[link_spd].tx_pcs_clk);
	else
		phy_tus = 0;

	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
					 phy_tus);
	if (err)
		return err;

	/* P_REG_PCS_RX_TUS */
	if (e822_vernier[link_spd].rx_pcs_clk)
		phy_tus = div_u64(tu_per_sec,
				  e822_vernier[link_spd].rx_pcs_clk);
	else
		phy_tus = 0;

	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
					 phy_tus);
	if (err)
		return err;

	/* P_REG_DESK_PAR_TX_TUS */
	if (e822_vernier[link_spd].tx_desk_rsgb_par)
		phy_tus = div_u64(tu_per_sec,
				  e822_vernier[link_spd].tx_desk_rsgb_par);
	else
		phy_tus = 0;

	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
					 phy_tus);
	if (err)
		return err;

	/* P_REG_DESK_PAR_RX_TUS */
	if (e822_vernier[link_spd].rx_desk_rsgb_par)
		phy_tus = div_u64(tu_per_sec,
				  e822_vernier[link_spd].rx_desk_rsgb_par);
	else
		phy_tus = 0;

	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
					 phy_tus);
	if (err)
		return err;

	/* P_REG_DESK_PCS_TX_TUS */
	if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
		phy_tus = div_u64(tu_per_sec,
				  e822_vernier[link_spd].tx_desk_rsgb_pcs);
	else
		phy_tus = 0;

	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
					 phy_tus);
	if (err)
		return err;

	/* P_REG_DESK_PCS_RX_TUS */
	if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
		phy_tus = div_u64(tu_per_sec,
				  e822_vernier[link_spd].rx_desk_rsgb_pcs);
	else
		phy_tus = 0;

	return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
					  phy_tus);
}

/**
 * ice_calc_fixed_tx_offset_e822 - Calculated Fixed Tx offset for a port
 * @hw: pointer to the HW struct
 * @link_spd: the Link speed to calculate for
 *
 * Calculate the fixed offset due to known static latency data.
 */
static u64
ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
{
	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;

	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
	clk_incval = ice_ptp_read_src_incval(hw);

	/* Calculate TUs per second */
	tu_per_sec = cur_freq * clk_incval;

	/* Calculate number of TUs to add for the fixed Tx latency. Since the
	 * latency measurement is in 1/100th of a nanosecond, we need to
	 * multiply by tu_per_sec and then divide by 1e11. This calculation
	 * overflows 64 bit integer arithmetic, so break it up into two
	 * divisions by 1e4 first then by 1e7.
	 */
	fixed_offset = div_u64(tu_per_sec, 10000);
	fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
	fixed_offset = div_u64(fixed_offset, 10000000);

	return fixed_offset;
}

/**
 * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
 * @hw: pointer to the HW struct
 * @port: the PHY port to configure
 *
 * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
 * adjust Tx timestamps by. This is calculated by combining some known static
 * latency along with the Vernier offset computations done by hardware.
 *
 * This function must be called only after the offset registers are valid,
 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
 * has measured the offset.
 *
 * To avoid overflow, when calculating the offset based on the known static
 * latency values, we use measurements in 1/100th of a nanosecond, and divide
 * the TUs per second up front. This avoids overflow while allowing
 * calculation of the adjustment using integer arithmetic.
 */
static int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
{
	enum ice_ptp_link_spd link_spd;
	enum ice_ptp_fec_mode fec_mode;
	u64 total_offset, val;
	int err;

	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
	if (err)
		return err;

	total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);

	/* Read the first Vernier offset from the PHY register and add it to
	 * the total offset.
	 */
	if (link_spd == ICE_PTP_LNK_SPD_1G ||
	    link_spd == ICE_PTP_LNK_SPD_10G ||
	    link_spd == ICE_PTP_LNK_SPD_25G ||
	    link_spd == ICE_PTP_LNK_SPD_25G_RS ||
	    link_spd == ICE_PTP_LNK_SPD_40G ||
	    link_spd == ICE_PTP_LNK_SPD_50G) {
		err = ice_read_64b_phy_reg_e822(hw, port,
						P_REG_PAR_PCS_TX_OFFSET_L,
						&val);
		if (err)
			return err;

		total_offset += val;
	}

	/* For Tx, we only need to use the second Vernier offset for
	 * multi-lane link speeds with RS-FEC. The lanes will always be
	 * aligned.
	 */
	if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
	    link_spd == ICE_PTP_LNK_SPD_100G_RS) {
		err = ice_read_64b_phy_reg_e822(hw, port,
						P_REG_PAR_TX_TIME_L,
						&val);
		if (err)
			return err;

		total_offset += val;
	}

	/* Now that the total offset has been calculated, program it to the
	 * PHY and indicate that the Tx offset is ready. After this,
	 * timestamps will be enabled.
	 */
	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
					 total_offset);
	if (err)
		return err;

	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
	if (err)
		return err;

	return 0;
}

/**
 * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
 * @hw: pointer to the HW struct
 * @port: the PHY port to adjust for
 * @link_spd: the current link speed of the PHY
 * @fec_mode: the current FEC mode of the PHY
 * @pmd_adj: on return, the amount to adjust the Rx total offset by
 *
 * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
 * This varies by link speed and FEC mode. The value calculated accounts for
 * various delays caused when receiving a packet.
 */
static int
ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
			  enum ice_ptp_link_spd link_spd,
			  enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
{
	u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
	u8 pmd_align;
	u32 val;
	int err;

	err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
			  err);
		return err;
	}

	pmd_align = (u8)val;

	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
	clk_incval = ice_ptp_read_src_incval(hw);

	/* Calculate TUs per second */
	tu_per_sec = cur_freq * clk_incval;

	/* The PMD alignment adjustment measurement depends on the link speed,
	 * and whether FEC is enabled. For each link speed, the alignment
	 * adjustment is calculated by dividing a value by the length of
	 * a Time Unit in nanoseconds.
	 *
	 * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
	 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
	 * 10G w/FEC: align * 0.1 * 32/33
	 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
	 * 25G w/FEC: align * 0.4 * 32/33
	 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
	 * 40G w/FEC: align * 0.1 * 32/33
	 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
	 * 50G w/FEC: align * 0.8 * 32/33
	 *
	 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
	 *
	 * To allow for calculating this value using integer arithmetic, we
	 * instead start with the number of TUs per second, (inverse of the
	 * length of a Time Unit in nanoseconds), multiply by a value based
	 * on the PMD alignment register, and then divide by the right value
	 * calculated based on the table above. To avoid integer overflow this
	 * division is broken up into a step of dividing by 125 first.
	 */
	if (link_spd == ICE_PTP_LNK_SPD_1G) {
		if (pmd_align == 4)
			mult = 10;
		else
			mult = (pmd_align + 6) % 10;
	} else if (link_spd == ICE_PTP_LNK_SPD_10G ||
		   link_spd == ICE_PTP_LNK_SPD_25G ||
		   link_spd == ICE_PTP_LNK_SPD_40G ||
		   link_spd == ICE_PTP_LNK_SPD_50G) {
		/* If Clause 74 FEC, always calculate PMD adjust */
		if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
			mult = pmd_align;
		else
			mult = 0;
	} else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
		   link_spd == ICE_PTP_LNK_SPD_50G_RS ||
		   link_spd == ICE_PTP_LNK_SPD_100G_RS) {
		if (pmd_align < 17)
			mult = pmd_align + 40;
		else
			mult = pmd_align;
	} else {
		ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
			  link_spd);
		mult = 0;
	}

	/* In some cases, there's no need to adjust for the PMD alignment */
	if (!mult) {
		*pmd_adj = 0;
		return 0;
	}

	/* Calculate the adjustment by multiplying TUs per second by the
	 * appropriate multiplier and divisor. To avoid overflow, we first
	 * divide by 125, and then handle remaining divisor based on the link
	 * speed pmd_adj_divisor value.
	 */
	adj = div_u64(tu_per_sec, 125);
	adj *= mult;
	adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);

	/* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
	 * cycle count is necessary.
	 */
	if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
		u64 cycle_adj;
		u8 rx_cycle;

		err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
					    &val);
		if (err) {
			ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
				  err);
			return err;
		}

		rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
		if (rx_cycle) {
			mult = (4 - rx_cycle) * 40;

			cycle_adj = div_u64(tu_per_sec, 125);
			cycle_adj *= mult;
			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);

			adj += cycle_adj;
		}
	} else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
		u64 cycle_adj;
		u8 rx_cycle;

		err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
					    &val);
		if (err) {
			ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
				  err);
			return err;
		}

		rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
		if (rx_cycle) {
			mult = rx_cycle * 40;

			cycle_adj = div_u64(tu_per_sec, 125);
			cycle_adj *= mult;
			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);

			adj += cycle_adj;
		}
	}

	/* Return the calculated adjustment */
	*pmd_adj = adj;

	return 0;
}

/**
 * ice_calc_fixed_rx_offset_e822 - Calculated the fixed Rx offset for a port
 * @hw: pointer to HW struct
 * @link_spd: The Link speed to calculate for
 *
 * Determine the fixed Rx latency for a given link speed.
 */
static u64
ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
{
	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;

	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
	clk_incval = ice_ptp_read_src_incval(hw);

	/* Calculate TUs per second */
	tu_per_sec = cur_freq * clk_incval;

	/* Calculate number of TUs to add for the fixed Rx latency. Since the
	 * latency measurement is in 1/100th of a nanosecond, we need to
	 * multiply by tu_per_sec and then divide by 1e11. This calculation
	 * overflows 64 bit integer arithmetic, so break it up into two
	 * divisions by 1e4 first then by 1e7.
	 */
	fixed_offset = div_u64(tu_per_sec, 10000);
	fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
	fixed_offset = div_u64(fixed_offset, 10000000);

	return fixed_offset;
}

/**
 * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
 * @hw: pointer to the HW struct
 * @port: the PHY port to configure
 *
 * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
 * adjust Rx timestamps by. This combines calculations from the Vernier offset
 * measurements taken in hardware with some data about known fixed delay as
 * well as adjusting for multi-lane alignment delay.
 *
 * This function must be called only after the offset registers are valid,
 * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
 * has measured the offset.
 *
 * To avoid overflow, when calculating the offset based on the known static
 * latency values, we use measurements in 1/100th of a nanosecond, and divide
 * the TUs per second up front. This avoids overflow while allowing
 * calculation of the adjustment using integer arithmetic.
 */
static int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
{
	enum ice_ptp_link_spd link_spd;
	enum ice_ptp_fec_mode fec_mode;
	u64 total_offset, pmd, val;
	int err;

	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
	if (err)
		return err;

	total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);

	/* Read the first Vernier offset from the PHY register and add it to
	 * the total offset.
	 */
	err = ice_read_64b_phy_reg_e822(hw, port,
					P_REG_PAR_PCS_RX_OFFSET_L,
					&val);
	if (err)
		return err;

	total_offset += val;

	/* For Rx, all multi-lane link speeds include a second Vernier
	 * calibration, because the lanes might not be aligned.
	 */
	if (link_spd == ICE_PTP_LNK_SPD_40G ||
	    link_spd == ICE_PTP_LNK_SPD_50G ||
	    link_spd == ICE_PTP_LNK_SPD_50G_RS ||
	    link_spd == ICE_PTP_LNK_SPD_100G_RS) {
		err = ice_read_64b_phy_reg_e822(hw, port,
						P_REG_PAR_RX_TIME_L,
						&val);
		if (err)
			return err;

		total_offset += val;
	}

	/* In addition, Rx must account for the PMD alignment */
	err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
	if (err)
		return err;

	/* For RS-FEC, this adjustment adds delay, but for other modes, it
	 * subtracts delay.
	 */
	if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
		total_offset += pmd;
	else
		total_offset -= pmd;

	/* Now that the total offset has been calculated, program it to the
	 * PHY and indicate that the Rx offset is ready. After this,
	 * timestamps will be enabled.
	 */
	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
					 total_offset);
	if (err)
		return err;

	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
	if (err)
		return err;

	return 0;
}

/**
 * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
 * @hw: pointer to the HW struct
 * @port: the PHY port to read
 * @phy_time: on return, the 64bit PHY timer value
 * @phc_time: on return, the lower 64bits of PHC time
 *
 * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
 * timer values.
 */
static int
ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
			       u64 *phc_time)
{
	u64 tx_time, rx_time;
	u32 zo, lo;
	u8 tmr_idx;
	int err;

	tmr_idx = ice_get_ptp_src_clock_index(hw);

	/* Prepare the PHC timer for a READ_TIME capture command */
	ice_ptp_src_cmd(hw, READ_TIME);

	/* Prepare the PHY timer for a READ_TIME capture command */
	err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
	if (err)
		return err;

	/* Issue the sync to start the READ_TIME capture */
	ice_ptp_exec_tmr_cmd(hw);

	/* Read the captured PHC time from the shadow time registers */
	zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
	lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
	*phc_time = (u64)lo << 32 | zo;

	/* Read the captured PHY time from the PHY shadow registers */
	err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
	if (err)
		return err;

	/* If the PHY Tx and Rx timers don't match, log a warning message.
	 * Note that this should not happen in normal circumstances since the
	 * driver always programs them together.
	 */
	if (tx_time != rx_time)
		dev_warn(ice_hw_to_dev(hw),
			 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
			 port, (unsigned long long)tx_time,
			 (unsigned long long)rx_time);

	/* Report the Tx capture as the PHY time */
	*phy_time = tx_time;

	return 0;
}

/**
 * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
 * @hw: pointer to the HW struct
 * @port: the PHY port to synchronize
 *
 * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
 * This is done by issuing a READ_TIME command which triggers a simultaneous
 * read of the PHY timer and PHC timer. Then we use the difference to
 * calculate an appropriate 2s complement addition to add to the PHY timer in
 * order to ensure it reads the same value as the primary PHC timer.
 */
static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
{
	u64 phc_time, phy_time, difference;
	int err;

	/* The PTP hardware semaphore serializes access to the shared timer
	 * command registers across PFs.
	 */
	if (!ice_ptp_lock(hw)) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
		return -EBUSY;
	}

	err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
	if (err)
		goto err_unlock;

	/* Calculate the amount required to add to the port time in order for
	 * it to match the PHC time.
	 *
	 * Note that the port adjustment is done using 2s complement
	 * arithmetic. This is convenient since it means that we can simply
	 * calculate the difference between the PHC time and the port time,
	 * and it will be interpreted correctly.
	 */
	difference = phc_time - phy_time;

	err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
	if (err)
		goto err_unlock;

	err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
	if (err)
		goto err_unlock;

	/* Issue the sync to activate the time adjustment */
	ice_ptp_exec_tmr_cmd(hw);

	/* Re-capture the timer values to flush the command registers and
	 * verify that the time was properly adjusted.
	 */
	err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
	if (err)
		goto err_unlock;

	dev_info(ice_hw_to_dev(hw),
		 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
		 port, (unsigned long long)phy_time,
		 (unsigned long long)phc_time);

	ice_ptp_unlock(hw);

	return 0;

err_unlock:
	ice_ptp_unlock(hw);
	return err;
}

/**
 * ice_stop_phy_timer_e822 - Stop the PHY clock timer
 * @hw: pointer to the HW struct
 * @port: the PHY port to stop
 * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
 *
 * Stop the clock of a PHY port. This must be done as part of the flow to
 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
 * initialized or when link speed changes.
2225 */ 2226 int 2227 ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset) 2228 { 2229 int err; 2230 u32 val; 2231 2232 err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0); 2233 if (err) 2234 return err; 2235 2236 err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0); 2237 if (err) 2238 return err; 2239 2240 err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val); 2241 if (err) 2242 return err; 2243 2244 val &= ~P_REG_PS_START_M; 2245 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); 2246 if (err) 2247 return err; 2248 2249 val &= ~P_REG_PS_ENA_CLK_M; 2250 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); 2251 if (err) 2252 return err; 2253 2254 if (soft_reset) { 2255 val |= P_REG_PS_SFT_RESET_M; 2256 err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val); 2257 if (err) 2258 return err; 2259 } 2260 2261 ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port); 2262 2263 return 0; 2264 } 2265 2266 /** 2267 * ice_start_phy_timer_e822 - Start the PHY clock timer 2268 * @hw: pointer to the HW struct 2269 * @port: the PHY port to start 2270 * 2271 * Start the clock of a PHY port. This must be done as part of the flow to 2272 * re-calibrate Tx and Rx timestamping offsets whenever the clock time is 2273 * initialized or when link speed changes. 2274 * 2275 * Hardware will take Vernier measurements on Tx or Rx of packets. 
 */
int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
{
	u32 lo, hi, val;
	u64 incval;
	u8 tmr_idx;
	int err;

	tmr_idx = ice_get_ptp_src_clock_index(hw);

	/* Always stop the port timer first; the start sequence below assumes
	 * the port is stopped and not held in reset.
	 */
	err = ice_stop_phy_timer_e822(hw, port, false);
	if (err)
		return err;

	ice_phy_cfg_lane_e822(hw, port);

	err = ice_phy_cfg_uix_e822(hw, port);
	if (err)
		return err;

	err = ice_phy_cfg_parpcs_e822(hw, port);
	if (err)
		return err;

	/* Mirror the source timer's current increment value into the PHY */
	lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
	hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
	incval = (u64)hi << 32 | lo;

	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
	if (err)
		return err;

	err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
	if (err)
		return err;

	ice_ptp_exec_tmr_cmd(hw);

	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
	if (err)
		return err;

	/* Pulse the port through a soft reset while starting it, then
	 * release the reset. Each P_REG_PS bit change is a separate write.
	 */
	val |= P_REG_PS_SFT_RESET_M;
	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
	if (err)
		return err;

	val |= P_REG_PS_START_M;
	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
	if (err)
		return err;

	val &= ~P_REG_PS_SFT_RESET_M;
	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
	if (err)
		return err;

	/* Re-issue INIT_INCVAL after the reset has been released so the
	 * increment value takes effect on the running timer.
	 */
	err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
	if (err)
		return err;

	ice_ptp_exec_tmr_cmd(hw);

	val |= P_REG_PS_ENA_CLK_M;
	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
	if (err)
		return err;

	val |= P_REG_PS_LOAD_OFFSET_M;
	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
	if (err)
		return err;

	ice_ptp_exec_tmr_cmd(hw);

	/* Finally align the PHY timer to the PHC timer */
	err = ice_sync_phy_timer_e822(hw, port);
	if (err)
		return err;

	ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);

	return 0;
}

/**
 * 
ice_phy_calc_vernier_e822 - Perform vernier calculations 2362 * @hw: pointer to the HW struct 2363 * @port: the PHY port to configure 2364 * 2365 * Perform vernier calculations for the Tx and Rx offset. This will enable 2366 * hardware to include the more precise offset calibrations, 2367 * increasing precision of the generated timestamps. 2368 * 2369 * This cannot be done until hardware has measured the offsets, which requires 2370 * waiting until at least one packet has been sent and received by the device. 2371 */ 2372 int ice_phy_calc_vernier_e822(struct ice_hw *hw, u8 port) 2373 { 2374 int err; 2375 u32 val; 2376 2377 err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val); 2378 if (err) { 2379 ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n", 2380 port, err); 2381 return err; 2382 } 2383 2384 if (!(val & P_REG_TX_OV_STATUS_OV_M)) { 2385 ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n", 2386 port); 2387 return -EBUSY; 2388 } 2389 2390 err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val); 2391 if (err) { 2392 ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n", 2393 port, err); 2394 return err; 2395 } 2396 2397 if (!(val & P_REG_TX_OV_STATUS_OV_M)) { 2398 ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n", 2399 port); 2400 return -EBUSY; 2401 } 2402 2403 err = ice_phy_cfg_tx_offset_e822(hw, port); 2404 if (err) { 2405 ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, err %d\n", 2406 port, err); 2407 return err; 2408 } 2409 2410 err = ice_phy_cfg_rx_offset_e822(hw, port); 2411 if (err) { 2412 ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, err %d\n", 2413 port, err); 2414 return err; 2415 } 2416 2417 return 0; 2418 } 2419 2420 /** 2421 * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register 2422 * @hw: pointer to the HW struct 2423 * @quad: the timestamp quad to read from 2424 * 
@tstamp_ready: contents of the Tx memory status register
 *
 * Read the Q_REG_TX_MEMORY_STATUS register indicating which timestamps in
 * the PHY are ready. A set bit means the corresponding timestamp is valid and
 * ready to be captured from the PHY timestamp block.
 */
static int
ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
{
	u32 upper, lower;
	int err;

	/* Capture the upper half of the ready bitmap first */
	err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U,
				     &upper);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
			  quad, err);
		return err;
	}

	/* Then the lower half */
	err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L,
				     &lower);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
			  quad, err);
		return err;
	}

	/* Merge the two halves into the caller's 64 bit bitmap */
	*tstamp_ready = ((u64)upper << 32) | (u64)lower;

	return 0;
}

/* E810 functions
 *
 * The following functions operate on the E810 series devices which use
 * a separate external PHY.
 */

/**
 * ice_read_phy_reg_e810 - Read register from external PHY on E810
 * @hw: pointer to the HW struct
 * @addr: the address to read from
 * @val: On return, the value read from the PHY
 *
 * Read a register from the external PHY on the E810 device.
 */
static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
{
	struct ice_sbq_msg_input msg = {0};
	int err;

	/* Build a sideband queue read request targeting the external PHY */
	msg.msg_addr_low = lower_16_bits(addr);
	msg.msg_addr_high = upper_16_bits(addr);
	msg.opcode = ice_sbq_msg_rd;
	msg.dest_dev = rmn_0;

	err = ice_sbq_rw_reg(hw, &msg);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
			  err);
		return err;
	}

	*val = msg.data;

	return 0;
}

/**
 * ice_write_phy_reg_e810 - Write register on external PHY on E810
 * @hw: pointer to the HW struct
 * @addr: the address to write to
 * @val: the value to write to the PHY
 *
 * Write a value to a register of the external PHY on the E810 device.
 */
static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
{
	struct ice_sbq_msg_input msg = {0};
	int err;

	/* Build a sideband queue write request targeting the external PHY */
	msg.msg_addr_low = lower_16_bits(addr);
	msg.msg_addr_high = upper_16_bits(addr);
	msg.opcode = ice_sbq_msg_wr;
	msg.dest_dev = rmn_0;
	msg.data = val;

	err = ice_sbq_rw_reg(hw, &msg);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
			  err);
		return err;
	}

	return 0;
}

/**
 * ice_read_phy_tstamp_ll_e810 - Read a PHY timestamp registers through the FW
 * @hw: pointer to the HW struct
 * @idx: the timestamp index to read
 * @hi: 8 bit timestamp high value
 * @lo: 32 bit timestamp low value
 *
 * Read a 8bit timestamp high value and 32 bit timestamp low value out of the
 * timestamp block of the external PHY on the E810 device using the low latency
 * timestamp read.
 */
static int
ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
{
	u32 val;
	u8 i;

	/* Write TS index to read to the PF register so the FW can read it */
	val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
	wr32(hw, PF_SB_ATQBAL, val);

	/* Read the register repeatedly until the FW provides us the TS */
	for (i = TS_LL_READ_RETRIES; i > 0; i--) {
		val = rd32(hw, PF_SB_ATQBAL);

		/* When the bit is cleared, the TS is ready in the register */
		if (!(FIELD_GET(TS_LL_READ_TS, val))) {
			/* High 8 bit value of the TS is on the bits 16:23 */
			*hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);

			/* Read the low 32 bit value and set the TS valid bit */
			*lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
			return 0;
		}

		/* Brief busy-wait between polls of the FW-owned register */
		udelay(10);
	}

	/* FW failed to provide the TS in time */
	ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
	return -EINVAL;
}

/**
 * ice_read_phy_tstamp_sbq_e810 - Read a PHY timestamp registers through the sbq
 * @hw: pointer to the HW struct
 * @lport: the lport to read from
 * @idx: the timestamp index to read
 * @hi: 8 bit timestamp high value
 * @lo: 32 bit timestamp low value
 *
 * Read a 8bit timestamp high value and 32 bit timestamp low value out of the
 * timestamp block of the external PHY on the E810 device using sideband queue.
 */
static int
ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
			     u32 *lo)
{
	u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
	u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
	u32 lo_val, hi_val;
	int err;

	err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
			  err);
		return err;
	}

	err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
			  err);
		return err;
	}

	*lo = lo_val;
	/* Only the low 8 bits of the high register are timestamp data */
	*hi = (u8)hi_val;

	return 0;
}

/**
 * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
 * @hw: pointer to the HW struct
 * @lport: the lport to read from
 * @idx: the timestamp index to read
 * @tstamp: on return, the 40bit timestamp value
 *
 * Read a 40bit timestamp value out of the timestamp block of the external PHY
 * on the E810 device.
 */
static int
ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
{
	u32 lo = 0;
	u8 hi = 0;
	int err;

	/* Prefer the low latency firmware read when the device supports it */
	if (hw->dev_caps.ts_dev_info.ts_ll_read)
		err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
	else
		err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);

	if (err)
		return err;

	/* For E810 devices, the timestamp is reported with the lower 32 bits
	 * in the low register, and the upper 8 bits in the high register.
	 */
	*tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);

	return 0;
}

/**
 * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
 * @hw: pointer to the HW struct
 * @lport: the lport to read from
 * @idx: the timestamp index to reset
 *
 * Clear a timestamp, resetting its valid bit, from the timestamp block of the
 * external PHY on the E810 device.
 */
static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
{
	u32 lo_addr, hi_addr;
	int err;

	lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
	hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);

	/* Zeroing both halves clears the stored value and its valid bit */
	err = ice_write_phy_reg_e810(hw, lo_addr, 0);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
			  err);
		return err;
	}

	err = ice_write_phy_reg_e810(hw, hi_addr, 0);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
			  err);
		return err;
	}

	return 0;
}

/**
 * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
 * @hw: pointer to HW struct
 *
 * Enable the timesync PTP functionality for the external PHY connected to
 * this function.
 */
int ice_ptp_init_phy_e810(struct ice_hw *hw)
{
	u8 tmr_idx;
	int err;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
				     GLTSYN_ENA_TSYN_ENA_M);
	if (err)
		ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
			  err);

	return err;
}

/**
 * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
 * @hw: pointer to HW struct
 *
 * Perform E810-specific PTP hardware clock initialization steps.
 */
static int ice_ptp_init_phc_e810(struct ice_hw *hw)
{
	/* Ensure synchronization delay is zero */
	wr32(hw, GLTSYN_SYNC_DLAY, 0);

	/* Initialize the PHY */
	return ice_ptp_init_phy_e810(hw);
}

/**
 * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
 * @hw: Board private structure
 * @time: Time to initialize the PHY port clock to
 *
 * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the
 * initial clock time. The time will not actually be programmed until the
 * driver issues an INIT_TIME command.
 *
 * The time value is the upper 32 bits of the PHY timer, usually in units of
 * nominal nanoseconds.
 */
static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
{
	u8 tmr_idx;
	int err;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
	/* The sub-nanosecond shadow register is always cleared */
	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
			  err);
		return err;
	}

	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
			  err);
		return err;
	}

	return 0;
}

/**
 * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
 * @hw: pointer to HW struct
 * @adj: adjustment value to program
 *
 * Prepare the PHY port for an atomic adjustment by programming the PHY
 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
 * is completed by issuing an ADJ_TIME sync command.
 *
 * The adjustment value only contains the portion used for the upper 32bits of
 * the PHY timer, usually in units of nominal nanoseconds. Negative
 * adjustments are supported using 2s complement arithmetic.
2754 */ 2755 static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) 2756 { 2757 u8 tmr_idx; 2758 int err; 2759 2760 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 2761 2762 /* Adjustments are represented as signed 2's complement values in 2763 * nanoseconds. Sub-nanosecond adjustment is not supported. 2764 */ 2765 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0); 2766 if (err) { 2767 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n", 2768 err); 2769 return err; 2770 } 2771 2772 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj); 2773 if (err) { 2774 ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n", 2775 err); 2776 return err; 2777 } 2778 2779 return 0; 2780 } 2781 2782 /** 2783 * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change 2784 * @hw: pointer to HW struct 2785 * @incval: The new 40bit increment value to prepare 2786 * 2787 * Prepare the PHY port for a new increment value by programming the PHY 2788 * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is 2789 * completed by issuing an INIT_INCVAL command. 
2790 */ 2791 static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) 2792 { 2793 u32 high, low; 2794 u8 tmr_idx; 2795 int err; 2796 2797 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; 2798 low = lower_32_bits(incval); 2799 high = upper_32_bits(incval); 2800 2801 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low); 2802 if (err) { 2803 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n", 2804 err); 2805 return err; 2806 } 2807 2808 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high); 2809 if (err) { 2810 ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n", 2811 err); 2812 return err; 2813 } 2814 2815 return 0; 2816 } 2817 2818 /** 2819 * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command 2820 * @hw: pointer to HW struct 2821 * @cmd: Command to be sent to the port 2822 * 2823 * Prepare the external PHYs connected to this device for a timer sync 2824 * command. 2825 */ 2826 static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) 2827 { 2828 u32 cmd_val, val; 2829 int err; 2830 2831 switch (cmd) { 2832 case INIT_TIME: 2833 cmd_val = GLTSYN_CMD_INIT_TIME; 2834 break; 2835 case INIT_INCVAL: 2836 cmd_val = GLTSYN_CMD_INIT_INCVAL; 2837 break; 2838 case ADJ_TIME: 2839 cmd_val = GLTSYN_CMD_ADJ_TIME; 2840 break; 2841 case READ_TIME: 2842 cmd_val = GLTSYN_CMD_READ_TIME; 2843 break; 2844 case ADJ_TIME_AT_TIME: 2845 cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; 2846 break; 2847 } 2848 2849 /* Read, modify, write */ 2850 err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val); 2851 if (err) { 2852 ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err); 2853 return err; 2854 } 2855 2856 /* Modify necessary bits only and perform write */ 2857 val &= ~TS_CMD_MASK_E810; 2858 val |= cmd_val; 2859 2860 err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val); 2861 if (err) { 2862 ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err 
%d\n", err); 2863 return err; 2864 } 2865 2866 return 0; 2867 } 2868 2869 /* Device agnostic functions 2870 * 2871 * The following functions implement shared behavior common to both E822 and 2872 * E810 devices, possibly calling a device specific implementation where 2873 * necessary. 2874 */ 2875 2876 /** 2877 * ice_ptp_lock - Acquire PTP global semaphore register lock 2878 * @hw: pointer to the HW struct 2879 * 2880 * Acquire the global PTP hardware semaphore lock. Returns true if the lock 2881 * was acquired, false otherwise. 2882 * 2883 * The PFTSYN_SEM register sets the busy bit on read, returning the previous 2884 * value. If software sees the busy bit cleared, this means that this function 2885 * acquired the lock (and the busy bit is now set). If software sees the busy 2886 * bit set, it means that another function acquired the lock. 2887 * 2888 * Software must clear the busy bit with a write to release the lock for other 2889 * functions when done. 2890 */ 2891 bool ice_ptp_lock(struct ice_hw *hw) 2892 { 2893 u32 hw_lock; 2894 int i; 2895 2896 #define MAX_TRIES 15 2897 2898 for (i = 0; i < MAX_TRIES; i++) { 2899 hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); 2900 hw_lock = hw_lock & PFTSYN_SEM_BUSY_M; 2901 if (hw_lock) { 2902 /* Somebody is holding the lock */ 2903 usleep_range(5000, 6000); 2904 continue; 2905 } 2906 2907 break; 2908 } 2909 2910 return !hw_lock; 2911 } 2912 2913 /** 2914 * ice_ptp_unlock - Release PTP global semaphore register lock 2915 * @hw: pointer to the HW struct 2916 * 2917 * Release the global PTP hardware semaphore lock. This is done by writing to 2918 * the PFTSYN_SEM register. 
 */
void ice_ptp_unlock(struct ice_hw *hw)
{
	/* Writing 0 clears the busy bit set when the lock was taken */
	wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
}

/**
 * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
 * @hw: pointer to HW struct
 * @cmd: the command to issue
 *
 * Prepare the source timer and PHY timers and then trigger the requested
 * command. This causes the shadow registers previously written in preparation
 * for the command to be synchronously applied to both the source and PHY
 * timers.
 */
static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
	int err;

	/* First, prepare the source timer */
	ice_ptp_src_cmd(hw, cmd);

	/* Next, prepare the ports */
	if (ice_is_e810(hw))
		err = ice_ptp_port_cmd_e810(hw, cmd);
	else
		err = ice_ptp_port_cmd_e822(hw, cmd);
	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
			  cmd, err);
		return err;
	}

	/* Write the sync command register to drive both source and PHY timer
	 * commands synchronously
	 */
	ice_ptp_exec_tmr_cmd(hw);

	return 0;
}

/**
 * ice_ptp_init_time - Initialize device time to provided value
 * @hw: pointer to HW struct
 * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
 *
 * Initialize the device to the specified time provided. This requires a three
 * step process:
 *
 * 1) write the new init time to the source timer shadow registers
 * 2) write the new init time to the PHY timer shadow registers
 * 3) issue an init_time timer command to synchronously switch both the source
 *    and port timers to the new init time value at the next clock cycle.
 */
int ice_ptp_init_time(struct ice_hw *hw, u64 time)
{
	u8 tmr_idx;
	int err;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* Source timers */
	wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
	wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
	wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);

	/* PHY timers */
	/* Fill Rx and Tx ports and send msg to PHY.
	 * Only the lower 32 bits of the time are programmed to the PHY
	 * shadow registers; the PHY timer tracks the lower 64 bits of the
	 * source timer.
	 */
	if (ice_is_e810(hw))
		err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
	else
		err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
	if (err)
		return err;

	/* Latch the staged values into both source and PHY timers at once */
	return ice_ptp_tmr_cmd(hw, INIT_TIME);
}

/**
 * ice_ptp_write_incval - Program PHC with new increment value
 * @hw: pointer to HW struct
 * @incval: Source timer increment value per clock cycle
 *
 * Program the PHC with a new increment value. This requires a three-step
 * process:
 *
 * 1) Write the increment value to the source timer shadow registers
 * 2) Write the increment value to the PHY timer shadow registers
 * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
 *    source and port timers to the new increment value at the next clock
 *    cycle.
 */
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
{
	u8 tmr_idx;
	int err;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* Shadow Adjust */
	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));

	if (ice_is_e810(hw))
		err = ice_ptp_prep_phy_incval_e810(hw, incval);
	else
		err = ice_ptp_prep_phy_incval_e822(hw, incval);
	if (err)
		return err;

	/* Latch the new increment value into both timers simultaneously */
	return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
}

/**
 * ice_ptp_write_incval_locked - Program new incval while holding semaphore
 * @hw: pointer to HW struct
 * @incval: Source timer increment value per clock cycle
 *
 * Program a new PHC incval while holding the PTP semaphore.
 */
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
{
	int err;

	if (!ice_ptp_lock(hw))
		return -EBUSY;

	err = ice_ptp_write_incval(hw, incval);

	ice_ptp_unlock(hw);

	return err;
}

/**
 * ice_ptp_adj_clock - Adjust PHC clock time atomically
 * @hw: pointer to HW struct
 * @adj: Adjustment in nanoseconds
 *
 * Perform an atomic adjustment of the PHC time by the specified number of
 * nanoseconds. This requires a three-step process:
 *
 * 1) Write the adjustment to the source timer shadow registers
 * 2) Write the adjustment to the PHY timer shadow registers
 * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
 *    both the source and port timers at the next clock cycle.
 */
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
{
	u8 tmr_idx;
	int err;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
	 * For an ADJ_TIME command, this set of registers represents the value
	 * to add to the clock time. It supports subtraction by interpreting
	 * the value as a 2's complement integer.
	 */
	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);

	if (ice_is_e810(hw))
		err = ice_ptp_prep_phy_adj_e810(hw, adj);
	else
		err = ice_ptp_prep_phy_adj_e822(hw, adj);
	if (err)
		return err;

	return ice_ptp_tmr_cmd(hw, ADJ_TIME);
}

/**
 * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
 * @hw: pointer to the HW struct
 * @block: the block to read from
 * @idx: the timestamp index to read
 * @tstamp: on return, the 40bit timestamp value
 *
 * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
 * the block is the quad to read from. For E810 devices, the block is the
 * logical port to read from.
 */
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
	if (ice_is_e810(hw))
		return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
	else
		return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
}

/**
 * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
 * @hw: pointer to the HW struct
 * @block: the block to read from
 * @idx: the timestamp index to reset
 *
 * Clear a timestamp, resetting its valid bit, from the timestamp block. For
 * E822 devices, the block is the quad to clear from. For E810 devices, the
 * block is the logical port to clear from.
 */
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
	/* Dispatch to the device family specific implementation */
	if (ice_is_e810(hw))
		return ice_clear_phy_tstamp_e810(hw, block, idx);
	else
		return ice_clear_phy_tstamp_e822(hw, block, idx);
}

/**
 * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
 * @hw: pointer to the HW struct
 * @port: the PHY port to read
 * @tstamp_ready: contents of the Tx memory status register
 *
 * E810 devices do not use a Tx memory status register. Instead simply
 * indicate that all timestamps are currently ready.
 */
static int
ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
{
	/* All 64 bits set: every timestamp slot is reported as ready */
	*tstamp_ready = 0xFFFFFFFFFFFFFFFF;
	return 0;
}

/* E810T SMA functions
 *
 * The following functions operate specifically on E810T hardware and are used
 * to access the extended GPIOs available.
 */

/**
 * ice_get_pca9575_handle
 * @hw: pointer to the hw struct
 * @pca9575_handle: GPIO controller's handle
 *
 * Find and return the GPIO controller's handle in the netlist.
 * When found - the value will be cached in the hw structure and following calls
 * will return cached value
 */
static int
ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;
	int status;
	u8 idx;

	/* If handle was read previously return cached value */
	if (hw->io_expander_handle) {
		*pca9575_handle = hw->io_expander_handle;
		return 0;
	}

	/* If handle was not detected read it from the netlist */
	cmd = &desc.params.get_link_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	/* Set node type to GPIO controller */
	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
		 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);

#define SW_PCA9575_SFP_TOPO_IDX		2
#define SW_PCA9575_QSFP_TOPO_IDX	1

	/* Check if the SW IO expander controlling SMA exists in the netlist. */
	if (hw->device_id == ICE_DEV_ID_E810C_SFP)
		idx = SW_PCA9575_SFP_TOPO_IDX;
	else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
		idx = SW_PCA9575_QSFP_TOPO_IDX;
	else
		return -EOPNOTSUPP;

	cmd->addr.topo_params.index = idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (status)
		return -EOPNOTSUPP;

	/* Verify if we found the right IO expander type */
	if (desc.params.get_link_topo.node_part_num !=
	    ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
		return -EOPNOTSUPP;

	/* If present save the handle and return it */
	hw->io_expander_handle =
		le16_to_cpu(desc.params.get_link_topo.addr.handle);
	*pca9575_handle = hw->io_expander_handle;

	return 0;
}

/**
 * ice_read_sma_ctrl_e810t
 * @hw: pointer to the hw struct
 * @data: pointer to data to be read from the GPIO controller
 *
 * Read the SMA controller state. 
It is connected to pins 3-7 of Port 1 of the 3219 * PCA9575 expander, so only bits 3-7 in data are valid. 3220 */ 3221 int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) 3222 { 3223 int status; 3224 u16 handle; 3225 u8 i; 3226 3227 status = ice_get_pca9575_handle(hw, &handle); 3228 if (status) 3229 return status; 3230 3231 *data = 0; 3232 3233 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { 3234 bool pin; 3235 3236 status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, 3237 &pin, NULL); 3238 if (status) 3239 break; 3240 *data |= (u8)(!pin) << i; 3241 } 3242 3243 return status; 3244 } 3245 3246 /** 3247 * ice_write_sma_ctrl_e810t 3248 * @hw: pointer to the hw struct 3249 * @data: data to be written to the GPIO controller 3250 * 3251 * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1 3252 * of the PCA9575 expander, so only bits 3-7 in data are valid. 3253 */ 3254 int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) 3255 { 3256 int status; 3257 u16 handle; 3258 u8 i; 3259 3260 status = ice_get_pca9575_handle(hw, &handle); 3261 if (status) 3262 return status; 3263 3264 for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { 3265 bool pin; 3266 3267 pin = !(data & (1 << i)); 3268 status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, 3269 pin, NULL); 3270 if (status) 3271 break; 3272 } 3273 3274 return status; 3275 } 3276 3277 /** 3278 * ice_read_pca9575_reg_e810t 3279 * @hw: pointer to the hw struct 3280 * @offset: GPIO controller register offset 3281 * @data: pointer to data to be read from the GPIO controller 3282 * 3283 * Read the register from the GPIO controller 3284 */ 3285 int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) 3286 { 3287 struct ice_aqc_link_topo_addr link_topo; 3288 __le16 addr; 3289 u16 handle; 3290 int err; 3291 3292 memset(&link_topo, 0, sizeof(link_topo)); 3293 3294 err = ice_get_pca9575_handle(hw, &handle); 3295 if (err) 3296 return err; 
3297 3298 link_topo.handle = cpu_to_le16(handle); 3299 link_topo.topo_params.node_type_ctx = 3300 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, 3301 ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); 3302 3303 addr = cpu_to_le16((u16)offset); 3304 3305 return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); 3306 } 3307 3308 /** 3309 * ice_is_pca9575_present 3310 * @hw: pointer to the hw struct 3311 * 3312 * Check if the SW IO expander is present in the netlist 3313 */ 3314 bool ice_is_pca9575_present(struct ice_hw *hw) 3315 { 3316 u16 handle = 0; 3317 int status; 3318 3319 if (!ice_is_e810t(hw)) 3320 return false; 3321 3322 status = ice_get_pca9575_handle(hw, &handle); 3323 3324 return !status && handle; 3325 } 3326 3327 /** 3328 * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks 3329 * @hw: pointer to the HW struct 3330 */ 3331 void ice_ptp_reset_ts_memory(struct ice_hw *hw) 3332 { 3333 if (ice_is_e810(hw)) 3334 return; 3335 3336 ice_ptp_reset_ts_memory_e822(hw); 3337 } 3338 3339 /** 3340 * ice_ptp_init_phc - Initialize PTP hardware clock 3341 * @hw: pointer to the HW struct 3342 * 3343 * Perform the steps required to initialize the PTP hardware clock. 3344 */ 3345 int ice_ptp_init_phc(struct ice_hw *hw) 3346 { 3347 u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned; 3348 3349 /* Enable source clocks */ 3350 wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M); 3351 3352 /* Clear event err indications for auxiliary pins */ 3353 (void)rd32(hw, GLTSYN_STAT(src_idx)); 3354 3355 if (ice_is_e810(hw)) 3356 return ice_ptp_init_phc_e810(hw); 3357 else 3358 return ice_ptp_init_phc_e822(hw); 3359 } 3360 3361 /** 3362 * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication 3363 * @hw: pointer to the HW struct 3364 * @block: the timestamp block to check 3365 * @tstamp_ready: storage for the PHY Tx memory status information 3366 * 3367 * Check the PHY for Tx timestamp memory status. 
This reports a 64 bit value
 * which indicates which timestamps in the block may be captured. A set bit
 * means the timestamp can be read. An unset bit means the timestamp is not
 * ready and software should avoid reading the register.
 */
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
	if (ice_is_e810(hw))
		return ice_get_phy_tx_tstamp_ready_e810(hw, block,
							tstamp_ready);

	return ice_get_phy_tx_tstamp_ready_e822(hw, block, tstamp_ready);
}