1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include <linux/delay.h>
5 #include "ice_common.h"
6 #include "ice_ptp_hw.h"
7 #include "ice_ptp_consts.h"
8 #include "ice_cgu_regs.h"
9 
10 /* Low level functions for interacting with and managing the device clock used
11  * for the Precision Time Protocol.
12  *
13  * The ice hardware represents the current time using three registers:
14  *
15  *    GLTSYN_TIME_H     GLTSYN_TIME_L     GLTSYN_TIME_R
16  *  +---------------+ +---------------+ +---------------+
17  *  |    32 bits    | |    32 bits    | |    32 bits    |
18  *  +---------------+ +---------------+ +---------------+
19  *
20  * The registers are incremented every clock tick using a 40bit increment
21  * value defined over two registers:
22  *
23  *                     GLTSYN_INCVAL_H   GLTSYN_INCVAL_L
24  *                    +---------------+ +---------------+
25  *                    |    8 bit s    | |    32 bits    |
26  *                    +---------------+ +---------------+
27  *
 * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
29  * registers every clock source tick. Depending on the specific device
30  * configuration, the clock source frequency could be one of a number of
31  * values.
32  *
33  * For E810 devices, the increment frequency is 812.5 MHz
34  *
35  * For E822 devices the clock can be derived from different sources, and the
36  * increment has an effective frequency of one of the following:
37  * - 823.4375 MHz
38  * - 783.36 MHz
39  * - 796.875 MHz
40  * - 816 MHz
41  * - 830.078125 MHz
 * - 783.36 MHz (two supported TIME_REF frequencies yield the same PLL rate)
43  *
44  * The hardware captures timestamps in the PHY for incoming packets, and for
45  * outgoing packets on request. To support this, the PHY maintains a timer
46  * that matches the lower 64 bits of the global source timer.
47  *
48  * In order to ensure that the PHY timers and the source timer are equivalent,
49  * shadow registers are used to prepare the desired initial values. A special
50  * sync command is issued to trigger copying from the shadow registers into
51  * the appropriate source and PHY registers simultaneously.
52  *
53  * The driver supports devices which have different PHYs with subtly different
54  * mechanisms to program and control the timers. We divide the devices into
55  * families named after the first major device, E810 and similar devices, and
56  * E822 and similar devices.
57  *
58  * - E822 based devices have additional support for fine grained Vernier
59  *   calibration which requires significant setup
60  * - The layout of timestamp data in the PHY register blocks is different
61  * - The way timer synchronization commands are issued is different.
62  *
63  * To support this, very low level functions have an e810 or e822 suffix
64  * indicating what type of device they work on. Higher level abstractions for
65  * tasks that can be done on both devices do not have the suffix and will
66  * correctly look up the appropriate low level function when running.
67  *
68  * Functions which only make sense on a single device family may not have
 * a suitable generic implementation.
70  */
71 
72 /**
73  * ice_get_ptp_src_clock_index - determine source clock index
74  * @hw: pointer to HW struct
75  *
76  * Determine the source clock index currently in use, based on device
77  * capabilities reported during initialization.
78  */
79 u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
80 {
81 	return hw->func_caps.ts_func_info.tmr_index_assoc;
82 }
83 
84 /**
85  * ice_ptp_read_src_incval - Read source timer increment value
86  * @hw: pointer to HW struct
87  *
88  * Read the increment value of the source timer and return it.
89  */
90 static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
91 {
92 	u32 lo, hi;
93 	u8 tmr_idx;
94 
95 	tmr_idx = ice_get_ptp_src_clock_index(hw);
96 
97 	lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
98 	hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
99 
100 	return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
101 }
102 
103 /**
104  * ice_ptp_src_cmd - Prepare source timer for a timer command
105  * @hw: pointer to HW structure
106  * @cmd: Timer command
107  *
108  * Prepare the source timer for an upcoming timer sync command.
109  */
110 static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
111 {
112 	u32 cmd_val;
113 	u8 tmr_idx;
114 
115 	tmr_idx = ice_get_ptp_src_clock_index(hw);
116 	cmd_val = tmr_idx << SEL_CPK_SRC;
117 
118 	switch (cmd) {
119 	case INIT_TIME:
120 		cmd_val |= GLTSYN_CMD_INIT_TIME;
121 		break;
122 	case INIT_INCVAL:
123 		cmd_val |= GLTSYN_CMD_INIT_INCVAL;
124 		break;
125 	case ADJ_TIME:
126 		cmd_val |= GLTSYN_CMD_ADJ_TIME;
127 		break;
128 	case ADJ_TIME_AT_TIME:
129 		cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
130 		break;
131 	case READ_TIME:
132 		cmd_val |= GLTSYN_CMD_READ_TIME;
133 		break;
134 	}
135 
136 	wr32(hw, GLTSYN_CMD, cmd_val);
137 }
138 
139 /**
140  * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
141  * @hw: pointer to HW struct
142  *
143  * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
144  * write immediately. This triggers the hardware to begin executing all of the
145  * source and PHY timer commands synchronously.
146  */
147 static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
148 {
149 	wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
150 	ice_flush(hw);
151 }
152 
153 /* E822 family functions
154  *
155  * The following functions operate on the E822 family of devices.
156  */
157 
158 /**
159  * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
160  * @msg: the PHY message buffer to fill in
161  * @port: the port to access
162  * @offset: the register offset
163  */
164 static void
165 ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
166 {
167 	int phy_port, phy, quadtype;
168 
169 	phy_port = port % ICE_PORTS_PER_PHY;
170 	phy = port / ICE_PORTS_PER_PHY;
171 	quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
172 
173 	if (quadtype == 0) {
174 		msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
175 		msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
176 	} else {
177 		msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
178 		msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
179 	}
180 
181 	if (phy == 0)
182 		msg->dest_dev = rmn_0;
183 	else if (phy == 1)
184 		msg->dest_dev = rmn_1;
185 	else
186 		msg->dest_dev = rmn_2;
187 }
188 
189 /**
190  * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
191  * @low_addr: the low address to check
192  * @high_addr: on return, contains the high address of the 64bit register
193  *
194  * Checks if the provided low address is one of the known 64bit PHY values
195  * represented as two 32bit registers. If it is, return the appropriate high
196  * register offset to use.
197  */
198 static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
199 {
200 	switch (low_addr) {
201 	case P_REG_PAR_PCS_TX_OFFSET_L:
202 		*high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
203 		return true;
204 	case P_REG_PAR_PCS_RX_OFFSET_L:
205 		*high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
206 		return true;
207 	case P_REG_PAR_TX_TIME_L:
208 		*high_addr = P_REG_PAR_TX_TIME_U;
209 		return true;
210 	case P_REG_PAR_RX_TIME_L:
211 		*high_addr = P_REG_PAR_RX_TIME_U;
212 		return true;
213 	case P_REG_TOTAL_TX_OFFSET_L:
214 		*high_addr = P_REG_TOTAL_TX_OFFSET_U;
215 		return true;
216 	case P_REG_TOTAL_RX_OFFSET_L:
217 		*high_addr = P_REG_TOTAL_RX_OFFSET_U;
218 		return true;
219 	case P_REG_UIX66_10G_40G_L:
220 		*high_addr = P_REG_UIX66_10G_40G_U;
221 		return true;
222 	case P_REG_UIX66_25G_100G_L:
223 		*high_addr = P_REG_UIX66_25G_100G_U;
224 		return true;
225 	case P_REG_TX_CAPTURE_L:
226 		*high_addr = P_REG_TX_CAPTURE_U;
227 		return true;
228 	case P_REG_RX_CAPTURE_L:
229 		*high_addr = P_REG_RX_CAPTURE_U;
230 		return true;
231 	case P_REG_TX_TIMER_INC_PRE_L:
232 		*high_addr = P_REG_TX_TIMER_INC_PRE_U;
233 		return true;
234 	case P_REG_RX_TIMER_INC_PRE_L:
235 		*high_addr = P_REG_RX_TIMER_INC_PRE_U;
236 		return true;
237 	default:
238 		return false;
239 	}
240 }
241 
242 /**
243  * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
244  * @low_addr: the low address to check
245  * @high_addr: on return, contains the high address of the 40bit value
246  *
247  * Checks if the provided low address is one of the known 40bit PHY values
248  * split into two registers with the lower 8 bits in the low register and the
249  * upper 32 bits in the high register. If it is, return the appropriate high
250  * register offset to use.
251  */
252 static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
253 {
254 	switch (low_addr) {
255 	case P_REG_TIMETUS_L:
256 		*high_addr = P_REG_TIMETUS_U;
257 		return true;
258 	case P_REG_PAR_RX_TUS_L:
259 		*high_addr = P_REG_PAR_RX_TUS_U;
260 		return true;
261 	case P_REG_PAR_TX_TUS_L:
262 		*high_addr = P_REG_PAR_TX_TUS_U;
263 		return true;
264 	case P_REG_PCS_RX_TUS_L:
265 		*high_addr = P_REG_PCS_RX_TUS_U;
266 		return true;
267 	case P_REG_PCS_TX_TUS_L:
268 		*high_addr = P_REG_PCS_TX_TUS_U;
269 		return true;
270 	case P_REG_DESK_PAR_RX_TUS_L:
271 		*high_addr = P_REG_DESK_PAR_RX_TUS_U;
272 		return true;
273 	case P_REG_DESK_PAR_TX_TUS_L:
274 		*high_addr = P_REG_DESK_PAR_TX_TUS_U;
275 		return true;
276 	case P_REG_DESK_PCS_RX_TUS_L:
277 		*high_addr = P_REG_DESK_PCS_RX_TUS_U;
278 		return true;
279 	case P_REG_DESK_PCS_TX_TUS_L:
280 		*high_addr = P_REG_DESK_PCS_TX_TUS_U;
281 		return true;
282 	default:
283 		return false;
284 	}
285 }
286 
287 /**
288  * ice_read_phy_reg_e822 - Read a PHY register
289  * @hw: pointer to the HW struct
290  * @port: PHY port to read from
291  * @offset: PHY register offset to read
292  * @val: on return, the contents read from the PHY
293  *
294  * Read a PHY register for the given port over the device sideband queue.
295  */
296 int
297 ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
298 {
299 	struct ice_sbq_msg_input msg = {0};
300 	int err;
301 
302 	ice_fill_phy_msg_e822(&msg, port, offset);
303 	msg.opcode = ice_sbq_msg_rd;
304 
305 	err = ice_sbq_rw_reg(hw, &msg);
306 	if (err) {
307 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
308 			  err);
309 		return err;
310 	}
311 
312 	*val = msg.data;
313 
314 	return 0;
315 }
316 
317 /**
318  * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
319  * @hw: pointer to the HW struct
320  * @port: PHY port to read from
321  * @low_addr: offset of the lower register to read from
322  * @val: on return, the contents of the 64bit value from the PHY registers
323  *
324  * Reads the two registers associated with a 64bit value and returns it in the
325  * val pointer. The offset always specifies the lower register offset to use.
326  * The high offset is looked up. This function only operates on registers
327  * known to be two parts of a 64bit value.
328  */
329 static int
330 ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
331 {
332 	u32 low, high;
333 	u16 high_addr;
334 	int err;
335 
336 	/* Only operate on registers known to be split into two 32bit
337 	 * registers.
338 	 */
339 	if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
340 		ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
341 			  low_addr);
342 		return -EINVAL;
343 	}
344 
345 	err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
346 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x, err %d\n",
348 			  low_addr, err);
349 		return err;
350 	}
351 
352 	err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
353 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x, err %d\n",
355 			  high_addr, err);
356 		return err;
357 	}
358 
359 	*val = (u64)high << 32 | low;
360 
361 	return 0;
362 }
363 
364 /**
365  * ice_write_phy_reg_e822 - Write a PHY register
366  * @hw: pointer to the HW struct
367  * @port: PHY port to write to
368  * @offset: PHY register offset to write
369  * @val: The value to write to the register
370  *
371  * Write a PHY register for the given port over the device sideband queue.
372  */
373 int
374 ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
375 {
376 	struct ice_sbq_msg_input msg = {0};
377 	int err;
378 
379 	ice_fill_phy_msg_e822(&msg, port, offset);
380 	msg.opcode = ice_sbq_msg_wr;
381 	msg.data = val;
382 
383 	err = ice_sbq_rw_reg(hw, &msg);
384 	if (err) {
385 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
386 			  err);
387 		return err;
388 	}
389 
390 	return 0;
391 }
392 
393 /**
394  * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
395  * @hw: pointer to the HW struct
396  * @port: port to write to
397  * @low_addr: offset of the low register
398  * @val: 40b value to write
399  *
400  * Write the provided 40b value to the two associated registers by splitting
401  * it up into two chunks, the lower 8 bits and the upper 32 bits.
402  */
403 static int
404 ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
405 {
406 	u32 low, high;
407 	u16 high_addr;
408 	int err;
409 
410 	/* Only operate on registers known to be split into a lower 8 bit
411 	 * register and an upper 32 bit register.
412 	 */
413 	if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
414 		ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
415 			  low_addr);
416 		return -EINVAL;
417 	}
418 
419 	low = (u32)(val & P_REG_40B_LOW_M);
420 	high = (u32)(val >> P_REG_40B_HIGH_S);
421 
422 	err = ice_write_phy_reg_e822(hw, port, low_addr, low);
423 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
425 			  low_addr, err);
426 		return err;
427 	}
428 
429 	err = ice_write_phy_reg_e822(hw, port, high_addr, high);
430 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
432 			  high_addr, err);
433 		return err;
434 	}
435 
436 	return 0;
437 }
438 
439 /**
440  * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
441  * @hw: pointer to the HW struct
442  * @port: PHY port to read from
443  * @low_addr: offset of the lower register to read from
444  * @val: the contents of the 64bit value to write to PHY
445  *
446  * Write the 64bit value to the two associated 32bit PHY registers. The offset
447  * is always specified as the lower register, and the high address is looked
448  * up. This function only operates on registers known to be two parts of
449  * a 64bit value.
450  */
451 static int
452 ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
453 {
454 	u32 low, high;
455 	u16 high_addr;
456 	int err;
457 
458 	/* Only operate on registers known to be split into two 32bit
459 	 * registers.
460 	 */
461 	if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
462 		ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
463 			  low_addr);
464 		return -EINVAL;
465 	}
466 
467 	low = lower_32_bits(val);
468 	high = upper_32_bits(val);
469 
470 	err = ice_write_phy_reg_e822(hw, port, low_addr, low);
471 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
473 			  low_addr, err);
474 		return err;
475 	}
476 
477 	err = ice_write_phy_reg_e822(hw, port, high_addr, high);
478 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
480 			  high_addr, err);
481 		return err;
482 	}
483 
484 	return 0;
485 }
486 
487 /**
488  * ice_fill_quad_msg_e822 - Fill message data for quad register access
489  * @msg: the PHY message buffer to fill in
490  * @quad: the quad to access
491  * @offset: the register offset
492  *
493  * Fill a message buffer for accessing a register in a quad shared between
494  * multiple PHYs.
495  */
496 static void
497 ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
498 {
499 	u32 addr;
500 
501 	msg->dest_dev = rmn_0;
502 
503 	if ((quad % ICE_NUM_QUAD_TYPE) == 0)
504 		addr = Q_0_BASE + offset;
505 	else
506 		addr = Q_1_BASE + offset;
507 
508 	msg->msg_addr_low = lower_16_bits(addr);
509 	msg->msg_addr_high = upper_16_bits(addr);
510 }
511 
512 /**
513  * ice_read_quad_reg_e822 - Read a PHY quad register
514  * @hw: pointer to the HW struct
515  * @quad: quad to read from
516  * @offset: quad register offset to read
517  * @val: on return, the contents read from the quad
518  *
519  * Read a quad register over the device sideband queue. Quad registers are
520  * shared between multiple PHYs.
521  */
522 int
523 ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
524 {
525 	struct ice_sbq_msg_input msg = {0};
526 	int err;
527 
528 	if (quad >= ICE_MAX_QUAD)
529 		return -EINVAL;
530 
531 	ice_fill_quad_msg_e822(&msg, quad, offset);
532 	msg.opcode = ice_sbq_msg_rd;
533 
534 	err = ice_sbq_rw_reg(hw, &msg);
535 	if (err) {
536 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
537 			  err);
538 		return err;
539 	}
540 
541 	*val = msg.data;
542 
543 	return 0;
544 }
545 
546 /**
547  * ice_write_quad_reg_e822 - Write a PHY quad register
548  * @hw: pointer to the HW struct
549  * @quad: quad to write to
550  * @offset: quad register offset to write
551  * @val: The value to write to the register
552  *
553  * Write a quad register over the device sideband queue. Quad registers are
554  * shared between multiple PHYs.
555  */
556 int
557 ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
558 {
559 	struct ice_sbq_msg_input msg = {0};
560 	int err;
561 
562 	if (quad >= ICE_MAX_QUAD)
563 		return -EINVAL;
564 
565 	ice_fill_quad_msg_e822(&msg, quad, offset);
566 	msg.opcode = ice_sbq_msg_wr;
567 	msg.data = val;
568 
569 	err = ice_sbq_rw_reg(hw, &msg);
570 	if (err) {
571 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
572 			  err);
573 		return err;
574 	}
575 
576 	return 0;
577 }
578 
579 /**
580  * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
581  * @hw: pointer to the HW struct
582  * @quad: the quad to read from
583  * @idx: the timestamp index to read
584  * @tstamp: on return, the 40bit timestamp value
585  *
586  * Read a 40bit timestamp value out of the two associated registers in the
587  * quad memory block that is shared between the internal PHYs of the E822
588  * family of devices.
589  */
590 static int
591 ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
592 {
593 	u16 lo_addr, hi_addr;
594 	u32 lo, hi;
595 	int err;
596 
597 	lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
598 	hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
599 
600 	err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
601 	if (err) {
602 		ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
603 			  err);
604 		return err;
605 	}
606 
607 	err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
608 	if (err) {
609 		ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
610 			  err);
611 		return err;
612 	}
613 
614 	/* For E822 based internal PHYs, the timestamp is reported with the
615 	 * lower 8 bits in the low register, and the upper 32 bits in the high
616 	 * register.
617 	 */
618 	*tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
619 
620 	return 0;
621 }
622 
623 /**
624  * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
625  * @hw: pointer to the HW struct
626  * @quad: the quad to read from
627  * @idx: the timestamp index to reset
628  *
629  * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
630  * shared between the internal PHYs on the E822 devices.
631  */
632 static int
633 ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
634 {
635 	u16 lo_addr, hi_addr;
636 	int err;
637 
638 	lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
639 	hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
640 
641 	err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
642 	if (err) {
643 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
644 			  err);
645 		return err;
646 	}
647 
648 	err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
649 	if (err) {
650 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
651 			  err);
652 		return err;
653 	}
654 
655 	return 0;
656 }
657 
658 /**
659  * ice_read_cgu_reg_e822 - Read a CGU register
660  * @hw: pointer to the HW struct
661  * @addr: Register address to read
662  * @val: storage for register value read
663  *
664  * Read the contents of a register of the Clock Generation Unit. Only
665  * applicable to E822 devices.
666  */
667 static int
668 ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
669 {
670 	struct ice_sbq_msg_input cgu_msg;
671 	int err;
672 
673 	cgu_msg.opcode = ice_sbq_msg_rd;
674 	cgu_msg.dest_dev = cgu;
675 	cgu_msg.msg_addr_low = addr;
676 	cgu_msg.msg_addr_high = 0x0;
677 
678 	err = ice_sbq_rw_reg(hw, &cgu_msg);
679 	if (err) {
680 		ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
681 			  addr, err);
682 		return err;
683 	}
684 
685 	*val = cgu_msg.data;
686 
687 	return err;
688 }
689 
690 /**
691  * ice_write_cgu_reg_e822 - Write a CGU register
692  * @hw: pointer to the HW struct
693  * @addr: Register address to write
694  * @val: value to write into the register
695  *
696  * Write the specified value to a register of the Clock Generation Unit. Only
697  * applicable to E822 devices.
698  */
699 static int
700 ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
701 {
702 	struct ice_sbq_msg_input cgu_msg;
703 	int err;
704 
705 	cgu_msg.opcode = ice_sbq_msg_wr;
706 	cgu_msg.dest_dev = cgu;
707 	cgu_msg.msg_addr_low = addr;
708 	cgu_msg.msg_addr_high = 0x0;
709 	cgu_msg.data = val;
710 
711 	err = ice_sbq_rw_reg(hw, &cgu_msg);
712 	if (err) {
713 		ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
714 			  addr, err);
715 		return err;
716 	}
717 
718 	return err;
719 }
720 
721 /**
722  * ice_clk_freq_str - Convert time_ref_freq to string
723  * @clk_freq: Clock frequency
724  *
725  * Convert the specified TIME_REF clock frequency to a string.
726  */
727 static const char *ice_clk_freq_str(u8 clk_freq)
728 {
729 	switch ((enum ice_time_ref_freq)clk_freq) {
730 	case ICE_TIME_REF_FREQ_25_000:
731 		return "25 MHz";
732 	case ICE_TIME_REF_FREQ_122_880:
733 		return "122.88 MHz";
734 	case ICE_TIME_REF_FREQ_125_000:
735 		return "125 MHz";
736 	case ICE_TIME_REF_FREQ_153_600:
737 		return "153.6 MHz";
738 	case ICE_TIME_REF_FREQ_156_250:
739 		return "156.25 MHz";
740 	case ICE_TIME_REF_FREQ_245_760:
741 		return "245.76 MHz";
742 	default:
743 		return "Unknown";
744 	}
745 }
746 
747 /**
748  * ice_clk_src_str - Convert time_ref_src to string
749  * @clk_src: Clock source
750  *
751  * Convert the specified clock source to its string name.
752  */
753 static const char *ice_clk_src_str(u8 clk_src)
754 {
755 	switch ((enum ice_clk_src)clk_src) {
756 	case ICE_CLK_SRC_TCX0:
757 		return "TCX0";
758 	case ICE_CLK_SRC_TIME_REF:
759 		return "TIME_REF";
760 	default:
761 		return "Unknown";
762 	}
763 }
764 
765 /**
766  * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
767  * @hw: pointer to the HW struct
768  * @clk_freq: Clock frequency to program
769  * @clk_src: Clock source to select (TIME_REF, or TCX0)
770  *
771  * Configure the Clock Generation Unit with the desired clock frequency and
772  * time reference, enabling the PLL which drives the PTP hardware clock.
773  */
774 static int
775 ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
776 		     enum ice_clk_src clk_src)
777 {
778 	union tspll_ro_bwm_lf bwm_lf;
779 	union nac_cgu_dword19 dw19;
780 	union nac_cgu_dword22 dw22;
781 	union nac_cgu_dword24 dw24;
782 	union nac_cgu_dword9 dw9;
783 	int err;
784 
785 	if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
786 		dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
787 			 clk_freq);
788 		return -EINVAL;
789 	}
790 
791 	if (clk_src >= NUM_ICE_CLK_SRC) {
792 		dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
793 			 clk_src);
794 		return -EINVAL;
795 	}
796 
797 	if (clk_src == ICE_CLK_SRC_TCX0 &&
798 	    clk_freq != ICE_TIME_REF_FREQ_25_000) {
799 		dev_warn(ice_hw_to_dev(hw),
800 			 "TCX0 only supports 25 MHz frequency\n");
801 		return -EINVAL;
802 	}
803 
804 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
805 	if (err)
806 		return err;
807 
808 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
809 	if (err)
810 		return err;
811 
812 	err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
813 	if (err)
814 		return err;
815 
816 	/* Log the current clock configuration */
817 	ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
818 		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
819 		  ice_clk_src_str(dw24.field.time_ref_sel),
820 		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
821 		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
822 
823 	/* Disable the PLL before changing the clock source or frequency */
824 	if (dw24.field.ts_pll_enable) {
825 		dw24.field.ts_pll_enable = 0;
826 
827 		err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
828 		if (err)
829 			return err;
830 	}
831 
832 	/* Set the frequency */
833 	dw9.field.time_ref_freq_sel = clk_freq;
834 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
835 	if (err)
836 		return err;
837 
838 	/* Configure the TS PLL feedback divisor */
839 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
840 	if (err)
841 		return err;
842 
843 	dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
844 	dw19.field.tspll_ndivratio = 1;
845 
846 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
847 	if (err)
848 		return err;
849 
850 	/* Configure the TS PLL post divisor */
851 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
852 	if (err)
853 		return err;
854 
855 	dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
856 	dw22.field.time1588clk_sel_div2 = 0;
857 
858 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
859 	if (err)
860 		return err;
861 
862 	/* Configure the TS PLL pre divisor and clock source */
863 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
864 	if (err)
865 		return err;
866 
867 	dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
868 	dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
869 	dw24.field.time_ref_sel = clk_src;
870 
871 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
872 	if (err)
873 		return err;
874 
875 	/* Finally, enable the PLL */
876 	dw24.field.ts_pll_enable = 1;
877 
878 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
879 	if (err)
880 		return err;
881 
882 	/* Wait to verify if the PLL locks */
883 	usleep_range(1000, 5000);
884 
885 	err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
886 	if (err)
887 		return err;
888 
889 	if (!bwm_lf.field.plllock_true_lock_cri) {
890 		dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
891 		return -EBUSY;
892 	}
893 
894 	/* Log the current clock configuration */
895 	ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
896 		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
897 		  ice_clk_src_str(dw24.field.time_ref_sel),
898 		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
899 		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
900 
901 	return 0;
902 }
903 
904 /**
905  * ice_init_cgu_e822 - Initialize CGU with settings from firmware
906  * @hw: pointer to the HW structure
907  *
908  * Initialize the Clock Generation Unit of the E822 device.
909  */
910 static int ice_init_cgu_e822(struct ice_hw *hw)
911 {
912 	struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
913 	union tspll_cntr_bist_settings cntr_bist;
914 	int err;
915 
916 	err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
917 				    &cntr_bist.val);
918 	if (err)
919 		return err;
920 
	/* Disable sticky lock detection so the reported lock error is accurate */
922 	cntr_bist.field.i_plllock_sel_0 = 0;
923 	cntr_bist.field.i_plllock_sel_1 = 0;
924 
925 	err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
926 				     cntr_bist.val);
927 	if (err)
928 		return err;
929 
930 	/* Configure the CGU PLL using the parameters from the function
931 	 * capabilities.
932 	 */
933 	err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
934 				   (enum ice_clk_src)ts_info->clk_src);
935 	if (err)
936 		return err;
937 
938 	return 0;
939 }
940 
941 /**
942  * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
943  * @hw: pointer to the HW struct
944  *
945  * Set the window length used for the vernier port calibration process.
946  */
947 static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
948 {
949 	u8 port;
950 
951 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
952 		int err;
953 
954 		err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
955 					     PTP_VERNIER_WL);
956 		if (err) {
957 			ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
958 				  port, err);
959 			return err;
960 		}
961 	}
962 
963 	return 0;
964 }
965 
966 /**
967  * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
968  * @hw: pointer to HW struct
969  *
970  * Perform PHC initialization steps specific to E822 devices.
971  */
972 static int ice_ptp_init_phc_e822(struct ice_hw *hw)
973 {
974 	int err;
975 	u32 regval;
976 
977 	/* Enable reading switch and PHY registers over the sideband queue */
978 #define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
979 #define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
980 	regval = rd32(hw, PF_SB_REM_DEV_CTL);
981 	regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
982 		   PF_SB_REM_DEV_CTL_PHY0);
983 	wr32(hw, PF_SB_REM_DEV_CTL, regval);
984 
985 	/* Initialize the Clock Generation Unit */
986 	err = ice_init_cgu_e822(hw);
987 	if (err)
988 		return err;
989 
990 	/* Set window length for all the ports */
991 	return ice_ptp_set_vernier_wl(hw);
992 }
993 
994 /**
995  * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
996  * @hw: pointer to the HW struct
997  * @time: Time to initialize the PHY port clocks to
998  *
999  * Program the PHY port registers with a new initial time value. The port
1000  * clock will be initialized once the driver issues an INIT_TIME sync
1001  * command. The time value is the upper 32 bits of the PHY timer, usually in
1002  * units of nominal nanoseconds.
1003  */
1004 static int
1005 ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
1006 {
1007 	u64 phy_time;
1008 	u8 port;
1009 	int err;
1010 
1011 	/* The time represents the upper 32 bits of the PHY timer, so we need
1012 	 * to shift to account for this when programming.
1013 	 */
1014 	phy_time = (u64)time << 32;
1015 
1016 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1017 		/* Tx case */
1018 		err = ice_write_64b_phy_reg_e822(hw, port,
1019 						 P_REG_TX_TIMER_INC_PRE_L,
1020 						 phy_time);
1021 		if (err)
1022 			goto exit_err;
1023 
1024 		/* Rx case */
1025 		err = ice_write_64b_phy_reg_e822(hw, port,
1026 						 P_REG_RX_TIMER_INC_PRE_L,
1027 						 phy_time);
1028 		if (err)
1029 			goto exit_err;
1030 	}
1031 
1032 	return 0;
1033 
1034 exit_err:
1035 	ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
1036 		  port, err);
1037 
1038 	return err;
1039 }
1040 
1041 /**
1042  * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
1043  * @hw: pointer to HW struct
1044  * @port: Port number to be programmed
1045  * @time: time in cycles to adjust the port Tx and Rx clocks
1046  *
1047  * Program the port for an atomic adjustment by writing the Tx and Rx timer
1048  * registers. The atomic adjustment won't be completed until the driver issues
1049  * an ADJ_TIME command.
1050  *
1051  * Note that time is not in units of nanoseconds. It is in clock time
1052  * including the lower sub-nanosecond portion of the port timer.
1053  *
1054  * Negative adjustments are supported using 2s complement arithmetic.
1055  */
1056 int
1057 ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
1058 {
1059 	u32 l_time, u_time;
1060 	int err;
1061 
1062 	l_time = lower_32_bits(time);
1063 	u_time = upper_32_bits(time);
1064 
1065 	/* Tx case */
1066 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
1067 				     l_time);
1068 	if (err)
1069 		goto exit_err;
1070 
1071 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
1072 				     u_time);
1073 	if (err)
1074 		goto exit_err;
1075 
1076 	/* Rx case */
1077 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
1078 				     l_time);
1079 	if (err)
1080 		goto exit_err;
1081 
1082 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
1083 				     u_time);
1084 	if (err)
1085 		goto exit_err;
1086 
1087 	return 0;
1088 
1089 exit_err:
1090 	ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
1091 		  port, err);
1092 	return err;
1093 }
1094 
1095 /**
1096  * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
1097  * @hw: pointer to HW struct
1098  * @adj: adjustment in nanoseconds
1099  *
1100  * Prepare the PHY ports for an atomic time adjustment by programming the PHY
1101  * Tx and Rx port registers. The actual adjustment is completed by issuing an
1102  * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
1103  */
1104 static int
1105 ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
1106 {
1107 	s64 cycles;
1108 	u8 port;
1109 
1110 	/* The port clock supports adjustment of the sub-nanosecond portion of
1111 	 * the clock. We shift the provided adjustment in nanoseconds to
1112 	 * calculate the appropriate adjustment to program into the PHY ports.
1113 	 */
1114 	if (adj > 0)
1115 		cycles = (s64)adj << 32;
1116 	else
1117 		cycles = -(((s64)-adj) << 32);
1118 
1119 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1120 		int err;
1121 
1122 		err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
1123 		if (err)
1124 			return err;
1125 	}
1126 
1127 	return 0;
1128 }
1129 
1130 /**
 * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports with a new increment value
1132  * @hw: pointer to HW struct
1133  * @incval: new increment value to prepare
1134  *
1135  * Prepare each of the PHY ports for a new increment value by programming the
 * port's TIMETUS registers. The new increment value takes effect once the
 * driver issues an INIT_INCVAL command.
1138  */
1139 static int
1140 ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
1141 {
1142 	int err;
1143 	u8 port;
1144 
1145 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1146 		err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
1147 						 incval);
1148 		if (err)
1149 			goto exit_err;
1150 	}
1151 
1152 	return 0;
1153 
1154 exit_err:
1155 	ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
1156 		  port, err);
1157 
1158 	return err;
1159 }
1160 
1161 /**
1162  * ice_ptp_read_port_capture - Read a port's local time capture
1163  * @hw: pointer to HW struct
1164  * @port: Port number to read
1165  * @tx_ts: on return, the Tx port time capture
1166  * @rx_ts: on return, the Rx port time capture
1167  *
1168  * Read the port's Tx and Rx local time capture values.
1169  *
1170  * Note this has no equivalent for the E810 devices.
1171  */
1172 static int
1173 ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
1174 {
1175 	int err;
1176 
1177 	/* Tx case */
1178 	err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
1179 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_CAPTURE, err %d\n",
1181 			  err);
1182 		return err;
1183 	}
1184 
1185 	ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
1186 		  (unsigned long long)*tx_ts);
1187 
1188 	/* Rx case */
1189 	err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
1190 	if (err) {
1191 		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
1192 			  err);
1193 		return err;
1194 	}
1195 
1196 	ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
1197 		  (unsigned long long)*rx_ts);
1198 
1199 	return 0;
1200 }
1201 
1202 /**
1203  * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
1204  * @hw: pointer to HW struct
 * @port: Port to which the command is sent
1206  * @cmd: Command to be sent to the port
1207  *
1208  * Prepare the requested port for an upcoming timer sync command.
1209  *
1210  * Note there is no equivalent of this operation on E810, as that device
1211  * always handles all external PHYs internally.
1212  */
1213 static int
1214 ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
1215 {
1216 	u32 cmd_val, val;
1217 	u8 tmr_idx;
1218 	int err;
1219 
1220 	tmr_idx = ice_get_ptp_src_clock_index(hw);
1221 	cmd_val = tmr_idx << SEL_PHY_SRC;
1222 	switch (cmd) {
1223 	case INIT_TIME:
1224 		cmd_val |= PHY_CMD_INIT_TIME;
1225 		break;
1226 	case INIT_INCVAL:
1227 		cmd_val |= PHY_CMD_INIT_INCVAL;
1228 		break;
1229 	case ADJ_TIME:
1230 		cmd_val |= PHY_CMD_ADJ_TIME;
1231 		break;
1232 	case READ_TIME:
1233 		cmd_val |= PHY_CMD_READ_TIME;
1234 		break;
1235 	case ADJ_TIME_AT_TIME:
1236 		cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
1237 		break;
1238 	}
1239 
1240 	/* Tx case */
1241 	/* Read, modify, write */
1242 	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
1243 	if (err) {
1244 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
1245 			  err);
1246 		return err;
1247 	}
1248 
1249 	/* Modify necessary bits only and perform write */
1250 	val &= ~TS_CMD_MASK;
1251 	val |= cmd_val;
1252 
1253 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
1254 	if (err) {
1255 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
1256 			  err);
1257 		return err;
1258 	}
1259 
1260 	/* Rx case */
1261 	/* Read, modify, write */
1262 	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
1263 	if (err) {
1264 		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
1265 			  err);
1266 		return err;
1267 	}
1268 
1269 	/* Modify necessary bits only and perform write */
1270 	val &= ~TS_CMD_MASK;
1271 	val |= cmd_val;
1272 
1273 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
1274 	if (err) {
1275 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
1276 			  err);
1277 		return err;
1278 	}
1279 
1280 	return 0;
1281 }
1282 
1283 /**
1284  * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
1285  * @hw: pointer to the HW struct
1286  * @cmd: timer command to prepare
1287  *
1288  * Prepare all ports connected to this device for an upcoming timer sync
1289  * command.
1290  */
1291 static int
1292 ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
1293 {
1294 	u8 port;
1295 
1296 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1297 		int err;
1298 
1299 		err = ice_ptp_one_port_cmd(hw, port, cmd);
1300 		if (err)
1301 			return err;
1302 	}
1303 
1304 	return 0;
1305 }
1306 
1307 /* E822 Vernier calibration functions
1308  *
1309  * The following functions are used as part of the vernier calibration of
1310  * a port. This calibration increases the precision of the timestamps on the
1311  * port.
1312  */
1313 
1314 /**
1315  * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
1316  * @hw: pointer to HW struct
1317  * @port: the port to read from
1318  * @link_out: if non-NULL, holds link speed on success
1319  * @fec_out: if non-NULL, holds FEC algorithm on success
1320  *
1321  * Read the serdes data for the PHY port and extract the link speed and FEC
1322  * algorithm.
1323  */
1324 static int
1325 ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
1326 			       enum ice_ptp_link_spd *link_out,
1327 			       enum ice_ptp_fec_mode *fec_out)
1328 {
1329 	enum ice_ptp_link_spd link;
1330 	enum ice_ptp_fec_mode fec;
1331 	u32 serdes;
1332 	int err;
1333 
1334 	err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
1335 	if (err) {
1336 		ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
1337 		return err;
1338 	}
1339 
1340 	/* Determine the FEC algorithm */
1341 	fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
1342 
1343 	serdes &= P_REG_LINK_SPEED_SERDES_M;
1344 
1345 	/* Determine the link speed */
1346 	if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
1347 		switch (serdes) {
1348 		case ICE_PTP_SERDES_25G:
1349 			link = ICE_PTP_LNK_SPD_25G_RS;
1350 			break;
1351 		case ICE_PTP_SERDES_50G:
1352 			link = ICE_PTP_LNK_SPD_50G_RS;
1353 			break;
1354 		case ICE_PTP_SERDES_100G:
1355 			link = ICE_PTP_LNK_SPD_100G_RS;
1356 			break;
1357 		default:
1358 			return -EIO;
1359 		}
1360 	} else {
1361 		switch (serdes) {
1362 		case ICE_PTP_SERDES_1G:
1363 			link = ICE_PTP_LNK_SPD_1G;
1364 			break;
1365 		case ICE_PTP_SERDES_10G:
1366 			link = ICE_PTP_LNK_SPD_10G;
1367 			break;
1368 		case ICE_PTP_SERDES_25G:
1369 			link = ICE_PTP_LNK_SPD_25G;
1370 			break;
1371 		case ICE_PTP_SERDES_40G:
1372 			link = ICE_PTP_LNK_SPD_40G;
1373 			break;
1374 		case ICE_PTP_SERDES_50G:
1375 			link = ICE_PTP_LNK_SPD_50G;
1376 			break;
1377 		default:
1378 			return -EIO;
1379 		}
1380 	}
1381 
1382 	if (link_out)
1383 		*link_out = link;
1384 	if (fec_out)
1385 		*fec_out = fec;
1386 
1387 	return 0;
1388 }
1389 
1390 /**
1391  * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
1392  * @hw: pointer to HW struct
 * @port: the port to configure the quad for
1394  */
1395 static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
1396 {
1397 	enum ice_ptp_link_spd link_spd;
1398 	int err;
1399 	u32 val;
1400 	u8 quad;
1401 
1402 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
1403 	if (err) {
1404 		ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
1405 			  err);
1406 		return;
1407 	}
1408 
1409 	quad = port / ICE_PORTS_PER_QUAD;
1410 
1411 	err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
1412 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GBL_CFG, err %d\n",
1414 			  err);
1415 		return;
1416 	}
1417 
1418 	if (link_spd >= ICE_PTP_LNK_SPD_40G)
1419 		val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1420 	else
1421 		val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1422 
1423 	err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
1424 	if (err) {
1425 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
1426 			  err);
1427 		return;
1428 	}
1429 }
1430 
1431 /**
1432  * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
1433  * @hw: pointer to the HW structure
1434  * @port: the port to configure
1435  *
 * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
1437  * hardware clock time units (TUs). That is, determine the number of TUs per
1438  * serdes unit interval, and program the UIX registers with this conversion.
1439  *
1440  * This conversion is used as part of the calibration process when determining
1441  * the additional error of a timestamp vs the real time of transmission or
1442  * receipt of the packet.
1443  *
1444  * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
1445  * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
1446  *
1447  * To calculate the conversion ratio, we use the following facts:
1448  *
1449  * a) the clock frequency in Hz (cycles per second)
1450  * b) the number of TUs per cycle (the increment value of the clock)
1451  * c) 1 second per 1 billion nanoseconds
1452  * d) the duration of 66 UIs in nanoseconds
1453  *
1454  * Given these facts, we can use the following table to work out what ratios
1455  * to multiply in order to get the number of TUs per 66 UIs:
1456  *
1457  * cycles |   1 second   | incval (TUs) | nanoseconds
1458  * -------+--------------+--------------+-------------
1459  * second | 1 billion ns |    cycle     |   66 UIs
1460  *
1461  * To perform the multiplication using integers without too much loss of
 * precision, we can use the following equation:
 *
 * (freq * incval * 6600 LINE_UI) / (100 * 1 billion)
1465  *
1466  * We scale up to using 6600 UI instead of 66 in order to avoid fractional
1467  * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
1468  *
1469  * The increment value has a maximum expected range of about 34 bits, while
1470  * the frequency value is about 29 bits. Multiplying these values shouldn't
1471  * overflow the 64 bits. However, we must then further multiply them again by
1472  * the Serdes unit interval duration. To avoid overflow here, we split the
1473  * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
1474  * a divide by 390,625,000. This does lose some precision, but avoids
1475  * miscalculation due to arithmetic overflow.
1476  */
1477 static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
1478 {
1479 	u64 cur_freq, clk_incval, tu_per_sec, uix;
1480 	int err;
1481 
1482 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1483 	clk_incval = ice_ptp_read_src_incval(hw);
1484 
1485 	/* Calculate TUs per second divided by 256 */
1486 	tu_per_sec = (cur_freq * clk_incval) >> 8;
1487 
1488 #define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
1489 #define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
1490 
1491 	/* Program the 10Gb/40Gb conversion ratio */
1492 	uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
1493 
1494 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
1495 					 uix);
1496 	if (err) {
1497 		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
1498 			  err);
1499 		return err;
1500 	}
1501 
1502 	/* Program the 25Gb/100Gb conversion ratio */
1503 	uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
1504 
1505 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
1506 					 uix);
1507 	if (err) {
1508 		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
1509 			  err);
1510 		return err;
1511 	}
1512 
1513 	return 0;
1514 }
1515 
1516 /**
1517  * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
1518  * @hw: pointer to the HW struct
1519  * @port: port to configure
1520  *
1521  * Configure the number of TUs for the PAR and PCS clocks used as part of the
1522  * timestamp calibration process. This depends on the link speed, as the PHY
1523  * uses different markers depending on the speed.
1524  *
1525  * 1Gb/10Gb/25Gb:
1526  * - Tx/Rx PAR/PCS markers
1527  *
1528  * 25Gb RS:
1529  * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1530  *
1531  * 40Gb/50Gb:
1532  * - Tx/Rx PAR/PCS markers
1533  * - Rx Deskew PAR/PCS markers
1534  *
 * 50G RS and 100G RS:
1536  * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1537  * - Rx Deskew PAR/PCS markers
1538  * - Tx PAR/PCS markers
1539  *
1540  * To calculate the conversion, we use the PHC clock frequency (cycles per
1541  * second), the increment value (TUs per cycle), and the related PHY clock
1542  * frequency to calculate the TUs per unit of the PHY link clock. The
1543  * following table shows how the units convert:
1544  *
1545  * cycles |  TUs  | second
1546  * -------+-------+--------
1547  * second | cycle | cycles
1548  *
1549  * For each conversion register, look up the appropriate frequency from the
1550  * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
1551  * this to the appropriate register, preparing hardware to perform timestamp
1552  * calibration to calculate the total Tx or Rx offset to adjust the timestamp
1553  * in order to calibrate for the internal PHY delays.
1554  *
1555  * Note that the increment value ranges up to ~34 bits, and the clock
1556  * frequency is ~29 bits, so multiplying them together should fit within the
1557  * 64 bit arithmetic.
1558  */
1559 static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
1560 {
1561 	u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
1562 	enum ice_ptp_link_spd link_spd;
1563 	enum ice_ptp_fec_mode fec_mode;
1564 	int err;
1565 
1566 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1567 	if (err)
1568 		return err;
1569 
1570 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1571 	clk_incval = ice_ptp_read_src_incval(hw);
1572 
1573 	/* Calculate TUs per cycle of the PHC clock */
1574 	tu_per_sec = cur_freq * clk_incval;
1575 
1576 	/* For each PHY conversion register, look up the appropriate link
1577 	 * speed frequency and determine the TUs per that clock's cycle time.
1578 	 * Split this into a high and low value and then program the
1579 	 * appropriate register. If that link speed does not use the
1580 	 * associated register, write zeros to clear it instead.
1581 	 */
1582 
1583 	/* P_REG_PAR_TX_TUS */
1584 	if (e822_vernier[link_spd].tx_par_clk)
1585 		phy_tus = div_u64(tu_per_sec,
1586 				  e822_vernier[link_spd].tx_par_clk);
1587 	else
1588 		phy_tus = 0;
1589 
1590 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
1591 					 phy_tus);
1592 	if (err)
1593 		return err;
1594 
1595 	/* P_REG_PAR_RX_TUS */
1596 	if (e822_vernier[link_spd].rx_par_clk)
1597 		phy_tus = div_u64(tu_per_sec,
1598 				  e822_vernier[link_spd].rx_par_clk);
1599 	else
1600 		phy_tus = 0;
1601 
1602 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
1603 					 phy_tus);
1604 	if (err)
1605 		return err;
1606 
1607 	/* P_REG_PCS_TX_TUS */
1608 	if (e822_vernier[link_spd].tx_pcs_clk)
1609 		phy_tus = div_u64(tu_per_sec,
1610 				  e822_vernier[link_spd].tx_pcs_clk);
1611 	else
1612 		phy_tus = 0;
1613 
1614 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
1615 					 phy_tus);
1616 	if (err)
1617 		return err;
1618 
1619 	/* P_REG_PCS_RX_TUS */
1620 	if (e822_vernier[link_spd].rx_pcs_clk)
1621 		phy_tus = div_u64(tu_per_sec,
1622 				  e822_vernier[link_spd].rx_pcs_clk);
1623 	else
1624 		phy_tus = 0;
1625 
1626 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
1627 					 phy_tus);
1628 	if (err)
1629 		return err;
1630 
1631 	/* P_REG_DESK_PAR_TX_TUS */
1632 	if (e822_vernier[link_spd].tx_desk_rsgb_par)
1633 		phy_tus = div_u64(tu_per_sec,
1634 				  e822_vernier[link_spd].tx_desk_rsgb_par);
1635 	else
1636 		phy_tus = 0;
1637 
1638 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
1639 					 phy_tus);
1640 	if (err)
1641 		return err;
1642 
1643 	/* P_REG_DESK_PAR_RX_TUS */
1644 	if (e822_vernier[link_spd].rx_desk_rsgb_par)
1645 		phy_tus = div_u64(tu_per_sec,
1646 				  e822_vernier[link_spd].rx_desk_rsgb_par);
1647 	else
1648 		phy_tus = 0;
1649 
1650 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
1651 					 phy_tus);
1652 	if (err)
1653 		return err;
1654 
1655 	/* P_REG_DESK_PCS_TX_TUS */
1656 	if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
1657 		phy_tus = div_u64(tu_per_sec,
1658 				  e822_vernier[link_spd].tx_desk_rsgb_pcs);
1659 	else
1660 		phy_tus = 0;
1661 
1662 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
1663 					 phy_tus);
1664 	if (err)
1665 		return err;
1666 
1667 	/* P_REG_DESK_PCS_RX_TUS */
1668 	if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
1669 		phy_tus = div_u64(tu_per_sec,
1670 				  e822_vernier[link_spd].rx_desk_rsgb_pcs);
1671 	else
1672 		phy_tus = 0;
1673 
1674 	return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
1675 					  phy_tus);
1676 }
1677 
1678 /**
 * ice_calc_fixed_tx_offset_e822 - Calculate the fixed Tx offset for a port
1680  * @hw: pointer to the HW struct
1681  * @link_spd: the Link speed to calculate for
1682  *
1683  * Calculate the fixed offset due to known static latency data.
1684  */
1685 static u64
1686 ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1687 {
1688 	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1689 
1690 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1691 	clk_incval = ice_ptp_read_src_incval(hw);
1692 
1693 	/* Calculate TUs per second */
1694 	tu_per_sec = cur_freq * clk_incval;
1695 
1696 	/* Calculate number of TUs to add for the fixed Tx latency. Since the
1697 	 * latency measurement is in 1/100th of a nanosecond, we need to
1698 	 * multiply by tu_per_sec and then divide by 1e11. This calculation
1699 	 * overflows 64 bit integer arithmetic, so break it up into two
1700 	 * divisions by 1e4 first then by 1e7.
1701 	 */
1702 	fixed_offset = div_u64(tu_per_sec, 10000);
1703 	fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
1704 	fixed_offset = div_u64(fixed_offset, 10000000);
1705 
1706 	return fixed_offset;
1707 }
1708 
1709 /**
1710  * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
1711  * @hw: pointer to the HW struct
1712  * @port: the PHY port to configure
1713  *
1714  * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
1715  * adjust Tx timestamps by. This is calculated by combining some known static
1716  * latency along with the Vernier offset computations done by hardware.
1717  *
1718  * This function must be called only after the offset registers are valid,
1719  * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
1720  * has measured the offset.
1721  *
1722  * To avoid overflow, when calculating the offset based on the known static
1723  * latency values, we use measurements in 1/100th of a nanosecond, and divide
1724  * the TUs per second up front. This avoids overflow while allowing
1725  * calculation of the adjustment using integer arithmetic.
1726  */
1727 static int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
1728 {
1729 	enum ice_ptp_link_spd link_spd;
1730 	enum ice_ptp_fec_mode fec_mode;
1731 	u64 total_offset, val;
1732 	int err;
1733 
1734 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1735 	if (err)
1736 		return err;
1737 
1738 	total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
1739 
1740 	/* Read the first Vernier offset from the PHY register and add it to
1741 	 * the total offset.
1742 	 */
1743 	if (link_spd == ICE_PTP_LNK_SPD_1G ||
1744 	    link_spd == ICE_PTP_LNK_SPD_10G ||
1745 	    link_spd == ICE_PTP_LNK_SPD_25G ||
1746 	    link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1747 	    link_spd == ICE_PTP_LNK_SPD_40G ||
1748 	    link_spd == ICE_PTP_LNK_SPD_50G) {
1749 		err = ice_read_64b_phy_reg_e822(hw, port,
1750 						P_REG_PAR_PCS_TX_OFFSET_L,
1751 						&val);
1752 		if (err)
1753 			return err;
1754 
1755 		total_offset += val;
1756 	}
1757 
1758 	/* For Tx, we only need to use the second Vernier offset for
1759 	 * multi-lane link speeds with RS-FEC. The lanes will always be
1760 	 * aligned.
1761 	 */
1762 	if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1763 	    link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1764 		err = ice_read_64b_phy_reg_e822(hw, port,
1765 						P_REG_PAR_TX_TIME_L,
1766 						&val);
1767 		if (err)
1768 			return err;
1769 
1770 		total_offset += val;
1771 	}
1772 
1773 	/* Now that the total offset has been calculated, program it to the
1774 	 * PHY and indicate that the Tx offset is ready. After this,
1775 	 * timestamps will be enabled.
1776 	 */
1777 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
1778 					 total_offset);
1779 	if (err)
1780 		return err;
1781 
1782 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
1783 	if (err)
1784 		return err;
1785 
1786 	return 0;
1787 }
1788 
1789 /**
1790  * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
1791  * @hw: pointer to the HW struct
1792  * @port: the PHY port to adjust for
1793  * @link_spd: the current link speed of the PHY
1794  * @fec_mode: the current FEC mode of the PHY
1795  * @pmd_adj: on return, the amount to adjust the Rx total offset by
1796  *
1797  * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
1798  * This varies by link speed and FEC mode. The value calculated accounts for
1799  * various delays caused when receiving a packet.
1800  */
1801 static int
1802 ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
1803 			  enum ice_ptp_link_spd link_spd,
1804 			  enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
1805 {
1806 	u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
1807 	u8 pmd_align;
1808 	u32 val;
1809 	int err;
1810 
1811 	err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
1812 	if (err) {
1813 		ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
1814 			  err);
1815 		return err;
1816 	}
1817 
1818 	pmd_align = (u8)val;
1819 
1820 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1821 	clk_incval = ice_ptp_read_src_incval(hw);
1822 
1823 	/* Calculate TUs per second */
1824 	tu_per_sec = cur_freq * clk_incval;
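	/* Note that tu_per_sec is large but still fits in a u64. As a purely
	 * illustrative example (not real register values), an 800 MHz clock
	 * with an increment value near 2^32 yields roughly 3.4e18 TUs per
	 * second, well below U64_MAX (~1.8e19).
	 */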
1825 
1826 	/* The PMD alignment adjustment measurement depends on the link speed,
1827 	 * and whether FEC is enabled. For each link speed, the alignment
1828 	 * adjustment is calculated by dividing a value by the length of
1829 	 * a Time Unit in nanoseconds.
1830 	 *
	 * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
1832 	 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
1833 	 * 10G w/FEC: align * 0.1 * 32/33
1834 	 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
1835 	 * 25G w/FEC: align * 0.4 * 32/33
1836 	 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
1837 	 * 40G w/FEC: align * 0.1 * 32/33
1838 	 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
1839 	 * 50G w/FEC: align * 0.8 * 32/33
1840 	 *
1841 	 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
1842 	 *
1843 	 * To allow for calculating this value using integer arithmetic, we
1844 	 * instead start with the number of TUs per second, (inverse of the
1845 	 * length of a Time Unit in nanoseconds), multiply by a value based
1846 	 * on the PMD alignment register, and then divide by the right value
1847 	 * calculated based on the table above. To avoid integer overflow this
1848 	 * division is broken up into a step of dividing by 125 first.
1849 	 */
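	/* As a purely hypothetical example, a 1G link reporting
	 * pmd_align == 9 would use mult = (9 + 6) % 10 = 5 below.
	 */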
1850 	if (link_spd == ICE_PTP_LNK_SPD_1G) {
1851 		if (pmd_align == 4)
1852 			mult = 10;
1853 		else
1854 			mult = (pmd_align + 6) % 10;
1855 	} else if (link_spd == ICE_PTP_LNK_SPD_10G ||
1856 		   link_spd == ICE_PTP_LNK_SPD_25G ||
1857 		   link_spd == ICE_PTP_LNK_SPD_40G ||
1858 		   link_spd == ICE_PTP_LNK_SPD_50G) {
1859 		/* If Clause 74 FEC, always calculate PMD adjust */
1860 		if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
1861 			mult = pmd_align;
1862 		else
1863 			mult = 0;
1864 	} else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1865 		   link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1866 		   link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1867 		if (pmd_align < 17)
1868 			mult = pmd_align + 40;
1869 		else
1870 			mult = pmd_align;
1871 	} else {
1872 		ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
1873 			  link_spd);
1874 		mult = 0;
1875 	}
1876 
1877 	/* In some cases, there's no need to adjust for the PMD alignment */
1878 	if (!mult) {
1879 		*pmd_adj = 0;
1880 		return 0;
1881 	}
1882 
1883 	/* Calculate the adjustment by multiplying TUs per second by the
1884 	 * appropriate multiplier and divisor. To avoid overflow, we first
1885 	 * divide by 125, and then handle remaining divisor based on the link
1886 	 * speed pmd_adj_divisor value.
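	 *
	 * As a rough illustration with the hypothetical ~3.4e18 TUs per
	 * second from above, tu_per_sec / 125 is ~2.7e16, leaving ample
	 * headroom in a u64 for the subsequent multiply by mult.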
1887 	 */
1888 	adj = div_u64(tu_per_sec, 125);
1889 	adj *= mult;
1890 	adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
1891 
1892 	/* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
1893 	 * cycle count is necessary.
1894 	 */
1895 	if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
1896 		u64 cycle_adj;
1897 		u8 rx_cycle;
1898 
1899 		err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
1900 					    &val);
1901 		if (err) {
1902 			ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
1903 				  err);
1904 			return err;
1905 		}
1906 
1907 		rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
1908 		if (rx_cycle) {
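			/* Purely as an example, a hypothetical rx_cycle of 1
			 * gives mult = (4 - 1) * 40 = 120 here.
			 */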
1909 			mult = (4 - rx_cycle) * 40;
1910 
1911 			cycle_adj = div_u64(tu_per_sec, 125);
1912 			cycle_adj *= mult;
1913 			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
1914 
1915 			adj += cycle_adj;
1916 		}
1917 	} else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
1918 		u64 cycle_adj;
1919 		u8 rx_cycle;
1920 
1921 		err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
1922 					    &val);
1923 		if (err) {
1924 			ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
1925 				  err);
1926 			return err;
1927 		}
1928 
1929 		rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
1930 		if (rx_cycle) {
1931 			mult = rx_cycle * 40;
1932 
1933 			cycle_adj = div_u64(tu_per_sec, 125);
1934 			cycle_adj *= mult;
1935 			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
1936 
1937 			adj += cycle_adj;
1938 		}
1939 	}
1940 
1941 	/* Return the calculated adjustment */
1942 	*pmd_adj = adj;
1943 
1944 	return 0;
1945 }
1946 
1947 /**
 * ice_calc_fixed_rx_offset_e822 - Calculate the fixed Rx offset for a port
1949  * @hw: pointer to HW struct
1950  * @link_spd: The Link speed to calculate for
1951  *
1952  * Determine the fixed Rx latency for a given link speed.
1953  */
1954 static u64
1955 ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1956 {
1957 	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1958 
1959 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1960 	clk_incval = ice_ptp_read_src_incval(hw);
1961 
1962 	/* Calculate TUs per second */
1963 	tu_per_sec = cur_freq * clk_incval;
1964 
1965 	/* Calculate number of TUs to add for the fixed Rx latency. Since the
1966 	 * latency measurement is in 1/100th of a nanosecond, we need to
1967 	 * multiply by tu_per_sec and then divide by 1e11. This calculation
1968 	 * overflows 64 bit integer arithmetic, so break it up into two
1969 	 * divisions by 1e4 first then by 1e7.
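	 *
	 * Purely as an illustration: with ~3.4e18 TUs per second, the first
	 * division yields ~3.4e14, and multiplying by a fixed delay of a few
	 * hundred (a few nanoseconds expressed in 1/100 ns units) stays well
	 * within u64 range before the final division by 1e7.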
1970 	 */
1971 	fixed_offset = div_u64(tu_per_sec, 10000);
1972 	fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
1973 	fixed_offset = div_u64(fixed_offset, 10000000);
1974 
1975 	return fixed_offset;
1976 }
1977 
1978 /**
1979  * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
1980  * @hw: pointer to the HW struct
1981  * @port: the PHY port to configure
1982  *
1983  * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
1984  * adjust Rx timestamps by. This combines calculations from the Vernier offset
1985  * measurements taken in hardware with some data about known fixed delay as
1986  * well as adjusting for multi-lane alignment delay.
1987  *
1988  * This function must be called only after the offset registers are valid,
1989  * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
1990  * has measured the offset.
1991  *
1992  * To avoid overflow, when calculating the offset based on the known static
1993  * latency values, we use measurements in 1/100th of a nanosecond, and divide
1994  * the TUs per second up front. This avoids overflow while allowing
1995  * calculation of the adjustment using integer arithmetic.
1996  */
1997 static int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
1998 {
1999 	enum ice_ptp_link_spd link_spd;
2000 	enum ice_ptp_fec_mode fec_mode;
2001 	u64 total_offset, pmd, val;
2002 	int err;
2003 
2004 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
2005 	if (err)
2006 		return err;
2007 
2008 	total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
2009 
2010 	/* Read the first Vernier offset from the PHY register and add it to
2011 	 * the total offset.
2012 	 */
2013 	err = ice_read_64b_phy_reg_e822(hw, port,
2014 					P_REG_PAR_PCS_RX_OFFSET_L,
2015 					&val);
2016 	if (err)
2017 		return err;
2018 
2019 	total_offset += val;
2020 
2021 	/* For Rx, all multi-lane link speeds include a second Vernier
2022 	 * calibration, because the lanes might not be aligned.
2023 	 */
2024 	if (link_spd == ICE_PTP_LNK_SPD_40G ||
2025 	    link_spd == ICE_PTP_LNK_SPD_50G ||
2026 	    link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2027 	    link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2028 		err = ice_read_64b_phy_reg_e822(hw, port,
2029 						P_REG_PAR_RX_TIME_L,
2030 						&val);
2031 		if (err)
2032 			return err;
2033 
2034 		total_offset += val;
2035 	}
2036 
2037 	/* In addition, Rx must account for the PMD alignment */
2038 	err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
2039 	if (err)
2040 		return err;
2041 
2042 	/* For RS-FEC, this adjustment adds delay, but for other modes, it
2043 	 * subtracts delay.
2044 	 */
2045 	if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
2046 		total_offset += pmd;
2047 	else
2048 		total_offset -= pmd;
2049 
2050 	/* Now that the total offset has been calculated, program it to the
2051 	 * PHY and indicate that the Rx offset is ready. After this,
2052 	 * timestamps will be enabled.
2053 	 */
2054 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2055 					 total_offset);
2056 	if (err)
2057 		return err;
2058 
2059 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
2060 	if (err)
2061 		return err;
2062 
2063 	return 0;
2064 }
2065 
2066 /**
2067  * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
2068  * @hw: pointer to the HW struct
2069  * @port: the PHY port to read
2070  * @phy_time: on return, the 64bit PHY timer value
2071  * @phc_time: on return, the lower 64bits of PHC time
2072  *
2073  * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
2074  * timer values.
2075  */
2076 static int
2077 ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
2078 			       u64 *phc_time)
2079 {
2080 	u64 tx_time, rx_time;
2081 	u32 zo, lo;
2082 	u8 tmr_idx;
2083 	int err;
2084 
2085 	tmr_idx = ice_get_ptp_src_clock_index(hw);
2086 
2087 	/* Prepare the PHC timer for a READ_TIME capture command */
2088 	ice_ptp_src_cmd(hw, READ_TIME);
2089 
2090 	/* Prepare the PHY timer for a READ_TIME capture command */
2091 	err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
2092 	if (err)
2093 		return err;
2094 
2095 	/* Issue the sync to start the READ_TIME capture */
2096 	ice_ptp_exec_tmr_cmd(hw);
2097 
2098 	/* Read the captured PHC time from the shadow time registers */
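	/* The READ_TIME capture latches the lower 32 bits of the source time
	 * into GLTSYN_SHTIME_0 and the upper 32 bits into GLTSYN_SHTIME_L,
	 * hence the order in which they are combined below.
	 */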
2099 	zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
2100 	lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
2101 	*phc_time = (u64)lo << 32 | zo;
2102 
2103 	/* Read the captured PHY time from the PHY shadow registers */
2104 	err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
2105 	if (err)
2106 		return err;
2107 
2108 	/* If the PHY Tx and Rx timers don't match, log a warning message.
2109 	 * Note that this should not happen in normal circumstances since the
2110 	 * driver always programs them together.
2111 	 */
2112 	if (tx_time != rx_time)
2113 		dev_warn(ice_hw_to_dev(hw),
2114 			 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
2115 			 port, (unsigned long long)tx_time,
2116 			 (unsigned long long)rx_time);
2117 
2118 	*phy_time = tx_time;
2119 
2120 	return 0;
2121 }
2122 
2123 /**
2124  * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
2125  * @hw: pointer to the HW struct
2126  * @port: the PHY port to synchronize
2127  *
2128  * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
2129  * This is done by issuing a READ_TIME command which triggers a simultaneous
2130  * read of the PHY timer and PHC timer. Then we use the difference to
2131  * calculate an appropriate 2s complement addition to add to the PHY timer in
2132  * order to ensure it reads the same value as the primary PHC timer.
2133  */
2134 static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
2135 {
2136 	u64 phc_time, phy_time, difference;
2137 	int err;
2138 
2139 	if (!ice_ptp_lock(hw)) {
2140 		ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
2141 		return -EBUSY;
2142 	}
2143 
2144 	err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2145 	if (err)
2146 		goto err_unlock;
2147 
2148 	/* Calculate the amount required to add to the port time in order for
2149 	 * it to match the PHC time.
2150 	 *
2151 	 * Note that the port adjustment is done using 2s complement
2152 	 * arithmetic. This is convenient since it means that we can simply
2153 	 * calculate the difference between the PHC time and the port time,
2154 	 * and it will be interpreted correctly.
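	 *
	 * For example, if the port timer were ahead of the PHC, the unsigned
	 * subtraction below wraps and, when interpreted as a signed 64bit
	 * value by the port adjustment, becomes the expected negative offset.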
2155 	 */
2156 	difference = phc_time - phy_time;
2157 
2158 	err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
2159 	if (err)
2160 		goto err_unlock;
2161 
2162 	err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
2163 	if (err)
2164 		goto err_unlock;
2165 
2166 	/* Issue the sync to activate the time adjustment */
2167 	ice_ptp_exec_tmr_cmd(hw);
2168 
2169 	/* Re-capture the timer values to flush the command registers and
2170 	 * verify that the time was properly adjusted.
2171 	 */
2172 	err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2173 	if (err)
2174 		goto err_unlock;
2175 
2176 	dev_info(ice_hw_to_dev(hw),
2177 		 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
2178 		 port, (unsigned long long)phy_time,
2179 		 (unsigned long long)phc_time);
2180 
2181 	ice_ptp_unlock(hw);
2182 
2183 	return 0;
2184 
2185 err_unlock:
2186 	ice_ptp_unlock(hw);
2187 	return err;
2188 }
2189 
2190 /**
2191  * ice_stop_phy_timer_e822 - Stop the PHY clock timer
2192  * @hw: pointer to the HW struct
2193  * @port: the PHY port to stop
2194  * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
2195  *
2196  * Stop the clock of a PHY port. This must be done as part of the flow to
2197  * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2198  * initialized or when link speed changes.
2199  */
2200 int
2201 ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
2202 {
2203 	int err;
2204 	u32 val;
2205 
2206 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
2207 	if (err)
2208 		return err;
2209 
2210 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
2211 	if (err)
2212 		return err;
2213 
2214 	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2215 	if (err)
2216 		return err;
2217 
2218 	val &= ~P_REG_PS_START_M;
2219 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2220 	if (err)
2221 		return err;
2222 
2223 	val &= ~P_REG_PS_ENA_CLK_M;
2224 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2225 	if (err)
2226 		return err;
2227 
2228 	if (soft_reset) {
2229 		val |= P_REG_PS_SFT_RESET_M;
2230 		err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2231 		if (err)
2232 			return err;
2233 	}
2234 
2235 	ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
2236 
2237 	return 0;
2238 }
2239 
2240 /**
2241  * ice_start_phy_timer_e822 - Start the PHY clock timer
2242  * @hw: pointer to the HW struct
2243  * @port: the PHY port to start
2244  *
2245  * Start the clock of a PHY port. This must be done as part of the flow to
2246  * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2247  * initialized or when link speed changes.
2248  *
2249  * Hardware will take Vernier measurements on Tx or Rx of packets.
2250  */
2251 int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
2252 {
2253 	u32 lo, hi, val;
2254 	u64 incval;
2255 	u8 tmr_idx;
2256 	int err;
2257 
2258 	tmr_idx = ice_get_ptp_src_clock_index(hw);
2259 
2260 	err = ice_stop_phy_timer_e822(hw, port, false);
2261 	if (err)
2262 		return err;
2263 
2264 	ice_phy_cfg_lane_e822(hw, port);
2265 
2266 	err = ice_phy_cfg_uix_e822(hw, port);
2267 	if (err)
2268 		return err;
2269 
2270 	err = ice_phy_cfg_parpcs_e822(hw, port);
2271 	if (err)
2272 		return err;
2273 
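	/* Mirror the source timer increment value into the port so the PHY
	 * timer ticks at the same rate. The value read here is written to
	 * the port's TIMETUS shadow register and latched by the INIT_INCVAL
	 * command issued below.
	 */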
2274 	lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
2275 	hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
2276 	incval = (u64)hi << 32 | lo;
2277 
2278 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
2279 	if (err)
2280 		return err;
2281 
2282 	err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2283 	if (err)
2284 		return err;
2285 
2286 	ice_ptp_exec_tmr_cmd(hw);
2287 
2288 	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2289 	if (err)
2290 		return err;
2291 
2292 	val |= P_REG_PS_SFT_RESET_M;
2293 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2294 	if (err)
2295 		return err;
2296 
2297 	val |= P_REG_PS_START_M;
2298 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2299 	if (err)
2300 		return err;
2301 
2302 	val &= ~P_REG_PS_SFT_RESET_M;
2303 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2304 	if (err)
2305 		return err;
2306 
2307 	err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2308 	if (err)
2309 		return err;
2310 
2311 	ice_ptp_exec_tmr_cmd(hw);
2312 
2313 	val |= P_REG_PS_ENA_CLK_M;
2314 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2315 	if (err)
2316 		return err;
2317 
2318 	val |= P_REG_PS_LOAD_OFFSET_M;
2319 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2320 	if (err)
2321 		return err;
2322 
2323 	ice_ptp_exec_tmr_cmd(hw);
2324 
2325 	err = ice_sync_phy_timer_e822(hw, port);
2326 	if (err)
2327 		return err;
2328 
2329 	ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
2330 
2331 	return 0;
2332 }
2333 
2334 /**
2335  * ice_phy_calc_vernier_e822 - Perform vernier calculations
2336  * @hw: pointer to the HW struct
2337  * @port: the PHY port to configure
2338  *
2339  * Perform vernier calculations for the Tx and Rx offset. This will enable
2340  * hardware to include the more precise offset calibrations,
2341  * increasing precision of the generated timestamps.
2342  *
2343  * This cannot be done until hardware has measured the offsets, which requires
2344  * waiting until at least one packet has been sent and received by the device.
2345  */
2346 int ice_phy_calc_vernier_e822(struct ice_hw *hw, u8 port)
2347 {
2348 	int err;
2349 	u32 val;
2350 
2351 	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val);
2352 	if (err) {
2353 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
2354 			  port, err);
2355 		return err;
2356 	}
2357 
2358 	if (!(val & P_REG_TX_OV_STATUS_OV_M)) {
2359 		ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n",
2360 			  port);
2361 		return -EBUSY;
2362 	}
2363 
2364 	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val);
2365 	if (err) {
2366 		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
2367 			  port, err);
2368 		return err;
2369 	}
2370 
	if (!(val & P_REG_RX_OV_STATUS_OV_M)) {
2372 		ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n",
2373 			  port);
2374 		return -EBUSY;
2375 	}
2376 
2377 	err = ice_phy_cfg_tx_offset_e822(hw, port);
2378 	if (err) {
2379 		ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, err %d\n",
2380 			  port, err);
2381 		return err;
2382 	}
2383 
2384 	err = ice_phy_cfg_rx_offset_e822(hw, port);
2385 	if (err) {
2386 		ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, err %d\n",
2387 			  port, err);
2388 		return err;
2389 	}
2390 
2391 	return 0;
2392 }
2393 
2394 /* E810 functions
2395  *
2396  * The following functions operate on the E810 series devices which use
2397  * a separate external PHY.
2398  */
2399 
2400 /**
2401  * ice_read_phy_reg_e810 - Read register from external PHY on E810
2402  * @hw: pointer to the HW struct
2403  * @addr: the address to read from
2404  * @val: On return, the value read from the PHY
2405  *
2406  * Read a register from the external PHY on the E810 device.
2407  */
2408 static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
2409 {
2410 	struct ice_sbq_msg_input msg = {0};
2411 	int err;
2412 
2413 	msg.msg_addr_low = lower_16_bits(addr);
2414 	msg.msg_addr_high = upper_16_bits(addr);
2415 	msg.opcode = ice_sbq_msg_rd;
2416 	msg.dest_dev = rmn_0;
2417 
2418 	err = ice_sbq_rw_reg(hw, &msg);
2419 	if (err) {
2420 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2421 			  err);
2422 		return err;
2423 	}
2424 
2425 	*val = msg.data;
2426 
2427 	return 0;
2428 }
2429 
2430 /**
2431  * ice_write_phy_reg_e810 - Write register on external PHY on E810
2432  * @hw: pointer to the HW struct
 * @addr: the address to write to
2434  * @val: the value to write to the PHY
2435  *
2436  * Write a value to a register of the external PHY on the E810 device.
2437  */
2438 static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
2439 {
2440 	struct ice_sbq_msg_input msg = {0};
2441 	int err;
2442 
2443 	msg.msg_addr_low = lower_16_bits(addr);
2444 	msg.msg_addr_high = upper_16_bits(addr);
2445 	msg.opcode = ice_sbq_msg_wr;
2446 	msg.dest_dev = rmn_0;
2447 	msg.data = val;
2448 
2449 	err = ice_sbq_rw_reg(hw, &msg);
2450 	if (err) {
2451 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2452 			  err);
2453 		return err;
2454 	}
2455 
2456 	return 0;
2457 }
2458 
2459 /**
 * ice_read_phy_tstamp_ll_e810 - Read PHY timestamp registers through the FW
2461  * @hw: pointer to the HW struct
2462  * @idx: the timestamp index to read
2463  * @hi: 8 bit timestamp high value
2464  * @lo: 32 bit timestamp low value
2465  *
 * Read an 8 bit timestamp high value and a 32 bit timestamp low value from the
2467  * timestamp block of the external PHY on the E810 device using the low latency
2468  * timestamp read.
2469  */
2470 static int
2471 ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
2472 {
2473 	u32 val;
2474 	u8 i;
2475 
2476 	/* Write TS index to read to the PF register so the FW can read it */
2477 	val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
2478 	wr32(hw, PF_SB_ATQBAL, val);
2479 
2480 	/* Read the register repeatedly until the FW provides us the TS */
2481 	for (i = TS_LL_READ_RETRIES; i > 0; i--) {
2482 		val = rd32(hw, PF_SB_ATQBAL);
2483 
2484 		/* When the bit is cleared, the TS is ready in the register */
2485 		if (!(FIELD_GET(TS_LL_READ_TS, val))) {
2486 			/* High 8 bit value of the TS is on the bits 16:23 */
2487 			*hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
2488 
2489 			/* Read the low 32 bit value and set the TS valid bit */
2490 			*lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
2491 			return 0;
2492 		}
2493 
2494 		udelay(10);
2495 	}
2496 
2497 	/* FW failed to provide the TS in time */
2498 	ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
2499 	return -EINVAL;
2500 }
2501 
2502 /**
 * ice_read_phy_tstamp_sbq_e810 - Read PHY timestamp registers through the sideband queue
2504  * @hw: pointer to the HW struct
2505  * @lport: the lport to read from
2506  * @idx: the timestamp index to read
2507  * @hi: 8 bit timestamp high value
2508  * @lo: 32 bit timestamp low value
2509  *
 * Read an 8 bit timestamp high value and a 32 bit timestamp low value from the
 * timestamp block of the external PHY on the E810 device using the sideband queue.
2512  */
2513 static int
2514 ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
2515 			     u32 *lo)
2516 {
2517 	u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2518 	u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2519 	u32 lo_val, hi_val;
2520 	int err;
2521 
2522 	err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
2523 	if (err) {
2524 		ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
2525 			  err);
2526 		return err;
2527 	}
2528 
2529 	err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
2530 	if (err) {
2531 		ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
2532 			  err);
2533 		return err;
2534 	}
2535 
2536 	*lo = lo_val;
2537 	*hi = (u8)hi_val;
2538 
2539 	return 0;
2540 }
2541 
2542 /**
2543  * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
2544  * @hw: pointer to the HW struct
2545  * @lport: the lport to read from
2546  * @idx: the timestamp index to read
2547  * @tstamp: on return, the 40bit timestamp value
2548  *
2549  * Read a 40bit timestamp value out of the timestamp block of the external PHY
2550  * on the E810 device.
2551  */
2552 static int
2553 ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
2554 {
2555 	u32 lo = 0;
2556 	u8 hi = 0;
2557 	int err;
2558 
2559 	if (hw->dev_caps.ts_dev_info.ts_ll_read)
2560 		err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
2561 	else
2562 		err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
2563 
2564 	if (err)
2565 		return err;
2566 
2567 	/* For E810 devices, the timestamp is reported with the lower 32 bits
2568 	 * in the low register, and the upper 8 bits in the high register.
2569 	 */
2570 	*tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
2571 
2572 	return 0;
2573 }
2574 
2575 /**
2576  * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
2577  * @hw: pointer to the HW struct
2578  * @lport: the lport to read from
2579  * @idx: the timestamp index to reset
2580  *
2581  * Clear a timestamp, resetting its valid bit, from the timestamp block of the
2582  * external PHY on the E810 device.
2583  */
2584 static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
2585 {
2586 	u32 lo_addr, hi_addr;
2587 	int err;
2588 
2589 	lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2590 	hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2591 
2592 	err = ice_write_phy_reg_e810(hw, lo_addr, 0);
2593 	if (err) {
2594 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
2595 			  err);
2596 		return err;
2597 	}
2598 
2599 	err = ice_write_phy_reg_e810(hw, hi_addr, 0);
2600 	if (err) {
2601 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
2602 			  err);
2603 		return err;
2604 	}
2605 
2606 	return 0;
2607 }
2608 
2609 /**
2610  * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
2611  * @hw: pointer to HW struct
2612  *
2613  * Enable the timesync PTP functionality for the external PHY connected to
2614  * this function.
2615  */
2616 int ice_ptp_init_phy_e810(struct ice_hw *hw)
2617 {
2618 	u8 tmr_idx;
2619 	int err;
2620 
2621 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2622 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
2623 				     GLTSYN_ENA_TSYN_ENA_M);
2624 	if (err)
2625 		ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
2626 			  err);
2627 
2628 	return err;
2629 }
2630 
2631 /**
2632  * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
2633  * @hw: pointer to HW struct
2634  *
2635  * Perform E810-specific PTP hardware clock initialization steps.
2636  */
2637 static int ice_ptp_init_phc_e810(struct ice_hw *hw)
2638 {
2639 	/* Ensure synchronization delay is zero */
2640 	wr32(hw, GLTSYN_SYNC_DLAY, 0);
2641 
2642 	/* Initialize the PHY */
2643 	return ice_ptp_init_phy_e810(hw);
2644 }
2645 
2646 /**
2647  * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
2648  * @hw: Board private structure
2649  * @time: Time to initialize the PHY port clock to
2650  *
 * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting the
2652  * initial clock time. The time will not actually be programmed until the
2653  * driver issues an INIT_TIME command.
2654  *
2655  * The time value is the upper 32 bits of the PHY timer, usually in units of
2656  * nominal nanoseconds.
2657  */
2658 static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
2659 {
2660 	u8 tmr_idx;
2661 	int err;
2662 
2663 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2664 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
2665 	if (err) {
2666 		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
2667 			  err);
2668 		return err;
2669 	}
2670 
2671 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
2672 	if (err) {
2673 		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
2674 			  err);
2675 		return err;
2676 	}
2677 
2678 	return 0;
2679 }
2680 
2681 /**
2682  * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
2683  * @hw: pointer to HW struct
2684  * @adj: adjustment value to program
2685  *
2686  * Prepare the PHY port for an atomic adjustment by programming the PHY
2687  * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
2688  * is completed by issuing an ADJ_TIME sync command.
2689  *
2690  * The adjustment value only contains the portion used for the upper 32bits of
2691  * the PHY timer, usually in units of nominal nanoseconds. Negative
2692  * adjustments are supported using 2s complement arithmetic.
2693  */
2694 static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
2695 {
2696 	u8 tmr_idx;
2697 	int err;
2698 
2699 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2700 
2701 	/* Adjustments are represented as signed 2's complement values in
2702 	 * nanoseconds. Sub-nanosecond adjustment is not supported.
2703 	 */
2704 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
2705 	if (err) {
2706 		ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
2707 			  err);
2708 		return err;
2709 	}
2710 
2711 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
2712 	if (err) {
2713 		ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
2714 			  err);
2715 		return err;
2716 	}
2717 
2718 	return 0;
2719 }
2720 
2721 /**
2722  * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
2723  * @hw: pointer to HW struct
2724  * @incval: The new 40bit increment value to prepare
2725  *
2726  * Prepare the PHY port for a new increment value by programming the PHY
2727  * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
2728  * completed by issuing an INIT_INCVAL command.
2729  */
2730 static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
2731 {
2732 	u32 high, low;
2733 	u8 tmr_idx;
2734 	int err;
2735 
2736 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2737 	low = lower_32_bits(incval);
2738 	high = upper_32_bits(incval);
2739 
2740 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
2741 	if (err) {
2742 		ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
2743 			  err);
2744 		return err;
2745 	}
2746 
2747 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
2748 	if (err) {
2749 		ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n",
2750 			  err);
2751 		return err;
2752 	}
2753 
2754 	return 0;
2755 }
2756 
2757 /**
2758  * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
2759  * @hw: pointer to HW struct
2760  * @cmd: Command to be sent to the port
2761  *
2762  * Prepare the external PHYs connected to this device for a timer sync
2763  * command.
2764  */
2765 static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
2766 {
2767 	u32 cmd_val, val;
2768 	int err;
2769 
2770 	switch (cmd) {
2771 	case INIT_TIME:
2772 		cmd_val = GLTSYN_CMD_INIT_TIME;
2773 		break;
2774 	case INIT_INCVAL:
2775 		cmd_val = GLTSYN_CMD_INIT_INCVAL;
2776 		break;
2777 	case ADJ_TIME:
2778 		cmd_val = GLTSYN_CMD_ADJ_TIME;
2779 		break;
2780 	case READ_TIME:
2781 		cmd_val = GLTSYN_CMD_READ_TIME;
2782 		break;
2783 	case ADJ_TIME_AT_TIME:
2784 		cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
2785 		break;
	default:
		ice_debug(hw, ICE_DBG_PTP, "Unknown timer command %u\n", cmd);
		return -EINVAL;
	}
2787 
2788 	/* Read, modify, write */
2789 	err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
2790 	if (err) {
2791 		ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
2792 		return err;
2793 	}
2794 
2795 	/* Modify necessary bits only and perform write */
2796 	val &= ~TS_CMD_MASK_E810;
2797 	val |= cmd_val;
2798 
2799 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
2800 	if (err) {
2801 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
2802 		return err;
2803 	}
2804 
2805 	return 0;
2806 }
2807 
2808 /* Device agnostic functions
2809  *
2810  * The following functions implement shared behavior common to both E822 and
2811  * E810 devices, possibly calling a device specific implementation where
2812  * necessary.
2813  */
2814 
2815 /**
2816  * ice_ptp_lock - Acquire PTP global semaphore register lock
2817  * @hw: pointer to the HW struct
2818  *
2819  * Acquire the global PTP hardware semaphore lock. Returns true if the lock
2820  * was acquired, false otherwise.
2821  *
2822  * The PFTSYN_SEM register sets the busy bit on read, returning the previous
2823  * value. If software sees the busy bit cleared, this means that this function
2824  * acquired the lock (and the busy bit is now set). If software sees the busy
2825  * bit set, it means that another function acquired the lock.
2826  *
2827  * Software must clear the busy bit with a write to release the lock for other
2828  * functions when done.
2829  */
2830 bool ice_ptp_lock(struct ice_hw *hw)
2831 {
2832 	u32 hw_lock;
2833 	int i;
2834 
2835 #define MAX_TRIES 15
2836 
2837 	for (i = 0; i < MAX_TRIES; i++) {
2838 		hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
2839 		hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
2840 		if (hw_lock) {
2841 			/* Somebody is holding the lock */
2842 			usleep_range(5000, 6000);
2843 			continue;
2844 		}
2845 
2846 		break;
2847 	}
2848 
2849 	return !hw_lock;
2850 }
2851 
2852 /**
2853  * ice_ptp_unlock - Release PTP global semaphore register lock
2854  * @hw: pointer to the HW struct
2855  *
2856  * Release the global PTP hardware semaphore lock. This is done by writing to
2857  * the PFTSYN_SEM register.
2858  */
2859 void ice_ptp_unlock(struct ice_hw *hw)
2860 {
2861 	wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
2862 }
2863 
2864 /**
2865  * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
2866  * @hw: pointer to HW struct
2867  * @cmd: the command to issue
2868  *
2869  * Prepare the source timer and PHY timers and then trigger the requested
2870  * command. This causes the shadow registers previously written in preparation
2871  * for the command to be synchronously applied to both the source and PHY
2872  * timers.
2873  */
2874 static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
2875 {
2876 	int err;
2877 
2878 	/* First, prepare the source timer */
2879 	ice_ptp_src_cmd(hw, cmd);
2880 
2881 	/* Next, prepare the ports */
2882 	if (ice_is_e810(hw))
2883 		err = ice_ptp_port_cmd_e810(hw, cmd);
2884 	else
2885 		err = ice_ptp_port_cmd_e822(hw, cmd);
2886 	if (err) {
2887 		ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
2888 			  cmd, err);
2889 		return err;
2890 	}
2891 
2892 	/* Write the sync command register to drive both source and PHY timer
2893 	 * commands synchronously
2894 	 */
2895 	ice_ptp_exec_tmr_cmd(hw);
2896 
2897 	return 0;
2898 }
2899 
2900 /**
2901  * ice_ptp_init_time - Initialize device time to provided value
2902  * @hw: pointer to HW struct
2903  * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
2904  *
2905  * Initialize the device to the specified time provided. This requires a three
2906  * step process:
2907  *
2908  * 1) write the new init time to the source timer shadow registers
2909  * 2) write the new init time to the PHY timer shadow registers
2910  * 3) issue an init_time timer command to synchronously switch both the source
2911  *    and port timers to the new init time value at the next clock cycle.
2912  */
2913 int ice_ptp_init_time(struct ice_hw *hw, u64 time)
2914 {
2915 	u8 tmr_idx;
2916 	int err;
2917 
2918 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2919 
2920 	/* Source timers */
2921 	wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
2922 	wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
2923 	wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
2924 
2925 	/* PHY timers */
2926 	/* Fill Rx and Tx ports and send msg to PHY */
2927 	if (ice_is_e810(hw))
2928 		err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
2929 	else
2930 		err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
2931 	if (err)
2932 		return err;
2933 
2934 	return ice_ptp_tmr_cmd(hw, INIT_TIME);
2935 }
2936 
2937 /**
2938  * ice_ptp_write_incval - Program PHC with new increment value
2939  * @hw: pointer to HW struct
2940  * @incval: Source timer increment value per clock cycle
2941  *
2942  * Program the PHC with a new increment value. This requires a three-step
2943  * process:
2944  *
2945  * 1) Write the increment value to the source timer shadow registers
2946  * 2) Write the increment value to the PHY timer shadow registers
2947  * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
2948  *    source and port timers to the new increment value at the next clock
2949  *    cycle.
2950  */
2951 int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
2952 {
2953 	u8 tmr_idx;
2954 	int err;
2955 
2956 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2957 
2958 	/* Shadow Adjust */
2959 	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
2960 	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
2961 
2962 	if (ice_is_e810(hw))
2963 		err = ice_ptp_prep_phy_incval_e810(hw, incval);
2964 	else
2965 		err = ice_ptp_prep_phy_incval_e822(hw, incval);
2966 	if (err)
2967 		return err;
2968 
2969 	return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
2970 }
2971 
2972 /**
2973  * ice_ptp_write_incval_locked - Program new incval while holding semaphore
2974  * @hw: pointer to HW struct
2975  * @incval: Source timer increment value per clock cycle
2976  *
2977  * Program a new PHC incval while holding the PTP semaphore.
2978  */
2979 int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
2980 {
2981 	int err;
2982 
2983 	if (!ice_ptp_lock(hw))
2984 		return -EBUSY;
2985 
2986 	err = ice_ptp_write_incval(hw, incval);
2987 
2988 	ice_ptp_unlock(hw);
2989 
2990 	return err;
2991 }
2992 
2993 /**
2994  * ice_ptp_adj_clock - Adjust PHC clock time atomically
2995  * @hw: pointer to HW struct
2996  * @adj: Adjustment in nanoseconds
2997  *
2998  * Perform an atomic adjustment of the PHC time by the specified number of
2999  * nanoseconds. This requires a three-step process:
3000  *
3001  * 1) Write the adjustment to the source timer shadow registers
3002  * 2) Write the adjustment to the PHY timer shadow registers
3003  * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
3004  *    both the source and port timers at the next clock cycle.
3005  */
3006 int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
3007 {
3008 	u8 tmr_idx;
3009 	int err;
3010 
3011 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3012 
3013 	/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
3014 	 * For an ADJ_TIME command, this set of registers represents the value
3015 	 * to add to the clock time. It supports subtraction by interpreting
3016 	 * the value as a 2's complement integer.
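	 *
	 * For example, an adjustment of -10 ns would be written as 0xFFFFFFF6
	 * and interpreted by hardware as subtracting 10 ns.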
3017 	 */
3018 	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
3019 	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
3020 
3021 	if (ice_is_e810(hw))
3022 		err = ice_ptp_prep_phy_adj_e810(hw, adj);
3023 	else
3024 		err = ice_ptp_prep_phy_adj_e822(hw, adj);
3025 	if (err)
3026 		return err;
3027 
3028 	return ice_ptp_tmr_cmd(hw, ADJ_TIME);
3029 }
3030 
3031 /**
 * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
3033  * @hw: pointer to the HW struct
3034  * @block: the block to read from
3035  * @idx: the timestamp index to read
3036  * @tstamp: on return, the 40bit timestamp value
3037  *
3038  * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
3039  * the block is the quad to read from. For E810 devices, the block is the
3040  * logical port to read from.
3041  */
3042 int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
3043 {
3044 	if (ice_is_e810(hw))
3045 		return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
3046 	else
3047 		return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
3048 }
3049 
3050 /**
3051  * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
3052  * @hw: pointer to the HW struct
3053  * @block: the block to read from
3054  * @idx: the timestamp index to reset
3055  *
3056  * Clear a timestamp, resetting its valid bit, from the timestamp block. For
3057  * E822 devices, the block is the quad to clear from. For E810 devices, the
3058  * block is the logical port to clear from.
3059  */
3060 int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
3061 {
3062 	if (ice_is_e810(hw))
3063 		return ice_clear_phy_tstamp_e810(hw, block, idx);
3064 	else
3065 		return ice_clear_phy_tstamp_e822(hw, block, idx);
3066 }
3067 
3068 /* E810T SMA functions
3069  *
3070  * The following functions operate specifically on E810T hardware and are used
3071  * to access the extended GPIOs available.
3072  */
3073 
3074 /**
 * ice_get_pca9575_handle - Find the handle of the PCA9575 I/O expander
3076  * @hw: pointer to the hw struct
3077  * @pca9575_handle: GPIO controller's handle
3078  *
 * Find and return the GPIO controller's handle in the netlist. Once found,
 * the handle is cached in the HW structure and subsequent calls return the
 * cached value.
3082  */
3083 static int
3084 ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
3085 {
3086 	struct ice_aqc_get_link_topo *cmd;
3087 	struct ice_aq_desc desc;
3088 	int status;
3089 	u8 idx;
3090 
3091 	/* If handle was read previously return cached value */
3092 	if (hw->io_expander_handle) {
3093 		*pca9575_handle = hw->io_expander_handle;
3094 		return 0;
3095 	}
3096 
3097 	/* If handle was not detected read it from the netlist */
3098 	cmd = &desc.params.get_link_topo;
3099 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
3100 
3101 	/* Set node type to GPIO controller */
3102 	cmd->addr.topo_params.node_type_ctx =
3103 		(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
3104 		 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
3105 
3106 #define SW_PCA9575_SFP_TOPO_IDX		2
3107 #define SW_PCA9575_QSFP_TOPO_IDX	1
3108 
3109 	/* Check if the SW IO expander controlling SMA exists in the netlist. */
3110 	if (hw->device_id == ICE_DEV_ID_E810C_SFP)
3111 		idx = SW_PCA9575_SFP_TOPO_IDX;
3112 	else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
3113 		idx = SW_PCA9575_QSFP_TOPO_IDX;
3114 	else
3115 		return -EOPNOTSUPP;
3116 
3117 	cmd->addr.topo_params.index = idx;
3118 
3119 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3120 	if (status)
3121 		return -EOPNOTSUPP;
3122 
3123 	/* Verify if we found the right IO expander type */
3124 	if (desc.params.get_link_topo.node_part_num !=
3125 		ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
3126 		return -EOPNOTSUPP;
3127 
3128 	/* If present save the handle and return it */
3129 	hw->io_expander_handle =
3130 		le16_to_cpu(desc.params.get_link_topo.addr.handle);
3131 	*pca9575_handle = hw->io_expander_handle;
3132 
3133 	return 0;
3134 }
3135 
3136 /**
 * ice_read_sma_ctrl_e810t - Read the SMA controller state
3138  * @hw: pointer to the hw struct
3139  * @data: pointer to data to be read from the GPIO controller
3140  *
3141  * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
3142  * PCA9575 expander, so only bits 3-7 in data are valid.
3143  */
3144 int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
3145 {
3146 	int status;
3147 	u16 handle;
3148 	u8 i;
3149 
3150 	status = ice_get_pca9575_handle(hw, &handle);
3151 	if (status)
3152 		return status;
3153 
3154 	*data = 0;
3155 
3156 	for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3157 		bool pin;
3158 
3159 		status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3160 					 &pin, NULL);
3161 		if (status)
3162 			break;
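		/* The pin state is stored inverted, so a set bit in *data
		 * corresponds to a pin that reads back as low.
		 */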
3163 		*data |= (u8)(!pin) << i;
3164 	}
3165 
3166 	return status;
3167 }
3168 
3169 /**
 * ice_write_sma_ctrl_e810t - Write the SMA controller state
3171  * @hw: pointer to the hw struct
3172  * @data: data to be written to the GPIO controller
3173  *
3174  * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
3175  * of the PCA9575 expander, so only bits 3-7 in data are valid.
3176  */
3177 int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
3178 {
3179 	int status;
3180 	u16 handle;
3181 	u8 i;
3182 
3183 	status = ice_get_pca9575_handle(hw, &handle);
3184 	if (status)
3185 		return status;
3186 
3187 	for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3188 		bool pin;
3189 
3190 		pin = !(data & (1 << i));
3191 		status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3192 					 pin, NULL);
3193 		if (status)
3194 			break;
3195 	}
3196 
3197 	return status;
3198 }
3199 
3200 /**
 * ice_read_pca9575_reg_e810t - Read a register from the PCA9575 I/O expander
3202  * @hw: pointer to the hw struct
3203  * @offset: GPIO controller register offset
3204  * @data: pointer to data to be read from the GPIO controller
3205  *
3206  * Read the register from the GPIO controller
3207  */
3208 int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
3209 {
3210 	struct ice_aqc_link_topo_addr link_topo;
3211 	__le16 addr;
3212 	u16 handle;
3213 	int err;
3214 
3215 	memset(&link_topo, 0, sizeof(link_topo));
3216 
3217 	err = ice_get_pca9575_handle(hw, &handle);
3218 	if (err)
3219 		return err;
3220 
3221 	link_topo.handle = cpu_to_le16(handle);
3222 	link_topo.topo_params.node_type_ctx =
3223 		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
3224 			   ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
3225 
3226 	addr = cpu_to_le16((u16)offset);
3227 
3228 	return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
3229 }
3230 
3231 /**
 * ice_is_pca9575_present - Check if the PCA9575 I/O expander is present
3233  * @hw: pointer to the hw struct
3234  *
3235  * Check if the SW IO expander is present in the netlist
3236  */
3237 bool ice_is_pca9575_present(struct ice_hw *hw)
3238 {
3239 	u16 handle = 0;
3240 	int status;
3241 
3242 	if (!ice_is_e810t(hw))
3243 		return false;
3244 
3245 	status = ice_get_pca9575_handle(hw, &handle);
3246 
3247 	return !status && handle;
3248 }
3249 
3250 /**
3251  * ice_ptp_init_phc - Initialize PTP hardware clock
3252  * @hw: pointer to the HW struct
3253  *
3254  * Perform the steps required to initialize the PTP hardware clock.
3255  */
3256 int ice_ptp_init_phc(struct ice_hw *hw)
3257 {
3258 	u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3259 
3260 	/* Enable source clocks */
3261 	wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
3262 
3263 	/* Clear event err indications for auxiliary pins */
3264 	(void)rd32(hw, GLTSYN_STAT(src_idx));
3265 
3266 	if (ice_is_e810(hw))
3267 		return ice_ptp_init_phc_e810(hw);
3268 	else
3269 		return ice_ptp_init_phc_e822(hw);
3270 }
3271