1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include <linux/delay.h>
5 #include "ice_common.h"
6 #include "ice_ptp_hw.h"
7 #include "ice_ptp_consts.h"
8 #include "ice_cgu_regs.h"
9 
/* Low level functions for interacting with and managing the device clock used
 * for the Precision Time Protocol.
 *
 * The ice hardware represents the current time using three registers:
 *
 *    GLTSYN_TIME_H     GLTSYN_TIME_L     GLTSYN_TIME_R
 *  +---------------+ +---------------+ +---------------+
 *  |    32 bits    | |    32 bits    | |    32 bits    |
 *  +---------------+ +---------------+ +---------------+
 *
 * The registers are incremented every clock tick using a 40bit increment
 * value defined over two registers:
 *
 *                     GLTSYN_INCVAL_H   GLTSYN_INCVAL_L
 *                    +---------------+ +---------------+
 *                    |     8 bits    | |    32 bits    |
 *                    +---------------+ +---------------+
 *
 * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
 * registers every clock source tick. Depending on the specific device
 * configuration, the clock source frequency could be one of a number of
 * values.
 *
 * For E810 devices, the increment frequency is 812.5 MHz.
 *
 * For E822 devices the clock can be derived from different sources, and the
 * increment has an effective frequency of one of the following:
 * - 823.4375 MHz
 * - 783.36 MHz
 * - 796.875 MHz
 * - 816 MHz
 * - 830.078125 MHz
 * - 783.36 MHz
 *
 * The hardware captures timestamps in the PHY for incoming packets, and for
 * outgoing packets on request. To support this, the PHY maintains a timer
 * that matches the lower 64 bits of the global source timer.
 *
 * In order to ensure that the PHY timers and the source timer are equivalent,
 * shadow registers are used to prepare the desired initial values. A special
 * sync command is issued to trigger copying from the shadow registers into
 * the appropriate source and PHY registers simultaneously.
 *
 * The driver supports devices which have different PHYs with subtly different
 * mechanisms to program and control the timers. We divide the devices into
 * families named after the first major device, E810 and similar devices, and
 * E822 and similar devices.
 *
 * - E822 based devices have additional support for fine grained Vernier
 *   calibration which requires significant setup
 * - The layout of timestamp data in the PHY register blocks is different
 * - The way timer synchronization commands are issued is different.
 *
 * To support this, very low level functions have an e810 or e822 suffix
 * indicating what type of device they work on. Higher level abstractions for
 * tasks that can be done on both devices do not have the suffix and will
 * correctly look up the appropriate low level function when running.
 *
 * Functions which only make sense on a single device family may not have
 * a suitable generic implementation.
 */

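/* Worked example (editorial note, not part of the driver): the increment
 * value is nanoseconds in a fixed point format with 32 fractional bits,
 * since its low 32 bits line up with the sub-nanosecond GLTSYN_TIME_R
 * register. The nominal value for a given clock source is therefore
 *
 *	incval = (1 billion ns/s * 2^32) / (source frequency in Hz)
 *
 * For the E810 increment frequency of 812.5 MHz this is roughly 1.230769 ns
 * per tick, or about 0x13B13B13B in the 40bit representation, which makes
 * the PTP hardware clock advance at one nanosecond per nanosecond of real
 * time. A hypothetical helper for this calculation might look like:
 */
#if 0	/* illustrative sketch only, never built */
static u64 example_nominal_incval(u64 src_freq_hz)
{
	/* nanoseconds per source clock tick, in 32.32 fixed point */
	return div64_u64(1000000000ULL << 32, src_freq_hz);
}
#endif
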
72 /**
73  * ice_get_ptp_src_clock_index - determine source clock index
74  * @hw: pointer to HW struct
75  *
76  * Determine the source clock index currently in use, based on device
77  * capabilities reported during initialization.
78  */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
80 {
81 	return hw->func_caps.ts_func_info.tmr_index_assoc;
82 }
83 
84 /**
85  * ice_ptp_read_src_incval - Read source timer increment value
86  * @hw: pointer to HW struct
87  *
88  * Read the increment value of the source timer and return it.
89  */
static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
91 {
92 	u32 lo, hi;
93 	u8 tmr_idx;
94 
95 	tmr_idx = ice_get_ptp_src_clock_index(hw);
96 
97 	lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
98 	hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
99 
100 	return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
101 }
102 
103 /**
104  * ice_ptp_src_cmd - Prepare source timer for a timer command
105  * @hw: pointer to HW structure
106  * @cmd: Timer command
107  *
108  * Prepare the source timer for an upcoming timer sync command.
109  */
static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
111 {
112 	u32 cmd_val;
113 	u8 tmr_idx;
114 
115 	tmr_idx = ice_get_ptp_src_clock_index(hw);
116 	cmd_val = tmr_idx << SEL_CPK_SRC;
117 
118 	switch (cmd) {
119 	case INIT_TIME:
120 		cmd_val |= GLTSYN_CMD_INIT_TIME;
121 		break;
122 	case INIT_INCVAL:
123 		cmd_val |= GLTSYN_CMD_INIT_INCVAL;
124 		break;
125 	case ADJ_TIME:
126 		cmd_val |= GLTSYN_CMD_ADJ_TIME;
127 		break;
128 	case ADJ_TIME_AT_TIME:
129 		cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
130 		break;
131 	case READ_TIME:
132 		cmd_val |= GLTSYN_CMD_READ_TIME;
133 		break;
134 	case ICE_PTP_NOP:
135 		break;
136 	}
137 
138 	wr32(hw, GLTSYN_CMD, cmd_val);
139 }
140 
141 /**
142  * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
143  * @hw: pointer to HW struct
144  *
145  * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
146  * write immediately. This triggers the hardware to begin executing all of the
147  * source and PHY timer commands synchronously.
148  */
static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
150 {
151 	wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
152 	ice_flush(hw);
153 }
154 
155 /* E822 family functions
156  *
157  * The following functions operate on the E822 family of devices.
158  */
159 
160 /**
161  * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
162  * @msg: the PHY message buffer to fill in
163  * @port: the port to access
164  * @offset: the register offset
165  */
166 static void
ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
168 {
169 	int phy_port, phy, quadtype;
170 
171 	phy_port = port % ICE_PORTS_PER_PHY;
172 	phy = port / ICE_PORTS_PER_PHY;
173 	quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
174 
175 	if (quadtype == 0) {
176 		msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
177 		msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
178 	} else {
179 		msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
180 		msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
181 	}
182 
183 	if (phy == 0)
184 		msg->dest_dev = rmn_0;
185 	else if (phy == 1)
186 		msg->dest_dev = rmn_1;
187 	else
188 		msg->dest_dev = rmn_2;
189 }
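
/* Editorial example: assuming the usual E822 layout of 4 ports per quad and
 * 8 ports per PHY (ICE_PORTS_PER_QUAD and ICE_PORTS_PER_PHY), port 5 maps to
 * phy 0, phy_port 5 and quadtype 1, so its registers are addressed with the
 * P_Q1_L()/P_Q1_H() macros relative to P_4_BASE and the message is sent to
 * destination device rmn_0.
 */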
190 
191 /**
192  * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
193  * @low_addr: the low address to check
194  * @high_addr: on return, contains the high address of the 64bit register
195  *
196  * Checks if the provided low address is one of the known 64bit PHY values
197  * represented as two 32bit registers. If it is, return the appropriate high
198  * register offset to use.
199  */
static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
201 {
202 	switch (low_addr) {
203 	case P_REG_PAR_PCS_TX_OFFSET_L:
204 		*high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
205 		return true;
206 	case P_REG_PAR_PCS_RX_OFFSET_L:
207 		*high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
208 		return true;
209 	case P_REG_PAR_TX_TIME_L:
210 		*high_addr = P_REG_PAR_TX_TIME_U;
211 		return true;
212 	case P_REG_PAR_RX_TIME_L:
213 		*high_addr = P_REG_PAR_RX_TIME_U;
214 		return true;
215 	case P_REG_TOTAL_TX_OFFSET_L:
216 		*high_addr = P_REG_TOTAL_TX_OFFSET_U;
217 		return true;
218 	case P_REG_TOTAL_RX_OFFSET_L:
219 		*high_addr = P_REG_TOTAL_RX_OFFSET_U;
220 		return true;
221 	case P_REG_UIX66_10G_40G_L:
222 		*high_addr = P_REG_UIX66_10G_40G_U;
223 		return true;
224 	case P_REG_UIX66_25G_100G_L:
225 		*high_addr = P_REG_UIX66_25G_100G_U;
226 		return true;
227 	case P_REG_TX_CAPTURE_L:
228 		*high_addr = P_REG_TX_CAPTURE_U;
229 		return true;
230 	case P_REG_RX_CAPTURE_L:
231 		*high_addr = P_REG_RX_CAPTURE_U;
232 		return true;
233 	case P_REG_TX_TIMER_INC_PRE_L:
234 		*high_addr = P_REG_TX_TIMER_INC_PRE_U;
235 		return true;
236 	case P_REG_RX_TIMER_INC_PRE_L:
237 		*high_addr = P_REG_RX_TIMER_INC_PRE_U;
238 		return true;
239 	default:
240 		return false;
241 	}
242 }
243 
244 /**
245  * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
246  * @low_addr: the low address to check
247  * @high_addr: on return, contains the high address of the 40bit value
248  *
249  * Checks if the provided low address is one of the known 40bit PHY values
250  * split into two registers with the lower 8 bits in the low register and the
251  * upper 32 bits in the high register. If it is, return the appropriate high
252  * register offset to use.
253  */
static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
255 {
256 	switch (low_addr) {
257 	case P_REG_TIMETUS_L:
258 		*high_addr = P_REG_TIMETUS_U;
259 		return true;
260 	case P_REG_PAR_RX_TUS_L:
261 		*high_addr = P_REG_PAR_RX_TUS_U;
262 		return true;
263 	case P_REG_PAR_TX_TUS_L:
264 		*high_addr = P_REG_PAR_TX_TUS_U;
265 		return true;
266 	case P_REG_PCS_RX_TUS_L:
267 		*high_addr = P_REG_PCS_RX_TUS_U;
268 		return true;
269 	case P_REG_PCS_TX_TUS_L:
270 		*high_addr = P_REG_PCS_TX_TUS_U;
271 		return true;
272 	case P_REG_DESK_PAR_RX_TUS_L:
273 		*high_addr = P_REG_DESK_PAR_RX_TUS_U;
274 		return true;
275 	case P_REG_DESK_PAR_TX_TUS_L:
276 		*high_addr = P_REG_DESK_PAR_TX_TUS_U;
277 		return true;
278 	case P_REG_DESK_PCS_RX_TUS_L:
279 		*high_addr = P_REG_DESK_PCS_RX_TUS_U;
280 		return true;
281 	case P_REG_DESK_PCS_TX_TUS_L:
282 		*high_addr = P_REG_DESK_PCS_TX_TUS_U;
283 		return true;
284 	default:
285 		return false;
286 	}
287 }
288 
289 /**
290  * ice_read_phy_reg_e822 - Read a PHY register
291  * @hw: pointer to the HW struct
292  * @port: PHY port to read from
293  * @offset: PHY register offset to read
294  * @val: on return, the contents read from the PHY
295  *
296  * Read a PHY register for the given port over the device sideband queue.
297  */
298 static int
ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
300 {
301 	struct ice_sbq_msg_input msg = {0};
302 	int err;
303 
304 	ice_fill_phy_msg_e822(&msg, port, offset);
305 	msg.opcode = ice_sbq_msg_rd;
306 
307 	err = ice_sbq_rw_reg(hw, &msg);
308 	if (err) {
309 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
310 			  err);
311 		return err;
312 	}
313 
314 	*val = msg.data;
315 
316 	return 0;
317 }
318 
319 /**
320  * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
321  * @hw: pointer to the HW struct
322  * @port: PHY port to read from
323  * @low_addr: offset of the lower register to read from
324  * @val: on return, the contents of the 64bit value from the PHY registers
325  *
326  * Reads the two registers associated with a 64bit value and returns it in the
327  * val pointer. The offset always specifies the lower register offset to use.
328  * The high offset is looked up. This function only operates on registers
329  * known to be two parts of a 64bit value.
330  */
331 static int
ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
333 {
334 	u32 low, high;
335 	u16 high_addr;
336 	int err;
337 
338 	/* Only operate on registers known to be split into two 32bit
339 	 * registers.
340 	 */
341 	if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
342 		ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
343 			  low_addr);
344 		return -EINVAL;
345 	}
346 
347 	err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
348 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x, err %d\n",
350 			  low_addr, err);
351 		return err;
352 	}
353 
354 	err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
355 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x, err %d\n",
357 			  high_addr, err);
358 		return err;
359 	}
360 
361 	*val = (u64)high << 32 | low;
362 
363 	return 0;
364 }
365 
366 /**
367  * ice_write_phy_reg_e822 - Write a PHY register
368  * @hw: pointer to the HW struct
369  * @port: PHY port to write to
370  * @offset: PHY register offset to write
371  * @val: The value to write to the register
372  *
373  * Write a PHY register for the given port over the device sideband queue.
374  */
375 static int
ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
377 {
378 	struct ice_sbq_msg_input msg = {0};
379 	int err;
380 
381 	ice_fill_phy_msg_e822(&msg, port, offset);
382 	msg.opcode = ice_sbq_msg_wr;
383 	msg.data = val;
384 
385 	err = ice_sbq_rw_reg(hw, &msg);
386 	if (err) {
387 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
388 			  err);
389 		return err;
390 	}
391 
392 	return 0;
393 }
394 
395 /**
396  * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
397  * @hw: pointer to the HW struct
398  * @port: port to write to
399  * @low_addr: offset of the low register
400  * @val: 40b value to write
401  *
402  * Write the provided 40b value to the two associated registers by splitting
403  * it up into two chunks, the lower 8 bits and the upper 32 bits.
404  */
405 static int
ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
407 {
408 	u32 low, high;
409 	u16 high_addr;
410 	int err;
411 
412 	/* Only operate on registers known to be split into a lower 8 bit
413 	 * register and an upper 32 bit register.
414 	 */
415 	if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
416 		ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
417 			  low_addr);
418 		return -EINVAL;
419 	}
420 
421 	low = (u32)(val & P_REG_40B_LOW_M);
422 	high = (u32)(val >> P_REG_40B_HIGH_S);
423 
424 	err = ice_write_phy_reg_e822(hw, port, low_addr, low);
425 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
427 			  low_addr, err);
428 		return err;
429 	}
430 
431 	err = ice_write_phy_reg_e822(hw, port, high_addr, high);
432 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
434 			  high_addr, err);
435 		return err;
436 	}
437 
438 	return 0;
439 }
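
/* Editorial example: writing the 40bit value 0x13B13B13B with this helper
 * splits it into low = 0x3B (the lower 8 bits) and high = 0x013B13B1 (the
 * upper 32 bits), matching the register layout described above for
 * ice_is_40b_phy_reg_e822().
 */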
440 
441 /**
442  * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
443  * @hw: pointer to the HW struct
 * @port: PHY port to write to
 * @low_addr: offset of the lower register to write to
446  * @val: the contents of the 64bit value to write to PHY
447  *
448  * Write the 64bit value to the two associated 32bit PHY registers. The offset
449  * is always specified as the lower register, and the high address is looked
450  * up. This function only operates on registers known to be two parts of
451  * a 64bit value.
452  */
453 static int
ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
455 {
456 	u32 low, high;
457 	u16 high_addr;
458 	int err;
459 
460 	/* Only operate on registers known to be split into two 32bit
461 	 * registers.
462 	 */
463 	if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
464 		ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
465 			  low_addr);
466 		return -EINVAL;
467 	}
468 
469 	low = lower_32_bits(val);
470 	high = upper_32_bits(val);
471 
472 	err = ice_write_phy_reg_e822(hw, port, low_addr, low);
473 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
475 			  low_addr, err);
476 		return err;
477 	}
478 
479 	err = ice_write_phy_reg_e822(hw, port, high_addr, high);
480 	if (err) {
		ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
482 			  high_addr, err);
483 		return err;
484 	}
485 
486 	return 0;
487 }
488 
489 /**
490  * ice_fill_quad_msg_e822 - Fill message data for quad register access
491  * @msg: the PHY message buffer to fill in
492  * @quad: the quad to access
493  * @offset: the register offset
494  *
495  * Fill a message buffer for accessing a register in a quad shared between
496  * multiple PHYs.
497  */
498 static void
ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
500 {
501 	u32 addr;
502 
503 	msg->dest_dev = rmn_0;
504 
505 	if ((quad % ICE_NUM_QUAD_TYPE) == 0)
506 		addr = Q_0_BASE + offset;
507 	else
508 		addr = Q_1_BASE + offset;
509 
510 	msg->msg_addr_low = lower_16_bits(addr);
511 	msg->msg_addr_high = upper_16_bits(addr);
512 }
513 
514 /**
515  * ice_read_quad_reg_e822 - Read a PHY quad register
516  * @hw: pointer to the HW struct
517  * @quad: quad to read from
518  * @offset: quad register offset to read
519  * @val: on return, the contents read from the quad
520  *
521  * Read a quad register over the device sideband queue. Quad registers are
522  * shared between multiple PHYs.
523  */
524 int
ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
526 {
527 	struct ice_sbq_msg_input msg = {0};
528 	int err;
529 
530 	if (quad >= ICE_MAX_QUAD)
531 		return -EINVAL;
532 
533 	ice_fill_quad_msg_e822(&msg, quad, offset);
534 	msg.opcode = ice_sbq_msg_rd;
535 
536 	err = ice_sbq_rw_reg(hw, &msg);
537 	if (err) {
538 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
539 			  err);
540 		return err;
541 	}
542 
543 	*val = msg.data;
544 
545 	return 0;
546 }
547 
548 /**
549  * ice_write_quad_reg_e822 - Write a PHY quad register
550  * @hw: pointer to the HW struct
551  * @quad: quad to write to
552  * @offset: quad register offset to write
553  * @val: The value to write to the register
554  *
555  * Write a quad register over the device sideband queue. Quad registers are
556  * shared between multiple PHYs.
557  */
558 int
ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
560 {
561 	struct ice_sbq_msg_input msg = {0};
562 	int err;
563 
564 	if (quad >= ICE_MAX_QUAD)
565 		return -EINVAL;
566 
567 	ice_fill_quad_msg_e822(&msg, quad, offset);
568 	msg.opcode = ice_sbq_msg_wr;
569 	msg.data = val;
570 
571 	err = ice_sbq_rw_reg(hw, &msg);
572 	if (err) {
573 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
574 			  err);
575 		return err;
576 	}
577 
578 	return 0;
579 }
580 
581 /**
582  * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
583  * @hw: pointer to the HW struct
584  * @quad: the quad to read from
585  * @idx: the timestamp index to read
586  * @tstamp: on return, the 40bit timestamp value
587  *
588  * Read a 40bit timestamp value out of the two associated registers in the
589  * quad memory block that is shared between the internal PHYs of the E822
590  * family of devices.
591  */
592 static int
ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
594 {
595 	u16 lo_addr, hi_addr;
596 	u32 lo, hi;
597 	int err;
598 
599 	lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
600 	hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
601 
602 	err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
603 	if (err) {
604 		ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
605 			  err);
606 		return err;
607 	}
608 
609 	err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
610 	if (err) {
611 		ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
612 			  err);
613 		return err;
614 	}
615 
616 	/* For E822 based internal PHYs, the timestamp is reported with the
617 	 * lower 8 bits in the low register, and the upper 32 bits in the high
618 	 * register.
619 	 */
620 	*tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
621 
622 	return 0;
623 }
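
/* Editorial example: with the layout above (lower 8 bits in the low
 * register, upper 32 bits in the high register), the raw timestamp is
 * reassembled as (hi << 8) | (lo & 0xff); for example hi = 0x12345678 and
 * lo = 0xAB produce the 40bit value 0x12345678AB.
 */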
624 
625 /**
626  * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
627  * @hw: pointer to the HW struct
628  * @quad: the quad to read from
629  * @idx: the timestamp index to reset
630  *
631  * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
632  * shared between the internal PHYs on the E822 devices.
633  */
634 static int
ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
636 {
637 	u16 lo_addr, hi_addr;
638 	int err;
639 
640 	lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
641 	hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
642 
643 	err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
644 	if (err) {
645 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
646 			  err);
647 		return err;
648 	}
649 
650 	err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
651 	if (err) {
652 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
653 			  err);
654 		return err;
655 	}
656 
657 	return 0;
658 }
659 
660 /**
661  * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block
662  * @hw: pointer to the HW struct
663  * @quad: the quad to read from
664  *
665  * Clear all timestamps from the PHY quad block that is shared between the
666  * internal PHYs on the E822 devices.
667  */
void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad)
669 {
670 	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
671 	ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
672 }
673 
674 /**
675  * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks
676  * @hw: pointer to the HW struct
677  */
static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw)
679 {
680 	unsigned int quad;
681 
682 	for (quad = 0; quad < ICE_MAX_QUAD; quad++)
683 		ice_ptp_reset_ts_memory_quad_e822(hw, quad);
684 }
685 
686 /**
687  * ice_read_cgu_reg_e822 - Read a CGU register
688  * @hw: pointer to the HW struct
689  * @addr: Register address to read
690  * @val: storage for register value read
691  *
692  * Read the contents of a register of the Clock Generation Unit. Only
693  * applicable to E822 devices.
694  */
695 static int
ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
697 {
698 	struct ice_sbq_msg_input cgu_msg;
699 	int err;
700 
701 	cgu_msg.opcode = ice_sbq_msg_rd;
702 	cgu_msg.dest_dev = cgu;
703 	cgu_msg.msg_addr_low = addr;
704 	cgu_msg.msg_addr_high = 0x0;
705 
706 	err = ice_sbq_rw_reg(hw, &cgu_msg);
707 	if (err) {
708 		ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
709 			  addr, err);
710 		return err;
711 	}
712 
713 	*val = cgu_msg.data;
714 
715 	return err;
716 }
717 
718 /**
719  * ice_write_cgu_reg_e822 - Write a CGU register
720  * @hw: pointer to the HW struct
721  * @addr: Register address to write
722  * @val: value to write into the register
723  *
724  * Write the specified value to a register of the Clock Generation Unit. Only
725  * applicable to E822 devices.
726  */
727 static int
ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
729 {
730 	struct ice_sbq_msg_input cgu_msg;
731 	int err;
732 
733 	cgu_msg.opcode = ice_sbq_msg_wr;
734 	cgu_msg.dest_dev = cgu;
735 	cgu_msg.msg_addr_low = addr;
736 	cgu_msg.msg_addr_high = 0x0;
737 	cgu_msg.data = val;
738 
739 	err = ice_sbq_rw_reg(hw, &cgu_msg);
740 	if (err) {
741 		ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
742 			  addr, err);
743 		return err;
744 	}
745 
746 	return err;
747 }
748 
749 /**
750  * ice_clk_freq_str - Convert time_ref_freq to string
751  * @clk_freq: Clock frequency
752  *
753  * Convert the specified TIME_REF clock frequency to a string.
754  */
static const char *ice_clk_freq_str(u8 clk_freq)
756 {
757 	switch ((enum ice_time_ref_freq)clk_freq) {
758 	case ICE_TIME_REF_FREQ_25_000:
759 		return "25 MHz";
760 	case ICE_TIME_REF_FREQ_122_880:
761 		return "122.88 MHz";
762 	case ICE_TIME_REF_FREQ_125_000:
763 		return "125 MHz";
764 	case ICE_TIME_REF_FREQ_153_600:
765 		return "153.6 MHz";
766 	case ICE_TIME_REF_FREQ_156_250:
767 		return "156.25 MHz";
768 	case ICE_TIME_REF_FREQ_245_760:
769 		return "245.76 MHz";
770 	default:
771 		return "Unknown";
772 	}
773 }
774 
775 /**
776  * ice_clk_src_str - Convert time_ref_src to string
777  * @clk_src: Clock source
778  *
779  * Convert the specified clock source to its string name.
780  */
static const char *ice_clk_src_str(u8 clk_src)
782 {
783 	switch ((enum ice_clk_src)clk_src) {
784 	case ICE_CLK_SRC_TCX0:
785 		return "TCX0";
786 	case ICE_CLK_SRC_TIME_REF:
787 		return "TIME_REF";
788 	default:
789 		return "Unknown";
790 	}
791 }
792 
793 /**
794  * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
795  * @hw: pointer to the HW struct
796  * @clk_freq: Clock frequency to program
797  * @clk_src: Clock source to select (TIME_REF, or TCX0)
798  *
799  * Configure the Clock Generation Unit with the desired clock frequency and
800  * time reference, enabling the PLL which drives the PTP hardware clock.
801  */
802 static int
ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
804 		     enum ice_clk_src clk_src)
805 {
806 	union tspll_ro_bwm_lf bwm_lf;
807 	union nac_cgu_dword19 dw19;
808 	union nac_cgu_dword22 dw22;
809 	union nac_cgu_dword24 dw24;
810 	union nac_cgu_dword9 dw9;
811 	int err;
812 
813 	if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
814 		dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
815 			 clk_freq);
816 		return -EINVAL;
817 	}
818 
819 	if (clk_src >= NUM_ICE_CLK_SRC) {
820 		dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
821 			 clk_src);
822 		return -EINVAL;
823 	}
824 
825 	if (clk_src == ICE_CLK_SRC_TCX0 &&
826 	    clk_freq != ICE_TIME_REF_FREQ_25_000) {
827 		dev_warn(ice_hw_to_dev(hw),
828 			 "TCX0 only supports 25 MHz frequency\n");
829 		return -EINVAL;
830 	}
831 
832 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
833 	if (err)
834 		return err;
835 
836 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
837 	if (err)
838 		return err;
839 
840 	err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
841 	if (err)
842 		return err;
843 
844 	/* Log the current clock configuration */
845 	ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
846 		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
847 		  ice_clk_src_str(dw24.field.time_ref_sel),
848 		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
849 		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
850 
851 	/* Disable the PLL before changing the clock source or frequency */
852 	if (dw24.field.ts_pll_enable) {
853 		dw24.field.ts_pll_enable = 0;
854 
855 		err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
856 		if (err)
857 			return err;
858 	}
859 
860 	/* Set the frequency */
861 	dw9.field.time_ref_freq_sel = clk_freq;
862 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
863 	if (err)
864 		return err;
865 
866 	/* Configure the TS PLL feedback divisor */
867 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
868 	if (err)
869 		return err;
870 
871 	dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
872 	dw19.field.tspll_ndivratio = 1;
873 
874 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
875 	if (err)
876 		return err;
877 
878 	/* Configure the TS PLL post divisor */
879 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
880 	if (err)
881 		return err;
882 
883 	dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
884 	dw22.field.time1588clk_sel_div2 = 0;
885 
886 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
887 	if (err)
888 		return err;
889 
890 	/* Configure the TS PLL pre divisor and clock source */
891 	err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
892 	if (err)
893 		return err;
894 
895 	dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
896 	dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
897 	dw24.field.time_ref_sel = clk_src;
898 
899 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
900 	if (err)
901 		return err;
902 
903 	/* Finally, enable the PLL */
904 	dw24.field.ts_pll_enable = 1;
905 
906 	err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
907 	if (err)
908 		return err;
909 
910 	/* Wait to verify if the PLL locks */
911 	usleep_range(1000, 5000);
912 
913 	err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
914 	if (err)
915 		return err;
916 
917 	if (!bwm_lf.field.plllock_true_lock_cri) {
918 		dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
919 		return -EBUSY;
920 	}
921 
922 	/* Log the current clock configuration */
923 	ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
924 		  dw24.field.ts_pll_enable ? "enabled" : "disabled",
925 		  ice_clk_src_str(dw24.field.time_ref_sel),
926 		  ice_clk_freq_str(dw9.field.time_ref_freq_sel),
927 		  bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
928 
929 	return 0;
930 }
931 
932 /**
933  * ice_init_cgu_e822 - Initialize CGU with settings from firmware
934  * @hw: pointer to the HW structure
935  *
936  * Initialize the Clock Generation Unit of the E822 device.
937  */
static int ice_init_cgu_e822(struct ice_hw *hw)
939 {
940 	struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
941 	union tspll_cntr_bist_settings cntr_bist;
942 	int err;
943 
944 	err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
945 				    &cntr_bist.val);
946 	if (err)
947 		return err;
948 
949 	/* Disable sticky lock detection so lock err reported is accurate */
950 	cntr_bist.field.i_plllock_sel_0 = 0;
951 	cntr_bist.field.i_plllock_sel_1 = 0;
952 
953 	err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
954 				     cntr_bist.val);
955 	if (err)
956 		return err;
957 
958 	/* Configure the CGU PLL using the parameters from the function
959 	 * capabilities.
960 	 */
961 	err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
962 				   (enum ice_clk_src)ts_info->clk_src);
963 	if (err)
964 		return err;
965 
966 	return 0;
967 }
968 
969 /**
970  * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
971  * @hw: pointer to the HW struct
972  *
973  * Set the window length used for the vernier port calibration process.
974  */
static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
976 {
977 	u8 port;
978 
979 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
980 		int err;
981 
982 		err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
983 					     PTP_VERNIER_WL);
984 		if (err) {
985 			ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
986 				  port, err);
987 			return err;
988 		}
989 	}
990 
991 	return 0;
992 }
993 
994 /**
995  * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
996  * @hw: pointer to HW struct
997  *
998  * Perform PHC initialization steps specific to E822 devices.
999  */
static int ice_ptp_init_phc_e822(struct ice_hw *hw)
1001 {
1002 	int err;
1003 	u32 regval;
1004 
1005 	/* Enable reading switch and PHY registers over the sideband queue */
1006 #define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
1007 #define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
1008 	regval = rd32(hw, PF_SB_REM_DEV_CTL);
1009 	regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
1010 		   PF_SB_REM_DEV_CTL_PHY0);
1011 	wr32(hw, PF_SB_REM_DEV_CTL, regval);
1012 
1013 	/* Initialize the Clock Generation Unit */
1014 	err = ice_init_cgu_e822(hw);
1015 	if (err)
1016 		return err;
1017 
1018 	/* Set window length for all the ports */
1019 	return ice_ptp_set_vernier_wl(hw);
1020 }
1021 
1022 /**
1023  * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
1024  * @hw: pointer to the HW struct
1025  * @time: Time to initialize the PHY port clocks to
1026  *
1027  * Program the PHY port registers with a new initial time value. The port
1028  * clock will be initialized once the driver issues an INIT_TIME sync
1029  * command. The time value is the upper 32 bits of the PHY timer, usually in
1030  * units of nominal nanoseconds.
1031  */
1032 static int
ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
1034 {
1035 	u64 phy_time;
1036 	u8 port;
1037 	int err;
1038 
1039 	/* The time represents the upper 32 bits of the PHY timer, so we need
1040 	 * to shift to account for this when programming.
1041 	 */
1042 	phy_time = (u64)time << 32;
1043 
1044 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1045 		/* Tx case */
1046 		err = ice_write_64b_phy_reg_e822(hw, port,
1047 						 P_REG_TX_TIMER_INC_PRE_L,
1048 						 phy_time);
1049 		if (err)
1050 			goto exit_err;
1051 
1052 		/* Rx case */
1053 		err = ice_write_64b_phy_reg_e822(hw, port,
1054 						 P_REG_RX_TIMER_INC_PRE_L,
1055 						 phy_time);
1056 		if (err)
1057 			goto exit_err;
1058 	}
1059 
1060 	return 0;
1061 
1062 exit_err:
1063 	ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
1064 		  port, err);
1065 
1066 	return err;
1067 }
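
/* Editorial example: the PHY timer mirrors the lower 64 bits of the source
 * timer, i.e. 32 bits of nanoseconds above 32 bits of sub-nanoseconds. To
 * start the port timers at 1 second (1000000000 ns), the caller passes
 * time = 0x3B9ACA00 and this helper stages (u64)0x3B9ACA00 << 32 in the
 * Tx and Rx INC_PRE shadow registers, to be latched by a later INIT_TIME
 * sync command.
 */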
1068 
1069 /**
1070  * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
1071  * @hw: pointer to HW struct
1072  * @port: Port number to be programmed
1073  * @time: time in cycles to adjust the port Tx and Rx clocks
1074  *
1075  * Program the port for an atomic adjustment by writing the Tx and Rx timer
1076  * registers. The atomic adjustment won't be completed until the driver issues
1077  * an ADJ_TIME command.
1078  *
1079  * Note that time is not in units of nanoseconds. It is in clock time
1080  * including the lower sub-nanosecond portion of the port timer.
1081  *
1082  * Negative adjustments are supported using 2s complement arithmetic.
1083  */
1084 static int
ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
1086 {
1087 	u32 l_time, u_time;
1088 	int err;
1089 
1090 	l_time = lower_32_bits(time);
1091 	u_time = upper_32_bits(time);
1092 
1093 	/* Tx case */
1094 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
1095 				     l_time);
1096 	if (err)
1097 		goto exit_err;
1098 
1099 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
1100 				     u_time);
1101 	if (err)
1102 		goto exit_err;
1103 
1104 	/* Rx case */
1105 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
1106 				     l_time);
1107 	if (err)
1108 		goto exit_err;
1109 
1110 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
1111 				     u_time);
1112 	if (err)
1113 		goto exit_err;
1114 
1115 	return 0;
1116 
1117 exit_err:
1118 	ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
1119 		  port, err);
1120 	return err;
1121 }
1122 
1123 /**
1124  * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
1125  * @hw: pointer to HW struct
1126  * @adj: adjustment in nanoseconds
1127  *
1128  * Prepare the PHY ports for an atomic time adjustment by programming the PHY
1129  * Tx and Rx port registers. The actual adjustment is completed by issuing an
1130  * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
1131  */
1132 static int
ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
1134 {
1135 	s64 cycles;
1136 	u8 port;
1137 
1138 	/* The port clock supports adjustment of the sub-nanosecond portion of
1139 	 * the clock. We shift the provided adjustment in nanoseconds to
1140 	 * calculate the appropriate adjustment to program into the PHY ports.
1141 	 */
1142 	if (adj > 0)
1143 		cycles = (s64)adj << 32;
1144 	else
1145 		cycles = -(((s64)-adj) << 32);
1146 
1147 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1148 		int err;
1149 
1150 		err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
1151 		if (err)
1152 			return err;
1153 	}
1154 
1155 	return 0;
1156 }
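
/* Editorial example: because the port registers hold clock time including
 * the 32bit sub-nanosecond portion, an adjustment of +5 ns is staged as
 * (s64)5 << 32 and an adjustment of -2 ns as -((s64)2 << 32), which the
 * registers carry as a 2s complement value until the ADJ_TIME command
 * executes.
 */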
1157 
1158 /**
 * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for a new increment value
1160  * @hw: pointer to HW struct
1161  * @incval: new increment value to prepare
1162  *
1163  * Prepare each of the PHY ports for a new increment value by programming the
1164  * port's TIMETUS registers. The new increment value will be updated after
1165  * issuing an INIT_INCVAL command.
1166  */
1167 static int
ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
1169 {
1170 	int err;
1171 	u8 port;
1172 
1173 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1174 		err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
1175 						 incval);
1176 		if (err)
1177 			goto exit_err;
1178 	}
1179 
1180 	return 0;
1181 
1182 exit_err:
1183 	ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
1184 		  port, err);
1185 
1186 	return err;
1187 }
1188 
1189 /**
1190  * ice_ptp_read_port_capture - Read a port's local time capture
1191  * @hw: pointer to HW struct
1192  * @port: Port number to read
1193  * @tx_ts: on return, the Tx port time capture
1194  * @rx_ts: on return, the Rx port time capture
1195  *
1196  * Read the port's Tx and Rx local time capture values.
1197  *
1198  * Note this has no equivalent for the E810 devices.
1199  */
1200 static int
ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
1202 {
1203 	int err;
1204 
1205 	/* Tx case */
1206 	err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
1207 	if (err) {
1208 		ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
1209 			  err);
1210 		return err;
1211 	}
1212 
1213 	ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
1214 		  (unsigned long long)*tx_ts);
1215 
1216 	/* Rx case */
1217 	err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
1218 	if (err) {
1219 		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
1220 			  err);
1221 		return err;
1222 	}
1223 
1224 	ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
1225 		  (unsigned long long)*rx_ts);
1226 
1227 	return 0;
1228 }
1229 
1230 /**
1231  * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command
1232  * @hw: pointer to HW struct
1233  * @port: Port to which cmd has to be sent
1234  * @cmd: Command to be sent to the port
1235  *
1236  * Prepare the requested port for an upcoming timer sync command.
1237  *
1238  * Do not use this function directly. If you want to configure exactly one
1239  * port, use ice_ptp_one_port_cmd() instead.
1240  */
1241 static int
ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
1243 {
1244 	u32 cmd_val, val;
1245 	u8 tmr_idx;
1246 	int err;
1247 
1248 	tmr_idx = ice_get_ptp_src_clock_index(hw);
1249 	cmd_val = tmr_idx << SEL_PHY_SRC;
1250 	switch (cmd) {
1251 	case INIT_TIME:
1252 		cmd_val |= PHY_CMD_INIT_TIME;
1253 		break;
1254 	case INIT_INCVAL:
1255 		cmd_val |= PHY_CMD_INIT_INCVAL;
1256 		break;
1257 	case ADJ_TIME:
1258 		cmd_val |= PHY_CMD_ADJ_TIME;
1259 		break;
1260 	case READ_TIME:
1261 		cmd_val |= PHY_CMD_READ_TIME;
1262 		break;
1263 	case ADJ_TIME_AT_TIME:
1264 		cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
1265 		break;
1266 	case ICE_PTP_NOP:
1267 		break;
1268 	}
1269 
1270 	/* Tx case */
1271 	/* Read, modify, write */
1272 	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
1273 	if (err) {
1274 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
1275 			  err);
1276 		return err;
1277 	}
1278 
1279 	/* Modify necessary bits only and perform write */
1280 	val &= ~TS_CMD_MASK;
1281 	val |= cmd_val;
1282 
1283 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
1284 	if (err) {
1285 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
1286 			  err);
1287 		return err;
1288 	}
1289 
1290 	/* Rx case */
1291 	/* Read, modify, write */
1292 	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
1293 	if (err) {
1294 		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
1295 			  err);
1296 		return err;
1297 	}
1298 
1299 	/* Modify necessary bits only and perform write */
1300 	val &= ~TS_CMD_MASK;
1301 	val |= cmd_val;
1302 
1303 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
1304 	if (err) {
1305 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
1306 			  err);
1307 		return err;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 /**
1314  * ice_ptp_one_port_cmd - Prepare one port for a timer command
1315  * @hw: pointer to the HW struct
1316  * @configured_port: the port to configure with configured_cmd
1317  * @configured_cmd: timer command to prepare on the configured_port
1318  *
1319  * Prepare the configured_port for the configured_cmd, and prepare all other
1320  * ports for ICE_PTP_NOP. This causes the configured_port to execute the
1321  * desired command while all other ports perform no operation.
1322  */
1323 static int
ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
1325 		     enum ice_ptp_tmr_cmd configured_cmd)
1326 {
1327 	u8 port;
1328 
1329 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1330 		enum ice_ptp_tmr_cmd cmd;
1331 		int err;
1332 
1333 		if (port == configured_port)
1334 			cmd = configured_cmd;
1335 		else
1336 			cmd = ICE_PTP_NOP;
1337 
1338 		err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
1339 		if (err)
1340 			return err;
1341 	}
1342 
1343 	return 0;
1344 }
1345 
1346 /**
1347  * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
1348  * @hw: pointer to the HW struct
1349  * @cmd: timer command to prepare
1350  *
1351  * Prepare all ports connected to this device for an upcoming timer sync
1352  * command.
1353  */
1354 static int
ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
1356 {
1357 	u8 port;
1358 
1359 	for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
1360 		int err;
1361 
1362 		err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
1363 		if (err)
1364 			return err;
1365 	}
1366 
1367 	return 0;
1368 }
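
/* Editorial sketch of how the helpers above combine into the shadow-register
 * flow described at the top of this file: stage values, prepare the source
 * and PHY timers for a command, then latch everything at once. The function
 * below is illustrative only (the name is made up and staging the source
 * timer's own shadow registers is not shown); the real callers live in
 * higher level driver code.
 */
#if 0	/* illustrative sketch only, never built */
static int example_init_phy_time_e822(struct ice_hw *hw, u32 time_ns_lo)
{
	int err;

	/* Stage the initial time in every port's Tx/Rx shadow registers */
	err = ice_ptp_prep_phy_time_e822(hw, time_ns_lo);
	if (err)
		return err;

	/* Prepare the source timer and all PHY ports for INIT_TIME */
	ice_ptp_src_cmd(hw, INIT_TIME);
	err = ice_ptp_port_cmd_e822(hw, INIT_TIME);
	if (err)
		return err;

	/* Trigger the synchronized copy from shadow to live registers */
	ice_ptp_exec_tmr_cmd(hw);

	return 0;
}
#endif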
1369 
1370 /* E822 Vernier calibration functions
1371  *
1372  * The following functions are used as part of the vernier calibration of
1373  * a port. This calibration increases the precision of the timestamps on the
1374  * port.
1375  */
1376 
1377 /**
1378  * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
1379  * @hw: pointer to HW struct
1380  * @port: the port to read from
1381  * @link_out: if non-NULL, holds link speed on success
1382  * @fec_out: if non-NULL, holds FEC algorithm on success
1383  *
1384  * Read the serdes data for the PHY port and extract the link speed and FEC
1385  * algorithm.
1386  */
1387 static int
ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
1389 			       enum ice_ptp_link_spd *link_out,
1390 			       enum ice_ptp_fec_mode *fec_out)
1391 {
1392 	enum ice_ptp_link_spd link;
1393 	enum ice_ptp_fec_mode fec;
1394 	u32 serdes;
1395 	int err;
1396 
1397 	err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
1398 	if (err) {
1399 		ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
1400 		return err;
1401 	}
1402 
1403 	/* Determine the FEC algorithm */
1404 	fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
1405 
1406 	serdes &= P_REG_LINK_SPEED_SERDES_M;
1407 
1408 	/* Determine the link speed */
1409 	if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
1410 		switch (serdes) {
1411 		case ICE_PTP_SERDES_25G:
1412 			link = ICE_PTP_LNK_SPD_25G_RS;
1413 			break;
1414 		case ICE_PTP_SERDES_50G:
1415 			link = ICE_PTP_LNK_SPD_50G_RS;
1416 			break;
1417 		case ICE_PTP_SERDES_100G:
1418 			link = ICE_PTP_LNK_SPD_100G_RS;
1419 			break;
1420 		default:
1421 			return -EIO;
1422 		}
1423 	} else {
1424 		switch (serdes) {
1425 		case ICE_PTP_SERDES_1G:
1426 			link = ICE_PTP_LNK_SPD_1G;
1427 			break;
1428 		case ICE_PTP_SERDES_10G:
1429 			link = ICE_PTP_LNK_SPD_10G;
1430 			break;
1431 		case ICE_PTP_SERDES_25G:
1432 			link = ICE_PTP_LNK_SPD_25G;
1433 			break;
1434 		case ICE_PTP_SERDES_40G:
1435 			link = ICE_PTP_LNK_SPD_40G;
1436 			break;
1437 		case ICE_PTP_SERDES_50G:
1438 			link = ICE_PTP_LNK_SPD_50G;
1439 			break;
1440 		default:
1441 			return -EIO;
1442 		}
1443 	}
1444 
1445 	if (link_out)
1446 		*link_out = link;
1447 	if (fec_out)
1448 		*fec_out = fec;
1449 
1450 	return 0;
1451 }
1452 
1453 /**
1454  * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
1455  * @hw: pointer to HW struct
 * @port: the port to configure the quad for
1457  */
static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
1459 {
1460 	enum ice_ptp_link_spd link_spd;
1461 	int err;
1462 	u32 val;
1463 	u8 quad;
1464 
1465 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
1466 	if (err) {
1467 		ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
1468 			  err);
1469 		return;
1470 	}
1471 
1472 	quad = port / ICE_PORTS_PER_QUAD;
1473 
1474 	err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
1475 	if (err) {
1476 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GLB_CFG, err %d\n",
1477 			  err);
1478 		return;
1479 	}
1480 
1481 	if (link_spd >= ICE_PTP_LNK_SPD_40G)
1482 		val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1483 	else
1484 		val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
1485 
1486 	err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
1487 	if (err) {
1488 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
1489 			  err);
1490 		return;
1491 	}
1492 }
1493 
1494 /**
1495  * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
1496  * @hw: pointer to the HW structure
1497  * @port: the port to configure
1498  *
 * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
1500  * hardware clock time units (TUs). That is, determine the number of TUs per
1501  * serdes unit interval, and program the UIX registers with this conversion.
1502  *
1503  * This conversion is used as part of the calibration process when determining
1504  * the additional error of a timestamp vs the real time of transmission or
1505  * receipt of the packet.
1506  *
1507  * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
1508  * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
1509  *
1510  * To calculate the conversion ratio, we use the following facts:
1511  *
1512  * a) the clock frequency in Hz (cycles per second)
1513  * b) the number of TUs per cycle (the increment value of the clock)
1514  * c) 1 second per 1 billion nanoseconds
1515  * d) the duration of 66 UIs in nanoseconds
1516  *
1517  * Given these facts, we can use the following table to work out what ratios
1518  * to multiply in order to get the number of TUs per 66 UIs:
1519  *
1520  * cycles |   1 second   | incval (TUs) | nanoseconds
1521  * -------+--------------+--------------+-------------
1522  * second | 1 billion ns |    cycle     |   66 UIs
1523  *
 * To perform the multiplication using integers without too much loss of
 * precision, we can use the following equation:
 *
 * (freq * incval * 6600 LINE_UI) / (100 * 1 billion)
1528  *
1529  * We scale up to using 6600 UI instead of 66 in order to avoid fractional
1530  * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
1531  *
1532  * The increment value has a maximum expected range of about 34 bits, while
1533  * the frequency value is about 29 bits. Multiplying these values shouldn't
1534  * overflow the 64 bits. However, we must then further multiply them again by
1535  * the Serdes unit interval duration. To avoid overflow here, we split the
1536  * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
1537  * a divide by 390,625,000. This does lose some precision, but avoids
1538  * miscalculation due to arithmetic overflow.
1539  */
static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
1541 {
1542 	u64 cur_freq, clk_incval, tu_per_sec, uix;
1543 	int err;
1544 
1545 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1546 	clk_incval = ice_ptp_read_src_incval(hw);
1547 
1548 	/* Calculate TUs per second divided by 256 */
1549 	tu_per_sec = (cur_freq * clk_incval) >> 8;
1550 
1551 #define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
1552 #define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
1553 
1554 	/* Program the 10Gb/40Gb conversion ratio */
1555 	uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
1556 
1557 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
1558 					 uix);
1559 	if (err) {
1560 		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
1561 			  err);
1562 		return err;
1563 	}
1564 
1565 	/* Program the 25Gb/100Gb conversion ratio */
1566 	uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
1567 
1568 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
1569 					 uix);
1570 	if (err) {
1571 		ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
1572 			  err);
1573 		return err;
1574 	}
1575 
1576 	return 0;
1577 }
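
/* Editorial sanity check for the math above: at the nominal rate,
 * freq * incval is one billion nanoseconds per second expressed in TUs, so
 * the 10G/40G case works out to
 *
 *	(1e9 ns * 640) / 1e11 = 6.4 ns worth of TUs per 66 UIs,
 *
 * matching the 6.4 ns line UI duration quoted above. The shift-by-8 plus
 * divide-by-390,625,000 split reproduces the intended divide by 1e11, since
 * 256 * 390,625,000 = 1e11, minus the small truncation already noted.
 */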
1578 
1579 /**
1580  * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
1581  * @hw: pointer to the HW struct
1582  * @port: port to configure
1583  *
1584  * Configure the number of TUs for the PAR and PCS clocks used as part of the
1585  * timestamp calibration process. This depends on the link speed, as the PHY
1586  * uses different markers depending on the speed.
1587  *
1588  * 1Gb/10Gb/25Gb:
1589  * - Tx/Rx PAR/PCS markers
1590  *
1591  * 25Gb RS:
1592  * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1593  *
1594  * 40Gb/50Gb:
1595  * - Tx/Rx PAR/PCS markers
1596  * - Rx Deskew PAR/PCS markers
1597  *
 * 50G RS and 100G RS:
1599  * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
1600  * - Rx Deskew PAR/PCS markers
1601  * - Tx PAR/PCS markers
1602  *
1603  * To calculate the conversion, we use the PHC clock frequency (cycles per
1604  * second), the increment value (TUs per cycle), and the related PHY clock
1605  * frequency to calculate the TUs per unit of the PHY link clock. The
1606  * following table shows how the units convert:
1607  *
1608  * cycles |  TUs  | second
1609  * -------+-------+--------
1610  * second | cycle | cycles
1611  *
1612  * For each conversion register, look up the appropriate frequency from the
1613  * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
1614  * this to the appropriate register, preparing hardware to perform timestamp
1615  * calibration to calculate the total Tx or Rx offset to adjust the timestamp
1616  * in order to calibrate for the internal PHY delays.
1617  *
1618  * Note that the increment value ranges up to ~34 bits, and the clock
1619  * frequency is ~29 bits, so multiplying them together should fit within the
1620  * 64 bit arithmetic.
1621  */
static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
1623 {
1624 	u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
1625 	enum ice_ptp_link_spd link_spd;
1626 	enum ice_ptp_fec_mode fec_mode;
1627 	int err;
1628 
1629 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1630 	if (err)
1631 		return err;
1632 
1633 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1634 	clk_incval = ice_ptp_read_src_incval(hw);
1635 
1636 	/* Calculate TUs per cycle of the PHC clock */
1637 	tu_per_sec = cur_freq * clk_incval;
1638 
1639 	/* For each PHY conversion register, look up the appropriate link
1640 	 * speed frequency and determine the TUs per that clock's cycle time.
1641 	 * Split this into a high and low value and then program the
1642 	 * appropriate register. If that link speed does not use the
1643 	 * associated register, write zeros to clear it instead.
1644 	 */
1645 
1646 	/* P_REG_PAR_TX_TUS */
1647 	if (e822_vernier[link_spd].tx_par_clk)
1648 		phy_tus = div_u64(tu_per_sec,
1649 				  e822_vernier[link_spd].tx_par_clk);
1650 	else
1651 		phy_tus = 0;
1652 
1653 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
1654 					 phy_tus);
1655 	if (err)
1656 		return err;
1657 
1658 	/* P_REG_PAR_RX_TUS */
1659 	if (e822_vernier[link_spd].rx_par_clk)
1660 		phy_tus = div_u64(tu_per_sec,
1661 				  e822_vernier[link_spd].rx_par_clk);
1662 	else
1663 		phy_tus = 0;
1664 
1665 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
1666 					 phy_tus);
1667 	if (err)
1668 		return err;
1669 
1670 	/* P_REG_PCS_TX_TUS */
1671 	if (e822_vernier[link_spd].tx_pcs_clk)
1672 		phy_tus = div_u64(tu_per_sec,
1673 				  e822_vernier[link_spd].tx_pcs_clk);
1674 	else
1675 		phy_tus = 0;
1676 
1677 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
1678 					 phy_tus);
1679 	if (err)
1680 		return err;
1681 
1682 	/* P_REG_PCS_RX_TUS */
1683 	if (e822_vernier[link_spd].rx_pcs_clk)
1684 		phy_tus = div_u64(tu_per_sec,
1685 				  e822_vernier[link_spd].rx_pcs_clk);
1686 	else
1687 		phy_tus = 0;
1688 
1689 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
1690 					 phy_tus);
1691 	if (err)
1692 		return err;
1693 
1694 	/* P_REG_DESK_PAR_TX_TUS */
1695 	if (e822_vernier[link_spd].tx_desk_rsgb_par)
1696 		phy_tus = div_u64(tu_per_sec,
1697 				  e822_vernier[link_spd].tx_desk_rsgb_par);
1698 	else
1699 		phy_tus = 0;
1700 
1701 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
1702 					 phy_tus);
1703 	if (err)
1704 		return err;
1705 
1706 	/* P_REG_DESK_PAR_RX_TUS */
1707 	if (e822_vernier[link_spd].rx_desk_rsgb_par)
1708 		phy_tus = div_u64(tu_per_sec,
1709 				  e822_vernier[link_spd].rx_desk_rsgb_par);
1710 	else
1711 		phy_tus = 0;
1712 
1713 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
1714 					 phy_tus);
1715 	if (err)
1716 		return err;
1717 
1718 	/* P_REG_DESK_PCS_TX_TUS */
1719 	if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
1720 		phy_tus = div_u64(tu_per_sec,
1721 				  e822_vernier[link_spd].tx_desk_rsgb_pcs);
1722 	else
1723 		phy_tus = 0;
1724 
1725 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
1726 					 phy_tus);
1727 	if (err)
1728 		return err;
1729 
1730 	/* P_REG_DESK_PCS_RX_TUS */
1731 	if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
1732 		phy_tus = div_u64(tu_per_sec,
1733 				  e822_vernier[link_spd].rx_desk_rsgb_pcs);
1734 	else
1735 		phy_tus = 0;
1736 
1737 	return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
1738 					  phy_tus);
1739 }
1740 
1741 /**
 * ice_calc_fixed_tx_offset_e822 - Calculate the fixed Tx offset for a port
1743  * @hw: pointer to the HW struct
1744  * @link_spd: the Link speed to calculate for
1745  *
1746  * Calculate the fixed offset due to known static latency data.
1747  */
1748 static u64
1749 ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
1750 {
1751 	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
1752 
1753 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1754 	clk_incval = ice_ptp_read_src_incval(hw);
1755 
1756 	/* Calculate TUs per second */
1757 	tu_per_sec = cur_freq * clk_incval;
1758 
1759 	/* Calculate number of TUs to add for the fixed Tx latency. Since the
1760 	 * latency measurement is in 1/100th of a nanosecond, we need to
1761 	 * multiply by tu_per_sec and then divide by 1e11. This calculation
1762 	 * overflows 64 bit integer arithmetic, so break it up into two
1763 	 * divisions by 1e4 first then by 1e7.
1764 	 */
1765 	fixed_offset = div_u64(tu_per_sec, 10000);
1766 	fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
1767 	fixed_offset = div_u64(fixed_offset, 10000000);
1768 
1769 	return fixed_offset;
1770 }
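
/* To see why the divide is split, consider rough magnitudes (the exact
 * values depend on the TIME_REF in use): tu_per_sec is on the order of a
 * few times 10^18, while the fixed delays are up to a few thousand in
 * units of 1/100th of a nanosecond. Multiplying first would overflow a u64
 * (maximum ~1.8 * 10^19), but dividing by 1e4 up front keeps the
 * intermediate product in range:
 *
 *	fixed_offset = (tu_per_sec / 1e4) * tx_fixed_delay / 1e7
 *		     ~= tu_per_sec * tx_fixed_delay / 1e11
 */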
1771 
1772 /**
1773  * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
1774  * @hw: pointer to the HW struct
1775  * @port: the PHY port to configure
1776  *
1777  * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
1778  * adjust Tx timestamps by. This is calculated by combining some known static
1779  * latency along with the Vernier offset computations done by hardware.
1780  *
1781  * This function will not return successfully until the Tx offset calculations
1782  * have been completed, which requires waiting until at least one packet has
1783  * been transmitted by the device. It is safe to call this function
1784  * periodically until calibration succeeds, as it will only program the offset
1785  * once.
1786  *
1787  * To avoid overflow, when calculating the offset based on the known static
1788  * latency values, we use measurements in 1/100th of a nanosecond, and divide
1789  * the TUs per second up front. This avoids overflow while allowing
1790  * calculation of the adjustment using integer arithmetic.
1791  *
1792  * Returns zero on success, -EBUSY if the hardware vernier offset
1793  * calibration has not completed, or another error code on failure.
1794  */
1795 int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
1796 {
1797 	enum ice_ptp_link_spd link_spd;
1798 	enum ice_ptp_fec_mode fec_mode;
1799 	u64 total_offset, val;
1800 	int err;
1801 	u32 reg;
1802 
1803 	/* Nothing to do if we've already programmed the offset */
1804 	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OR, &reg);
1805 	if (err) {
1806 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
1807 			  port, err);
1808 		return err;
1809 	}
1810 
1811 	if (reg)
1812 		return 0;
1813 
1814 	err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &reg);
1815 	if (err) {
1816 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
1817 			  port, err);
1818 		return err;
1819 	}
1820 
1821 	if (!(reg & P_REG_TX_OV_STATUS_OV_M))
1822 		return -EBUSY;
1823 
1824 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
1825 	if (err)
1826 		return err;
1827 
1828 	total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
1829 
1830 	/* Read the first Vernier offset from the PHY register and add it to
1831 	 * the total offset.
1832 	 */
1833 	if (link_spd == ICE_PTP_LNK_SPD_1G ||
1834 	    link_spd == ICE_PTP_LNK_SPD_10G ||
1835 	    link_spd == ICE_PTP_LNK_SPD_25G ||
1836 	    link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1837 	    link_spd == ICE_PTP_LNK_SPD_40G ||
1838 	    link_spd == ICE_PTP_LNK_SPD_50G) {
1839 		err = ice_read_64b_phy_reg_e822(hw, port,
1840 						P_REG_PAR_PCS_TX_OFFSET_L,
1841 						&val);
1842 		if (err)
1843 			return err;
1844 
1845 		total_offset += val;
1846 	}
1847 
1848 	/* For Tx, we only need to use the second Vernier offset for
1849 	 * multi-lane link speeds with RS-FEC. The lanes will always be
1850 	 * aligned.
1851 	 */
1852 	if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1853 	    link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1854 		err = ice_read_64b_phy_reg_e822(hw, port,
1855 						P_REG_PAR_TX_TIME_L,
1856 						&val);
1857 		if (err)
1858 			return err;
1859 
1860 		total_offset += val;
1861 	}
1862 
1863 	/* Now that the total offset has been calculated, program it to the
1864 	 * PHY and indicate that the Tx offset is ready. After this,
1865 	 * timestamps will be enabled.
1866 	 */
1867 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
1868 					 total_offset);
1869 	if (err)
1870 		return err;
1871 
1872 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
1873 	if (err)
1874 		return err;
1875 
1876 	dev_info(ice_hw_to_dev(hw), "Port=%d Tx vernier offset calibration complete\n",
1877 		 port);
1878 
1879 	return 0;
1880 }
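
/* Since this function returns -EBUSY until hardware captures a Tx Vernier
 * measurement, callers typically poll it. A hypothetical sketch (not the
 * driver's actual scheduling code):
 *
 *	err = ice_phy_cfg_tx_offset_e822(hw, port);
 *	if (err == -EBUSY)
 *		requeue the polling work and try again later;
 *	else if (err)
 *		report the failure;
 */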
1881 
1882 /**
1883  * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
1884  * @hw: pointer to the HW struct
1885  * @port: the PHY port to adjust for
1886  * @link_spd: the current link speed of the PHY
1887  * @fec_mode: the current FEC mode of the PHY
1888  * @pmd_adj: on return, the amount to adjust the Rx total offset by
1889  *
1890  * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
1891  * This varies by link speed and FEC mode. The value calculated accounts for
1892  * various delays caused when receiving a packet.
1893  */
1894 static int
1895 ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
1896 			  enum ice_ptp_link_spd link_spd,
1897 			  enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
1898 {
1899 	u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
1900 	u8 pmd_align;
1901 	u32 val;
1902 	int err;
1903 
1904 	err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
1905 	if (err) {
1906 		ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
1907 			  err);
1908 		return err;
1909 	}
1910 
1911 	pmd_align = (u8)val;
1912 
1913 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
1914 	clk_incval = ice_ptp_read_src_incval(hw);
1915 
1916 	/* Calculate TUs per second */
1917 	tu_per_sec = cur_freq * clk_incval;
1918 
1919 	/* The PMD alignment adjustment measurement depends on the link speed,
1920 	 * and whether FEC is enabled. For each link speed, the alignment
1921 	 * adjustment is calculated by dividing a value by the length of
1922 	 * a Time Unit in nanoseconds.
1923 	 *
1924 	 * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
1925 	 * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
1926 	 * 10G w/FEC: align * 0.1 * 32/33
1927 	 * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
1928 	 * 25G w/FEC: align * 0.4 * 32/33
1929 	 * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
1930 	 * 40G w/FEC: align * 0.1 * 32/33
1931 	 * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
1932 	 * 50G w/FEC: align * 0.8 * 32/33
1933 	 *
1934 	 * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
1935 	 *
1936 	 * To allow for calculating this value using integer arithmetic, we
1937 	 * instead start with the number of TUs per second, (inverse of the
1938 	 * length of a Time Unit in nanoseconds), multiply by a value based
1939 	 * on the PMD alignment register, and then divide by the right value
1940 	 * calculated based on the table above. To avoid integer overflow this
1941 	 * division is broken up into a step of dividing by 125 first.
1942 	 */
1943 	if (link_spd == ICE_PTP_LNK_SPD_1G) {
1944 		if (pmd_align == 4)
1945 			mult = 10;
1946 		else
1947 			mult = (pmd_align + 6) % 10;
1948 	} else if (link_spd == ICE_PTP_LNK_SPD_10G ||
1949 		   link_spd == ICE_PTP_LNK_SPD_25G ||
1950 		   link_spd == ICE_PTP_LNK_SPD_40G ||
1951 		   link_spd == ICE_PTP_LNK_SPD_50G) {
1952 		/* If Clause 74 FEC, always calculate PMD adjust */
1953 		if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
1954 			mult = pmd_align;
1955 		else
1956 			mult = 0;
1957 	} else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
1958 		   link_spd == ICE_PTP_LNK_SPD_50G_RS ||
1959 		   link_spd == ICE_PTP_LNK_SPD_100G_RS) {
1960 		if (pmd_align < 17)
1961 			mult = pmd_align + 40;
1962 		else
1963 			mult = pmd_align;
1964 	} else {
1965 		ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
1966 			  link_spd);
1967 		mult = 0;
1968 	}
1969 
1970 	/* In some cases, there's no need to adjust for the PMD alignment */
1971 	if (!mult) {
1972 		*pmd_adj = 0;
1973 		return 0;
1974 	}
1975 
1976 	/* Calculate the adjustment by multiplying TUs per second by the
1977 	 * appropriate multiplier and divisor. To avoid overflow, we first
1978 	 * divide by 125, and then handle remaining divisor based on the link
1979 	 * speed pmd_adj_divisor value.
1980 	 */
1981 	adj = div_u64(tu_per_sec, 125);
1982 	adj *= mult;
1983 	adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
1984 
1985 	/* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
1986 	 * cycle count is necessary.
1987 	 */
1988 	if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
1989 		u64 cycle_adj;
1990 		u8 rx_cycle;
1991 
1992 		err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
1993 					    &val);
1994 		if (err) {
1995 			ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
1996 				  err);
1997 			return err;
1998 		}
1999 
2000 		rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
2001 		if (rx_cycle) {
2002 			mult = (4 - rx_cycle) * 40;
2003 
2004 			cycle_adj = div_u64(tu_per_sec, 125);
2005 			cycle_adj *= mult;
2006 			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
2007 
2008 			adj += cycle_adj;
2009 		}
2010 	} else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
2011 		u64 cycle_adj;
2012 		u8 rx_cycle;
2013 
2014 		err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
2015 					    &val);
2016 		if (err) {
2017 			ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
2018 				  err);
2019 			return err;
2020 		}
2021 
2022 		rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
2023 		if (rx_cycle) {
2024 			mult = rx_cycle * 40;
2025 
2026 			cycle_adj = div_u64(tu_per_sec, 125);
2027 			cycle_adj *= mult;
2028 			cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
2029 
2030 			adj += cycle_adj;
2031 		}
2032 	}
2033 
2034 	/* Return the calculated adjustment */
2035 	*pmd_adj = adj;
2036 
2037 	return 0;
2038 }
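
/* Putting the steps together, the integer math above approximates
 *
 *	adj ~= tu_per_sec * mult / (125 * pmd_adj_divisor)
 *
 * where the per-speed pmd_adj_divisor is chosen so the result equals the
 * nanosecond adjustment from the table, expressed in TUs. As a purely
 * illustrative case, a hypothetical pmd_align of 33 at 10G gives mult = 33,
 * i.e. an adjustment worth 33 * 0.1 * 32/33 = 3.2 ns.
 */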
2039 
2040 /**
2041  * ice_calc_fixed_rx_offset_e822 - Calculate the fixed Rx offset for a port
2042  * @hw: pointer to HW struct
2043  * @link_spd: The Link speed to calculate for
2044  *
2045  * Determine the fixed Rx latency for a given link speed.
2046  */
2047 static u64
2048 ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
2049 {
2050 	u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
2051 
2052 	cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
2053 	clk_incval = ice_ptp_read_src_incval(hw);
2054 
2055 	/* Calculate TUs per second */
2056 	tu_per_sec = cur_freq * clk_incval;
2057 
2058 	/* Calculate number of TUs to add for the fixed Rx latency. Since the
2059 	 * latency measurement is in 1/100th of a nanosecond, we need to
2060 	 * multiply by tu_per_sec and then divide by 1e11. This calculation
2061 	 * overflows 64 bit integer arithmetic, so break it up into two
2062 	 * divisions by 1e4 first then by 1e7.
2063 	 */
2064 	fixed_offset = div_u64(tu_per_sec, 10000);
2065 	fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
2066 	fixed_offset = div_u64(fixed_offset, 10000000);
2067 
2068 	return fixed_offset;
2069 }
2070 
2071 /**
2072  * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
2073  * @hw: pointer to the HW struct
2074  * @port: the PHY port to configure
2075  *
2076  * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
2077  * adjust Rx timestamps by. This combines calculations from the Vernier offset
2078  * measurements taken in hardware with some data about known fixed delay as
2079  * well as adjusting for multi-lane alignment delay.
2080  *
2081  * This function will not return successfully until the Rx offset calculations
2082  * have been completed, which requires waiting until at least one packet has
2083  * been received by the device. It is safe to call this function periodically
2084  * until calibration succeeds, as it will only program the offset once.
2085  *
2086  * This function must be called only after the offset registers are valid,
2087  * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
2088  * has measured the offset.
2089  *
2090  * To avoid overflow, when calculating the offset based on the known static
2091  * latency values, we use measurements in 1/100th of a nanosecond, and divide
2092  * the TUs per second up front. This avoids overflow while allowing
2093  * calculation of the adjustment using integer arithmetic.
2094  *
2095  * Returns zero on success, -EBUSY if the hardware vernier offset
2096  * calibration has not completed, or another error code on failure.
2097  */
2098 int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
2099 {
2100 	enum ice_ptp_link_spd link_spd;
2101 	enum ice_ptp_fec_mode fec_mode;
2102 	u64 total_offset, pmd, val;
2103 	int err;
2104 	u32 reg;
2105 
2106 	/* Nothing to do if we've already programmed the offset */
2107 	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OR, &reg);
2108 	if (err) {
2109 		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
2110 			  port, err);
2111 		return err;
2112 	}
2113 
2114 	if (reg)
2115 		return 0;
2116 
2117 	err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &reg);
2118 	if (err) {
2119 		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
2120 			  port, err);
2121 		return err;
2122 	}
2123 
2124 	if (!(reg & P_REG_RX_OV_STATUS_OV_M))
2125 		return -EBUSY;
2126 
2127 	err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
2128 	if (err)
2129 		return err;
2130 
2131 	total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
2132 
2133 	/* Read the first Vernier offset from the PHY register and add it to
2134 	 * the total offset.
2135 	 */
2136 	err = ice_read_64b_phy_reg_e822(hw, port,
2137 					P_REG_PAR_PCS_RX_OFFSET_L,
2138 					&val);
2139 	if (err)
2140 		return err;
2141 
2142 	total_offset += val;
2143 
2144 	/* For Rx, all multi-lane link speeds include a second Vernier
2145 	 * calibration, because the lanes might not be aligned.
2146 	 */
2147 	if (link_spd == ICE_PTP_LNK_SPD_40G ||
2148 	    link_spd == ICE_PTP_LNK_SPD_50G ||
2149 	    link_spd == ICE_PTP_LNK_SPD_50G_RS ||
2150 	    link_spd == ICE_PTP_LNK_SPD_100G_RS) {
2151 		err = ice_read_64b_phy_reg_e822(hw, port,
2152 						P_REG_PAR_RX_TIME_L,
2153 						&val);
2154 		if (err)
2155 			return err;
2156 
2157 		total_offset += val;
2158 	}
2159 
2160 	/* In addition, Rx must account for the PMD alignment */
2161 	err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
2162 	if (err)
2163 		return err;
2164 
2165 	/* For RS-FEC, this adjustment adds delay, but for other modes, it
2166 	 * subtracts delay.
2167 	 */
2168 	if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
2169 		total_offset += pmd;
2170 	else
2171 		total_offset -= pmd;
2172 
2173 	/* Now that the total offset has been calculated, program it to the
2174 	 * PHY and indicate that the Rx offset is ready. After this,
2175 	 * timestamps will be enabled.
2176 	 */
2177 	err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
2178 					 total_offset);
2179 	if (err)
2180 		return err;
2181 
2182 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
2183 	if (err)
2184 		return err;
2185 
2186 	dev_info(ice_hw_to_dev(hw), "Port=%d Rx vernier offset calibration complete\n",
2187 		 port);
2188 
2189 	return 0;
2190 }
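
/* In summary, the value programmed above is composed as follows (a
 * restatement of the code, not an extra register):
 *
 *	total_offset = fixed Rx delay
 *		     + P_REG_PAR_PCS_RX_OFFSET Vernier measurement
 *		     + P_REG_PAR_RX_TIME (multi-lane speeds only)
 *		     +/- pmd_adj (added for RS-FEC, subtracted otherwise)
 */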
2191 
2192 /**
2193  * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
2194  * @hw: pointer to the HW struct
2195  * @port: the PHY port to read
2196  * @phy_time: on return, the 64bit PHY timer value
2197  * @phc_time: on return, the lower 64bits of PHC time
2198  *
2199  * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
2200  * timer values.
2201  */
2202 static int
2203 ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
2204 			       u64 *phc_time)
2205 {
2206 	u64 tx_time, rx_time;
2207 	u32 zo, lo;
2208 	u8 tmr_idx;
2209 	int err;
2210 
2211 	tmr_idx = ice_get_ptp_src_clock_index(hw);
2212 
2213 	/* Prepare the PHC timer for a READ_TIME capture command */
2214 	ice_ptp_src_cmd(hw, READ_TIME);
2215 
2216 	/* Prepare the PHY timer for a READ_TIME capture command */
2217 	err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
2218 	if (err)
2219 		return err;
2220 
2221 	/* Issue the sync to start the READ_TIME capture */
2222 	ice_ptp_exec_tmr_cmd(hw);
2223 
2224 	/* Read the captured PHC time from the shadow time registers */
2225 	zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
2226 	lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
2227 	*phc_time = (u64)lo << 32 | zo;
2228 
2229 	/* Read the captured PHY time from the PHY shadow registers */
2230 	err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
2231 	if (err)
2232 		return err;
2233 
2234 	/* If the PHY Tx and Rx timers don't match, log a warning message.
2235 	 * Note that this should not happen in normal circumstances since the
2236 	 * driver always programs them together.
2237 	 */
2238 	if (tx_time != rx_time)
2239 		dev_warn(ice_hw_to_dev(hw),
2240 			 "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
2241 			 port, (unsigned long long)tx_time,
2242 			 (unsigned long long)rx_time);
2243 
2244 	*phy_time = tx_time;
2245 
2246 	return 0;
2247 }
2248 
2249 /**
2250  * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
2251  * @hw: pointer to the HW struct
2252  * @port: the PHY port to synchronize
2253  *
2254  * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
2255  * This is done by issuing a READ_TIME command which triggers a simultaneous
2256  * read of the PHY timer and PHC timer. Then we use the difference to
2257  * calculate an appropriate 2s complement addition to add to the PHY timer in
2258  * order to ensure it reads the same value as the primary PHC timer.
2259  */
2260 static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
2261 {
2262 	u64 phc_time, phy_time, difference;
2263 	int err;
2264 
2265 	if (!ice_ptp_lock(hw)) {
2266 		ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
2267 		return -EBUSY;
2268 	}
2269 
2270 	err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2271 	if (err)
2272 		goto err_unlock;
2273 
2274 	/* Calculate the amount required to add to the port time in order for
2275 	 * it to match the PHC time.
2276 	 *
2277 	 * Note that the port adjustment is done using 2s complement
2278 	 * arithmetic. This is convenient since it means that we can simply
2279 	 * calculate the difference between the PHC time and the port time,
2280 	 * and it will be interpreted correctly.
2281 	 */
2282 	difference = phc_time - phy_time;
2283 
2284 	err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
2285 	if (err)
2286 		goto err_unlock;
2287 
2288 	err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
2289 	if (err)
2290 		goto err_unlock;
2291 
2292 	/* Do not perform any action on the main timer */
2293 	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2294 
2295 	/* Issue the sync to activate the time adjustment */
2296 	ice_ptp_exec_tmr_cmd(hw);
2297 
2298 	/* Re-capture the timer values to flush the command registers and
2299 	 * verify that the time was properly adjusted.
2300 	 */
2301 	err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
2302 	if (err)
2303 		goto err_unlock;
2304 
2305 	dev_info(ice_hw_to_dev(hw),
2306 		 "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
2307 		 port, (unsigned long long)phy_time,
2308 		 (unsigned long long)phc_time);
2309 
2310 	ice_ptp_unlock(hw);
2311 
2312 	return 0;
2313 
2314 err_unlock:
2315 	ice_ptp_unlock(hw);
2316 	return err;
2317 }
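
/* The 2s complement property means the unsigned subtraction above yields a
 * correct signed adjustment even when the PHY timer is ahead of the PHC.
 * For example, with phc_time = 0x1000 and phy_time = 0x1800:
 *
 *	difference = 0x1000 - 0x1800 = 0xFFFFFFFFFFFFF800 = (s64)-0x800
 *
 * so the port adjustment moves the PHY timer back by 0x800 TUs.
 */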
2318 
2319 /**
2320  * ice_stop_phy_timer_e822 - Stop the PHY clock timer
2321  * @hw: pointer to the HW struct
2322  * @port: the PHY port to stop
2323  * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
2324  *
2325  * Stop the clock of a PHY port. This must be done as part of the flow to
2326  * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2327  * initialized or when link speed changes.
2328  */
2329 int
2330 ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
2331 {
2332 	int err;
2333 	u32 val;
2334 
2335 	err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
2336 	if (err)
2337 		return err;
2338 
2339 	err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
2340 	if (err)
2341 		return err;
2342 
2343 	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2344 	if (err)
2345 		return err;
2346 
2347 	val &= ~P_REG_PS_START_M;
2348 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2349 	if (err)
2350 		return err;
2351 
2352 	val &= ~P_REG_PS_ENA_CLK_M;
2353 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2354 	if (err)
2355 		return err;
2356 
2357 	if (soft_reset) {
2358 		val |= P_REG_PS_SFT_RESET_M;
2359 		err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2360 		if (err)
2361 			return err;
2362 	}
2363 
2364 	ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
2365 
2366 	return 0;
2367 }
2368 
2369 /**
2370  * ice_start_phy_timer_e822 - Start the PHY clock timer
2371  * @hw: pointer to the HW struct
2372  * @port: the PHY port to start
2373  *
2374  * Start the clock of a PHY port. This must be done as part of the flow to
2375  * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
2376  * initialized or when link speed changes.
2377  *
2378  * Hardware will take Vernier measurements on Tx or Rx of packets.
2379  */
2380 int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
2381 {
2382 	u32 lo, hi, val;
2383 	u64 incval;
2384 	u8 tmr_idx;
2385 	int err;
2386 
2387 	tmr_idx = ice_get_ptp_src_clock_index(hw);
2388 
2389 	err = ice_stop_phy_timer_e822(hw, port, false);
2390 	if (err)
2391 		return err;
2392 
2393 	ice_phy_cfg_lane_e822(hw, port);
2394 
2395 	err = ice_phy_cfg_uix_e822(hw, port);
2396 	if (err)
2397 		return err;
2398 
2399 	err = ice_phy_cfg_parpcs_e822(hw, port);
2400 	if (err)
2401 		return err;
2402 
2403 	lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
2404 	hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
2405 	incval = (u64)hi << 32 | lo;
2406 
2407 	err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
2408 	if (err)
2409 		return err;
2410 
2411 	err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2412 	if (err)
2413 		return err;
2414 
2415 	/* Do not perform any action on the main timer */
2416 	ice_ptp_src_cmd(hw, ICE_PTP_NOP);
2417 
2418 	ice_ptp_exec_tmr_cmd(hw);
2419 
2420 	err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
2421 	if (err)
2422 		return err;
2423 
2424 	val |= P_REG_PS_SFT_RESET_M;
2425 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2426 	if (err)
2427 		return err;
2428 
2429 	val |= P_REG_PS_START_M;
2430 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2431 	if (err)
2432 		return err;
2433 
2434 	val &= ~P_REG_PS_SFT_RESET_M;
2435 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2436 	if (err)
2437 		return err;
2438 
2439 	err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
2440 	if (err)
2441 		return err;
2442 
2443 	ice_ptp_exec_tmr_cmd(hw);
2444 
2445 	val |= P_REG_PS_ENA_CLK_M;
2446 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2447 	if (err)
2448 		return err;
2449 
2450 	val |= P_REG_PS_LOAD_OFFSET_M;
2451 	err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
2452 	if (err)
2453 		return err;
2454 
2455 	ice_ptp_exec_tmr_cmd(hw);
2456 
2457 	err = ice_sync_phy_timer_e822(hw, port);
2458 	if (err)
2459 		return err;
2460 
2461 	ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
2462 
2463 	return 0;
2464 }
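
/* A hypothetical re-calibration sequence around a link change (a sketch,
 * not the driver's exact flow) would look like:
 *
 *	ice_stop_phy_timer_e822(hw, port, true);
 *	... wait for link to come back up ...
 *	ice_start_phy_timer_e822(hw, port);
 *	... then, periodically, until both succeed ...
 *	ice_phy_cfg_tx_offset_e822(hw, port);
 *	ice_phy_cfg_rx_offset_e822(hw, port);
 */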
2465 
2466 /**
2467  * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register
2468  * @hw: pointer to the HW struct
2469  * @quad: the timestamp quad to read from
2470  * @tstamp_ready: contents of the Tx memory status register
2471  *
2472  * Read the Q_REG_TX_MEMORY_STATUS register indicating which timestamps in
2473  * the PHY are ready. A set bit means the corresponding timestamp is valid and
2474  * ready to be captured from the PHY timestamp block.
2475  */
2476 static int
2477 ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
2478 {
2479 	u32 hi, lo;
2480 	int err;
2481 
2482 	err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
2483 	if (err) {
2484 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
2485 			  quad, err);
2486 		return err;
2487 	}
2488 
2489 	err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
2490 	if (err) {
2491 		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
2492 			  quad, err);
2493 		return err;
2494 	}
2495 
2496 	*tstamp_ready = (u64)hi << 32 | (u64)lo;
2497 
2498 	return 0;
2499 }
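
/* The returned bitmap is typically walked bit by bit. A minimal sketch,
 * assuming a 64-bit build so the value fits an unsigned long for the
 * bitmap helpers:
 *
 *	unsigned long ready = tstamp_ready;
 *	u8 idx;
 *
 *	for_each_set_bit(idx, &ready, 64)
 *		read and process the timestamp at index idx;
 */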
2500 
2501 /* E810 functions
2502  *
2503  * The following functions operate on the E810 series devices which use
2504  * a separate external PHY.
2505  */
2506 
2507 /**
2508  * ice_read_phy_reg_e810 - Read register from external PHY on E810
2509  * @hw: pointer to the HW struct
2510  * @addr: the address to read from
2511  * @val: On return, the value read from the PHY
2512  *
2513  * Read a register from the external PHY on the E810 device.
2514  */
2515 static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
2516 {
2517 	struct ice_sbq_msg_input msg = {0};
2518 	int err;
2519 
2520 	msg.msg_addr_low = lower_16_bits(addr);
2521 	msg.msg_addr_high = upper_16_bits(addr);
2522 	msg.opcode = ice_sbq_msg_rd;
2523 	msg.dest_dev = rmn_0;
2524 
2525 	err = ice_sbq_rw_reg(hw, &msg);
2526 	if (err) {
2527 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2528 			  err);
2529 		return err;
2530 	}
2531 
2532 	*val = msg.data;
2533 
2534 	return 0;
2535 }
2536 
2537 /**
2538  * ice_write_phy_reg_e810 - Write register on external PHY on E810
2539  * @hw: pointer to the HW struct
2540  * @addr: the address to write to
2541  * @val: the value to write to the PHY
2542  *
2543  * Write a value to a register of the external PHY on the E810 device.
2544  */
2545 static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
2546 {
2547 	struct ice_sbq_msg_input msg = {0};
2548 	int err;
2549 
2550 	msg.msg_addr_low = lower_16_bits(addr);
2551 	msg.msg_addr_high = upper_16_bits(addr);
2552 	msg.opcode = ice_sbq_msg_wr;
2553 	msg.dest_dev = rmn_0;
2554 	msg.data = val;
2555 
2556 	err = ice_sbq_rw_reg(hw, &msg);
2557 	if (err) {
2558 		ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
2559 			  err);
2560 		return err;
2561 	}
2562 
2563 	return 0;
2564 }
2565 
2566 /**
2567  * ice_read_phy_tstamp_ll_e810 - Read PHY timestamp registers through the FW
2568  * @hw: pointer to the HW struct
2569  * @idx: the timestamp index to read
2570  * @hi: 8 bit timestamp high value
2571  * @lo: 32 bit timestamp low value
2572  *
2573  * Read an 8 bit timestamp high value and a 32 bit timestamp low value out of
2574  * timestamp block of the external PHY on the E810 device using the low latency
2575  * timestamp read.
2576  */
2577 static int
2578 ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
2579 {
2580 	u32 val;
2581 	u8 i;
2582 
2583 	/* Write TS index to read to the PF register so the FW can read it */
2584 	val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
2585 	wr32(hw, PF_SB_ATQBAL, val);
2586 
2587 	/* Read the register repeatedly until the FW provides us the TS */
2588 	for (i = TS_LL_READ_RETRIES; i > 0; i--) {
2589 		val = rd32(hw, PF_SB_ATQBAL);
2590 
2591 		/* When the bit is cleared, the TS is ready in the register */
2592 		if (!(FIELD_GET(TS_LL_READ_TS, val))) {
2593 			/* High 8 bit value of the TS is in bits 16:23 */
2594 			*hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
2595 
2596 			/* Read the low 32 bit value and set the TS valid bit */
2597 			*lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
2598 			return 0;
2599 		}
2600 
2601 		udelay(10);
2602 	}
2603 
2604 	/* FW failed to provide the TS in time */
2605 	ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
2606 	return -EINVAL;
2607 }
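
/* In outline, the low latency read is a mailbox handshake with firmware
 * (a descriptive sketch of the code above, not a separate API):
 *
 *	write the index and TS_LL_READ_TS to PF_SB_ATQBAL
 *	poll PF_SB_ATQBAL until TS_LL_READ_TS clears
 *	hi = bits 16:23 of PF_SB_ATQBAL
 *	lo = PF_SB_ATQBAH, with TS_VALID set by software
 *
 * bounded by at most TS_LL_READ_RETRIES polls of 10 microseconds each.
 */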
2608 
2609 /**
2610  * ice_read_phy_tstamp_sbq_e810 - Read PHY timestamp registers through the sbq
2611  * @hw: pointer to the HW struct
2612  * @lport: the lport to read from
2613  * @idx: the timestamp index to read
2614  * @hi: 8 bit timestamp high value
2615  * @lo: 32 bit timestamp low value
2616  *
2617  * Read an 8 bit timestamp high value and a 32 bit timestamp low value out of
2618  * timestamp block of the external PHY on the E810 device using sideband queue.
2619  */
2620 static int
2621 ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
2622 			     u32 *lo)
2623 {
2624 	u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2625 	u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2626 	u32 lo_val, hi_val;
2627 	int err;
2628 
2629 	err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
2630 	if (err) {
2631 		ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
2632 			  err);
2633 		return err;
2634 	}
2635 
2636 	err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
2637 	if (err) {
2638 		ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
2639 			  err);
2640 		return err;
2641 	}
2642 
2643 	*lo = lo_val;
2644 	*hi = (u8)hi_val;
2645 
2646 	return 0;
2647 }
2648 
2649 /**
2650  * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
2651  * @hw: pointer to the HW struct
2652  * @lport: the lport to read from
2653  * @idx: the timestamp index to read
2654  * @tstamp: on return, the 40bit timestamp value
2655  *
2656  * Read a 40bit timestamp value out of the timestamp block of the external PHY
2657  * on the E810 device.
2658  */
2659 static int
2660 ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
2661 {
2662 	u32 lo = 0;
2663 	u8 hi = 0;
2664 	int err;
2665 
2666 	if (hw->dev_caps.ts_dev_info.ts_ll_read)
2667 		err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
2668 	else
2669 		err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
2670 
2671 	if (err)
2672 		return err;
2673 
2674 	/* For E810 devices, the timestamp is reported with the lower 32 bits
2675 	 * in the low register, and the upper 8 bits in the high register.
2676 	 */
2677 	*tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
2678 
2679 	return 0;
2680 }
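
/* For example (purely illustrative values), hi = 0x12 and lo = 0x34567890
 * combine into the 40 bit timestamp 0x1234567890, with the 8 high bits
 * shifted above the 32 bit low word.
 */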
2681 
2682 /**
2683  * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
2684  * @hw: pointer to the HW struct
2685  * @lport: the lport to read from
2686  * @idx: the timestamp index to reset
2687  *
2688  * Clear a timestamp, resetting its valid bit, from the timestamp block of the
2689  * external PHY on the E810 device.
2690  */
2691 static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
2692 {
2693 	u32 lo_addr, hi_addr;
2694 	int err;
2695 
2696 	lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
2697 	hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
2698 
2699 	err = ice_write_phy_reg_e810(hw, lo_addr, 0);
2700 	if (err) {
2701 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
2702 			  err);
2703 		return err;
2704 	}
2705 
2706 	err = ice_write_phy_reg_e810(hw, hi_addr, 0);
2707 	if (err) {
2708 		ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
2709 			  err);
2710 		return err;
2711 	}
2712 
2713 	return 0;
2714 }
2715 
2716 /**
2717  * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
2718  * @hw: pointer to HW struct
2719  *
2720  * Enable the timesync PTP functionality for the external PHY connected to
2721  * this function.
2722  */
2723 int ice_ptp_init_phy_e810(struct ice_hw *hw)
2724 {
2725 	u8 tmr_idx;
2726 	int err;
2727 
2728 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2729 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
2730 				     GLTSYN_ENA_TSYN_ENA_M);
2731 	if (err)
2732 		ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
2733 			  err);
2734 
2735 	return err;
2736 }
2737 
2738 /**
2739  * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
2740  * @hw: pointer to HW struct
2741  *
2742  * Perform E810-specific PTP hardware clock initialization steps.
2743  */
2744 static int ice_ptp_init_phc_e810(struct ice_hw *hw)
2745 {
2746 	/* Ensure synchronization delay is zero */
2747 	wr32(hw, GLTSYN_SYNC_DLAY, 0);
2748 
2749 	/* Initialize the PHY */
2750 	return ice_ptp_init_phy_e810(hw);
2751 }
2752 
2753 /**
2754  * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
2755  * @hw: Board private structure
2756  * @time: Time to initialize the PHY port clock to
2757  *
2758  * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting
2759  * initial clock time. The time will not actually be programmed until the
2760  * driver issues an INIT_TIME command.
2761  *
2762  * The time value is the upper 32 bits of the PHY timer, usually in units of
2763  * nominal nanoseconds.
2764  */
2765 static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
2766 {
2767 	u8 tmr_idx;
2768 	int err;
2769 
2770 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2771 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
2772 	if (err) {
2773 		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
2774 			  err);
2775 		return err;
2776 	}
2777 
2778 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
2779 	if (err) {
2780 		ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
2781 			  err);
2782 		return err;
2783 	}
2784 
2785 	return 0;
2786 }
2787 
2788 /**
2789  * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
2790  * @hw: pointer to HW struct
2791  * @adj: adjustment value to program
2792  *
2793  * Prepare the PHY port for an atomic adjustment by programming the PHY
2794  * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
2795  * is completed by issuing an ADJ_TIME sync command.
2796  *
2797  * The adjustment value only contains the portion used for the upper 32bits of
2798  * the PHY timer, usually in units of nominal nanoseconds. Negative
2799  * adjustments are supported using 2s complement arithmetic.
2800  */
2801 static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
2802 {
2803 	u8 tmr_idx;
2804 	int err;
2805 
2806 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2807 
2808 	/* Adjustments are represented as signed 2's complement values in
2809 	 * nanoseconds. Sub-nanosecond adjustment is not supported.
2810 	 */
2811 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
2812 	if (err) {
2813 		ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
2814 			  err);
2815 		return err;
2816 	}
2817 
2818 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
2819 	if (err) {
2820 		ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
2821 			  err);
2822 		return err;
2823 	}
2824 
2825 	return 0;
2826 }
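
/* Negative adjustments rely on the 2s complement interpretation. For
 * example, preparing an adjustment of -1000 ns writes 0xFFFFFC18 to
 * ETH_GLTSYN_SHADJ_H, since (u32)-1000 == 0xFFFFFC18.
 */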
2827 
2828 /**
2829  * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
2830  * @hw: pointer to HW struct
2831  * @incval: The new 40bit increment value to prepare
2832  *
2833  * Prepare the PHY port for a new increment value by programming the PHY
2834  * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
2835  * completed by issuing an INIT_INCVAL command.
2836  */
2837 static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
2838 {
2839 	u32 high, low;
2840 	u8 tmr_idx;
2841 	int err;
2842 
2843 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2844 	low = lower_32_bits(incval);
2845 	high = upper_32_bits(incval);
2846 
2847 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
2848 	if (err) {
2849 		ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
2850 			  err);
2851 		return err;
2852 	}
2853 
2854 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
2855 	if (err) {
2856 		ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n",
2857 			  err);
2858 		return err;
2859 	}
2860 
2861 	return 0;
2862 }
2863 
2864 /**
2865  * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
2866  * @hw: pointer to HW struct
2867  * @cmd: Command to be sent to the port
2868  *
2869  * Prepare the external PHYs connected to this device for a timer sync
2870  * command.
2871  */
2872 static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
2873 {
2874 	u32 cmd_val, val;
2875 	int err;
2876 
2877 	switch (cmd) {
2878 	case INIT_TIME:
2879 		cmd_val = GLTSYN_CMD_INIT_TIME;
2880 		break;
2881 	case INIT_INCVAL:
2882 		cmd_val = GLTSYN_CMD_INIT_INCVAL;
2883 		break;
2884 	case ADJ_TIME:
2885 		cmd_val = GLTSYN_CMD_ADJ_TIME;
2886 		break;
2887 	case READ_TIME:
2888 		cmd_val = GLTSYN_CMD_READ_TIME;
2889 		break;
2890 	case ADJ_TIME_AT_TIME:
2891 		cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
2892 		break;
2893 	case ICE_PTP_NOP:
2894 		return 0;
2895 	}
2896 
2897 	/* Read, modify, write */
2898 	err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
2899 	if (err) {
2900 		ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
2901 		return err;
2902 	}
2903 
2904 	/* Modify necessary bits only and perform write */
2905 	val &= ~TS_CMD_MASK_E810;
2906 	val |= cmd_val;
2907 
2908 	err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
2909 	if (err) {
2910 		ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
2911 		return err;
2912 	}
2913 
2914 	return 0;
2915 }
2916 
2917 /**
2918  * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
2919  * @hw: pointer to the HW struct
2920  * @port: the PHY port to read
2921  * @tstamp_ready: contents of the Tx memory status register
2922  *
2923  * E810 devices do not use a Tx memory status register. Instead, simply
2924  * indicate that all timestamps are currently ready.
2925  */
2926 static int
2927 ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
2928 {
2929 	*tstamp_ready = 0xFFFFFFFFFFFFFFFF;
2930 	return 0;
2931 }
2932 
2933 /* E810T SMA functions
2934  *
2935  * The following functions operate specifically on E810T hardware and are used
2936  * to access the extended GPIOs available.
2937  */
2938 
2939 /**
2940  * ice_get_pca9575_handle - find the netlist handle of the PCA9575 IO expander
2941  * @hw: pointer to the hw struct
2942  * @pca9575_handle: GPIO controller's handle
2943  *
2944  * Find and return the GPIO controller's handle in the netlist.
2945  * Once found, the handle is cached in the hw structure and subsequent calls
2946  * will return the cached value.
2947  */
2948 static int
2949 ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
2950 {
2951 	struct ice_aqc_get_link_topo *cmd;
2952 	struct ice_aq_desc desc;
2953 	int status;
2954 	u8 idx;
2955 
2956 	/* If handle was read previously return cached value */
2957 	if (hw->io_expander_handle) {
2958 		*pca9575_handle = hw->io_expander_handle;
2959 		return 0;
2960 	}
2961 
2962 	/* If handle was not detected read it from the netlist */
2963 	cmd = &desc.params.get_link_topo;
2964 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
2965 
2966 	/* Set node type to GPIO controller */
2967 	cmd->addr.topo_params.node_type_ctx =
2968 		(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
2969 		 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
2970 
2971 #define SW_PCA9575_SFP_TOPO_IDX		2
2972 #define SW_PCA9575_QSFP_TOPO_IDX	1
2973 
2974 	/* Check if the SW IO expander controlling SMA exists in the netlist. */
2975 	if (hw->device_id == ICE_DEV_ID_E810C_SFP)
2976 		idx = SW_PCA9575_SFP_TOPO_IDX;
2977 	else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
2978 		idx = SW_PCA9575_QSFP_TOPO_IDX;
2979 	else
2980 		return -EOPNOTSUPP;
2981 
2982 	cmd->addr.topo_params.index = idx;
2983 
2984 	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2985 	if (status)
2986 		return -EOPNOTSUPP;
2987 
2988 	/* Verify if we found the right IO expander type */
2989 	if (desc.params.get_link_topo.node_part_num !=
2990 		ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
2991 		return -EOPNOTSUPP;
2992 
2993 	/* If present save the handle and return it */
2994 	hw->io_expander_handle =
2995 		le16_to_cpu(desc.params.get_link_topo.addr.handle);
2996 	*pca9575_handle = hw->io_expander_handle;
2997 
2998 	return 0;
2999 }
3000 
3001 /**
3002  * ice_read_sma_ctrl_e810t - read the SMA controller state
3003  * @hw: pointer to the hw struct
3004  * @data: pointer to data to be read from the GPIO controller
3005  *
3006  * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
3007  * PCA9575 expander, so only bits 3-7 in data are valid.
3008  */
3009 int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
3010 {
3011 	int status;
3012 	u16 handle;
3013 	u8 i;
3014 
3015 	status = ice_get_pca9575_handle(hw, &handle);
3016 	if (status)
3017 		return status;
3018 
3019 	*data = 0;
3020 
3021 	for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3022 		bool pin;
3023 
3024 		status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3025 					 &pin, NULL);
3026 		if (status)
3027 			break;
3028 		*data |= (u8)(!pin) << i;
3029 	}
3030 
3031 	return status;
3032 }
3033 
3034 /**
3035  * ice_write_sma_ctrl_e810t - write the SMA controller state
3036  * @hw: pointer to the hw struct
3037  * @data: data to be written to the GPIO controller
3038  *
3039  * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
3040  * of the PCA9575 expander, so only bits 3-7 in data are valid.
3041  */
3042 int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
3043 {
3044 	int status;
3045 	u16 handle;
3046 	u8 i;
3047 
3048 	status = ice_get_pca9575_handle(hw, &handle);
3049 	if (status)
3050 		return status;
3051 
3052 	for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
3053 		bool pin;
3054 
3055 		pin = !(data & (1 << i));
3056 		status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
3057 					 pin, NULL);
3058 		if (status)
3059 			break;
3060 	}
3061 
3062 	return status;
3063 }
3064 
3065 /**
3066  * ice_read_pca9575_reg_e810t - read a register from the PCA9575
3067  * @hw: pointer to the hw struct
3068  * @offset: GPIO controller register offset
3069  * @data: pointer to data to be read from the GPIO controller
3070  *
3071  * Read the register from the GPIO controller
3072  */
3073 int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
3074 {
3075 	struct ice_aqc_link_topo_addr link_topo;
3076 	__le16 addr;
3077 	u16 handle;
3078 	int err;
3079 
3080 	memset(&link_topo, 0, sizeof(link_topo));
3081 
3082 	err = ice_get_pca9575_handle(hw, &handle);
3083 	if (err)
3084 		return err;
3085 
3086 	link_topo.handle = cpu_to_le16(handle);
3087 	link_topo.topo_params.node_type_ctx =
3088 		FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
3089 			   ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
3090 
3091 	addr = cpu_to_le16((u16)offset);
3092 
3093 	return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
3094 }
3095 
3096 /* Device agnostic functions
3097  *
3098  * The following functions implement shared behavior common to both E822 and
3099  * E810 devices, possibly calling a device specific implementation where
3100  * necessary.
3101  */
3102 
3103 /**
3104  * ice_ptp_lock - Acquire PTP global semaphore register lock
3105  * @hw: pointer to the HW struct
3106  *
3107  * Acquire the global PTP hardware semaphore lock. Returns true if the lock
3108  * was acquired, false otherwise.
3109  *
3110  * The PFTSYN_SEM register sets the busy bit on read, returning the previous
3111  * value. If software sees the busy bit cleared, this means that this function
3112  * acquired the lock (and the busy bit is now set). If software sees the busy
3113  * bit set, it means that another function acquired the lock.
3114  *
3115  * Software must clear the busy bit with a write to release the lock for other
3116  * functions when done.
3117  */
3118 bool ice_ptp_lock(struct ice_hw *hw)
3119 {
3120 	u32 hw_lock;
3121 	int i;
3122 
3123 #define MAX_TRIES 15
3124 
3125 	for (i = 0; i < MAX_TRIES; i++) {
3126 		hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
3127 		hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
3128 		if (hw_lock) {
3129 			/* Somebody is holding the lock */
3130 			usleep_range(5000, 6000);
3131 			continue;
3132 		}
3133 
3134 		break;
3135 	}
3136 
3137 	return !hw_lock;
3138 }
3139 
3140 /**
3141  * ice_ptp_unlock - Release PTP global semaphore register lock
3142  * @hw: pointer to the HW struct
3143  *
3144  * Release the global PTP hardware semaphore lock. This is done by writing to
3145  * the PFTSYN_SEM register.
3146  */
3147 void ice_ptp_unlock(struct ice_hw *hw)
3148 {
3149 	wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
3150 }
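
/* The lock and unlock pair serializes access to the shadow registers
 * across PFs. A minimal usage sketch, mirroring the pattern of
 * ice_ptp_write_incval_locked() below (prepare_shadow_registers() is a
 * placeholder, not a real function):
 *
 *	if (!ice_ptp_lock(hw))
 *		return -EBUSY;
 *	err = prepare_shadow_registers(hw);
 *	ice_ptp_unlock(hw);
 *	return err;
 */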
3151 
3152 /**
3153  * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
3154  * @hw: pointer to HW struct
3155  * @cmd: the command to issue
3156  *
3157  * Prepare the source timer and PHY timers and then trigger the requested
3158  * command. This causes the shadow registers previously written in preparation
3159  * for the command to be synchronously applied to both the source and PHY
3160  * timers.
3161  */
3162 static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
3163 {
3164 	int err;
3165 
3166 	/* First, prepare the source timer */
3167 	ice_ptp_src_cmd(hw, cmd);
3168 
3169 	/* Next, prepare the ports */
3170 	if (ice_is_e810(hw))
3171 		err = ice_ptp_port_cmd_e810(hw, cmd);
3172 	else
3173 		err = ice_ptp_port_cmd_e822(hw, cmd);
3174 	if (err) {
3175 		ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
3176 			  cmd, err);
3177 		return err;
3178 	}
3179 
3180 	/* Write the sync command register to drive both source and PHY timer
3181 	 * commands synchronously
3182 	 */
3183 	ice_ptp_exec_tmr_cmd(hw);
3184 
3185 	return 0;
3186 }
3187 
3188 /**
3189  * ice_ptp_init_time - Initialize device time to provided value
3190  * @hw: pointer to HW struct
3191  * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
3192  *
3193  * Initialize the device to the specified time provided. This requires a three
3194  * step process:
3195  *
3196  * 1) write the new init time to the source timer shadow registers
3197  * 2) write the new init time to the PHY timer shadow registers
3198  * 3) issue an init_time timer command to synchronously switch both the source
3199  *    and port timers to the new init time value at the next clock cycle.
3200  */
3201 int ice_ptp_init_time(struct ice_hw *hw, u64 time)
3202 {
3203 	u8 tmr_idx;
3204 	int err;
3205 
3206 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3207 
3208 	/* Source timers */
3209 	wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
3210 	wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
3211 	wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
3212 
3213 	/* PHY timers */
3214 	/* Fill Rx and Tx ports and send msg to PHY */
3215 	if (ice_is_e810(hw))
3216 		err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
3217 	else
3218 		err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
3219 	if (err)
3220 		return err;
3221 
3222 	return ice_ptp_tmr_cmd(hw, INIT_TIME);
3223 }
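
/* A caller initializing the clock to current wall time might do, for
 * example (a sketch; the driver's own init path typically holds the PTP
 * semaphore around this):
 *
 *	struct timespec64 ts;
 *
 *	ktime_get_real_ts64(&ts);
 *	err = ice_ptp_init_time(hw, timespec64_to_ns(&ts));
 */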
3224 
3225 /**
3226  * ice_ptp_write_incval - Program PHC with new increment value
3227  * @hw: pointer to HW struct
3228  * @incval: Source timer increment value per clock cycle
3229  *
3230  * Program the PHC with a new increment value. This requires a three-step
3231  * process:
3232  *
3233  * 1) Write the increment value to the source timer shadow registers
3234  * 2) Write the increment value to the PHY timer shadow registers
3235  * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
3236  *    source and port timers to the new increment value at the next clock
3237  *    cycle.
3238  */
3239 int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
3240 {
3241 	u8 tmr_idx;
3242 	int err;
3243 
3244 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3245 
3246 	/* Shadow Adjust */
3247 	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
3248 	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
3249 
3250 	if (ice_is_e810(hw))
3251 		err = ice_ptp_prep_phy_incval_e810(hw, incval);
3252 	else
3253 		err = ice_ptp_prep_phy_incval_e822(hw, incval);
3254 	if (err)
3255 		return err;
3256 
3257 	return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
3258 }
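
/* Frequency adjustment builds on this: scaling the nominal increment value
 * by a parts-per-billion delta changes the effective clock rate. As a
 * symbolic example (not tied to a specific TIME_REF):
 *
 *	new_incval = base_incval + base_incval * ppb / 1000000000
 *
 * so a +10 ppb request speeds the clock up by one part in 10^8.
 */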
3259 
3260 /**
3261  * ice_ptp_write_incval_locked - Program new incval while holding semaphore
3262  * @hw: pointer to HW struct
3263  * @incval: Source timer increment value per clock cycle
3264  *
3265  * Program a new PHC incval while holding the PTP semaphore.
3266  */
3267 int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
3268 {
3269 	int err;
3270 
3271 	if (!ice_ptp_lock(hw))
3272 		return -EBUSY;
3273 
3274 	err = ice_ptp_write_incval(hw, incval);
3275 
3276 	ice_ptp_unlock(hw);
3277 
3278 	return err;
3279 }
3280 
3281 /**
3282  * ice_ptp_adj_clock - Adjust PHC clock time atomically
3283  * @hw: pointer to HW struct
3284  * @adj: Adjustment in nanoseconds
3285  *
3286  * Perform an atomic adjustment of the PHC time by the specified number of
3287  * nanoseconds. This requires a three-step process:
3288  *
3289  * 1) Write the adjustment to the source timer shadow registers
3290  * 2) Write the adjustment to the PHY timer shadow registers
3291  * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
3292  *    both the source and port timers at the next clock cycle.
3293  */
3294 int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
3295 {
3296 	u8 tmr_idx;
3297 	int err;
3298 
3299 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3300 
3301 	/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
3302 	 * For an ADJ_TIME command, this set of registers represents the value
3303 	 * to add to the clock time. It supports subtraction by interpreting
3304 	 * the value as a 2's complement integer.
3305 	 */
3306 	wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
3307 	wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
3308 
3309 	if (ice_is_e810(hw))
3310 		err = ice_ptp_prep_phy_adj_e810(hw, adj);
3311 	else
3312 		err = ice_ptp_prep_phy_adj_e822(hw, adj);
3313 	if (err)
3314 		return err;
3315 
3316 	return ice_ptp_tmr_cmd(hw, ADJ_TIME);
3317 }
3318 
3319 /**
3320  * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
3321  * @hw: pointer to the HW struct
3322  * @block: the block to read from
3323  * @idx: the timestamp index to read
3324  * @tstamp: on return, the 40bit timestamp value
3325  *
3326  * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
3327  * the block is the quad to read from. For E810 devices, the block is the
3328  * logical port to read from.
3329  */
3330 int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
3331 {
3332 	if (ice_is_e810(hw))
3333 		return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
3334 	else
3335 		return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
3336 }
3337 
3338 /**
3339  * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
3340  * @hw: pointer to the HW struct
3341  * @block: the block to read from
3342  * @idx: the timestamp index to reset
3343  *
3344  * Clear a timestamp, resetting its valid bit, from the timestamp block. For
3345  * E822 devices, the block is the quad to clear from. For E810 devices, the
3346  * block is the logical port to clear from.
3347  */
3348 int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
3349 {
3350 	if (ice_is_e810(hw))
3351 		return ice_clear_phy_tstamp_e810(hw, block, idx);
3352 	else
3353 		return ice_clear_phy_tstamp_e822(hw, block, idx);
3354 }
3355 
3356 /**
3357  * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks
3358  * @hw: pointer to the HW struct
3359  */
3360 void ice_ptp_reset_ts_memory(struct ice_hw *hw)
3361 {
3362 	if (ice_is_e810(hw))
3363 		return;
3364 
3365 	ice_ptp_reset_ts_memory_e822(hw);
3366 }
3367 
3368 /**
3369  * ice_ptp_init_phc - Initialize PTP hardware clock
3370  * @hw: pointer to the HW struct
3371  *
3372  * Perform the steps required to initialize the PTP hardware clock.
3373  */
3374 int ice_ptp_init_phc(struct ice_hw *hw)
3375 {
3376 	u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3377 
3378 	/* Enable source clocks */
3379 	wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
3380 
3381 	/* Clear event err indications for auxiliary pins */
3382 	(void)rd32(hw, GLTSYN_STAT(src_idx));
3383 
3384 	if (ice_is_e810(hw))
3385 		return ice_ptp_init_phc_e810(hw);
3386 	else
3387 		return ice_ptp_init_phc_e822(hw);
3388 }
3389 
3390 /**
3391  * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication
3392  * @hw: pointer to the HW struct
3393  * @block: the timestamp block to check
3394  * @tstamp_ready: storage for the PHY Tx memory status information
3395  *
3396  * Check the PHY for Tx timestamp memory status. This reports a 64 bit value
3397  * which indicates which timestamps in the block may be captured. A set bit
3398  * means the timestamp can be read. An unset bit means the timestamp is not
3399  * ready and software should avoid reading the register.
3400  */
3401 int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
3402 {
3403 	if (ice_is_e810(hw))
3404 		return ice_get_phy_tx_tstamp_ready_e810(hw, block,
3405 							tstamp_ready);
3406 	else
3407 		return ice_get_phy_tx_tstamp_ready_e822(hw, block,
3408 							tstamp_ready);
3409 }
3410