1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_lib.h"
6 
7 #define E810_OUT_PROP_DELAY_NS 1
8 
9 static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
10 	/* name    idx   func         chan */
11 	{ "GNSS",  GNSS, PTP_PF_EXTTS, 0, { 0, } },
12 	{ "SMA1",  SMA1, PTP_PF_NONE, 1, { 0, } },
13 	{ "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
14 	{ "SMA2",  SMA2, PTP_PF_NONE, 2, { 0, } },
15 	{ "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
16 };
17 
18 /**
19  * ice_get_sma_config_e810t - Get the configuration of the SMA control logic
20  * @hw: pointer to the hw struct
21  * @ptp_pins: pointer to the ptp_pin_desc structure
22  *
23  * Read the configuration of the SMA control logic and put it into the
24  * ptp_pin_desc structure
25  */
26 static int
27 ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
28 {
29 	u8 data, i;
30 	int status;
31 
32 	/* Read initial pin state */
33 	status = ice_read_sma_ctrl_e810t(hw, &data);
34 	if (status)
35 		return status;
36 
37 	/* initialize with defaults */
38 	for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
39 		snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
40 			 "%s", ice_pin_desc_e810t[i].name);
41 		ptp_pins[i].index = ice_pin_desc_e810t[i].index;
42 		ptp_pins[i].func = ice_pin_desc_e810t[i].func;
43 		ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
44 	}
45 
46 	/* Parse SMA1/UFL1 */
47 	switch (data & ICE_SMA1_MASK_E810T) {
48 	case ICE_SMA1_MASK_E810T:
49 	default:
50 		ptp_pins[SMA1].func = PTP_PF_NONE;
51 		ptp_pins[UFL1].func = PTP_PF_NONE;
52 		break;
53 	case ICE_SMA1_DIR_EN_E810T:
54 		ptp_pins[SMA1].func = PTP_PF_PEROUT;
55 		ptp_pins[UFL1].func = PTP_PF_NONE;
56 		break;
57 	case ICE_SMA1_TX_EN_E810T:
58 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
59 		ptp_pins[UFL1].func = PTP_PF_NONE;
60 		break;
61 	case 0:
62 		ptp_pins[SMA1].func = PTP_PF_EXTTS;
63 		ptp_pins[UFL1].func = PTP_PF_PEROUT;
64 		break;
65 	}
66 
67 	/* Parse SMA2/UFL2 */
68 	switch (data & ICE_SMA2_MASK_E810T) {
69 	case ICE_SMA2_MASK_E810T:
70 	default:
71 		ptp_pins[SMA2].func = PTP_PF_NONE;
72 		ptp_pins[UFL2].func = PTP_PF_NONE;
73 		break;
74 	case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
75 		ptp_pins[SMA2].func = PTP_PF_EXTTS;
76 		ptp_pins[UFL2].func = PTP_PF_NONE;
77 		break;
78 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
79 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
80 		ptp_pins[UFL2].func = PTP_PF_NONE;
81 		break;
82 	case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
83 		ptp_pins[SMA2].func = PTP_PF_NONE;
84 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
85 		break;
86 	case ICE_SMA2_DIR_EN_E810T:
87 		ptp_pins[SMA2].func = PTP_PF_PEROUT;
88 		ptp_pins[UFL2].func = PTP_PF_EXTTS;
89 		break;
90 	}
91 
92 	return 0;
93 }
94 
95 /**
96  * ice_ptp_set_sma_config_e810t - Set the configuration of the SMA control logic
97  * @hw: pointer to the hw struct
98  * @ptp_pins: pointer to the ptp_pin_desc structure
99  *
100  * Set the configuration of the SMA control logic based on the configuration
101  * in the ptp_pins parameter
102  */
103 static int
104 ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
105 			     const struct ptp_pin_desc *ptp_pins)
106 {
107 	int status;
108 	u8 data;
109 
110 	/* SMA1 and UFL1 cannot be set to TX at the same time */
111 	if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
112 	    ptp_pins[UFL1].func == PTP_PF_PEROUT)
113 		return -EINVAL;
114 
115 	/* SMA2 and UFL2 cannot be set to RX at the same time */
116 	if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
117 	    ptp_pins[UFL2].func == PTP_PF_EXTTS)
118 		return -EINVAL;
119 
120 	/* Read initial pin state value */
121 	status = ice_read_sma_ctrl_e810t(hw, &data);
122 	if (status)
123 		return status;
124 
125 	/* Set the right state based on the desired configuration */
126 	data &= ~ICE_SMA1_MASK_E810T;
127 	if (ptp_pins[SMA1].func == PTP_PF_NONE &&
128 	    ptp_pins[UFL1].func == PTP_PF_NONE) {
129 		dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
130 		data |= ICE_SMA1_MASK_E810T;
131 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
132 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
133 		dev_info(ice_hw_to_dev(hw), "SMA1 RX");
134 		data |= ICE_SMA1_TX_EN_E810T;
135 	} else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
136 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
137 		/* U.FL 1 TX will always enable SMA 1 RX */
138 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
139 	} else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
140 		   ptp_pins[UFL1].func == PTP_PF_PEROUT) {
141 		dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
142 	} else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
143 		   ptp_pins[UFL1].func == PTP_PF_NONE) {
144 		dev_info(ice_hw_to_dev(hw), "SMA1 TX");
145 		data |= ICE_SMA1_DIR_EN_E810T;
146 	}
147 
148 	data &= ~ICE_SMA2_MASK_E810T;
149 	if (ptp_pins[SMA2].func == PTP_PF_NONE &&
150 	    ptp_pins[UFL2].func == PTP_PF_NONE) {
151 		dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
152 		data |= ICE_SMA2_MASK_E810T;
153 	} else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
154 		   ptp_pins[UFL2].func == PTP_PF_NONE) {
155 		dev_info(ice_hw_to_dev(hw), "SMA2 RX");
156 		data |= (ICE_SMA2_TX_EN_E810T |
157 			 ICE_SMA2_UFL2_RX_DIS_E810T);
158 	} else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
159 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
160 		dev_info(ice_hw_to_dev(hw), "UFL2 RX");
161 		data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
162 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
163 		   ptp_pins[UFL2].func == PTP_PF_NONE) {
164 		dev_info(ice_hw_to_dev(hw), "SMA2 TX");
165 		data |= (ICE_SMA2_DIR_EN_E810T |
166 			 ICE_SMA2_UFL2_RX_DIS_E810T);
167 	} else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
168 		   ptp_pins[UFL2].func == PTP_PF_EXTTS) {
169 		dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
170 		data |= ICE_SMA2_DIR_EN_E810T;
171 	}
172 
173 	return ice_write_sma_ctrl_e810t(hw, data);
174 }
175 
176 /**
177  * ice_ptp_set_sma_e810t - Set the configuration of a single SMA pin
178  * @info: the driver's PTP info structure
179  * @pin: pin index in kernel structure
180  * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
181  *
182  * Set the configuration of a single SMA pin
183  */
184 static int
185 ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
186 		      enum ptp_pin_function func)
187 {
188 	struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
189 	struct ice_pf *pf = ptp_info_to_pf(info);
190 	struct ice_hw *hw = &pf->hw;
191 	int err;
192 
193 	if (pin < SMA1 || func > PTP_PF_PEROUT)
194 		return -EOPNOTSUPP;
195 
196 	err = ice_get_sma_config_e810t(hw, ptp_pins);
197 	if (err)
198 		return err;
199 
200 	/* Disable the same function on the other pin sharing the channel */
201 	if (pin == SMA1 && ptp_pins[UFL1].func == func)
202 		ptp_pins[UFL1].func = PTP_PF_NONE;
203 	if (pin == UFL1 && ptp_pins[SMA1].func == func)
204 		ptp_pins[SMA1].func = PTP_PF_NONE;
205 
206 	if (pin == SMA2 && ptp_pins[UFL2].func == func)
207 		ptp_pins[UFL2].func = PTP_PF_NONE;
208 	if (pin == UFL2 && ptp_pins[SMA2].func == func)
209 		ptp_pins[SMA2].func = PTP_PF_NONE;
210 
211 	/* Set up new pin function in the temp table */
212 	ptp_pins[pin].func = func;
213 
214 	return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
215 }
216 
217 /**
218  * ice_verify_pin_e810t - Verify if a pin supports the requested function
219  * @info: the driver's PTP info structure
220  * @pin: Pin index
221  * @func: Assigned function
222  * @chan: Assigned channel
223  *
224  * Verify if the pin supports the requested pin function and check pin
225  * consistency. Reconfigure the SMA logic attached to the given pin to enable
226  * its desired functionality.
227  */
228 static int
229 ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
230 		     enum ptp_pin_function func, unsigned int chan)
231 {
232 	/* Don't allow channel reassignment */
233 	if (chan != ice_pin_desc_e810t[pin].chan)
234 		return -EOPNOTSUPP;
235 
236 	/* Check if functions are properly assigned */
237 	switch (func) {
238 	case PTP_PF_NONE:
239 		break;
240 	case PTP_PF_EXTTS:
241 		if (pin == UFL1)
242 			return -EOPNOTSUPP;
243 		break;
244 	case PTP_PF_PEROUT:
245 		if (pin == UFL2 || pin == GNSS)
246 			return -EOPNOTSUPP;
247 		break;
248 	case PTP_PF_PHYSYNC:
249 		return -EOPNOTSUPP;
250 	}
251 
252 	return ice_ptp_set_sma_e810t(info, pin, func);
253 }
254 
255 /**
256  * ice_set_tx_tstamp - Enable or disable Tx timestamping
257  * @pf: The PF pointer to search in
258  * @on: bool value for whether timestamps are enabled or disabled
259  */
260 static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
261 {
262 	struct ice_vsi *vsi;
263 	u32 val;
264 	u16 i;
265 
266 	vsi = ice_get_main_vsi(pf);
267 	if (!vsi)
268 		return;
269 
270 	/* Set the timestamp enable flag for all the Tx rings */
271 	ice_for_each_txq(vsi, i) {
272 		if (!vsi->tx_rings[i])
273 			continue;
274 		vsi->tx_rings[i]->ptp_tx = on;
275 	}
276 
277 	/* Configure the Tx timestamp interrupt */
278 	val = rd32(&pf->hw, PFINT_OICR_ENA);
279 	if (on)
280 		val |= PFINT_OICR_TSYN_TX_M;
281 	else
282 		val &= ~PFINT_OICR_TSYN_TX_M;
283 	wr32(&pf->hw, PFINT_OICR_ENA, val);
284 }
285 
286 /**
287  * ice_set_rx_tstamp - Enable or disable Rx timestamping
288  * @pf: The PF pointer to search in
289  * @on: bool value for whether timestamps are enabled or disabled
290  */
291 static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
292 {
293 	struct ice_vsi *vsi;
294 	u16 i;
295 
296 	vsi = ice_get_main_vsi(pf);
297 	if (!vsi)
298 		return;
299 
300 	/* Set the timestamp flag for all the Rx rings */
301 	ice_for_each_rxq(vsi, i) {
302 		if (!vsi->rx_rings[i])
303 			continue;
304 		vsi->rx_rings[i]->ptp_rx = on;
305 	}
306 }
307 
308 /**
309  * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
310  * @pf: Board private structure
311  * @ena: bool value to enable or disable timestamping
312  *
313  * This function will configure timestamping during PTP initialization
314  * and deinitialization
315  */
316 static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
317 {
318 	ice_set_tx_tstamp(pf, ena);
319 	ice_set_rx_tstamp(pf, ena);
320 
321 	if (ena) {
322 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
323 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
324 	} else {
325 		pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
326 		pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
327 	}
328 }
329 
330 /**
331  * ice_get_ptp_clock_index - Get the PTP clock index
332  * @pf: the PF pointer
333  *
334  * Determine the clock index of the PTP clock associated with this device. If
335  * this is the PF controlling the clock, just use the local access to the
336  * clock device pointer.
337  *
338  * Otherwise, read from the driver shared parameters to determine the clock
339  * index value.
340  *
341  * Returns: the index of the PTP clock associated with this device, or -1 if
342  * there is no associated clock.
343  */
344 int ice_get_ptp_clock_index(struct ice_pf *pf)
345 {
346 	struct device *dev = ice_pf_to_dev(pf);
347 	enum ice_aqc_driver_params param_idx;
348 	struct ice_hw *hw = &pf->hw;
349 	u8 tmr_idx;
350 	u32 value;
351 	int err;
352 
353 	/* Use the ptp_clock structure if we're the main PF */
354 	if (pf->ptp.clock)
355 		return ptp_clock_index(pf->ptp.clock);
356 
357 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
358 	if (!tmr_idx)
359 		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
360 	else
361 		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
362 
363 	err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
364 	if (err) {
365 		dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
366 			err, ice_aq_str(hw->adminq.sq_last_status));
367 		return -1;
368 	}
369 
370 	/* The PTP clock index is an integer, and will be between 0 and
371 	 * INT_MAX. The highest bit of the driver shared parameter is used to
372 	 * indicate whether or not the currently stored clock index is valid.
373 	 */
374 	if (!(value & PTP_SHARED_CLK_IDX_VALID))
375 		return -1;
376 
377 	return value & ~PTP_SHARED_CLK_IDX_VALID;
378 }
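
/* Illustrative example (not part of the driver): a PF that owns the clock and
 * registered it as /dev/ptp4 stores the value (4 | PTP_SHARED_CLK_IDX_VALID)
 * in the shared driver parameter. Another PF reading that parameter sees the
 * valid bit set, masks it off, and returns 4; if the valid bit is clear, the
 * function reports -1 to indicate that no clock has been registered yet.
 */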
379 
380 /**
381  * ice_set_ptp_clock_index - Set the PTP clock index
382  * @pf: the PF pointer
383  *
384  * Set the PTP clock index for this device into the shared driver parameters,
385  * so that other PFs associated with this device can read it.
386  *
387  * If the PF is unable to store the clock index, it will log an error, but
388  * will continue operating PTP.
389  */
390 static void ice_set_ptp_clock_index(struct ice_pf *pf)
391 {
392 	struct device *dev = ice_pf_to_dev(pf);
393 	enum ice_aqc_driver_params param_idx;
394 	struct ice_hw *hw = &pf->hw;
395 	u8 tmr_idx;
396 	u32 value;
397 	int err;
398 
399 	if (!pf->ptp.clock)
400 		return;
401 
402 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
403 	if (!tmr_idx)
404 		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
405 	else
406 		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
407 
408 	value = (u32)ptp_clock_index(pf->ptp.clock);
409 	if (value > INT_MAX) {
410 		dev_err(dev, "PTP Clock index is too large to store\n");
411 		return;
412 	}
413 	value |= PTP_SHARED_CLK_IDX_VALID;
414 
415 	err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
416 	if (err) {
417 		dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
418 			err, ice_aq_str(hw->adminq.sq_last_status));
419 	}
420 }
421 
422 /**
423  * ice_clear_ptp_clock_index - Clear the PTP clock index
424  * @pf: the PF pointer
425  *
426  * Clear the PTP clock index for this device. Must be called when
427  * unregistering the PTP clock, in order to ensure other PFs stop reporting
428  * a clock object that no longer exists.
429  */
430 static void ice_clear_ptp_clock_index(struct ice_pf *pf)
431 {
432 	struct device *dev = ice_pf_to_dev(pf);
433 	enum ice_aqc_driver_params param_idx;
434 	struct ice_hw *hw = &pf->hw;
435 	u8 tmr_idx;
436 	int err;
437 
438 	/* Do not clear the index if we don't own the timer */
439 	if (!hw->func_caps.ts_func_info.src_tmr_owned)
440 		return;
441 
442 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
443 	if (!tmr_idx)
444 		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
445 	else
446 		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
447 
448 	err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
449 	if (err) {
450 		dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
451 			err, ice_aq_str(hw->adminq.sq_last_status));
452 	}
453 }
454 
455 /**
456  * ice_ptp_read_src_clk_reg - Read the source clock register
457  * @pf: Board private structure
458  * @sts: Optional parameter for holding a pair of system timestamps from
459  *       the system clock. Will be ignored if NULL is given.
460  */
461 static u64
462 ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
463 {
464 	struct ice_hw *hw = &pf->hw;
465 	u32 hi, lo, lo2;
466 	u8 tmr_idx;
467 
468 	tmr_idx = ice_get_ptp_src_clock_index(hw);
469 	/* Read the system timestamp pre PHC read */
470 	ptp_read_system_prets(sts);
471 
472 	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
473 
474 	/* Read the system timestamp post PHC read */
475 	ptp_read_system_postts(sts);
476 
477 	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
478 	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
479 
480 	if (lo2 < lo) {
481 		/* if TIME_L rolled over read TIME_L again and update
482 		 * system timestamps
483 		 */
484 		ptp_read_system_prets(sts);
485 		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
486 		ptp_read_system_postts(sts);
487 		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
488 	}
489 
490 	return ((u64)hi << 32) | lo;
491 }
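
/* Illustrative example (not part of the driver): suppose the first read of
 * GLTSYN_TIME_L returns 0xFFFFFFFE and the timer rolls over before
 * GLTSYN_TIME_H is read. The high word then reflects the post-rollover value,
 * and the second low read (lo2 = 0x00000001) is smaller than the first, so
 * both registers are re-read to avoid pairing a stale low word with a new
 * high word.
 */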
492 
493 /**
494  * ice_ptp_update_cached_phctime - Update the cached PHC time values
495  * @pf: Board specific private structure
496  *
497  * This function updates the system time values which are cached in the PF
498  * structure and the Rx rings.
499  *
500  * This function must be called periodically to ensure that the cached value
501  * is never more than 2 seconds old. It must also be called whenever the PHC
502  * time has been changed.
503  */
504 static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
505 {
506 	u64 systime;
507 	int i;
508 
509 	/* Read the current PHC time */
510 	systime = ice_ptp_read_src_clk_reg(pf, NULL);
511 
512 	/* Update the cached PHC time stored in the PF structure */
513 	WRITE_ONCE(pf->ptp.cached_phc_time, systime);
514 
515 	ice_for_each_vsi(pf, i) {
516 		struct ice_vsi *vsi = pf->vsi[i];
517 		int j;
518 
519 		if (!vsi)
520 			continue;
521 
522 		if (vsi->type != ICE_VSI_PF)
523 			continue;
524 
525 		ice_for_each_rxq(vsi, j) {
526 			if (!vsi->rx_rings[j])
527 				continue;
528 			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
529 		}
530 	}
531 }
532 
533 /**
534  * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
535  * @cached_phc_time: recently cached copy of PHC time
536  * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
537  *
538  * Hardware captures timestamps which contain only 32 bits of nominal
539  * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
540  * Note that the captured timestamp values may be 40 bits, but the lower
541  * 8 bits are sub-nanoseconds and generally discarded.
542  *
543  * Extend the 32bit nanosecond timestamp using the following algorithm and
544  * assumptions:
545  *
546  * 1) have a recently cached copy of the PHC time
547  * 2) assume that the in_tstamp was captured within 2^31 nanoseconds (~2.1
548  *    seconds) before or after the PHC time was captured.
549  * 3) calculate the delta between the cached time and the timestamp
550  * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
551  *    captured after the PHC time. In this case, the full timestamp is just
552  *    the cached PHC time plus the delta.
553  * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
554  *    timestamp was captured *before* the PHC time, i.e. because the PHC
555  *    cache was updated after the timestamp was captured by hardware. In this
556  *    case, the full timestamp is the cached time minus the inverse delta.
557  *
558  * This algorithm works even if the PHC time was updated after a Tx timestamp
559  * was requested, but before the Tx timestamp event was reported from
560  * hardware.
561  *
562  * This calculation primarily relies on keeping the cached PHC time up to
563  * date. If the timestamp was captured more than 2^31 nanoseconds after the
564  * PHC time, it is possible that the lower 32bits of PHC time have
565  * overflowed more than once, and we might generate an incorrect timestamp.
566  *
567  * This is prevented by (a) periodically updating the cached PHC time twice
568  * a second, and (b) discarding any Tx timestamp packet if it has waited for
569  * a timestamp for more than 2 seconds.
570  */
571 static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
572 {
573 	u32 delta, phc_time_lo;
574 	u64 ns;
575 
576 	/* Extract the lower 32 bits of the PHC time */
577 	phc_time_lo = (u32)cached_phc_time;
578 
579 	/* Calculate the delta between the lower 32bits of the cached PHC
580 	 * time and the in_tstamp value
581 	 */
582 	delta = (in_tstamp - phc_time_lo);
583 
584 	/* Do not assume that the in_tstamp is always more recent than the
585 	 * cached PHC time. If the delta is large, it indicates that the
586 	 * in_tstamp was taken in the past, and should be converted
587 	 * forward.
588 	 */
589 	if (delta > (U32_MAX / 2)) {
590 		/* reverse the delta calculation here */
591 		delta = (phc_time_lo - in_tstamp);
592 		ns = cached_phc_time - delta;
593 	} else {
594 		ns = cached_phc_time + delta;
595 	}
596 
597 	return ns;
598 }
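
/* Worked example for the extension above (illustrative only): assume
 * cached_phc_time = 0x1_00000010 and a hardware timestamp of
 * in_tstamp = 0xFFFFFFF0 captured just before the lower 32 bits rolled over.
 * Then delta = 0xFFFFFFF0 - 0x00000010 = 0xFFFFFFE0, which is larger than
 * U32_MAX / 2, so the reverse delta 0x00000010 - 0xFFFFFFF0 = 0x20 is used
 * instead and ns = 0x1_00000010 - 0x20 = 0xFFFFFFF0, correctly placing the
 * timestamp 0x20 ns before the cached PHC time.
 */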
599 
600 /**
601  * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
602  * @pf: Board private structure
603  * @in_tstamp: Ingress/egress 40b timestamp value
604  *
605  * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
606  * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
607  *
608  *  *--------------------------------------------------------------*
609  *  | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
610  *  *--------------------------------------------------------------*
611  *
612  * The low bit is an indicator of whether the timestamp is valid. The next
613  * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
614  * and the remaining 32 bits are the lower 32 bits of the PHC timer.
615  *
616  * It is assumed that the caller verifies the timestamp is valid prior to
617  * calling this function.
618  *
619  * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
620  * time stored in the device private PTP structure as the basis for timestamp
621  * extension.
622  *
623  * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
624  * algorithm.
625  */
626 static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
627 {
628 	const u64 mask = GENMASK_ULL(31, 0);
629 
630 	return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
631 				     (in_tstamp >> 8) & mask);
632 }
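
/* Illustrative example (not part of the driver): a raw 40-bit timestamp of
 * 0x12345678_9B has its valid bit set (0x9B & 0x1), the remaining 7 low bits
 * hold the sub-nanosecond value, and (in_tstamp >> 8) & GENMASK_ULL(31, 0)
 * yields 0x12345678, the 32 bits of nominal nanoseconds passed on to
 * ice_ptp_extend_32b_ts.
 */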
633 
634 /**
635  * ice_ptp_read_time - Read the time from the device
636  * @pf: Board private structure
637  * @ts: timespec structure to hold the current time value
638  * @sts: Optional parameter for holding a pair of system timestamps from
639  *       the system clock. Will be ignored if NULL is given.
640  *
641  * This function reads the source clock registers, which hold the time as
642  * 64 bits of nanoseconds, and converts the value into a timespec64 before
643  * returning it.
644  */
645 static void
646 ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
647 		  struct ptp_system_timestamp *sts)
648 {
649 	u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
650 
651 	*ts = ns_to_timespec64(time_ns);
652 }
653 
654 /**
655  * ice_ptp_write_init - Set PHC time to provided value
656  * @pf: Board private structure
657  * @ts: timespec structure that holds the new time value
658  *
659  * Set the PHC time to the specified time provided in the timespec.
660  */
661 static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
662 {
663 	u64 ns = timespec64_to_ns(ts);
664 	struct ice_hw *hw = &pf->hw;
665 
666 	return ice_ptp_init_time(hw, ns);
667 }
668 
669 /**
670  * ice_ptp_write_adj - Adjust PHC clock time atomically
671  * @pf: Board private structure
672  * @adj: Adjustment in nanoseconds
673  *
674  * Perform an atomic adjustment of the PHC time by the specified number of
675  * nanoseconds.
676  */
677 static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
678 {
679 	struct ice_hw *hw = &pf->hw;
680 
681 	return ice_ptp_adj_clock(hw, adj);
682 }
683 
684 /**
685  * ice_ptp_adjfine - Adjust clock increment rate
686  * @info: the driver's PTP info structure
687  * @scaled_ppm: Parts per million with 16-bit fractional field
688  *
689  * Adjust the frequency of the clock by the indicated scaled ppm from the
690  * base frequency.
691  */
692 static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
693 {
694 	struct ice_pf *pf = ptp_info_to_pf(info);
695 	u64 freq, divisor = 1000000ULL;
696 	struct ice_hw *hw = &pf->hw;
697 	s64 incval, diff;
698 	int neg_adj = 0;
699 	int err;
700 
701 	incval = ICE_PTP_NOMINAL_INCVAL_E810;
702 
703 	if (scaled_ppm < 0) {
704 		neg_adj = 1;
705 		scaled_ppm = -scaled_ppm;
706 	}
707 
708 	while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) {
709 		/* handle overflow by scaling down the scaled_ppm and
710 		 * the divisor, losing some precision
711 		 */
712 		scaled_ppm >>= 2;
713 		divisor >>= 2;
714 	}
715 
716 	freq = (incval * (u64)scaled_ppm) >> 16;
717 	diff = div_u64(freq, divisor);
718 
719 	if (neg_adj)
720 		incval -= diff;
721 	else
722 		incval += diff;
723 
724 	err = ice_ptp_write_incval_locked(hw, incval);
725 	if (err) {
726 		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
727 			err);
728 		return -EIO;
729 	}
730 
731 	return 0;
732 }
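
/* Worked example for the adjustment above (illustrative only): a request of
 * scaled_ppm = 65536 asks for a +1 ppm frequency change. Then
 * freq = (incval * 65536) >> 16 = incval, diff = incval / 1000000, and the
 * programmed increment value becomes incval + incval / 1000000, i.e. the
 * clock runs one part per million faster than nominal.
 */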
733 
734 /**
735  * ice_ptp_extts_work - Kthread work function for external timestamp events
736  * @work: external timestamp work structure
737  *
738  * Service for PTP external clock event
739  */
740 static void ice_ptp_extts_work(struct kthread_work *work)
741 {
742 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
743 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
744 	struct ptp_clock_event event;
745 	struct ice_hw *hw = &pf->hw;
746 	u8 chan, tmr_idx;
747 	u32 hi, lo;
748 
749 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
750 	/* Event time is captured by one of the two matched registers
751 	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
752 	 *      GLTSYN_EVNT_H: 32 MSB of sampled time event
753 	 * Event is defined in GLTSYN_EVNT_0 register
754 	 */
755 	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
756 		/* Check if channel is enabled */
757 		if (pf->ptp.ext_ts_irq & (1 << chan)) {
758 			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
759 			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
760 			event.timestamp = (((u64)hi) << 32) | lo;
761 			event.type = PTP_CLOCK_EXTTS;
762 			event.index = chan;
763 
764 			/* Fire event */
765 			ptp_clock_event(pf->ptp.clock, &event);
766 			pf->ptp.ext_ts_irq &= ~(1 << chan);
767 		}
768 	}
769 }
770 
771 /**
772  * ice_ptp_cfg_extts - Configure EXTTS pin and channel
773  * @pf: Board private structure
774  * @ena: true to enable; false to disable
775  * @chan: GPIO channel (0-3)
776  * @gpio_pin: GPIO pin
777  * @extts_flags: request flags from the ptp_extts_request.flags
778  */
779 static int
780 ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
781 		  unsigned int extts_flags)
782 {
783 	u32 func, aux_reg, gpio_reg, irq_reg;
784 	struct ice_hw *hw = &pf->hw;
785 	u8 tmr_idx;
786 
787 	if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
788 		return -EINVAL;
789 
790 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
791 
792 	irq_reg = rd32(hw, PFINT_OICR_ENA);
793 
794 	if (ena) {
795 		/* Enable the interrupt */
796 		irq_reg |= PFINT_OICR_TSYN_EVNT_M;
797 		aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
798 
799 #define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE	BIT(0)
800 #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE	BIT(1)
801 
802 		/* set event level to requested edge */
803 		if (extts_flags & PTP_FALLING_EDGE)
804 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
805 		if (extts_flags & PTP_RISING_EDGE)
806 			aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
807 
808 		/* Write the GPIO CTL reg. Function 0x1 selects the input
809 		 * sampled by the EVENT register for this channel, offset by
810 		 * num_in_channels * tmr_idx.
811 		 */
812 		func = 1 + chan + (tmr_idx * 3);
813 		gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
814 			    GLGEN_GPIO_CTL_PIN_FUNC_M);
815 		pf->ptp.ext_ts_chan |= (1 << chan);
816 	} else {
817 		/* clear the values we set to reset defaults */
818 		aux_reg = 0;
819 		gpio_reg = 0;
820 		pf->ptp.ext_ts_chan &= ~(1 << chan);
821 		if (!pf->ptp.ext_ts_chan)
822 			irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
823 	}
824 
825 	wr32(hw, PFINT_OICR_ENA, irq_reg);
826 	wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
827 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
828 
829 	return 0;
830 }
831 
832 /**
833  * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
834  * @pf: Board private structure
835  * @chan: GPIO channel (0-3)
836  * @config: desired periodic clk configuration. NULL will disable channel
837  * @store: If set to true the values will be stored
838  *
839  * Configure the internal clock generator modules to generate the clock wave of
840  * specified period.
841  */
842 static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
843 			      struct ice_perout_channel *config, bool store)
844 {
845 	u64 current_time, period, start_time, phase;
846 	struct ice_hw *hw = &pf->hw;
847 	u32 func, val, gpio_pin;
848 	u8 tmr_idx;
849 
850 	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
851 
852 	/* 0. Reset mode & out_en in AUX_OUT */
853 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
854 
855 	/* If we're disabling the output, clear out CLKO and TGT and keep
856 	 * output level low
857 	 */
858 	if (!config || !config->ena) {
859 		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
860 		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
861 		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);
862 
863 		val = GLGEN_GPIO_CTL_PIN_DIR_M;
864 		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
865 		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
866 
867 		/* Store the value if requested */
868 		if (store)
869 			memset(&pf->ptp.perout_channels[chan], 0,
870 			       sizeof(struct ice_perout_channel));
871 
872 		return 0;
873 	}
874 	period = config->period;
875 	start_time = config->start_time;
876 	div64_u64_rem(start_time, period, &phase);
877 	gpio_pin = config->gpio_pin;
878 
879 	/* 1. Write clkout with half of required period value */
880 	if (period & 0x1) {
881 		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
882 		goto err;
883 	}
884 
885 	period >>= 1;
886 
887 	/* For proper operation, the GLTSYN_CLKO must be larger than one clock tick
888 	 */
889 #define MIN_PULSE 3
890 	if (period <= MIN_PULSE || period > U32_MAX) {
891 		dev_err(ice_pf_to_dev(pf), "CLK period must be > %d and < 2^33\n",
892 			MIN_PULSE * 2);
893 		goto err;
894 	}
895 
896 	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));
897 
898 	/* Allow time for programming before start_time is hit */
899 	current_time = ice_ptp_read_src_clk_reg(pf, NULL);
900 
901 	/* If the start time is in the past, start the timer at the next whole
902 	 * second, maintaining the requested phase
903 	 */
904 	if (start_time < current_time)
905 		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
906 				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;
907 
908 	start_time -= E810_OUT_PROP_DELAY_NS;
909 
910 	/* 2. Write TARGET time */
911 	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
912 	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));
913 
914 	/* 3. Write AUX_OUT register */
915 	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
916 	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);
917 
918 	/* 4. write GPIO CTL reg */
919 	func = 8 + chan + (tmr_idx * 4);
920 	val = GLGEN_GPIO_CTL_PIN_DIR_M |
921 	      ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
922 	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
923 
924 	/* Store the value if requested */
925 	if (store) {
926 		memcpy(&pf->ptp.perout_channels[chan], config,
927 		       sizeof(struct ice_perout_channel));
928 		pf->ptp.perout_channels[chan].start_time = phase;
929 	}
930 
931 	return 0;
932 err:
933 	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
934 	return -EFAULT;
935 }
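
/* Illustrative example (not part of the driver): with the PHC currently at
 * 1.25 s and a requested output of period = 1 s starting at 0.3 s, the phase
 * is 0.3 s. Since the start time is in the past, it is pushed to the next
 * whole second (2 s) plus the phase, i.e. 2.3 s, and then reduced by
 * E810_OUT_PROP_DELAY_NS to compensate for output propagation delay.
 */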
936 
937 /**
938  * ice_ptp_disable_all_clkout - Disable all currently configured outputs
939  * @pf: pointer to the PF structure
940  *
941  * Disable all currently configured clock outputs. This is necessary before
942  * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
943  * re-enable the clocks again.
944  */
945 static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
946 {
947 	unsigned int i;
948 
949 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
950 		if (pf->ptp.perout_channels[i].ena)
951 			ice_ptp_cfg_clkout(pf, i, NULL, false);
952 }
953 
954 /**
955  * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
956  * @pf: pointer to the PF structure
957  *
958  * Enable all currently configured clock outputs. Use this after
959  * ice_ptp_disable_all_clkout to reconfigure the output signals according to
960  * their configuration.
961  */
962 static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
963 {
964 	unsigned int i;
965 
966 	for (i = 0; i < pf->ptp.info.n_per_out; i++)
967 		if (pf->ptp.perout_channels[i].ena)
968 			ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
969 					   false);
970 }
971 
972 /**
973  * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
974  * @info: the driver's PTP info structure
975  * @rq: The requested feature to change
976  * @on: Enable/disable flag
977  */
978 static int
979 ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
980 			 struct ptp_clock_request *rq, int on)
981 {
982 	struct ice_pf *pf = ptp_info_to_pf(info);
983 	struct ice_perout_channel clk_cfg = {0};
984 	bool sma_pres = false;
985 	unsigned int chan;
986 	u32 gpio_pin;
987 	int err;
988 
989 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
990 		sma_pres = true;
991 
992 	switch (rq->type) {
993 	case PTP_CLK_REQ_PEROUT:
994 		chan = rq->perout.index;
995 		if (sma_pres) {
996 			if (chan == ice_pin_desc_e810t[SMA1].chan)
997 				clk_cfg.gpio_pin = GPIO_20;
998 			else if (chan == ice_pin_desc_e810t[SMA2].chan)
999 				clk_cfg.gpio_pin = GPIO_22;
1000 			else
1001 				return -1;
1002 		} else if (ice_is_e810t(&pf->hw)) {
1003 			if (chan == 0)
1004 				clk_cfg.gpio_pin = GPIO_20;
1005 			else
1006 				clk_cfg.gpio_pin = GPIO_22;
1007 		} else if (chan == PPS_CLK_GEN_CHAN) {
1008 			clk_cfg.gpio_pin = PPS_PIN_INDEX;
1009 		} else {
1010 			clk_cfg.gpio_pin = chan;
1011 		}
1012 
1013 		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
1014 				   rq->perout.period.nsec);
1015 		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
1016 				       rq->perout.start.nsec);
1017 		clk_cfg.ena = !!on;
1018 
1019 		err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
1020 		break;
1021 	case PTP_CLK_REQ_EXTTS:
1022 		chan = rq->extts.index;
1023 		if (sma_pres) {
1024 			if (chan < ice_pin_desc_e810t[SMA2].chan)
1025 				gpio_pin = GPIO_21;
1026 			else
1027 				gpio_pin = GPIO_23;
1028 		} else if (ice_is_e810t(&pf->hw)) {
1029 			if (chan == 0)
1030 				gpio_pin = GPIO_21;
1031 			else
1032 				gpio_pin = GPIO_23;
1033 		} else {
1034 			gpio_pin = chan;
1035 		}
1036 
1037 		err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
1038 					rq->extts.flags);
1039 		break;
1040 	default:
1041 		return -EOPNOTSUPP;
1042 	}
1043 
1044 	return err;
1045 }
1046 
1047 /**
1048  * ice_ptp_gettimex64 - Get the time of the clock
1049  * @info: the driver's PTP info structure
1050  * @ts: timespec64 structure to hold the current time value
1051  * @sts: Optional parameter for holding a pair of system timestamps from
1052  *       the system clock. Will be ignored if NULL is given.
1053  *
1054  * Read the device clock and return the correct value in ns, after converting
1055  * it into a timespec64 struct.
1056  */
1057 static int
1058 ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
1059 		   struct ptp_system_timestamp *sts)
1060 {
1061 	struct ice_pf *pf = ptp_info_to_pf(info);
1062 	struct ice_hw *hw = &pf->hw;
1063 
1064 	if (!ice_ptp_lock(hw)) {
1065 		dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
1066 		return -EBUSY;
1067 	}
1068 
1069 	ice_ptp_read_time(pf, ts, sts);
1070 	ice_ptp_unlock(hw);
1071 
1072 	return 0;
1073 }
1074 
1075 /**
1076  * ice_ptp_settime64 - Set the time of the clock
1077  * @info: the driver's PTP info structure
1078  * @ts: timespec64 structure that holds the new time value
1079  *
1080  * Set the device clock to the user input value. The conversion from timespec
1081  * to ns happens in the write function.
1082  */
1083 static int
1084 ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
1085 {
1086 	struct ice_pf *pf = ptp_info_to_pf(info);
1087 	struct timespec64 ts64 = *ts;
1088 	struct ice_hw *hw = &pf->hw;
1089 	int err;
1090 
1091 	if (!ice_ptp_lock(hw)) {
1092 		err = -EBUSY;
1093 		goto exit;
1094 	}
1095 
1096 	/* Disable periodic outputs */
1097 	ice_ptp_disable_all_clkout(pf);
1098 
1099 	err = ice_ptp_write_init(pf, &ts64);
1100 	ice_ptp_unlock(hw);
1101 
1102 	if (!err)
1103 		ice_ptp_update_cached_phctime(pf);
1104 
1105 	/* Reenable periodic outputs */
1106 	ice_ptp_enable_all_clkout(pf);
1107 exit:
1108 	if (err) {
1109 		dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
1110 		return err;
1111 	}
1112 
1113 	return 0;
1114 }
1115 
1116 /**
1117  * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
1118  * @info: the driver's PTP info structure
1119  * @delta: Offset in nanoseconds to adjust the time by
1120  */
1121 static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
1122 {
1123 	struct timespec64 now, then;
1124 
1125 	then = ns_to_timespec64(delta);
1126 	ice_ptp_gettimex64(info, &now, NULL);
1127 	now = timespec64_add(now, then);
1128 
1129 	return ice_ptp_settime64(info, (const struct timespec64 *)&now);
1130 }
1131 
1132 /**
1133  * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
1134  * @info: the driver's PTP info structure
1135  * @delta: Offset in nanoseconds to adjust the time by
1136  */
1137 static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
1138 {
1139 	struct ice_pf *pf = ptp_info_to_pf(info);
1140 	struct ice_hw *hw = &pf->hw;
1141 	struct device *dev;
1142 	int err;
1143 
1144 	dev = ice_pf_to_dev(pf);
1145 
1146 	/* Hardware only supports atomic adjustments using signed 32-bit
1147 	 * integers. For any adjustment outside this range, perform
1148 	 * a non-atomic get->adjust->set flow.
1149 	 */
1150 	if (delta > S32_MAX || delta < S32_MIN) {
1151 		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
1152 		return ice_ptp_adjtime_nonatomic(info, delta);
1153 	}
1154 
1155 	if (!ice_ptp_lock(hw)) {
1156 		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
1157 		return -EBUSY;
1158 	}
1159 
1160 	/* Disable periodic outputs */
1161 	ice_ptp_disable_all_clkout(pf);
1162 
1163 	err = ice_ptp_write_adj(pf, delta);
1164 
1165 	/* Reenable periodic outputs */
1166 	ice_ptp_enable_all_clkout(pf);
1167 
1168 	ice_ptp_unlock(hw);
1169 
1170 	if (err) {
1171 		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
1172 		return err;
1173 	}
1174 
1175 	ice_ptp_update_cached_phctime(pf);
1176 
1177 	return 0;
1178 }
1179 
1180 /**
1181  * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
1182  * @pf: Board private structure
1183  * @ifr: ioctl data
1184  *
1185  * Copy the timestamping config to user buffer
1186  */
1187 int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
1188 {
1189 	struct hwtstamp_config *config;
1190 
1191 	if (!test_bit(ICE_FLAG_PTP, pf->flags))
1192 		return -EIO;
1193 
1194 	config = &pf->ptp.tstamp_config;
1195 
1196 	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
1197 		-EFAULT : 0;
1198 }
1199 
1200 /**
1201  * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
1202  * @pf: Board private structure
1203  * @config: hwtstamp settings requested or saved
1204  */
1205 static int
1206 ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
1207 {
1208 	/* Reserved for future extensions. */
1209 	if (config->flags)
1210 		return -EINVAL;
1211 
1212 	switch (config->tx_type) {
1213 	case HWTSTAMP_TX_OFF:
1214 		ice_set_tx_tstamp(pf, false);
1215 		break;
1216 	case HWTSTAMP_TX_ON:
1217 		ice_set_tx_tstamp(pf, true);
1218 		break;
1219 	default:
1220 		return -ERANGE;
1221 	}
1222 
1223 	switch (config->rx_filter) {
1224 	case HWTSTAMP_FILTER_NONE:
1225 		ice_set_rx_tstamp(pf, false);
1226 		break;
1227 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1228 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1229 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1230 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1231 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1232 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1233 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1234 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1235 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1236 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1237 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1238 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1239 	case HWTSTAMP_FILTER_NTP_ALL:
1240 	case HWTSTAMP_FILTER_ALL:
1241 		config->rx_filter = HWTSTAMP_FILTER_ALL;
1242 		ice_set_rx_tstamp(pf, true);
1243 		break;
1244 	default:
1245 		return -ERANGE;
1246 	}
1247 
1248 	return 0;
1249 }
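
/* Illustrative sketch of how userspace reaches ice_ptp_set_timestamp_mode()
 * via the standard SIOCSHWTSTAMP ioctl; the interface name and socket are
 * only examples (needs <linux/net_tstamp.h>, <linux/sockios.h>, <net/if.h>):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sockfd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */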
1250 
1251 /**
1252  * ice_ptp_set_ts_config - ioctl interface to control the timestamping
1253  * @pf: Board private structure
1254  * @ifr: ioctl data
1255  *
1256  * Get the user config and store it
1257  */
1258 int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
1259 {
1260 	struct hwtstamp_config config;
1261 	int err;
1262 
1263 	if (!test_bit(ICE_FLAG_PTP, pf->flags))
1264 		return -EAGAIN;
1265 
1266 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1267 		return -EFAULT;
1268 
1269 	err = ice_ptp_set_timestamp_mode(pf, &config);
1270 	if (err)
1271 		return err;
1272 
1273 	/* Save these settings for future reference */
1274 	pf->ptp.tstamp_config = config;
1275 
1276 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1277 		-EFAULT : 0;
1278 }
1279 
1280 /**
1281  * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
1282  * @rx_ring: Ring to get the VSI info
1283  * @rx_desc: Receive descriptor
1284  * @skb: Particular skb to send timestamp with
1285  *
1286  * The driver receives the timestamp in the receive descriptor. The timestamp
1287  * is only 32 bits of nominal nanoseconds, so it must be extended to 64 bits.
1288  */
1289 void
1290 ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
1291 		    union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
1292 {
1293 	u32 ts_high;
1294 	u64 ts_ns;
1295 
1296 	/* Populate timesync data into skb */
1297 	if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
1298 		struct skb_shared_hwtstamps *hwtstamps;
1299 
1300 		/* Use ice_ptp_extend_32b_ts directly, using the ring-specific
1301 		 * cached PHC value, rather than accessing the PF. This also
1302 		 * allows us to simply pass the upper 32bits of nanoseconds
1303 		 * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
1304 		 * it would just discard these bits itself.
1305 		 */
1306 		ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
1307 		ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);
1308 
1309 		hwtstamps = skb_hwtstamps(skb);
1310 		memset(hwtstamps, 0, sizeof(*hwtstamps));
1311 		hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
1312 	}
1313 }
1314 
1315 /**
1316  * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
1317  * @pf: pointer to the PF structure
1318  * @info: PTP clock info structure
1319  *
1320  * Disable the OS access to the SMA pins. Called to clear out the OS
1321  * indications of pin support when we fail to setup the E810-T SMA control
1322  * register.
1323  */
1324 static void
1325 ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
1326 {
1327 	struct device *dev = ice_pf_to_dev(pf);
1328 
1329 	dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
1330 
1331 	info->enable = NULL;
1332 	info->verify = NULL;
1333 	info->n_pins = 0;
1334 	info->n_ext_ts = 0;
1335 	info->n_per_out = 0;
1336 }
1337 
1338 /**
1339  * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
1340  * @pf: pointer to the PF structure
1341  * @info: PTP clock info structure
1342  *
1343  * Finish setting up the SMA pins by allocating pin_config, and setting it up
1344  * according to the current status of the SMA. On failure, disable all of the
1345  * extended SMA pin support.
1346  */
1347 static void
1348 ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
1349 {
1350 	struct device *dev = ice_pf_to_dev(pf);
1351 	int err;
1352 
1353 	/* Allocate memory for kernel pins interface */
1354 	info->pin_config = devm_kcalloc(dev, info->n_pins,
1355 					sizeof(*info->pin_config), GFP_KERNEL);
1356 	if (!info->pin_config) {
1357 		ice_ptp_disable_sma_pins_e810t(pf, info);
1358 		return;
1359 	}
1360 
1361 	/* Read current SMA status */
1362 	err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
1363 	if (err)
1364 		ice_ptp_disable_sma_pins_e810t(pf, info);
1365 }
1366 
1367 /**
1368  * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
1369  * @pf: pointer to the PF instance
1370  * @info: PTP clock capabilities
1371  */
1372 static void
1373 ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
1374 {
1375 	/* Check if SMA controller is in the netlist */
1376 	if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
1377 	    !ice_is_pca9575_present(&pf->hw))
1378 		ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
1379 
1380 	if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
1381 		info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
1382 		info->n_per_out = N_PER_OUT_E810T_NO_SMA;
1383 		return;
1384 	}
1385 
1386 	info->n_per_out = N_PER_OUT_E810T;
1387 	info->n_ext_ts = N_EXT_TS_E810;
1388 	info->n_pins = NUM_PTP_PINS_E810T;
1389 	info->verify = ice_verify_pin_e810t;
1390 
1391 	/* Complete setup of the SMA pins */
1392 	ice_ptp_setup_sma_pins_e810t(pf, info);
1393 }
1394 
1395 /**
1396  * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
1397  * @info: PTP clock capabilities
1398  */
1399 static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
1400 {
1401 	info->n_per_out = N_PER_OUT_E810;
1402 	info->n_ext_ts = N_EXT_TS_E810;
1403 }
1404 
1405 /**
1406  * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support
1407  * @pf: Board private structure
1408  * @info: PTP info to fill
1409  *
1410  * Assign functions to the PTP capabilities structure for E810 devices.
1411  * Functions which operate across all device families should be set directly
1412  * in ice_ptp_set_caps. Only add functions here which are distinct for e810
1413  * devices.
1414  */
1415 static void
1416 ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
1417 {
1418 	info->enable = ice_ptp_gpio_enable_e810;
1419 
1420 	if (ice_is_e810t(&pf->hw))
1421 		ice_ptp_setup_pins_e810t(pf, info);
1422 	else
1423 		ice_ptp_setup_pins_e810(info);
1424 }
1425 
1426 /**
1427  * ice_ptp_set_caps - Set PTP capabilities
1428  * @pf: Board private structure
1429  */
1430 static void ice_ptp_set_caps(struct ice_pf *pf)
1431 {
1432 	struct ptp_clock_info *info = &pf->ptp.info;
1433 	struct device *dev = ice_pf_to_dev(pf);
1434 
1435 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
1436 		 dev_driver_string(dev), dev_name(dev));
1437 	info->owner = THIS_MODULE;
1438 	info->max_adj = 999999999;
1439 	info->adjtime = ice_ptp_adjtime;
1440 	info->adjfine = ice_ptp_adjfine;
1441 	info->gettimex64 = ice_ptp_gettimex64;
1442 	info->settime64 = ice_ptp_settime64;
1443 
1444 	ice_ptp_set_funcs_e810(pf, info);
1445 }
1446 
1447 /**
1448  * ice_ptp_create_clock - Create PTP clock device for userspace
1449  * @pf: Board private structure
1450  *
1451  * This function creates a new PTP clock device if one does not already
1452  * exist. It returns an error if it cannot create one, and success if a clock
1453  * device already exists. It should be used by ice_ptp_init to create the
1454  * clock initially, and prevents global resets from creating new clock devices.
1455  */
1456 static long ice_ptp_create_clock(struct ice_pf *pf)
1457 {
1458 	struct ptp_clock_info *info;
1459 	struct ptp_clock *clock;
1460 	struct device *dev;
1461 
1462 	/* No need to create a clock device if we already have one */
1463 	if (pf->ptp.clock)
1464 		return 0;
1465 
1466 	ice_ptp_set_caps(pf);
1467 
1468 	info = &pf->ptp.info;
1469 	dev = ice_pf_to_dev(pf);
1470 
1471 	/* Attempt to register the clock before enabling the hardware. */
1472 	clock = ptp_clock_register(info, dev);
1473 	if (IS_ERR(clock))
1474 		return PTR_ERR(clock);
1475 
1476 	pf->ptp.clock = clock;
1477 
1478 	return 0;
1479 }
1480 
1481 /**
1482  * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
1483  * @work: pointer to the kthread_work struct
1484  *
1485  * Process timestamps captured by the PHY associated with this port. To do
1486  * this, loop over each index with a waiting skb.
1487  *
1488  * If a given index has a valid timestamp, perform the following steps:
1489  *
1490  * 1) copy the timestamp out of the PHY register
1491  * 2) clear the timestamp valid bit in the PHY register
1492  * 3) unlock the index by clearing the associated in_use bit
1493  * 4) extend the 40b timestamp value to get a 64bit timestamp
1494  * 5) send that timestamp to the stack
1495  *
1496  * After looping, if we still have waiting SKBs, then re-queue the work. This
1497  * may cause us to effectively poll even when not strictly necessary. We do this
1498  * because it's possible a new timestamp was requested around the same time as
1499  * the interrupt. In some cases hardware might not interrupt us again when the
1500  * timestamp is captured.
1501  *
1502  * Note that we only take the tracking lock when clearing the bit and when
1503  * checking if we need to re-queue this task. The only place where bits can be
1504  * set is the hard xmit routine where an SKB has a request flag set. The only
1505  * places where we clear bits are this work function, or the periodic cleanup
1506  * thread. If the cleanup thread clears a bit we're processing we catch it
1507  * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
1508  * starts a new timestamp, we might not begin processing it right away but we
1509  * will notice it at the end when we re-queue the work item. If a Tx thread
1510  * starts a new timestamp just after this function exits without re-queuing,
1511  * the interrupt when the timestamp finishes should trigger. Avoiding holding
1512  * the lock for the entire function is important in order to ensure that Tx
1513  * threads do not get blocked while waiting for the lock.
1514  */
1515 static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
1516 {
1517 	struct ice_ptp_port *ptp_port;
1518 	struct ice_ptp_tx *tx;
1519 	struct ice_pf *pf;
1520 	struct ice_hw *hw;
1521 	u8 idx;
1522 
1523 	tx = container_of(work, struct ice_ptp_tx, work);
1524 	if (!tx->init)
1525 		return;
1526 
1527 	ptp_port = container_of(tx, struct ice_ptp_port, tx);
1528 	pf = ptp_port_to_pf(ptp_port);
1529 	hw = &pf->hw;
1530 
1531 	for_each_set_bit(idx, tx->in_use, tx->len) {
1532 		struct skb_shared_hwtstamps shhwtstamps = {};
1533 		u8 phy_idx = idx + tx->quad_offset;
1534 		u64 raw_tstamp, tstamp;
1535 		struct sk_buff *skb;
1536 		int err;
1537 
1538 		err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
1539 					  &raw_tstamp);
1540 		if (err)
1541 			continue;
1542 
1543 		/* Check if the timestamp is valid */
1544 		if (!(raw_tstamp & ICE_PTP_TS_VALID))
1545 			continue;
1546 
1547 		/* clear the timestamp register, so that it won't show valid
1548 		 * again when re-used.
1549 		 */
1550 		ice_clear_phy_tstamp(hw, tx->quad, phy_idx);
1551 
1552 		/* The timestamp is valid, so we'll go ahead and clear this
1553 		 * index and then send the timestamp up to the stack.
1554 		 */
1555 		spin_lock(&tx->lock);
1556 		clear_bit(idx, tx->in_use);
1557 		skb = tx->tstamps[idx].skb;
1558 		tx->tstamps[idx].skb = NULL;
1559 		spin_unlock(&tx->lock);
1560 
1561 		/* it's (unlikely but) possible we raced with the cleanup
1562 		 * thread for discarding old timestamp requests.
1563 		 */
1564 		if (!skb)
1565 			continue;
1566 
1567 		/* Extend the timestamp using cached PHC time */
1568 		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
1569 		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
1570 
1571 		skb_tstamp_tx(skb, &shhwtstamps);
1572 		dev_kfree_skb_any(skb);
1573 	}
1574 
1575 	/* Check if we still have work to do. If so, re-queue this task to
1576 	 * poll for remaining timestamps.
1577 	 */
1578 	spin_lock(&tx->lock);
1579 	if (!bitmap_empty(tx->in_use, tx->len))
1580 		kthread_queue_work(pf->ptp.kworker, &tx->work);
1581 	spin_unlock(&tx->lock);
1582 }
1583 
1584 /**
1585  * ice_ptp_request_ts - Request an available Tx timestamp index
1586  * @tx: the PTP Tx timestamp tracker to request from
1587  * @skb: the SKB to associate with this timestamp request
1588  */
1589 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
1590 {
1591 	u8 idx;
1592 
1593 	/* Check if this tracker is initialized */
1594 	if (!tx->init)
1595 		return -1;
1596 
1597 	spin_lock(&tx->lock);
1598 	/* Find and set the first available index */
1599 	idx = find_first_zero_bit(tx->in_use, tx->len);
1600 	if (idx < tx->len) {
1601 		/* We got a valid index that no other thread could have set. Store
1602 		 * a reference to the skb and the start time to allow discarding old
1603 		 * requests.
1604 		 */
1605 		set_bit(idx, tx->in_use);
1606 		tx->tstamps[idx].start = jiffies;
1607 		tx->tstamps[idx].skb = skb_get(skb);
1608 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1609 	}
1610 
1611 	spin_unlock(&tx->lock);
1612 
1613 	/* return the appropriate PHY timestamp register index, -1 if no
1614 	 * indexes were available.
1615 	 */
1616 	if (idx >= tx->len)
1617 		return -1;
1618 	else
1619 		return idx + tx->quad_offset;
1620 }
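
/* Illustrative sketch (not the actual transmit path) of how a hard xmit
 * routine is expected to use the tracker: request an index for the skb and,
 * if one is available, program it into the Tx descriptor so hardware latches
 * the completion timestamp into that PHY register slot:
 *
 *	s8 idx = ice_ptp_request_ts(&pf->ptp.port.tx, skb);
 *
 *	if (idx < 0)
 *		return;		// no free slot, send without a HW timestamp
 *	// otherwise encode idx into the Tx descriptor timestamp field
 */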
1621 
1622 /**
1623  * ice_ptp_process_ts - Spawn kthread work to handle timestamps
1624  * @pf: Board private structure
1625  *
1626  * Queue work required to process the PTP Tx timestamps outside of interrupt
1627  * context.
1628  */
1629 void ice_ptp_process_ts(struct ice_pf *pf)
1630 {
1631 	if (pf->ptp.port.tx.init)
1632 		kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
1633 }
1634 
1635 /**
1636  * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
1637  * @tx: Tx tracking structure to initialize
1638  *
1639  * Assumes that the length has already been initialized. Do not call directly,
1640  * use ice_ptp_init_tx_e810 instead.
1641  */
1642 static int
1643 ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
1644 {
1645 	tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
1646 	if (!tx->tstamps)
1647 		return -ENOMEM;
1648 
1649 	tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
1650 	if (!tx->in_use) {
1651 		kfree(tx->tstamps);
1652 		tx->tstamps = NULL;
1653 		return -ENOMEM;
1654 	}
1655 
1656 	spin_lock_init(&tx->lock);
1657 	kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);
1658 
1659 	tx->init = 1;
1660 
1661 	return 0;
1662 }
1663 
1664 /**
1665  * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
1666  * @pf: Board private structure
1667  * @tx: the tracker to flush
1668  */
1669 static void
1670 ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
1671 {
1672 	u8 idx;
1673 
1674 	for (idx = 0; idx < tx->len; idx++) {
1675 		u8 phy_idx = idx + tx->quad_offset;
1676 
1677 		spin_lock(&tx->lock);
1678 		if (tx->tstamps[idx].skb) {
1679 			dev_kfree_skb_any(tx->tstamps[idx].skb);
1680 			tx->tstamps[idx].skb = NULL;
1681 		}
1682 		clear_bit(idx, tx->in_use);
1683 		spin_unlock(&tx->lock);
1684 
1685 		/* Clear any potential residual timestamp in the PHY block */
1686 		if (!pf->hw.reset_ongoing)
1687 			ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
1688 	}
1689 }
1690 
1691 /**
1692  * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
1693  * @pf: Board private structure
1694  * @tx: Tx tracking structure to release
1695  *
1696  * Free memory associated with the Tx timestamp tracker.
1697  */
1698 static void
1699 ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
1700 {
1701 	tx->init = 0;
1702 
1703 	kthread_cancel_work_sync(&tx->work);
1704 
1705 	ice_ptp_flush_tx_tracker(pf, tx);
1706 
1707 	kfree(tx->tstamps);
1708 	tx->tstamps = NULL;
1709 
1710 	bitmap_free(tx->in_use);
1711 	tx->in_use = NULL;
1712 
1713 	tx->len = 0;
1714 }
1715 
1716 /**
1717  * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
1718  * @pf: Board private structure
1719  * @tx: the Tx tracking structure to initialize
1720  *
1721  * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
1722  * port has its own block of timestamps, independent of the other ports.
1723  */
1724 static int
1725 ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
1726 {
1727 	tx->quad = pf->hw.port_info->lport;
1728 	tx->quad_offset = 0;
1729 	tx->len = INDEX_PER_QUAD;
1730 
1731 	return ice_ptp_alloc_tx_tracker(tx);
1732 }
1733 
1734 /**
1735  * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
1736  * @tx: PTP Tx tracker to clean up
1737  *
1738  * Loop through the Tx timestamp requests and see if any of them have been
1739  * waiting for a long time. Discard any SKBs that have been waiting for more
1740  * than 2 seconds. This is long enough to be reasonably sure that the
1741  * timestamp will never be captured. This might happen if the packet gets
1742  * discarded before it reaches the PHY timestamping block.
1743  */
1744 static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
1745 {
1746 	u8 idx;
1747 
1748 	if (!tx->init)
1749 		return;
1750 
1751 	for_each_set_bit(idx, tx->in_use, tx->len) {
1752 		struct sk_buff *skb;
1753 
1754 		/* Check if this SKB has been waiting for too long */
1755 		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
1756 			continue;
1757 
1758 		spin_lock(&tx->lock);
1759 		skb = tx->tstamps[idx].skb;
1760 		tx->tstamps[idx].skb = NULL;
1761 		clear_bit(idx, tx->in_use);
1762 		spin_unlock(&tx->lock);
1763 
1764 		/* Free the SKB after we've cleared the bit */
1765 		dev_kfree_skb_any(skb);
1766 	}
1767 }
1768 
1769 static void ice_ptp_periodic_work(struct kthread_work *work)
1770 {
1771 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
1772 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
1773 
1774 	if (!test_bit(ICE_FLAG_PTP, pf->flags))
1775 		return;
1776 
1777 	ice_ptp_update_cached_phctime(pf);
1778 
1779 	ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
1780 
1781 	/* Run twice a second */
1782 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
1783 				   msecs_to_jiffies(500));
1784 }
1785 
1786 /**
1787  * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
1788  * @pf: Board private structure
1789  *
1790  * Setup and initialize a PTP clock device that represents the device hardware
1791  * clock. Save the clock index for other functions connected to the same
1792  * hardware resource.
1793  */
1794 static int ice_ptp_init_owner(struct ice_pf *pf)
1795 {
1796 	struct device *dev = ice_pf_to_dev(pf);
1797 	struct ice_hw *hw = &pf->hw;
1798 	struct timespec64 ts;
1799 	u8 src_idx;
1800 	int err;
1801 
1802 	wr32(hw, GLTSYN_SYNC_DLAY, 0);
1803 
1804 	/* Clear some HW residue and enable source clock */
1805 	src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
1806 
1807 	/* Enable source clocks */
1808 	wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
1809 
1810 	/* Enable PHY time sync */
1811 	err = ice_ptp_init_phy_e810(hw);
1812 	if (err)
1813 		goto err_exit;
1814 
1815 	/* Clear event status indications for auxiliary pins */
1816 	(void)rd32(hw, GLTSYN_STAT(src_idx));
1817 
1818 	/* Acquire the global hardware lock */
1819 	if (!ice_ptp_lock(hw)) {
1820 		err = -EBUSY;
1821 		goto err_exit;
1822 	}
1823 
1824 	/* Write the increment time value to PHY and LAN */
1825 	err = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
1826 	if (err) {
1827 		ice_ptp_unlock(hw);
1828 		goto err_exit;
1829 	}
1830 
1831 	ts = ktime_to_timespec64(ktime_get_real());
1832 	/* Write the initial Time value to PHY and LAN */
1833 	err = ice_ptp_write_init(pf, &ts);
1834 	if (err) {
1835 		ice_ptp_unlock(hw);
1836 		goto err_exit;
1837 	}
1838 
1839 	/* Release the global hardware lock */
1840 	ice_ptp_unlock(hw);
1841 
1842 	/* Ensure we have a clock device */
1843 	err = ice_ptp_create_clock(pf);
1844 	if (err)
1845 		goto err_clk;
1846 
1847 	/* Store the PTP clock index for other PFs */
1848 	ice_set_ptp_clock_index(pf);
1849 
1850 	return 0;
1851 
1852 err_clk:
1853 	pf->ptp.clock = NULL;
1854 err_exit:
1855 	dev_err(dev, "PTP failed to register clock, err %d\n", err);
1856 
1857 	return err;
1858 }
1859 
1860 /**
1861  * ice_ptp_init - Initialize the PTP support after device probe or reset
1862  * @pf: Board private structure
1863  *
1864  * This function sets the device up for PTP support. The first time it is run,
1865  * will create a clock device. It does not create a clock device if one
1866  * already exists. It also reconfigures the device after a reset.
1867  */
1868 void ice_ptp_init(struct ice_pf *pf)
1869 {
1870 	struct device *dev = ice_pf_to_dev(pf);
1871 	struct kthread_worker *kworker;
1872 	struct ice_hw *hw = &pf->hw;
1873 	int err;
1874 
1875 	/* PTP is currently only supported on E810 devices */
1876 	if (!ice_is_e810(hw))
1877 		return;
1878 
1879 	/* Check if this PF owns the source timer */
1880 	if (hw->func_caps.ts_func_info.src_tmr_owned) {
1881 		err = ice_ptp_init_owner(pf);
1882 		if (err)
1883 			return;
1884 	}
1885 
1886 	/* Disable timestamping for both Tx and Rx */
1887 	ice_ptp_cfg_timestamp(pf, false);
1888 
1889 	/* Initialize the PTP port Tx timestamp tracker */
1890 	ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx);
1891 
1892 	/* Initialize work functions */
1893 	kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work);
1894 	kthread_init_work(&pf->ptp.extts_work, ice_ptp_extts_work);
1895 
1896 	/* Allocate a kworker for handling work required for the ports
1897 	 * connected to the PTP hardware clock.
1898 	 */
1899 	kworker = kthread_create_worker(0, "ice-ptp-%s", dev_name(dev));
1900 	if (IS_ERR(kworker)) {
1901 		err = PTR_ERR(kworker);
1902 		goto err_kworker;
1903 	}
1904 	pf->ptp.kworker = kworker;
1905 
1906 	set_bit(ICE_FLAG_PTP, pf->flags);
1907 
1908 	/* Start periodic work going */
1909 	kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
1910 
1911 	dev_info(dev, "PTP init successful\n");
1912 	return;
1913 
1914 err_kworker:
1915 	/* If we registered a PTP clock, release it */
1916 	if (pf->ptp.clock) {
1917 		ptp_clock_unregister(pf->ptp.clock);
1918 		pf->ptp.clock = NULL;
1919 	}
1920 	dev_err(dev, "PTP failed %d\n", err);
1921 }
1922 
1923 /**
1924  * ice_ptp_release - Disable the driver/HW support and unregister the clock
1925  * @pf: Board private structure
1926  *
1927  * This function handles the cleanup work required from the initialization by
1928  * clearing out the important information and unregistering the clock
1929  */
1930 void ice_ptp_release(struct ice_pf *pf)
1931 {
1932 	if (!test_bit(ICE_FLAG_PTP, pf->flags))
1933 		return;
1934 
1935 	/* Disable timestamping for both Tx and Rx */
1936 	ice_ptp_cfg_timestamp(pf, false);
1937 
1938 	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
1939 
1940 	clear_bit(ICE_FLAG_PTP, pf->flags);
1941 
1942 	kthread_cancel_delayed_work_sync(&pf->ptp.work);
1943 
1944 	if (pf->ptp.kworker) {
1945 		kthread_destroy_worker(pf->ptp.kworker);
1946 		pf->ptp.kworker = NULL;
1947 	}
1948 
1949 	if (!pf->ptp.clock)
1950 		return;
1951 
1952 	/* Disable periodic outputs */
1953 	ice_ptp_disable_all_clkout(pf);
1954 
1955 	ice_clear_ptp_clock_index(pf);
1956 	ptp_clock_unregister(pf->ptp.clock);
1957 	pf->ptp.clock = NULL;
1958 
1959 	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
1960 }
1961