1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3  * Copyright (C) 2021 NXP
4  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/phy.h>
14 #include <linux/processor.h>
15 #include <linux/property.h>
16 #include <linux/ptp_classify.h>
17 #include <linux/ptp_clock_kernel.h>
18 #include <linux/net_tstamp.h>
19 
20 #define PHY_ID_TJA_1103			0x001BB010
21 
22 #define VEND1_DEVICE_CONTROL		0x0040
23 #define DEVICE_CONTROL_RESET		BIT(15)
24 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
25 #define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)
26 
27 #define VEND1_PHY_IRQ_ACK		0x80A0
28 #define VEND1_PHY_IRQ_EN		0x80A1
29 #define VEND1_PHY_IRQ_STATUS		0x80A2
30 #define PHY_IRQ_LINK_EVENT		BIT(1)
31 
32 #define VEND1_PHY_CONTROL		0x8100
33 #define PHY_CONFIG_EN			BIT(14)
34 #define PHY_START_OP			BIT(0)
35 
36 #define VEND1_PHY_CONFIG		0x8108
37 #define PHY_CONFIG_AUTO			BIT(0)
38 
39 #define VEND1_SIGNAL_QUALITY		0x8320
40 #define SQI_VALID			BIT(14)
41 #define SQI_MASK			GENMASK(2, 0)
42 #define MAX_SQI				SQI_MASK
43 
44 #define CABLE_TEST_ENABLE		BIT(15)
45 #define CABLE_TEST_START		BIT(14)
46 #define CABLE_TEST_OK			0x00
47 #define CABLE_TEST_SHORTED		0x01
48 #define CABLE_TEST_OPEN			0x02
49 #define CABLE_TEST_UNKNOWN		0x07
50 
51 #define VEND1_PORT_CONTROL		0x8040
52 #define PORT_CONTROL_EN			BIT(14)
53 
54 #define VEND1_PORT_ABILITIES		0x8046
55 #define PTP_ABILITY			BIT(3)
56 
57 #define VEND1_PORT_INFRA_CONTROL	0xAC00
58 #define PORT_INFRA_CONTROL_EN		BIT(14)
59 
60 #define VEND1_RXID			0xAFCC
61 #define VEND1_TXID			0xAFCD
62 #define ID_ENABLE			BIT(15)
63 
64 #define VEND1_ABILITIES			0xAFC4
65 #define RGMII_ID_ABILITY		BIT(15)
66 #define RGMII_ABILITY			BIT(14)
67 #define RMII_ABILITY			BIT(10)
68 #define REVMII_ABILITY			BIT(9)
69 #define MII_ABILITY			BIT(8)
70 #define SGMII_ABILITY			BIT(0)
71 
72 #define VEND1_MII_BASIC_CONFIG		0xAFC6
73 #define MII_BASIC_CONFIG_REV		BIT(4)
74 #define MII_BASIC_CONFIG_SGMII		0x9
75 #define MII_BASIC_CONFIG_RGMII		0x7
76 #define MII_BASIC_CONFIG_RMII		0x5
77 #define MII_BASIC_CONFIG_MII		0x4
78 
79 #define VEND1_SYMBOL_ERROR_COUNTER	0x8350
80 #define VEND1_LINK_DROP_COUNTER		0x8352
81 #define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
82 #define VEND1_RX_PREAMBLE_COUNT		0xAFCE
83 #define VEND1_TX_PREAMBLE_COUNT		0xAFCF
84 #define VEND1_RX_IPG_LENGTH		0xAFD0
85 #define VEND1_TX_IPG_LENGTH		0xAFD1
86 #define COUNTER_EN			BIT(15)
87 
88 #define VEND1_PTP_CONFIG		0x1102
89 #define EXT_TRG_EDGE			BIT(1)
90 
91 #define CLK_RATE_ADJ_LD			BIT(15)
92 #define CLK_RATE_ADJ_DIR		BIT(14)
93 
94 #define VEND1_RX_TS_INSRT_CTRL		0x114D
95 #define TJA1103_RX_TS_INSRT_MODE2	0x02
96 
97 #define VEND1_EGR_RING_DATA_0		0x114E
98 #define VEND1_EGR_RING_CTRL		0x1154
99 
100 #define RING_DATA_0_TS_VALID		BIT(15)
101 
102 #define RING_DONE			BIT(0)
103 
104 #define TS_SEC_MASK			GENMASK(1, 0)
105 
106 #define VEND1_PORT_FUNC_ENABLES		0x8048
107 #define PTP_ENABLE			BIT(3)
108 
109 #define VEND1_PORT_PTP_CONTROL		0x9000
110 #define PORT_PTP_CONTROL_BYPASS		BIT(11)
111 
112 #define PTP_CLK_PERIOD_100BT1		15ULL
113 
114 #define EVENT_MSG_FILT_ALL		0x0F
115 #define EVENT_MSG_FILT_NONE		0x00
116 
117 #define VEND1_GPIO_FUNC_CONFIG_BASE	0x2C40
118 #define GPIO_FUNC_EN			BIT(15)
119 #define GPIO_FUNC_PTP			BIT(6)
120 #define GPIO_SIGNAL_PTP_TRIGGER		0x01
121 #define GPIO_SIGNAL_PPS_OUT		0x12
122 #define GPIO_DISABLE			0
123 #define GPIO_PPS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
124 	GPIO_SIGNAL_PPS_OUT)
125 #define GPIO_EXTTS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
126 	GPIO_SIGNAL_PTP_TRIGGER)
127 
128 #define RGMII_PERIOD_PS			8000U
129 #define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
130 #define MIN_ID_PS			1644U
131 #define MAX_ID_PS			2260U
132 #define DEFAULT_ID_PS			2000U
133 
134 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
135 	(ppb) * (ptp_clk_period), NSEC_PER_SEC)
136 
137 #define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)
138 
/* Per-skb state kept in skb->cb while a PTP frame waits to be matched with
 * a hardware timestamp.
 */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* parsed PTP header inside the skb */
	unsigned int type;		/* packet type as passed to the rx/tx timestamp hooks */
};
143 
/* Build a struct nxp_c45_reg_field compound literal for static tables. */
#define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size)	\
	((struct nxp_c45_reg_field) {			\
		.reg = _reg,				\
		.devad =  _devad,			\
		.offset = _offset,			\
		.size = _size,				\
	})

/* Describes a contiguous bit field inside an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* bit position of the field's LSB */
	u8 size;	/* field width in bits; 1 for single flag bits */
};
158 
/* Hardware timestamp as read back from the PHY, together with the PTP
 * header fields used to match it against a queued skb.
 */
struct nxp_c45_hwts {
	u32	nsec;		/* nanoseconds part of the timestamp */
	u32	sec;		/* seconds part; only the low bits are valid (see TS_SEC_MASK) */
	u8	domain_number;	/* PTP domainNumber of the timestamped frame */
	u16	sequence_id;	/* PTP sequenceId of the timestamped frame */
	u8	msg_type;	/* PTP messageType of the timestamped frame */
};
166 
/* Register addresses and bit fields that differ between supported PHY
 * variants; filled in by the per-PHY nxp_c45_phy_data.
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;
	struct nxp_c45_reg_field ltc_write;
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;
	struct nxp_c45_reg_field sec_4_2;
	struct nxp_c45_reg_field nsec_15_0;
	struct nxp_c45_reg_field nsec_29_16;

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
212 
/* One ethtool hardware statistic: its name and the counter's register field. */
struct nxp_c45_phy_stats {
	const char	*name;
	const struct nxp_c45_reg_field counter;
};
217 
/* Per-PHY-model data, reached through phydev->drv->driver_data. */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;	/* variant register layout */
	const struct nxp_c45_phy_stats *stats;	/* variant-specific counters */
	int n_stats;				/* number of entries in @stats */
	u8 ptp_clk_period;			/* PTP clock period, used by adjfine */
	void (*counters_enable)(struct phy_device *phydev);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
};
227 
/* Driver private state, one instance per PHY device. */
struct nxp_c45_phy {
	const struct nxp_c45_phy_data *phy_data;
	struct phy_device *phydev;
	struct mii_timestamper mii_ts;	/* rx/tx timestamping hooks */
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info caps;
	struct sk_buff_head tx_queue;	/* skbs awaiting an egress timestamp */
	struct sk_buff_head rx_queue;	/* skbs awaiting an ingress timestamp */
	/* used to access the PTP registers atomic */
	struct mutex ptp_lock;
	int hwts_tx;			/* HWTSTAMP_TX_* mode from hwtstamp ioctl */
	int hwts_rx;			/* non-zero when RX timestamping is on */
	u32 tx_delay;
	u32 rx_delay;
	struct timespec64 extts_ts;	/* last external trigger timestamp seen */
	int extts_index;		/* pin index reported with extts events */
	bool extts;			/* external timestamping armed */
};
246 
/* Return the per-PHY-model data attached to the matching driver entry. */
static const
struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
{
	return phydev->drv->driver_data;
}
252 
253 static const
254 struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
255 {
256 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
257 
258 	return phy_data->regmap;
259 }
260 
261 static int nxp_c45_read_reg_field(struct phy_device *phydev,
262 				  const struct nxp_c45_reg_field *reg_field)
263 {
264 	u16 mask;
265 	int ret;
266 
267 	if (reg_field->size == 0) {
268 		phydev_err(phydev, "Trying to read a reg field of size 0.\n");
269 		return -EINVAL;
270 	}
271 
272 	ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
273 	if (ret < 0)
274 		return ret;
275 
276 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
277 		GENMASK(reg_field->offset + reg_field->size - 1,
278 			reg_field->offset);
279 	ret &= mask;
280 	ret >>= reg_field->offset;
281 
282 	return ret;
283 }
284 
285 static int nxp_c45_write_reg_field(struct phy_device *phydev,
286 				   const struct nxp_c45_reg_field *reg_field,
287 				   u16 val)
288 {
289 	u16 mask;
290 	u16 set;
291 
292 	if (reg_field->size == 0) {
293 		phydev_err(phydev, "Trying to write a reg field of size 0.\n");
294 		return -EINVAL;
295 	}
296 
297 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
298 		GENMASK(reg_field->offset + reg_field->size - 1,
299 			reg_field->offset);
300 	set = val << reg_field->offset;
301 
302 	return phy_modify_mmd_changed(phydev, reg_field->devad,
303 				      reg_field->reg, mask, set);
304 }
305 
/* Set a single-bit register field to 1; wider fields must go through
 * nxp_c45_write_reg_field() with an explicit value.
 */
static int nxp_c45_set_reg_field(struct phy_device *phydev,
				 const struct nxp_c45_reg_field *reg_field)
{
	if (reg_field->size != 1) {
		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
		return -EINVAL;
	}

	return nxp_c45_write_reg_field(phydev, reg_field, 1);
}
316 
317 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
318 				   const struct nxp_c45_reg_field *reg_field)
319 {
320 	if (reg_field->size != 1) {
321 		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
322 		return -EINVAL;
323 	}
324 
325 	return nxp_c45_write_reg_field(phydev, reg_field, 0);
326 }
327 
/* True when no PHY interrupt line is available: egress timestamps must then
 * be polled from the PTP aux worker instead of the IRQ handler.
 */
static bool nxp_c45_poll_txts(struct phy_device *phydev)
{
	return phydev->irq <= 0;
}
332 
333 static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
334 				   struct timespec64 *ts,
335 				   struct ptp_system_timestamp *sts)
336 {
337 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
338 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
339 
340 	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
341 	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
342 				   regmap->vend1_ltc_rd_nsec_0);
343 	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
344 				    regmap->vend1_ltc_rd_nsec_1) << 16;
345 	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
346 				  regmap->vend1_ltc_rd_sec_0);
347 	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
348 				   regmap->vend1_ltc_rd_sec_1) << 16;
349 
350 	return 0;
351 }
352 
353 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
354 				  struct timespec64 *ts,
355 				  struct ptp_system_timestamp *sts)
356 {
357 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
358 
359 	mutex_lock(&priv->ptp_lock);
360 	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
361 	mutex_unlock(&priv->ptp_lock);
362 
363 	return 0;
364 }
365 
/* Program the local time counter (LTC). Caller must hold priv->ptp_lock.
 * The four 16-bit halves are staged first; the final ltc_write strobe
 * commits them to the counter atomically.
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
		      ts->tv_sec >> 16);
	/* Commit the staged value into the running counter. */
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);

	return 0;
}
384 
385 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
386 				 const struct timespec64 *ts)
387 {
388 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
389 
390 	mutex_lock(&priv->ptp_lock);
391 	_nxp_c45_ptp_settime64(ptp, ts);
392 	mutex_unlock(&priv->ptp_lock);
393 
394 	return 0;
395 }
396 
/* Adjust the LTC rate. The requested frequency offset is converted into a
 * sub-nanosecond increment; the magnitude is split across two 16-bit
 * registers, and the direction plus load strobe live in the high register.
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	/* Hardware takes magnitude + direction, not a signed value. */
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	/* Low 16 bits of the increment first ... */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	/* ... then the high bits, with direction and the load strobe that
	 * makes the new rate take effect.
	 */
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
427 
428 static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
429 {
430 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
431 	struct timespec64 now, then;
432 
433 	mutex_lock(&priv->ptp_lock);
434 	then = ns_to_timespec64(delta);
435 	_nxp_c45_ptp_gettimex64(ptp, &now, NULL);
436 	now = timespec64_add(now, then);
437 	_nxp_c45_ptp_settime64(ptp, &now);
438 	mutex_unlock(&priv->ptp_lock);
439 
440 	return 0;
441 }
442 
443 static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
444 				   struct nxp_c45_hwts *hwts)
445 {
446 	ts->tv_nsec = hwts->nsec;
447 	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
448 		ts->tv_sec -= TS_SEC_MASK + 1;
449 	ts->tv_sec &= ~TS_SEC_MASK;
450 	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
451 }
452 
453 static bool nxp_c45_match_ts(struct ptp_header *header,
454 			     struct nxp_c45_hwts *hwts,
455 			     unsigned int type)
456 {
457 	return ntohs(header->sequence_id) == hwts->sequence_id &&
458 	       ptp_get_msgtype(header, type) == hwts->msg_type &&
459 	       header->domain_number  == hwts->domain_number;
460 }
461 
/* Read the latest external trigger timestamp from the four 16-bit data
 * registers and acknowledge it so the hardware can capture the next one.
 */
static void nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       regmap->vend1_ext_trg_data_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_3) << 16;
	/* Ack the entry so the ring can advance. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_ext_trg_ctrl, RING_DONE);
}
478 
/* Pop one entry from the egress timestamp FIFO into @hwts.
 * Takes priv->ptp_lock. Returns false when the FIFO had no valid entry.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	/* Advance the ring, then check whether the exposed entry is valid. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	/* Gather the matching keys and the timestamp halves. */
	hwts->domain_number =
		nxp_c45_read_reg_field(phydev, &regmap->domain_number);
	hwts->msg_type =
		nxp_c45_read_reg_field(phydev, &regmap->msg_type);
	hwts->sequence_id =
		nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
	hwts->nsec =
		nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
	hwts->nsec |=
		nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
	hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
	hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;

nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
512 
/* Match a hardware egress timestamp against the queued TX skbs and complete
 * the timestamp on the matching one; warn if no skb matches.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	/* Walk under the queue lock; unlink the match, then work unlocked. */
	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* The hardware reports a partial timestamp; combine it with
		 * the current LTC time to recover the full value.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
547 
/* PTP aux worker: drains pending TX timestamps (polling mode only), stamps
 * and delivers queued RX skbs, and polls for external trigger events.
 * Returns 1 to reschedule immediately, -1 to wait for the next kick.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool txts_valid;
	u32 ts_raw;

	/* Without a PHY IRQ, egress timestamps are fetched here. */
	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		txts_valid = nxp_c45_get_hwtxts(priv, &hwts);
		if (unlikely(!txts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		/* The RX timestamp was inserted into the PTP header's
		 * reserved2 field: 2 bits of seconds, 30 bits of nanoseconds.
		 */
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		/* Poll for a new external trigger: report only when the
		 * captured timestamp changed since the last pass.
		 */
		nxp_c45_get_extts(priv, &ts);
		if (timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
598 
599 static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
600 				int pin, u16 pin_cfg)
601 {
602 	struct phy_device *phydev = priv->phydev;
603 
604 	phy_write_mmd(phydev, MDIO_MMD_VEND1,
605 		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
606 }
607 
/* Enable/disable the periodic output (PPS) on the requested pin.
 * Only a fixed 1 s period is supported; phase may be 0 or half a second,
 * implemented by flipping the output polarity.
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	int pin;

	/* PTP_PEROUT_PHASE is the only supported flag. */
	if (perout->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_enable);
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_polarity);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		/* A 500 ms phase is realized by inverting the PPS polarity. */
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		if (perout->phase.nsec == 0)
			nxp_c45_clear_reg_field(priv->phydev,
						&regmap->pps_polarity);
		else
			nxp_c45_set_reg_field(priv->phydev,
					      &regmap->pps_polarity);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);

	return 0;
}
668 
/* Enable/disable external timestamping on the requested pin.
 * Only a single edge (rising or falling) can be armed at a time; the aux
 * worker then polls for captured triggers.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			      PTP_RISING_EDGE |
			      PTP_FALLING_EDGE |
			      PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE))
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	/* EXT_TRG_EDGE cleared selects the rising edge, set selects falling. */
	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Kick the aux worker, which polls for captured triggers. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
711 
712 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
713 			      struct ptp_clock_request *req, int on)
714 {
715 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
716 
717 	switch (req->type) {
718 	case PTP_CLK_REQ_EXTTS:
719 		return nxp_c45_extts_enable(priv, &req->extts, on);
720 	case PTP_CLK_REQ_PEROUT:
721 		return nxp_c45_perout_enable(priv, &req->perout, on);
722 	default:
723 		return -EOPNOTSUPP;
724 	}
725 }
726 
/* The 12 GPIO pins exposed as PTP pins; functions are assigned at runtime
 * through nxp_c45_ptp_verify_pin().
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
741 
742 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
743 				  enum ptp_pin_function func, unsigned int chan)
744 {
745 	if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
746 		return -EINVAL;
747 
748 	switch (func) {
749 	case PTP_PF_NONE:
750 	case PTP_PF_PEROUT:
751 	case PTP_PF_EXTTS:
752 		break;
753 	default:
754 		return -EOPNOTSUPP;
755 	}
756 
757 	return 0;
758 }
759 
/* Fill in the PTP clock capabilities and register the PHC.
 * Returns 0 on success or a negative errno.
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	/* ptp_clock_register() may also return NULL (no clock created). */
	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
790 
/* mii_ts txtstamp hook: queue outgoing PTP frames for an egress timestamp
 * when TX timestamping is on, otherwise drop the clone.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		/* Without a PHY IRQ the aux worker must poll for the TS. */
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
812 
813 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
814 			     struct sk_buff *skb, int type)
815 {
816 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
817 						mii_ts);
818 	struct ptp_header *header = ptp_parse_header(skb, type);
819 
820 	if (!header)
821 		return false;
822 
823 	if (!priv->hwts_rx)
824 		return false;
825 
826 	NXP_C45_SKB_CB(skb)->header = header;
827 	skb_queue_tail(&priv->rx_queue, skb);
828 	ptp_schedule_worker(priv->ptp_clock, 0);
829 
830 	return true;
831 }
832 
/* SIOCSHWTSTAMP handler: apply the requested TX/RX timestamping modes,
 * program the event message filter, and (when a PHY IRQ is available)
 * enable/disable the egress timestamp interrupt.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct ifreq *ifreq)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	const struct nxp_c45_phy_data *data;
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	data = nxp_c45_get_data(phydev);
	priv->hwts_tx = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/* All L2 event filters are upgraded to the full event set. */
		priv->hwts_rx = 1;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_ALL);
		data->ptp_enable(phydev, true);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_NONE);
		data->ptp_enable(phydev, false);
	}

	/* In polling mode the egress TS IRQ is never used. */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
	else
		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);

nxp_c45_no_ptp_irq:
	return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
888 
889 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
890 			   struct ethtool_ts_info *ts_info)
891 {
892 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
893 						mii_ts);
894 
895 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
896 			SOF_TIMESTAMPING_RX_HARDWARE |
897 			SOF_TIMESTAMPING_RAW_HARDWARE;
898 	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
899 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
900 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
901 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
902 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
903 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
904 
905 	return 0;
906 }
907 
908 static const struct nxp_c45_phy_stats common_hw_stats[] = {
909 	{ "phy_link_status_drop_cnt",
910 		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
911 	{ "phy_link_availability_drop_cnt",
912 		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
913 	{ "phy_link_loss_cnt",
914 		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
915 	{ "phy_link_failure_cnt",
916 		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
917 	{ "phy_symbol_error_cnt",
918 		NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
919 };
920 
921 static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
922 	{ "rx_preamble_count",
923 		NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
924 	{ "tx_preamble_count",
925 		NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
926 	{ "rx_ipg_length",
927 		NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
928 	{ "tx_ipg_length",
929 		NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
930 };
931 
932 static int nxp_c45_get_sset_count(struct phy_device *phydev)
933 {
934 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
935 
936 	return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
937 }
938 
939 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
940 {
941 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
942 	size_t count = nxp_c45_get_sset_count(phydev);
943 	size_t idx;
944 	size_t i;
945 
946 	for (i = 0; i < count; i++) {
947 		if (i < ARRAY_SIZE(common_hw_stats)) {
948 			strscpy(data + i * ETH_GSTRING_LEN,
949 				common_hw_stats[i].name, ETH_GSTRING_LEN);
950 			continue;
951 		}
952 		idx = i - ARRAY_SIZE(common_hw_stats);
953 		strscpy(data + i * ETH_GSTRING_LEN,
954 			phy_data->stats[idx].name, ETH_GSTRING_LEN);
955 	}
956 }
957 
958 static void nxp_c45_get_stats(struct phy_device *phydev,
959 			      struct ethtool_stats *stats, u64 *data)
960 {
961 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
962 	size_t count = nxp_c45_get_sset_count(phydev);
963 	const struct nxp_c45_reg_field *reg_field;
964 	size_t idx;
965 	size_t i;
966 	int ret;
967 
968 	for (i = 0; i < count; i++) {
969 		if (i < ARRAY_SIZE(common_hw_stats)) {
970 			reg_field = &common_hw_stats[i].counter;
971 		} else {
972 			idx = i - ARRAY_SIZE(common_hw_stats);
973 			reg_field = &phy_data->stats[idx].counter;
974 		}
975 
976 		ret = nxp_c45_read_reg_field(phydev, reg_field);
977 		if (ret < 0)
978 			data[i] = U64_MAX;
979 		else
980 			data[i] = ret;
981 	}
982 }
983 
/* Unlock the configuration registers, then enable the port, PHY and port
 * infrastructure blocks. Always returns 0.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	/* Give the device time to apply the configuration enable. */
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
1000 
/* Kick the PHY state machine into operation after configuration. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1006 
1007 static int nxp_c45_config_intr(struct phy_device *phydev)
1008 {
1009 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1010 		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1011 					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1012 	else
1013 		return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1014 					  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1015 }
1016 
/* Threaded IRQ handler: service link events and drain the egress
 * timestamp FIFO.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		/* Ack the link event, then let phylib process it. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	/* There is no need for ACK.
	 * The irq signal will be asserted until the EGR TS FIFO will be
	 * emptied.
	 */
	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		while (nxp_c45_get_hwtxts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	return ret;
}
1047 
/* Trigger a device soft reset and wait for the RESET bit to clear.
 *
 * Returns 0 on success, or a negative errno from the write or from the
 * poll helper (e.g. -ETIMEDOUT). The poll samples every 20 ms for up to
 * 240 ms; `ret` doubles as the polled register value inside the macro.
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1062 
1063 static int nxp_c45_cable_test_start(struct phy_device *phydev)
1064 {
1065 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1066 
1067 	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1068 				CABLE_TEST_ENABLE | CABLE_TEST_START);
1069 }
1070 
1071 static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
1072 					 bool *finished)
1073 {
1074 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1075 	int ret;
1076 	u8 cable_test_result;
1077 
1078 	ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
1079 	if (!ret) {
1080 		*finished = false;
1081 		return 0;
1082 	}
1083 
1084 	*finished = true;
1085 	cable_test_result = nxp_c45_read_reg_field(phydev,
1086 						   &regmap->cable_test_result);
1087 
1088 	switch (cable_test_result) {
1089 	case CABLE_TEST_OK:
1090 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1091 					ETHTOOL_A_CABLE_RESULT_CODE_OK);
1092 		break;
1093 	case CABLE_TEST_SHORTED:
1094 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1095 					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
1096 		break;
1097 	case CABLE_TEST_OPEN:
1098 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1099 					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
1100 		break;
1101 	default:
1102 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1103 					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
1104 	}
1105 
1106 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1107 			   CABLE_TEST_ENABLE);
1108 
1109 	return nxp_c45_start_op(phydev);
1110 }
1111 
1112 static int nxp_c45_get_sqi(struct phy_device *phydev)
1113 {
1114 	int reg;
1115 
1116 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1117 	if (!(reg & SQI_VALID))
1118 		return -EINVAL;
1119 
1120 	reg &= SQI_MASK;
1121 
1122 	return reg;
1123 }
1124 
/* ethtool SQI max hook: the SQI field is 3 bits wide, so the upper
 * bound is MAX_SQI (7).
 */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1129 
1130 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1131 {
1132 	if (delay < MIN_ID_PS) {
1133 		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1134 		return -EINVAL;
1135 	}
1136 
1137 	if (delay > MAX_ID_PS) {
1138 		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1139 		return -EINVAL;
1140 	}
1141 
1142 	return 0;
1143 }
1144 
1145 static void nxp_c45_counters_enable(struct phy_device *phydev)
1146 {
1147 	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
1148 
1149 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
1150 			 COUNTER_EN);
1151 
1152 	data->counters_enable(phydev);
1153 }
1154 
/* Common PTP bring-up: program the family's PTP clock period, clear the
 * LTC lock-control field (presumably unlocking local-time-counter
 * writes — confirm against the datasheet), then run the family-specific
 * PTP init hook.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1166 
1167 static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1168 {
1169 	/* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1170 	 * To avoid floating point operations we'll multiply by 10
1171 	 * and get 1 decimal point precision.
1172 	 */
1173 	phase_offset_raw *= 10;
1174 	phase_offset_raw -= 738;
1175 	return div_u64(phase_offset_raw, 9);
1176 }
1177 
1178 static void nxp_c45_disable_delays(struct phy_device *phydev)
1179 {
1180 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
1181 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
1182 }
1183 
1184 static void nxp_c45_set_delays(struct phy_device *phydev)
1185 {
1186 	struct nxp_c45_phy *priv = phydev->priv;
1187 	u64 tx_delay = priv->tx_delay;
1188 	u64 rx_delay = priv->rx_delay;
1189 	u64 degree;
1190 
1191 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1192 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1193 		degree = div_u64(tx_delay, PS_PER_DEGREE);
1194 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1195 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1196 	} else {
1197 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1198 				   ID_ENABLE);
1199 	}
1200 
1201 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1202 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1203 		degree = div_u64(rx_delay, PS_PER_DEGREE);
1204 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1205 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1206 	} else {
1207 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1208 				   ID_ENABLE);
1209 	}
1210 }
1211 
1212 static int nxp_c45_get_delays(struct phy_device *phydev)
1213 {
1214 	struct nxp_c45_phy *priv = phydev->priv;
1215 	int ret;
1216 
1217 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1218 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1219 		ret = device_property_read_u32(&phydev->mdio.dev,
1220 					       "tx-internal-delay-ps",
1221 					       &priv->tx_delay);
1222 		if (ret)
1223 			priv->tx_delay = DEFAULT_ID_PS;
1224 
1225 		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1226 		if (ret) {
1227 			phydev_err(phydev,
1228 				   "tx-internal-delay-ps invalid value\n");
1229 			return ret;
1230 		}
1231 	}
1232 
1233 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1234 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1235 		ret = device_property_read_u32(&phydev->mdio.dev,
1236 					       "rx-internal-delay-ps",
1237 					       &priv->rx_delay);
1238 		if (ret)
1239 			priv->rx_delay = DEFAULT_ID_PS;
1240 
1241 		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1242 		if (ret) {
1243 			phydev_err(phydev,
1244 				   "rx-internal-delay-ps invalid value\n");
1245 			return ret;
1246 		}
1247 	}
1248 
1249 	return 0;
1250 }
1251 
/* Configure the MII-side interface (RGMII/MII/rev-MII/RMII/SGMII) after
 * checking the corresponding bit in the PHY's abilities register.
 *
 * Returns 0 on success, -EINVAL when the requested mode is unsupported
 * by this PHY or unknown to the driver.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	int ret;

	/* `ret` first holds the abilities bitmap, later an error code. */
	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Plain RGMII: the MAC/board provides the delays. */
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Internal delays requested: read/validate them from
		 * firmware properties, then program the delay blocks.
		 */
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RMII);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		/* Nothing to program for an internal interface. */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1324 
/* phylib .config_init hook: full device bring-up sequence.
 *
 * Order matters: config access must be enabled before any other
 * register write, the mode/counters/PTP setup happens while still in
 * the configuration phase, and nxp_c45_start_op() leaves it last.
 *
 * Returns 0 on success or a negative errno.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* 100BASE-T1 link is fixed; there is no autonegotiation. */
	phydev->autoneg = AUTONEG_DISABLE;

	nxp_c45_counters_enable(phydev);
	nxp_c45_ptp_init(phydev);

	return nxp_c45_start_op(phydev);
}
1355 
/* phylib .get_features hook: advertise twisted-pair and MII link modes
 * on top of whatever the PMA abilities registers report.
 */
static int nxp_c45_get_features(struct phy_device *phydev)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);

	return genphy_c45_pma_read_abilities(phydev);
}
1363 
1364 static int nxp_c45_probe(struct phy_device *phydev)
1365 {
1366 	struct nxp_c45_phy *priv;
1367 	int ptp_ability;
1368 	int ret = 0;
1369 
1370 	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
1371 	if (!priv)
1372 		return -ENOMEM;
1373 
1374 	skb_queue_head_init(&priv->tx_queue);
1375 	skb_queue_head_init(&priv->rx_queue);
1376 
1377 	priv->phydev = phydev;
1378 
1379 	phydev->priv = priv;
1380 
1381 	mutex_init(&priv->ptp_lock);
1382 
1383 	ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1384 				   VEND1_PORT_ABILITIES);
1385 	ptp_ability = !!(ptp_ability & PTP_ABILITY);
1386 	if (!ptp_ability) {
1387 		phydev_dbg(phydev, "the phy does not support PTP");
1388 		goto no_ptp_support;
1389 	}
1390 
1391 	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
1392 	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
1393 		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
1394 		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
1395 		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
1396 		priv->mii_ts.ts_info = nxp_c45_ts_info;
1397 		phydev->mii_ts = &priv->mii_ts;
1398 		ret = nxp_c45_init_ptp_clock(priv);
1399 	} else {
1400 		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
1401 	}
1402 
1403 no_ptp_support:
1404 
1405 	return ret;
1406 }
1407 
/* phylib .remove hook: unregister the PTP clock (if probe created one)
 * and drop any timestamping skbs still queued. priv itself is
 * devm-allocated and freed by the driver core.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
}
1418 
1419 static void tja1103_counters_enable(struct phy_device *phydev)
1420 {
1421 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
1422 			 COUNTER_EN);
1423 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
1424 			 COUNTER_EN);
1425 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
1426 			 COUNTER_EN);
1427 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
1428 			 COUNTER_EN);
1429 }
1430 
/* TJA1103-specific PTP init: select RX timestamp insertion mode 2 and
 * enable the PTP port function.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1438 
1439 static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
1440 {
1441 	if (enable)
1442 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1443 				   VEND1_PORT_PTP_CONTROL,
1444 				   PORT_PTP_CONTROL_BYPASS);
1445 	else
1446 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1447 				 VEND1_PORT_PTP_CONTROL,
1448 				 PORT_PTP_CONTROL_BYPASS);
1449 }
1450 
/* TJA1103 register map: vendor-space addresses and bit fields for the
 * PTP clock/LTC, egress timestamp, external trigger and cable-test
 * blocks, consumed by the family-generic code via nxp_c45_get_regmap().
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period	= 0x1104,
	.vend1_event_msg_filt	= 0x1148,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0	= 0x1106,
	.vend1_ltc_wr_nsec_1	= 0x1107,
	.vend1_ltc_wr_sec_0	= 0x1108,
	.vend1_ltc_wr_sec_1	= 0x1109,
	.vend1_ltc_rd_nsec_0	= 0x110A,
	.vend1_ltc_rd_nsec_1	= 0x110B,
	.vend1_ltc_rd_sec_0	= 0x110C,
	.vend1_ltc_rd_sec_1	= 0x110D,
	.vend1_rate_adj_subns_0	= 0x110F,
	.vend1_rate_adj_subns_1	= 0x1110,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1121,
	.vend1_ext_trg_data_1	= 0x1122,
	.vend1_ext_trg_data_2	= 0x1123,
	.vend1_ext_trg_data_3	= 0x1124,
	.vend1_ext_trg_ctrl	= 0x1126,
	.cable_test		= 0x8330,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1503 
/* TJA1103 family descriptor: ties the register map, hardware statistics
 * table and family-specific hooks together; fetched at runtime through
 * nxp_c45_get_data() from the driver_data pointer.
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.counters_enable = tja1103_counters_enable,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
};
1513 
/* phylib driver table: one entry per supported PHY model (currently
 * only the TJA1103).
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= nxp_c45_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
};
1540 
/* Register the driver table with phylib and generate module init/exit. */
module_phy_driver(nxp_c45_driver);
1542 
/* MDIO device ID table for module autoloading. */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);
1549 
/* Module metadata. */
MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");
1553