1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3  * Copyright (C) 2021 NXP
4  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/phy.h>
14 #include <linux/processor.h>
15 #include <linux/property.h>
16 #include <linux/ptp_classify.h>
17 #include <linux/ptp_clock_kernel.h>
18 #include <linux/net_tstamp.h>
19 
20 #define PHY_ID_TJA_1103			0x001BB010
21 #define PHY_ID_TJA_1120			0x001BB031
22 
23 #define VEND1_DEVICE_CONTROL		0x0040
24 #define DEVICE_CONTROL_RESET		BIT(15)
25 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
26 #define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)
27 
28 #define VEND1_DEVICE_CONFIG		0x0048
29 
30 #define TJA1120_VEND1_EXT_TS_MODE	0x1012
31 
32 #define TJA1120_EGRESS_TS_DATA_S	0x9060
33 #define TJA1120_EGRESS_TS_END		0x9067
34 #define TJA1120_TS_VALID		BIT(0)
35 #define TJA1120_MORE_TS			BIT(15)
36 
37 #define VEND1_PHY_IRQ_ACK		0x80A0
38 #define VEND1_PHY_IRQ_EN		0x80A1
39 #define VEND1_PHY_IRQ_STATUS		0x80A2
40 #define PHY_IRQ_LINK_EVENT		BIT(1)
41 
42 #define VEND1_PHY_CONTROL		0x8100
43 #define PHY_CONFIG_EN			BIT(14)
44 #define PHY_START_OP			BIT(0)
45 
46 #define VEND1_PHY_CONFIG		0x8108
47 #define PHY_CONFIG_AUTO			BIT(0)
48 
49 #define VEND1_SIGNAL_QUALITY		0x8320
50 #define SQI_VALID			BIT(14)
51 #define SQI_MASK			GENMASK(2, 0)
52 #define MAX_SQI				SQI_MASK
53 
54 #define CABLE_TEST_ENABLE		BIT(15)
55 #define CABLE_TEST_START		BIT(14)
56 #define CABLE_TEST_OK			0x00
57 #define CABLE_TEST_SHORTED		0x01
58 #define CABLE_TEST_OPEN			0x02
59 #define CABLE_TEST_UNKNOWN		0x07
60 
61 #define VEND1_PORT_CONTROL		0x8040
62 #define PORT_CONTROL_EN			BIT(14)
63 
64 #define VEND1_PORT_ABILITIES		0x8046
65 #define PTP_ABILITY			BIT(3)
66 
67 #define VEND1_PORT_FUNC_IRQ_EN		0x807A
68 #define PTP_IRQS			BIT(3)
69 
70 #define VEND1_PTP_IRQ_ACK		0x9008
71 #define EGR_TS_IRQ			BIT(1)
72 
73 #define VEND1_PORT_INFRA_CONTROL	0xAC00
74 #define PORT_INFRA_CONTROL_EN		BIT(14)
75 
76 #define VEND1_RXID			0xAFCC
77 #define VEND1_TXID			0xAFCD
78 #define ID_ENABLE			BIT(15)
79 
80 #define VEND1_ABILITIES			0xAFC4
81 #define RGMII_ID_ABILITY		BIT(15)
82 #define RGMII_ABILITY			BIT(14)
83 #define RMII_ABILITY			BIT(10)
84 #define REVMII_ABILITY			BIT(9)
85 #define MII_ABILITY			BIT(8)
86 #define SGMII_ABILITY			BIT(0)
87 
88 #define VEND1_MII_BASIC_CONFIG		0xAFC6
89 #define MII_BASIC_CONFIG_REV		BIT(4)
90 #define MII_BASIC_CONFIG_SGMII		0x9
91 #define MII_BASIC_CONFIG_RGMII		0x7
92 #define MII_BASIC_CONFIG_RMII		0x5
93 #define MII_BASIC_CONFIG_MII		0x4
94 
95 #define VEND1_SYMBOL_ERROR_CNT_XTD	0x8351
96 #define EXTENDED_CNT_EN			BIT(15)
97 #define VEND1_MONITOR_STATUS		0xAC80
98 #define MONITOR_RESET			BIT(15)
99 #define VEND1_MONITOR_CONFIG		0xAC86
100 #define LOST_FRAMES_CNT_EN		BIT(9)
101 #define ALL_FRAMES_CNT_EN		BIT(8)
102 
103 #define VEND1_SYMBOL_ERROR_COUNTER	0x8350
104 #define VEND1_LINK_DROP_COUNTER		0x8352
105 #define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
106 #define VEND1_RX_PREAMBLE_COUNT		0xAFCE
107 #define VEND1_TX_PREAMBLE_COUNT		0xAFCF
108 #define VEND1_RX_IPG_LENGTH		0xAFD0
109 #define VEND1_TX_IPG_LENGTH		0xAFD1
110 #define COUNTER_EN			BIT(15)
111 
112 #define VEND1_PTP_CONFIG		0x1102
113 #define EXT_TRG_EDGE			BIT(1)
114 
115 #define TJA1120_SYNC_TRIG_FILTER	0x1010
116 #define PTP_TRIG_RISE_TS		BIT(3)
117 #define PTP_TRIG_FALLING_TS		BIT(2)
118 
119 #define CLK_RATE_ADJ_LD			BIT(15)
120 #define CLK_RATE_ADJ_DIR		BIT(14)
121 
122 #define VEND1_RX_TS_INSRT_CTRL		0x114D
123 #define TJA1103_RX_TS_INSRT_MODE2	0x02
124 
125 #define TJA1120_RX_TS_INSRT_CTRL	0x9012
126 #define TJA1120_RX_TS_INSRT_EN		BIT(15)
127 #define TJA1120_TS_INSRT_MODE		BIT(4)
128 
129 #define VEND1_EGR_RING_DATA_0		0x114E
130 #define VEND1_EGR_RING_CTRL		0x1154
131 
132 #define RING_DATA_0_TS_VALID		BIT(15)
133 
134 #define RING_DONE			BIT(0)
135 
136 #define TS_SEC_MASK			GENMASK(1, 0)
137 
138 #define VEND1_PORT_FUNC_ENABLES		0x8048
139 #define PTP_ENABLE			BIT(3)
140 
141 #define VEND1_PORT_PTP_CONTROL		0x9000
142 #define PORT_PTP_CONTROL_BYPASS		BIT(11)
143 
144 #define PTP_CLK_PERIOD_100BT1		15ULL
145 #define PTP_CLK_PERIOD_1000BT1		8ULL
146 
147 #define EVENT_MSG_FILT_ALL		0x0F
148 #define EVENT_MSG_FILT_NONE		0x00
149 
150 #define VEND1_GPIO_FUNC_CONFIG_BASE	0x2C40
151 #define GPIO_FUNC_EN			BIT(15)
152 #define GPIO_FUNC_PTP			BIT(6)
153 #define GPIO_SIGNAL_PTP_TRIGGER		0x01
154 #define GPIO_SIGNAL_PPS_OUT		0x12
155 #define GPIO_DISABLE			0
156 #define GPIO_PPS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
157 	GPIO_SIGNAL_PPS_OUT)
158 #define GPIO_EXTTS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
159 	GPIO_SIGNAL_PTP_TRIGGER)
160 
161 #define RGMII_PERIOD_PS			8000U
162 #define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
163 #define MIN_ID_PS			1644U
164 #define MAX_ID_PS			2260U
165 #define DEFAULT_ID_PS			2000U
166 
167 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
168 	(ppb) * (ptp_clk_period), NSEC_PER_SEC)
169 
170 #define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)
171 
struct nxp_c45_phy;

/* Driver-private per-skb state stored in skb->cb (via NXP_C45_SKB_CB()). */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* PTP header parsed out of the skb */
	unsigned int type;		/* packet type as passed to the mii_ts hooks */
};
178 
/* Build a struct nxp_c45_reg_field compound literal for static tables. */
#define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size)	\
	((struct nxp_c45_reg_field) {			\
		.reg = _reg,				\
		.devad =  _devad,			\
		.offset = _offset,			\
		.size = _size,				\
	})
186 
/* Location of a bit field inside an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* bit position of the field's LSB */
	u8 size;	/* field width in bits */
};
193 
/* Partial hardware timestamp read back from the PHY, plus the PTP frame
 * identity fields used to match it against a queued skb.
 */
struct nxp_c45_hwts {
	u32	nsec;		/* nanoseconds part */
	u32	sec;		/* seconds part; only a few LSBs are provided by HW */
	u8	domain_number;	/* PTP domainNumber of the timestamped frame */
	u16	sequence_id;	/* PTP sequenceId of the timestamped frame */
	u8	msg_type;	/* PTP message type of the timestamped frame */
};
201 
/* Chip-specific register layout: addresses and bit fields that differ
 * between TJA1103 and TJA1120 are abstracted here so the common code can
 * stay device-agnostic.
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;	/* triggers an LTC read snapshot */
	struct nxp_c45_reg_field ltc_write;	/* latches the staged LTC value */
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;	/* egress TS seconds bits 1:0 */
	struct nxp_c45_reg_field sec_4_2;	/* egress TS seconds bits 4:2 */
	struct nxp_c45_reg_field nsec_15_0;	/* egress TS nanoseconds bits 15:0 */
	struct nxp_c45_reg_field nsec_29_16;	/* egress TS nanoseconds bits 29:16 */

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
247 
/* One ethtool statistics entry: counter name and its register field. */
struct nxp_c45_phy_stats {
	const char	*name;
	const struct nxp_c45_reg_field counter;
};
252 
/* Per-chip constants and operations, selected via phy_driver::driver_data
 * (see nxp_c45_get_data()).
 */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;	/* chip register layout */
	const struct nxp_c45_phy_stats *stats;	/* chip-specific ethtool stats */
	int n_stats;				/* number of entries in @stats */
	u8 ptp_clk_period;	/* PTP clock period (presumably ns — see PTP_CLK_PERIOD_*) */
	bool ext_ts_both_edges;	/* HW can timestamp both external-trigger edges */
	bool ack_ptp_irq;
	void (*counters_enable)(struct phy_device *phydev);
	/* Fetch one egress timestamp; returns false when none is available. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
};
266 
/* Driver-private state, one instance per PHY device. */
struct nxp_c45_phy {
	const struct nxp_c45_phy_data *phy_data;
	struct phy_device *phydev;
	struct mii_timestamper mii_ts;	/* timestamping hooks registered with phylib */
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info caps;
	struct sk_buff_head tx_queue;	/* skbs awaiting an egress timestamp */
	struct sk_buff_head rx_queue;	/* skbs awaiting RX timestamp completion */
	/* used to access the PTP registers atomic */
	struct mutex ptp_lock;
	int hwts_tx;		/* current HWTSTAMP_TX_* setting */
	int hwts_rx;		/* non-zero when RX timestamping is enabled */
	u32 tx_delay;
	u32 rx_delay;
	struct timespec64 extts_ts;	/* last external-trigger timestamp seen */
	int extts_index;		/* pin index reported in extts events */
	bool extts;			/* external timestamping currently enabled */
};
285 
286 static const
287 struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
288 {
289 	return phydev->drv->driver_data;
290 }
291 
292 static const
293 struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
294 {
295 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
296 
297 	return phy_data->regmap;
298 }
299 
300 static int nxp_c45_read_reg_field(struct phy_device *phydev,
301 				  const struct nxp_c45_reg_field *reg_field)
302 {
303 	u16 mask;
304 	int ret;
305 
306 	if (reg_field->size == 0) {
307 		phydev_err(phydev, "Trying to read a reg field of size 0.\n");
308 		return -EINVAL;
309 	}
310 
311 	ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
312 	if (ret < 0)
313 		return ret;
314 
315 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
316 		GENMASK(reg_field->offset + reg_field->size - 1,
317 			reg_field->offset);
318 	ret &= mask;
319 	ret >>= reg_field->offset;
320 
321 	return ret;
322 }
323 
324 static int nxp_c45_write_reg_field(struct phy_device *phydev,
325 				   const struct nxp_c45_reg_field *reg_field,
326 				   u16 val)
327 {
328 	u16 mask;
329 	u16 set;
330 
331 	if (reg_field->size == 0) {
332 		phydev_err(phydev, "Trying to write a reg field of size 0.\n");
333 		return -EINVAL;
334 	}
335 
336 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
337 		GENMASK(reg_field->offset + reg_field->size - 1,
338 			reg_field->offset);
339 	set = val << reg_field->offset;
340 
341 	return phy_modify_mmd_changed(phydev, reg_field->devad,
342 				      reg_field->reg, mask, set);
343 }
344 
345 static int nxp_c45_set_reg_field(struct phy_device *phydev,
346 				 const struct nxp_c45_reg_field *reg_field)
347 {
348 	if (reg_field->size != 1) {
349 		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
350 		return -EINVAL;
351 	}
352 
353 	return nxp_c45_write_reg_field(phydev, reg_field, 1);
354 }
355 
356 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
357 				   const struct nxp_c45_reg_field *reg_field)
358 {
359 	if (reg_field->size != 1) {
360 		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
361 		return -EINVAL;
362 	}
363 
364 	return nxp_c45_write_reg_field(phydev, reg_field, 0);
365 }
366 
367 static bool nxp_c45_poll_txts(struct phy_device *phydev)
368 {
369 	return phydev->irq <= 0;
370 }
371 
/* Read the local time counter (LTC). Caller must hold priv->ptp_lock.
 *
 * Triggers an LTC read snapshot via the ltc_read bit, then assembles the
 * 30-bit nanoseconds and 32-bit seconds from four 16-bit registers.
 *
 * NOTE(review): @sts is accepted but never filled in — pre/post system
 * timestamps are not captured around the register reads; confirm whether
 * that is intentional.
 */
static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_nsec_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    regmap->vend1_ltc_rd_nsec_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  regmap->vend1_ltc_rd_sec_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_sec_1) << 16;

	return 0;
}
391 
392 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
393 				  struct timespec64 *ts,
394 				  struct ptp_system_timestamp *sts)
395 {
396 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
397 
398 	mutex_lock(&priv->ptp_lock);
399 	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
400 	mutex_unlock(&priv->ptp_lock);
401 
402 	return 0;
403 }
404 
/* Set the local time counter (LTC). Caller must hold priv->ptp_lock.
 *
 * The four 16-bit halves are staged first; the final ltc_write bit
 * latches them into the counter, so it must come last.
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
		      ts->tv_sec >> 16);
	/* Commit the staged value. */
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);

	return 0;
}
423 
424 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
425 				 const struct timespec64 *ts)
426 {
427 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
428 
429 	mutex_lock(&priv->ptp_lock);
430 	_nxp_c45_ptp_settime64(ptp, ts);
431 	mutex_unlock(&priv->ptp_lock);
432 
433 	return 0;
434 }
435 
/* ptp_clock_info::adjfine callback: adjust the LTC frequency.
 *
 * The scaled-ppm correction is converted to an absolute sub-nanosecond
 * increment via PPM_TO_SUBNS_INC (note: the macro takes a ppb value
 * despite its name) and split across the two 16-bit RATE_ADJ registers.
 * The upper register carries the load bit (CLK_RATE_ADJ_LD) and the
 * direction bit (CLK_RATE_ADJ_DIR, set for a positive adjustment) and is
 * written last — presumably the LD bit latches the complete value;
 * confirm against the datasheet.
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;		/* remember the sign; HW takes magnitude + direction */
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	/* Lower 16 bits of the increment. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	/* Upper bits plus load/direction flags. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
466 
467 static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
468 {
469 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
470 	struct timespec64 now, then;
471 
472 	mutex_lock(&priv->ptp_lock);
473 	then = ns_to_timespec64(delta);
474 	_nxp_c45_ptp_gettimex64(ptp, &now, NULL);
475 	now = timespec64_add(now, then);
476 	_nxp_c45_ptp_settime64(ptp, &now);
477 	mutex_unlock(&priv->ptp_lock);
478 
479 	return 0;
480 }
481 
/* Reconstruct a full timestamp from a partial hardware timestamp.
 *
 * @ts holds a freshly read full LTC time; @hwts provides the exact
 * nanoseconds plus only the two LSBs of the seconds (TS_SEC_MASK). The
 * low seconds bits of @ts are replaced with the hardware's. If the
 * current low bits are already smaller than the hardware's, the 2-bit
 * counter wrapped between the event and the LTC read, so step the
 * seconds back by one wrap period (TS_SEC_MASK + 1 = 4 s) first.
 */
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
491 
492 static bool nxp_c45_match_ts(struct ptp_header *header,
493 			     struct nxp_c45_hwts *hwts,
494 			     unsigned int type)
495 {
496 	return ntohs(header->sequence_id) == hwts->sequence_id &&
497 	       ptp_get_msgtype(header, type) == hwts->msg_type &&
498 	       header->domain_number  == hwts->domain_number;
499 }
500 
/* Read the latest external-trigger timestamp from the four EXT_TRG_DATA
 * registers and acknowledge it by writing RING_DONE to the control
 * register (presumably advancing/acking the HW ring — confirm against
 * the datasheet).
 */
static void nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       regmap->vend1_ext_trg_data_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_3) << 16;
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_ext_trg_ctrl, RING_DONE);
}
517 
/* Read one egress timestamp entry into @hwts using the chip-specific
 * register fields: frame identity (domain, type, sequenceId) plus a
 * 30-bit nanoseconds value and 5 bits of seconds split across two
 * fields.
 */
static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
				   struct nxp_c45_hwts *hwts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;

	hwts->domain_number =
		nxp_c45_read_reg_field(phydev, &regmap->domain_number);
	hwts->msg_type =
		nxp_c45_read_reg_field(phydev, &regmap->msg_type);
	hwts->sequence_id =
		nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
	hwts->nsec =
		nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
	hwts->nsec |=
		nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
	hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
	hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;
}
537 
538 static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
539 			       struct nxp_c45_hwts *hwts)
540 {
541 	bool valid;
542 	u16 reg;
543 
544 	mutex_lock(&priv->ptp_lock);
545 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
546 		      RING_DONE);
547 	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
548 	valid = !!(reg & RING_DATA_0_TS_VALID);
549 	if (!valid)
550 		goto nxp_c45_get_hwtxts_out;
551 
552 	nxp_c45_read_egress_ts(priv, hwts);
553 nxp_c45_get_hwtxts_out:
554 	mutex_unlock(&priv->ptp_lock);
555 	return valid;
556 }
557 
558 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
559 {
560 	bool valid;
561 	u16 reg;
562 
563 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
564 	valid = !!(reg & TJA1120_TS_VALID);
565 
566 	return valid;
567 }
568 
/* TJA1120 flavor of get_egressts: read one egress timestamp from the
 * buffer, working around an engineering-sample quirk where a pending
 * FIFO entry must be manually moved into the buffer. Returns true when
 * @hwts was filled; the valid bit is cleared afterwards to release the
 * buffer slot.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	/* Release the buffer entry we just consumed. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
601 
/* Match a hardware TX timestamp against the queued skbs and complete the
 * matching one. The queue is walked under its spinlock; the matched skb
 * is unlinked there and completed outside the lock. The full time is
 * reconstructed from the current LTC plus the partial HW timestamp.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
636 
/* PTP auxiliary worker. Three duties:
 *  1. When running without a PTP interrupt, poll egress timestamps and
 *     match them against queued TX skbs.
 *  2. Complete RX timestamps: the partial timestamp was inserted by HW
 *     into the PTP header's reserved2 field (2 bits of seconds + 30 bits
 *     of nanoseconds) and is combined with the current LTC.
 *  3. When external timestamping is on, poll for a new extts value and
 *     emit a PTP_CLOCK_EXTTS event on change.
 * Returns 1 to reschedule immediately, -1 to sleep until rescheduled.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool txts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		txts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!txts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		/* HW-inserted partial timestamp: sec[1:0] in the top two
		 * bits, nsec in the bottom 30.
		 */
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		/* Restore the header field before handing the skb up. */
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		nxp_c45_get_extts(priv, &ts);
		if (timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		/* Keep polling while extts is enabled. */
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
688 
689 static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
690 				int pin, u16 pin_cfg)
691 {
692 	struct phy_device *phydev = priv->phydev;
693 
694 	phy_write_mmd(phydev, MDIO_MMD_VEND1,
695 		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
696 }
697 
/* Enable/disable the periodic output (PPS) on the requested pin.
 *
 * The hardware only generates a fixed 1 Hz pulse tied to the seconds
 * counter; period must be exactly 1 s, the start time is not
 * configurable, and the only supported phase values are 0 and 500 ms
 * (selected via the polarity bit).
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	int pin;

	/* PTP_PEROUT_PHASE is the only supported flag. */
	if (perout->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_enable);
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_polarity);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		/* A 500 ms phase is implemented by inverting the polarity. */
		if (perout->phase.nsec == 0)
			nxp_c45_clear_reg_field(priv->phydev,
						&regmap->pps_polarity);
		else
			nxp_c45_set_reg_field(priv->phydev,
					      &regmap->pps_polarity);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);

	return 0;
}
758 
759 static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
760 					  struct ptp_extts_request *extts)
761 {
762 	if (extts->flags & PTP_RISING_EDGE)
763 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
764 				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);
765 
766 	if (extts->flags & PTP_FALLING_EDGE)
767 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
768 				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
769 }
770 
771 static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
772 					   struct ptp_extts_request *extts)
773 {
774 	/* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
775 	 * this case external ts will be enabled on rising edge.
776 	 */
777 	if (extts->flags & PTP_RISING_EDGE ||
778 	    extts->flags == PTP_ENABLE_FEATURE)
779 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
780 				 TJA1120_SYNC_TRIG_FILTER,
781 				 PTP_TRIG_RISE_TS);
782 	else
783 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
784 				   TJA1120_SYNC_TRIG_FILTER,
785 				   PTP_TRIG_RISE_TS);
786 
787 	if (extts->flags & PTP_FALLING_EDGE)
788 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
789 				 TJA1120_SYNC_TRIG_FILTER,
790 				 PTP_TRIG_FALLING_TS);
791 	else
792 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
793 				   TJA1120_SYNC_TRIG_FILTER,
794 				   PTP_TRIG_FALLING_TS);
795 }
796 
/* Enable/disable external timestamping on the requested pin.
 *
 * Validates the request flags, picks the edge configuration appropriate
 * for the chip (both edges only when data->ext_ts_both_edges), routes
 * the GPIO to the PTP trigger function, and kicks the aux worker which
 * polls for new extts values.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			      PTP_RISING_EDGE |
			      PTP_FALLING_EDGE |
			      PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE) &&
	    !data->ext_ts_both_edges)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	if (data->ext_ts_both_edges)
		nxp_c45_set_rising_and_falling(priv->phydev, extts);
	else
		nxp_c45_set_rising_or_falling(priv->phydev, extts);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Start polling for trigger timestamps right away. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
838 
839 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
840 			      struct ptp_clock_request *req, int on)
841 {
842 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
843 
844 	switch (req->type) {
845 	case PTP_CLK_REQ_EXTTS:
846 		return nxp_c45_extts_enable(priv, &req->extts, on);
847 	case PTP_CLK_REQ_PEROUT:
848 		return nxp_c45_perout_enable(priv, &req->perout, on);
849 	default:
850 		return -EOPNOTSUPP;
851 	}
852 }
853 
/* The 12 programmable GPIO pins, all unassigned by default; user space
 * assigns PEROUT/EXTTS via the PTP pin API (validated in
 * nxp_c45_ptp_verify_pin()).
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
868 
869 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
870 				  enum ptp_pin_function func, unsigned int chan)
871 {
872 	if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
873 		return -EINVAL;
874 
875 	switch (func) {
876 	case PTP_PF_NONE:
877 	case PTP_PF_PEROUT:
878 	case PTP_PF_EXTTS:
879 		break;
880 	default:
881 		return -EOPNOTSUPP;
882 	}
883 
884 	return 0;
885 }
886 
/* Fill in the PTP clock capabilities and register the clock with the
 * PTP core. Returns 0 on success, a negative error code on registration
 * failure, or -ENOMEM when the core returns a NULL clock (PTP support
 * compiled out).
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
917 
/* mii_timestamper::txtstamp hook.
 *
 * When TX timestamping is on, stash the parsed PTP header and type in
 * the skb cb, queue the clone until the egress timestamp arrives, and
 * (in polling mode) kick the aux worker. Otherwise the clone is freed.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
939 
940 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
941 			     struct sk_buff *skb, int type)
942 {
943 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
944 						mii_ts);
945 	struct ptp_header *header = ptp_parse_header(skb, type);
946 
947 	if (!header)
948 		return false;
949 
950 	if (!priv->hwts_rx)
951 		return false;
952 
953 	NXP_C45_SKB_CB(skb)->header = header;
954 	skb_queue_tail(&priv->rx_queue, skb);
955 	ptp_schedule_worker(priv->ptp_clock, 0);
956 
957 	return true;
958 }
959 
/* mii_timestamper::hwtstamp hook (SIOCSHWTSTAMP).
 *
 * Validates the user's hwtstamp_config, records the TX/RX settings,
 * programs the event message filter and chip-level PTP enable, and —
 * when an interrupt line is available — toggles the egress-timestamp
 * IRQ to match the TX setting. The possibly-adjusted config is copied
 * back to user space.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct ifreq *ifreq)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	const struct nxp_c45_phy_data *data;
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	data = nxp_c45_get_data(phydev);
	priv->hwts_tx = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/* All L2 PTP event frames are timestamped; report the
		 * broader filter actually applied.
		 */
		priv->hwts_rx = 1;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_ALL);
		data->ptp_enable(phydev, true);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_NONE);
		data->ptp_enable(phydev, false);
	}

	/* In polling mode there is no egress-timestamp IRQ to manage. */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
	else
		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);

nxp_c45_no_ptp_irq:
	return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
1015 
1016 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
1017 			   struct ethtool_ts_info *ts_info)
1018 {
1019 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1020 						mii_ts);
1021 
1022 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1023 			SOF_TIMESTAMPING_RX_HARDWARE |
1024 			SOF_TIMESTAMPING_RAW_HARDWARE;
1025 	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1026 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1027 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1028 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1029 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1030 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
1031 
1032 	return 0;
1033 }
1034 
/* Statistics counters shared by all supported PHYs (TJA1103 and TJA1120),
 * exposed through ethtool -S. Each entry pairs a stat name with the VEND1
 * register field that holds the counter. Entry order is the order the
 * strings/values are reported in, so keep it stable.
 */
static const struct nxp_c45_phy_stats common_hw_stats[] = {
	{ "phy_link_status_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
	{ "phy_link_availability_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
	{ "phy_link_loss_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
	{ "phy_link_failure_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
	{ "phy_symbol_error_cnt",
		NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
};
1047 
/* TJA1103-only counters, reported after common_hw_stats in ethtool -S. */
static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
	{ "rx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
	{ "tx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
	{ "rx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
	{ "tx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
};
1058 
/* TJA1120-only counters, reported after common_hw_stats in ethtool -S.
 * The *_xtd entries hold the upper (extended) bits of the frame counters.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
		NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
		NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
		NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
		NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
		NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1079 
1080 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1081 {
1082 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1083 
1084 	return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1085 }
1086 
1087 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
1088 {
1089 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1090 	size_t count = nxp_c45_get_sset_count(phydev);
1091 	size_t idx;
1092 	size_t i;
1093 
1094 	for (i = 0; i < count; i++) {
1095 		if (i < ARRAY_SIZE(common_hw_stats)) {
1096 			strscpy(data + i * ETH_GSTRING_LEN,
1097 				common_hw_stats[i].name, ETH_GSTRING_LEN);
1098 			continue;
1099 		}
1100 		idx = i - ARRAY_SIZE(common_hw_stats);
1101 		strscpy(data + i * ETH_GSTRING_LEN,
1102 			phy_data->stats[idx].name, ETH_GSTRING_LEN);
1103 	}
1104 }
1105 
1106 static void nxp_c45_get_stats(struct phy_device *phydev,
1107 			      struct ethtool_stats *stats, u64 *data)
1108 {
1109 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1110 	size_t count = nxp_c45_get_sset_count(phydev);
1111 	const struct nxp_c45_reg_field *reg_field;
1112 	size_t idx;
1113 	size_t i;
1114 	int ret;
1115 
1116 	for (i = 0; i < count; i++) {
1117 		if (i < ARRAY_SIZE(common_hw_stats)) {
1118 			reg_field = &common_hw_stats[i].counter;
1119 		} else {
1120 			idx = i - ARRAY_SIZE(common_hw_stats);
1121 			reg_field = &phy_data->stats[idx].counter;
1122 		}
1123 
1124 		ret = nxp_c45_read_reg_field(phydev, reg_field);
1125 		if (ret < 0)
1126 			data[i] = U64_MAX;
1127 		else
1128 			data[i] = ret;
1129 	}
1130 }
1131 
/* Unlock global and per-port configuration registers, then enable the
 * port, PHY and port-infrastructure blocks. Always returns 0; the
 * individual MDIO write results are not checked.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	/* Settle delay after requesting config access — presumably a
	 * hardware requirement; confirm against the datasheet.
	 */
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
1148 
/* Start normal PHY operation once configuration is complete. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1154 
1155 static int nxp_c45_config_intr(struct phy_device *phydev)
1156 {
1157 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1158 		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1159 					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1160 	else
1161 		return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1162 					  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1163 }
1164 
/* Threaded IRQ handler: acknowledges link events and, when the egress
 * timestamp IRQ is pending, drains the TX timestamp FIFO and matches
 * each timestamp to its queued skb.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		/* Ack the link event before letting phylib re-read state */
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp.
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		/* Consume every timestamp currently available in the FIFO */
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	return ret;
}
1198 
/* Request a device reset and poll until the self-clearing RESET bit
 * deasserts. Returns 0 on success or a negative errno on MDIO failure
 * or poll timeout.
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	/* Poll every 20ms, giving up after 240ms */
	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1213 
1214 static int nxp_c45_cable_test_start(struct phy_device *phydev)
1215 {
1216 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1217 
1218 	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1219 				CABLE_TEST_ENABLE | CABLE_TEST_START);
1220 }
1221 
1222 static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
1223 					 bool *finished)
1224 {
1225 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1226 	int ret;
1227 	u8 cable_test_result;
1228 
1229 	ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
1230 	if (!ret) {
1231 		*finished = false;
1232 		return 0;
1233 	}
1234 
1235 	*finished = true;
1236 	cable_test_result = nxp_c45_read_reg_field(phydev,
1237 						   &regmap->cable_test_result);
1238 
1239 	switch (cable_test_result) {
1240 	case CABLE_TEST_OK:
1241 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1242 					ETHTOOL_A_CABLE_RESULT_CODE_OK);
1243 		break;
1244 	case CABLE_TEST_SHORTED:
1245 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1246 					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
1247 		break;
1248 	case CABLE_TEST_OPEN:
1249 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1250 					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
1251 		break;
1252 	default:
1253 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
1254 					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
1255 	}
1256 
1257 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1258 			   CABLE_TEST_ENABLE);
1259 
1260 	return nxp_c45_start_op(phydev);
1261 }
1262 
1263 static int nxp_c45_get_sqi(struct phy_device *phydev)
1264 {
1265 	int reg;
1266 
1267 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1268 	if (!(reg & SQI_VALID))
1269 		return -EINVAL;
1270 
1271 	reg &= SQI_MASK;
1272 
1273 	return reg;
1274 }
1275 
/* Largest SQI value the hardware can report (ethtool get_sqi_max). */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1280 
1281 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1282 {
1283 	if (delay < MIN_ID_PS) {
1284 		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1285 		return -EINVAL;
1286 	}
1287 
1288 	if (delay > MAX_ID_PS) {
1289 		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1290 		return -EINVAL;
1291 	}
1292 
1293 	return 0;
1294 }
1295 
/* Enable the statistics counters common to all variants, then the
 * device-specific ones via the driver-data callback.
 */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
1305 
/* Common PTP setup: program the LTC clock period for this variant,
 * unlock the LTC registers for writing, then run the device-specific
 * PTP init callback.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1317 
/* Convert a delay expressed in degrees into the raw phase offset value
 * written to the TXID/RXID registers.
 * NOTE(review): the u64 subtraction underflows for inputs below 73.8
 * degrees — presumably ruled out by the MIN_ID_PS check in
 * nxp_c45_check_delay(); confirm against the datasheet.
 */
static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
{
	/* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
	 * To avoid floating point operations we'll multiply by 10
	 * and get 1 decimal point precision.
	 */
	phase_offset_raw *= 10;
	phase_offset_raw -= 738;
	return div_u64(phase_offset_raw, 9);
}
1328 
/* Turn off both TX and RX RGMII internal delays. */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
1334 
1335 static void nxp_c45_set_delays(struct phy_device *phydev)
1336 {
1337 	struct nxp_c45_phy *priv = phydev->priv;
1338 	u64 tx_delay = priv->tx_delay;
1339 	u64 rx_delay = priv->rx_delay;
1340 	u64 degree;
1341 
1342 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1343 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1344 		degree = div_u64(tx_delay, PS_PER_DEGREE);
1345 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1346 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1347 	} else {
1348 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1349 				   ID_ENABLE);
1350 	}
1351 
1352 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1353 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1354 		degree = div_u64(rx_delay, PS_PER_DEGREE);
1355 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1356 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1357 	} else {
1358 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1359 				   ID_ENABLE);
1360 	}
1361 }
1362 
1363 static int nxp_c45_get_delays(struct phy_device *phydev)
1364 {
1365 	struct nxp_c45_phy *priv = phydev->priv;
1366 	int ret;
1367 
1368 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1369 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1370 		ret = device_property_read_u32(&phydev->mdio.dev,
1371 					       "tx-internal-delay-ps",
1372 					       &priv->tx_delay);
1373 		if (ret)
1374 			priv->tx_delay = DEFAULT_ID_PS;
1375 
1376 		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1377 		if (ret) {
1378 			phydev_err(phydev,
1379 				   "tx-internal-delay-ps invalid value\n");
1380 			return ret;
1381 		}
1382 	}
1383 
1384 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1385 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1386 		ret = device_property_read_u32(&phydev->mdio.dev,
1387 					       "rx-internal-delay-ps",
1388 					       &priv->rx_delay);
1389 		if (ret)
1390 			priv->rx_delay = DEFAULT_ID_PS;
1391 
1392 		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1393 		if (ret) {
1394 			phydev_err(phydev,
1395 				   "rx-internal-delay-ps invalid value\n");
1396 			return ret;
1397 		}
1398 	}
1399 
1400 	return 0;
1401 }
1402 
/* Check the requested interface mode against the abilities advertised
 * by the hardware and program the matching MII basic configuration.
 * Returns 0 on success, -EINVAL when the mode is unsupported.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Plain RGMII: no internal delays */
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Fetch and program the internal delays for the ID modes */
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RMII);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		/* Nothing to configure for internal mode */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1475 
/* One-time PHY configuration: unlock config registers, apply the
 * SJA1110 workaround, select the MII mode, enable counters and PTP,
 * then start normal operation. Autonegotiation is not supported and
 * is forced off.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	phydev->autoneg = AUTONEG_DISABLE;

	nxp_c45_counters_enable(phydev);
	nxp_c45_ptp_init(phydev);

	return nxp_c45_start_op(phydev);
}
1506 
1507 static int nxp_c45_get_features(struct phy_device *phydev)
1508 {
1509 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
1510 	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);
1511 
1512 	return genphy_c45_pma_read_abilities(phydev);
1513 }
1514 
/* Allocate and initialize per-PHY driver state. When the hardware
 * advertises PTP support and the kernel has PHY timestamping enabled,
 * hook up the mii_ts callbacks and register the PTP clock.
 */
static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	int ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* skb queues used by the timestamping path (purged on remove) */
	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	mutex_init(&priv->ptp_lock);

	ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_ABILITIES);
	ptp_ability = !!(ptp_ability & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_dbg(phydev, "the phy does not support PTP");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		ret = nxp_c45_init_ptp_clock(priv);
	} else {
		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
	}

no_ptp_support:

	return ret;
}
1558 
/* Undo probe: unregister the PTP clock if one was registered and drop
 * any skbs still waiting in the timestamping queues.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
}
1569 
/* Enable the TJA1103-specific counters listed in tja1103_hw_stats. */
static void tja1103_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);
}
1581 
/* TJA1103 PTP setup: select RX timestamp insertion mode 2 and enable
 * the PTP port function.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1589 
1590 static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
1591 {
1592 	if (enable)
1593 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1594 				   VEND1_PORT_PTP_CONTROL,
1595 				   PORT_PTP_CONTROL_BYPASS);
1596 	else
1597 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1598 				 VEND1_PORT_PTP_CONTROL,
1599 				 PORT_PTP_CONTROL_BYPASS);
1600 }
1601 
/* VEND1 register/field layout for the TJA1103 family (PTP clock, LTC,
 * egress timestamp, external trigger and cable test blocks).
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period	= 0x1104,
	.vend1_event_msg_filt	= 0x1148,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0	= 0x1106,
	.vend1_ltc_wr_nsec_1	= 0x1107,
	.vend1_ltc_wr_sec_0	= 0x1108,
	.vend1_ltc_wr_sec_1	= 0x1109,
	.vend1_ltc_rd_nsec_0	= 0x110A,
	.vend1_ltc_rd_nsec_1	= 0x110B,
	.vend1_ltc_rd_sec_0	= 0x110C,
	.vend1_ltc_rd_sec_1	= 0x110D,
	.vend1_rate_adj_subns_0	= 0x110F,
	.vend1_rate_adj_subns_1	= 0x1110,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1121,
	.vend1_ext_trg_data_1	= 0x1122,
	.vend1_ext_trg_data_2	= 0x1123,
	.vend1_ext_trg_data_3	= 0x1124,
	.vend1_ext_trg_ctrl	= 0x1126,
	.cable_test		= 0x8330,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1654 
/* Driver data for TJA1103: 100BASE-T1 PTP clock period; the egress
 * timestamp IRQ is self-clearing, so no explicit ack is needed.
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	.ack_ptp_irq = false,
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
};
1667 
/* Enable the TJA1120-specific counters: extended symbol error counting
 * and the frame/lost-frame monitor (which is reset first).
 */
static void tja1120_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
			 EXTENDED_CNT_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
			 MONITOR_RESET);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
}
1677 
/* TJA1120 PTP setup: enable RX timestamp insertion, configure the
 * external timestamp mode, and enable PTP in the device config.
 * NOTE(review): TJA1120_TS_INSRT_MODE is written to both the RX insert
 * control and EXT_TS_MODE registers — verify against the datasheet.
 */
static void tja1120_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
		      TJA1120_TS_INSRT_MODE);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
			 PTP_ENABLE);
}
1687 
1688 static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
1689 {
1690 	if (enable)
1691 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1692 				 VEND1_PORT_FUNC_ENABLES,
1693 				 PTP_ENABLE);
1694 	else
1695 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1696 				   VEND1_PORT_FUNC_ENABLES,
1697 				   PTP_ENABLE);
1698 }
1699 
/* VEND1 register/field layout for the TJA1120 family; same structure
 * as tja1103_regmap but with different addresses and bit positions.
 */
static const struct nxp_c45_regmap tja1120_regmap = {
	.vend1_ptp_clk_period	= 0x1020,
	.vend1_event_msg_filt	= 0x9010,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	.vend1_ltc_wr_nsec_0	= 0x1040,
	.vend1_ltc_wr_nsec_1	= 0x1041,
	.vend1_ltc_wr_sec_0	= 0x1042,
	.vend1_ltc_wr_sec_1	= 0x1043,
	.vend1_ltc_rd_nsec_0	= 0x1048,
	.vend1_ltc_rd_nsec_1	= 0x1049,
	.vend1_ltc_rd_sec_0	= 0x104A,
	.vend1_ltc_rd_sec_1	= 0x104B,
	.vend1_rate_adj_subns_0	= 0x1030,
	.vend1_rate_adj_subns_1	= 0x1031,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1071,
	.vend1_ext_trg_data_1	= 0x1072,
	.vend1_ext_trg_data_2	= 0x1073,
	.vend1_ext_trg_data_3	= 0x1074,
	.vend1_ext_trg_ctrl	= 0x1075,
	.cable_test		= 0x8360,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
1752 
/* Driver data for TJA1120: 1000BASE-T1 PTP clock period; the egress
 * timestamp IRQ must be acknowledged explicitly (see ack_ptp_irq use
 * in nxp_c45_handle_interrupt()).
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	.ext_ts_both_edges = true,
	.ack_ptp_irq = true,
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
};
1765 
/* phylib driver entries for the TJA1103 and TJA1120; both share the
 * same callbacks and differ only in driver_data.
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= nxp_c45_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
		.name			= "NXP C45 TJA1120",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= nxp_c45_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
};
1816 
module_phy_driver(nxp_c45_driver);

/* MDIO device ID table for module autoloading. */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");
1830