1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3  * Copyright (C) 2021 NXP
4  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/phy.h>
14 #include <linux/processor.h>
15 #include <linux/property.h>
16 #include <linux/ptp_classify.h>
17 #include <linux/ptp_clock_kernel.h>
18 #include <linux/net_tstamp.h>
19 
20 #define PHY_ID_TJA_1103			0x001BB010
21 
22 #define VEND1_DEVICE_CONTROL		0x0040
23 #define DEVICE_CONTROL_RESET		BIT(15)
24 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
25 #define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)
26 
27 #define VEND1_PHY_IRQ_ACK		0x80A0
28 #define VEND1_PHY_IRQ_EN		0x80A1
29 #define VEND1_PHY_IRQ_STATUS		0x80A2
30 #define PHY_IRQ_LINK_EVENT		BIT(1)
31 
32 #define VEND1_PHY_CONTROL		0x8100
33 #define PHY_CONFIG_EN			BIT(14)
34 #define PHY_START_OP			BIT(0)
35 
36 #define VEND1_PHY_CONFIG		0x8108
37 #define PHY_CONFIG_AUTO			BIT(0)
38 
39 #define VEND1_SIGNAL_QUALITY		0x8320
40 #define SQI_VALID			BIT(14)
41 #define SQI_MASK			GENMASK(2, 0)
42 #define MAX_SQI				SQI_MASK
43 
44 #define VEND1_CABLE_TEST		0x8330
45 #define CABLE_TEST_ENABLE		BIT(15)
46 #define CABLE_TEST_START		BIT(14)
47 #define CABLE_TEST_VALID		BIT(13)
48 #define CABLE_TEST_OK			0x00
49 #define CABLE_TEST_SHORTED		0x01
50 #define CABLE_TEST_OPEN			0x02
51 #define CABLE_TEST_UNKNOWN		0x07
52 
53 #define VEND1_PORT_CONTROL		0x8040
54 #define PORT_CONTROL_EN			BIT(14)
55 
56 #define VEND1_PORT_ABILITIES		0x8046
57 #define PTP_ABILITY			BIT(3)
58 
59 #define VEND1_PORT_INFRA_CONTROL	0xAC00
60 #define PORT_INFRA_CONTROL_EN		BIT(14)
61 
62 #define VEND1_RXID			0xAFCC
63 #define VEND1_TXID			0xAFCD
64 #define ID_ENABLE			BIT(15)
65 
66 #define VEND1_ABILITIES			0xAFC4
67 #define RGMII_ID_ABILITY		BIT(15)
68 #define RGMII_ABILITY			BIT(14)
69 #define RMII_ABILITY			BIT(10)
70 #define REVMII_ABILITY			BIT(9)
71 #define MII_ABILITY			BIT(8)
72 #define SGMII_ABILITY			BIT(0)
73 
74 #define VEND1_MII_BASIC_CONFIG		0xAFC6
75 #define MII_BASIC_CONFIG_REV		BIT(4)
76 #define MII_BASIC_CONFIG_SGMII		0x9
77 #define MII_BASIC_CONFIG_RGMII		0x7
78 #define MII_BASIC_CONFIG_RMII		0x5
79 #define MII_BASIC_CONFIG_MII		0x4
80 
81 #define VEND1_SYMBOL_ERROR_COUNTER	0x8350
82 #define VEND1_LINK_DROP_COUNTER		0x8352
83 #define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
84 #define VEND1_RX_PREAMBLE_COUNT		0xAFCE
85 #define VEND1_TX_PREAMBLE_COUNT		0xAFCF
86 #define VEND1_RX_IPG_LENGTH		0xAFD0
87 #define VEND1_TX_IPG_LENGTH		0xAFD1
88 #define COUNTER_EN			BIT(15)
89 
90 #define VEND1_PTP_CONFIG		0x1102
91 #define EXT_TRG_EDGE			BIT(1)
92 #define PPS_OUT_POL			BIT(2)
93 #define PPS_OUT_EN			BIT(3)
94 
95 #define VEND1_LTC_LOAD_CTRL		0x1105
96 #define READ_LTC			BIT(2)
97 #define LOAD_LTC			BIT(0)
98 
99 #define VEND1_LTC_WR_NSEC_0		0x1106
100 #define VEND1_LTC_WR_NSEC_1		0x1107
101 #define VEND1_LTC_WR_SEC_0		0x1108
102 #define VEND1_LTC_WR_SEC_1		0x1109
103 
104 #define VEND1_LTC_RD_NSEC_0		0x110A
105 #define VEND1_LTC_RD_NSEC_1		0x110B
106 #define VEND1_LTC_RD_SEC_0		0x110C
107 #define VEND1_LTC_RD_SEC_1		0x110D
108 
109 #define VEND1_RATE_ADJ_SUBNS_0		0x110F
110 #define VEND1_RATE_ADJ_SUBNS_1		0x1110
111 #define CLK_RATE_ADJ_LD			BIT(15)
112 #define CLK_RATE_ADJ_DIR		BIT(14)
113 
114 #define VEND1_HW_LTC_LOCK_CTRL		0x1115
115 #define HW_LTC_LOCK_EN			BIT(0)
116 
117 #define VEND1_PTP_IRQ_EN		0x1131
118 #define VEND1_PTP_IRQ_STATUS		0x1132
119 #define PTP_IRQ_EGR_TS			BIT(0)
120 
121 #define VEND1_RX_TS_INSRT_CTRL		0x114D
122 #define RX_TS_INSRT_MODE2		0x02
123 
124 #define VEND1_EGR_RING_DATA_0		0x114E
125 #define VEND1_EGR_RING_DATA_1_SEQ_ID	0x114F
126 #define VEND1_EGR_RING_DATA_2_NSEC_15_0	0x1150
127 #define VEND1_EGR_RING_DATA_3		0x1151
128 #define VEND1_EGR_RING_CTRL		0x1154
129 
130 #define VEND1_EXT_TRG_TS_DATA_0		0x1121
131 #define VEND1_EXT_TRG_TS_DATA_1		0x1122
132 #define VEND1_EXT_TRG_TS_DATA_2		0x1123
133 #define VEND1_EXT_TRG_TS_DATA_3		0x1124
134 #define VEND1_EXT_TRG_TS_DATA_4		0x1125
135 #define VEND1_EXT_TRG_TS_CTRL		0x1126
136 
137 #define RING_DATA_0_DOMAIN_NUMBER	GENMASK(7, 0)
138 #define RING_DATA_0_MSG_TYPE		GENMASK(11, 8)
139 #define RING_DATA_0_SEC_4_2		GENMASK(14, 2)
140 #define RING_DATA_0_TS_VALID		BIT(15)
141 
142 #define RING_DATA_3_NSEC_29_16		GENMASK(13, 0)
143 #define RING_DATA_3_SEC_1_0		GENMASK(15, 14)
144 #define RING_DATA_5_SEC_16_5		GENMASK(15, 4)
145 #define RING_DONE			BIT(0)
146 
147 #define TS_SEC_MASK			GENMASK(1, 0)
148 
149 #define VEND1_PORT_FUNC_ENABLES		0x8048
150 #define PTP_ENABLE			BIT(3)
151 
152 #define VEND1_PORT_PTP_CONTROL		0x9000
153 #define PORT_PTP_CONTROL_BYPASS		BIT(11)
154 
155 #define VEND1_PTP_CLK_PERIOD		0x1104
156 #define PTP_CLK_PERIOD_100BT1		15ULL
157 
158 #define VEND1_EVENT_MSG_FILT		0x1148
159 #define EVENT_MSG_FILT_ALL		0x0F
160 #define EVENT_MSG_FILT_NONE		0x00
161 
162 #define VEND1_TX_PIPE_DLY_NS		0x1149
163 #define VEND1_TX_PIPEDLY_SUBNS		0x114A
164 #define VEND1_RX_PIPE_DLY_NS		0x114B
165 #define VEND1_RX_PIPEDLY_SUBNS		0x114C
166 
167 #define VEND1_GPIO_FUNC_CONFIG_BASE	0x2C40
168 #define GPIO_FUNC_EN			BIT(15)
169 #define GPIO_FUNC_PTP			BIT(6)
170 #define GPIO_SIGNAL_PTP_TRIGGER		0x01
171 #define GPIO_SIGNAL_PPS_OUT		0x12
172 #define GPIO_DISABLE			0
173 #define GPIO_PPS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
174 	GPIO_SIGNAL_PPS_OUT)
175 #define GPIO_EXTTS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
176 	GPIO_SIGNAL_PTP_TRIGGER)
177 
178 #define RGMII_PERIOD_PS			8000U
179 #define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
180 #define MIN_ID_PS			1644U
181 #define MAX_ID_PS			2260U
182 #define DEFAULT_ID_PS			2000U
183 
184 #define PPM_TO_SUBNS_INC(ppb)	div_u64(GENMASK_ULL(31, 0) * (ppb) * \
185 					PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
186 
187 #define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)
188 
/* Per-skb state stashed in skb->cb while a PTP frame waits in the
 * driver's TX or RX queue.
 */
struct nxp_c45_skb_cb {
	/* Parsed PTP header inside the skb (result of ptp_parse_header()) */
	struct ptp_header *header;
	/* Classification type, passed back to ptp_get_msgtype() */
	unsigned int type;
};
193 
/* Partial hardware timestamp as recovered from the PHY timestamp ring.
 * Only the low bits of the seconds counter are captured (see TS_SEC_MASK
 * and nxp_c45_reconstruct_ts()).
 */
struct nxp_c45_hwts {
	u32	nsec;		/* nanoseconds part */
	u32	sec;		/* low bits of the seconds counter */
	u8	domain_number;	/* PTP domain, used for skb matching */
	u16	sequence_id;	/* PTP sequence id, used for skb matching */
	u8	msg_type;	/* PTP message type, used for skb matching */
};
201 
/* Driver private state, reachable from both the phylib callbacks
 * (phydev->priv) and the PTP/timestamping callbacks (container_of on
 * caps/mii_ts).
 */
struct nxp_c45_phy {
	struct phy_device *phydev;
	struct mii_timestamper mii_ts;
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info caps;
	/* skbs awaiting an egress hardware timestamp */
	struct sk_buff_head tx_queue;
	/* skbs awaiting RX timestamp reconstruction in the aux worker */
	struct sk_buff_head rx_queue;
	/* used to access the PTP registers atomic */
	struct mutex ptp_lock;
	int hwts_tx;	/* HWTSTAMP_TX_* mode requested via SIOCSHWTSTAMP */
	int hwts_rx;	/* non-zero when RX timestamping is enabled */
	u32 tx_delay;	/* RGMII TX internal delay, picoseconds */
	u32 rx_delay;	/* RGMII RX internal delay, picoseconds */
	struct timespec64 extts_ts;	/* last reported external trigger ts */
	int extts_index;		/* pin index reported with events */
	bool extts;			/* external timestamping enabled */
};
219 
/* Descriptor for one vendor statistics counter exposed via ethtool -S.
 * The reported value is (register & mask) >> off, read from MMD @mmd,
 * register @reg (see nxp_c45_get_stats()).
 */
struct nxp_c45_phy_stats {
	const char	*name;
	u8		mmd;
	u16		reg;
	u8		off;
	u16		mask;
};
227 
228 static bool nxp_c45_poll_txts(struct phy_device *phydev)
229 {
230 	return phydev->irq <= 0;
231 }
232 
233 static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
234 				   struct timespec64 *ts,
235 				   struct ptp_system_timestamp *sts)
236 {
237 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
238 
239 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
240 		      READ_LTC);
241 	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
242 				   VEND1_LTC_RD_NSEC_0);
243 	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
244 				    VEND1_LTC_RD_NSEC_1) << 16;
245 	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
246 				  VEND1_LTC_RD_SEC_0);
247 	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
248 				   VEND1_LTC_RD_SEC_1) << 16;
249 
250 	return 0;
251 }
252 
253 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
254 				  struct timespec64 *ts,
255 				  struct ptp_system_timestamp *sts)
256 {
257 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
258 
259 	mutex_lock(&priv->ptp_lock);
260 	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
261 	mutex_unlock(&priv->ptp_lock);
262 
263 	return 0;
264 }
265 
266 static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
267 				  const struct timespec64 *ts)
268 {
269 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
270 
271 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_0,
272 		      ts->tv_nsec);
273 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_1,
274 		      ts->tv_nsec >> 16);
275 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_0,
276 		      ts->tv_sec);
277 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_1,
278 		      ts->tv_sec >> 16);
279 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
280 		      LOAD_LTC);
281 
282 	return 0;
283 }
284 
285 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
286 				 const struct timespec64 *ts)
287 {
288 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
289 
290 	mutex_lock(&priv->ptp_lock);
291 	_nxp_c45_ptp_settime64(ptp, ts);
292 	mutex_unlock(&priv->ptp_lock);
293 
294 	return 0;
295 }
296 
297 static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
298 {
299 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
300 	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
301 	u64 subns_inc_val;
302 	bool inc;
303 
304 	mutex_lock(&priv->ptp_lock);
305 	inc = ppb >= 0;
306 	ppb = abs(ppb);
307 
308 	subns_inc_val = PPM_TO_SUBNS_INC(ppb);
309 
310 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_0,
311 		      subns_inc_val);
312 	subns_inc_val >>= 16;
313 	subns_inc_val |= CLK_RATE_ADJ_LD;
314 	if (inc)
315 		subns_inc_val |= CLK_RATE_ADJ_DIR;
316 
317 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_1,
318 		      subns_inc_val);
319 	mutex_unlock(&priv->ptp_lock);
320 
321 	return 0;
322 }
323 
324 static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
325 {
326 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
327 	struct timespec64 now, then;
328 
329 	mutex_lock(&priv->ptp_lock);
330 	then = ns_to_timespec64(delta);
331 	_nxp_c45_ptp_gettimex64(ptp, &now, NULL);
332 	now = timespec64_add(now, then);
333 	_nxp_c45_ptp_settime64(ptp, &now);
334 	mutex_unlock(&priv->ptp_lock);
335 
336 	return 0;
337 }
338 
/* Reconstruct a full timestamp from the current LTC time in @ts and the
 * partial hardware timestamp in @hwts. The hardware captures only the
 * nanoseconds and the low seconds bits (TS_SEC_MASK), so splice those
 * into the LTC seconds, stepping back one wrap interval when the LTC
 * low bits already rolled past the captured ones.
 */
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
348 
349 static bool nxp_c45_match_ts(struct ptp_header *header,
350 			     struct nxp_c45_hwts *hwts,
351 			     unsigned int type)
352 {
353 	return ntohs(header->sequence_id) == hwts->sequence_id &&
354 	       ptp_get_msgtype(header, type) == hwts->msg_type &&
355 	       header->domain_number  == hwts->domain_number;
356 }
357 
358 static void nxp_c45_get_extts(struct nxp_c45_phy *priv,
359 			      struct timespec64 *extts)
360 {
361 	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
362 				      VEND1_EXT_TRG_TS_DATA_0);
363 	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
364 				       VEND1_EXT_TRG_TS_DATA_1) << 16;
365 	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
366 				     VEND1_EXT_TRG_TS_DATA_2);
367 	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
368 				      VEND1_EXT_TRG_TS_DATA_3) << 16;
369 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EXT_TRG_TS_CTRL,
370 		      RING_DONE);
371 }
372 
/* Pop one entry from the egress timestamp ring.
 *
 * Returns true and fills @hwts when a valid entry was read, false when
 * the ring is empty. The whole multi-register read runs under ptp_lock
 * so one entry is consumed atomically.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	/* Advance the ring to the next entry before reading it. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	/* DATA_0 packs domain number (7:0), message type (11:8) and part
	 * of the seconds counter; the remaining nsec/sec bits come from
	 * the DATA_1..DATA_3 registers below.
	 */
	hwts->domain_number = reg;
	hwts->msg_type = (reg & RING_DATA_0_MSG_TYPE) >> 8;
	hwts->sec = (reg & RING_DATA_0_SEC_4_2) >> 10;
	hwts->sequence_id = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
					 VEND1_EGR_RING_DATA_1_SEQ_ID);
	hwts->nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  VEND1_EGR_RING_DATA_2_NSEC_15_0);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_3);
	hwts->nsec |= (reg & RING_DATA_3_NSEC_29_16) << 16;
	hwts->sec |= (reg & RING_DATA_3_SEC_1_0) >> 14;

nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
402 
/* Find the queued TX skb matching @txts (sequence id, message type and
 * domain number), reconstruct the full timestamp and complete it.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	/* Unlink the matching skb under the queue lock; complete it only
	 * after the lock is dropped.
	 */
	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* The ring only holds a partial timestamp; combine it with
		 * the current LTC time before reporting it.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
437 
/* PTP aux worker. Three duties: poll egress timestamps when no IRQ is
 * available, attach reconstructed timestamps to deferred RX skbs, and
 * poll the external trigger timestamp while extts is enabled.
 *
 * Returns the delay until the next run (jiffies) or negative to stop
 * rescheduling.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool txts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		txts_valid = nxp_c45_get_hwtxts(priv, &hwts);
		if (unlikely(!txts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		/* The partial RX timestamp (2 bits of seconds + 30 bits of
		 * nanoseconds) arrives in the reserved2 field of the PTP
		 * header; combine it with the current LTC time. (Presumably
		 * inserted by the PHY per RX_TS_INSRT_MODE2 configured at
		 * init — init code not visible in this chunk.)
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		/* No trigger interrupt: poll, and only emit an event when
		 * the captured timestamp changed since the last run.
		 */
		nxp_c45_get_extts(priv, &ts);
		if (timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
488 
489 static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
490 				int pin, u16 pin_cfg)
491 {
492 	struct phy_device *phydev = priv->phydev;
493 
494 	phy_write_mmd(phydev, MDIO_MMD_VEND1,
495 		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
496 }
497 
498 static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
499 				 struct ptp_perout_request *perout, int on)
500 {
501 	struct phy_device *phydev = priv->phydev;
502 	int pin;
503 
504 	if (perout->flags & ~PTP_PEROUT_PHASE)
505 		return -EOPNOTSUPP;
506 
507 	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
508 	if (pin < 0)
509 		return pin;
510 
511 	if (!on) {
512 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
513 				   PPS_OUT_EN);
514 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG,
515 				   PPS_OUT_POL);
516 
517 		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
518 
519 		return 0;
520 	}
521 
522 	/* The PPS signal is fixed to 1 second and is always generated when the
523 	 * seconds counter is incremented. The start time is not configurable.
524 	 * If the clock is adjusted, the PPS signal is automatically readjusted.
525 	 */
526 	if (perout->period.sec != 1 || perout->period.nsec != 0) {
527 		phydev_warn(phydev, "The period can be set only to 1 second.");
528 		return -EINVAL;
529 	}
530 
531 	if (!(perout->flags & PTP_PEROUT_PHASE)) {
532 		if (perout->start.sec != 0 || perout->start.nsec != 0) {
533 			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
534 			return -EINVAL;
535 		}
536 	} else {
537 		if (perout->phase.nsec != 0 &&
538 		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
539 			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
540 			return -EINVAL;
541 		}
542 
543 		if (perout->phase.nsec == 0)
544 			phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
545 					   VEND1_PTP_CONFIG, PPS_OUT_POL);
546 		else
547 			phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
548 					 VEND1_PTP_CONFIG, PPS_OUT_POL);
549 	}
550 
551 	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);
552 
553 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CONFIG, PPS_OUT_EN);
554 
555 	return 0;
556 }
557 
558 static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
559 				struct ptp_extts_request *extts, int on)
560 {
561 	int pin;
562 
563 	if (extts->flags & ~(PTP_ENABLE_FEATURE |
564 			      PTP_RISING_EDGE |
565 			      PTP_FALLING_EDGE |
566 			      PTP_STRICT_FLAGS))
567 		return -EOPNOTSUPP;
568 
569 	/* Sampling on both edges is not supported */
570 	if ((extts->flags & PTP_RISING_EDGE) &&
571 	    (extts->flags & PTP_FALLING_EDGE))
572 		return -EOPNOTSUPP;
573 
574 	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
575 	if (pin < 0)
576 		return pin;
577 
578 	if (!on) {
579 		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
580 		priv->extts = false;
581 
582 		return 0;
583 	}
584 
585 	if (extts->flags & PTP_RISING_EDGE)
586 		phy_clear_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
587 				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);
588 
589 	if (extts->flags & PTP_FALLING_EDGE)
590 		phy_set_bits_mmd(priv->phydev, MDIO_MMD_VEND1,
591 				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
592 
593 	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
594 	priv->extts = true;
595 	priv->extts_index = extts->index;
596 	ptp_schedule_worker(priv->ptp_clock, 0);
597 
598 	return 0;
599 }
600 
601 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
602 			      struct ptp_clock_request *req, int on)
603 {
604 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
605 
606 	switch (req->type) {
607 	case PTP_CLK_REQ_EXTTS:
608 		return nxp_c45_extts_enable(priv, &req->extts, on);
609 	case PTP_CLK_REQ_PEROUT:
610 		return nxp_c45_perout_enable(priv, &req->perout, on);
611 	default:
612 		return -EOPNOTSUPP;
613 	}
614 }
615 
/* GPIO pins exposed through the PTP pin interface. All start with no
 * function assigned; requested functions are validated by
 * nxp_c45_ptp_verify_pin().
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
630 
631 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
632 				  enum ptp_pin_function func, unsigned int chan)
633 {
634 	if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
635 		return -EINVAL;
636 
637 	switch (func) {
638 	case PTP_PF_NONE:
639 	case PTP_PF_PEROUT:
640 	case PTP_PF_EXTTS:
641 		break;
642 	default:
643 		return -EOPNOTSUPP;
644 	}
645 
646 	return 0;
647 }
648 
/* Fill in the PTP clock capabilities and register the PHC.
 * Returns 0 on success or a negative error code.
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	/* ptp_clock_register() returns NULL when PHC support is disabled
	 * in the kernel configuration.
	 */
	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
679 
680 static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
681 			     struct sk_buff *skb, int type)
682 {
683 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
684 						mii_ts);
685 
686 	switch (priv->hwts_tx) {
687 	case HWTSTAMP_TX_ON:
688 		NXP_C45_SKB_CB(skb)->type = type;
689 		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
690 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
691 		skb_queue_tail(&priv->tx_queue, skb);
692 		if (nxp_c45_poll_txts(priv->phydev))
693 			ptp_schedule_worker(priv->ptp_clock, 0);
694 		break;
695 	case HWTSTAMP_TX_OFF:
696 	default:
697 		kfree_skb(skb);
698 		break;
699 	}
700 }
701 
702 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
703 			     struct sk_buff *skb, int type)
704 {
705 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
706 						mii_ts);
707 	struct ptp_header *header = ptp_parse_header(skb, type);
708 
709 	if (!header)
710 		return false;
711 
712 	if (!priv->hwts_rx)
713 		return false;
714 
715 	NXP_C45_SKB_CB(skb)->header = header;
716 	skb_queue_tail(&priv->rx_queue, skb);
717 	ptp_schedule_worker(priv->ptp_clock, 0);
718 
719 	return true;
720 }
721 
/* SIOCSHWTSTAMP handler: configure TX/RX hardware timestamping.
 *
 * When any timestamping is enabled, the PTP event message filter is
 * opened and the port is taken out of PTP bypass; otherwise the filter
 * is closed and bypass restored. With an interrupt line available, the
 * egress-timestamp IRQ is (un)masked to match the TX setting.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct ifreq *ifreq)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	priv->hwts_tx = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/* All L2 event filters are widened to L2_EVENT, and the
		 * actual filter is reported back to user space.
		 */
		priv->hwts_rx = 1;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_ALL);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_NONE);
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_PTP_CONTROL,
				 PORT_PTP_CONTROL_BYPASS);
	}

	/* Without an IRQ, egress timestamps are polled by the aux worker,
	 * so there is no PTP interrupt to manage.
	 */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);

nxp_c45_no_ptp_irq:
	return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
778 
779 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
780 			   struct ethtool_ts_info *ts_info)
781 {
782 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
783 						mii_ts);
784 
785 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
786 			SOF_TIMESTAMPING_RX_HARDWARE |
787 			SOF_TIMESTAMPING_RAW_HARDWARE;
788 	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
789 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
790 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
791 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
792 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
793 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
794 
795 	return 0;
796 }
797 
/* Vendor statistics exposed via "ethtool -S". For each entry, the value
 * reported is (register & mask) >> off — see nxp_c45_get_stats().
 */
static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = {
	{ "phy_symbol_error_cnt", MDIO_MMD_VEND1,
		VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) },
	{ "phy_link_status_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 8, GENMASK(13, 8) },
	{ "phy_link_availability_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 0, GENMASK(5, 0) },
	{ "phy_link_loss_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 10, GENMASK(15, 10) },
	{ "phy_link_failure_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 0, GENMASK(9, 0) },
	{ "rx_preamble_count", MDIO_MMD_VEND1,
		VEND1_RX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "tx_preamble_count", MDIO_MMD_VEND1,
		VEND1_TX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "rx_ipg_length", MDIO_MMD_VEND1,
		VEND1_RX_IPG_LENGTH, 0, GENMASK(8, 0) },
	{ "tx_ipg_length", MDIO_MMD_VEND1,
		VEND1_TX_IPG_LENGTH, 0, GENMASK(8, 0) },
};
818 
819 static int nxp_c45_get_sset_count(struct phy_device *phydev)
820 {
821 	return ARRAY_SIZE(nxp_c45_hw_stats);
822 }
823 
824 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
825 {
826 	size_t i;
827 
828 	for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
829 		strncpy(data + i * ETH_GSTRING_LEN,
830 			nxp_c45_hw_stats[i].name, ETH_GSTRING_LEN);
831 	}
832 }
833 
834 static void nxp_c45_get_stats(struct phy_device *phydev,
835 			      struct ethtool_stats *stats, u64 *data)
836 {
837 	size_t i;
838 	int ret;
839 
840 	for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
841 		ret = phy_read_mmd(phydev, nxp_c45_hw_stats[i].mmd,
842 				   nxp_c45_hw_stats[i].reg);
843 		if (ret < 0) {
844 			data[i] = U64_MAX;
845 		} else {
846 			data[i] = ret & nxp_c45_hw_stats[i].mask;
847 			data[i] >>= nxp_c45_hw_stats[i].off;
848 		}
849 	}
850 }
851 
/* Enable configuration access at device, port, PHY and port-infra level
 * after a reset. Always returns 0.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	/* Let the device-level config enable settle before the rest. */
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
868 
869 static int nxp_c45_start_op(struct phy_device *phydev)
870 {
871 	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
872 				PHY_START_OP);
873 }
874 
875 static int nxp_c45_config_intr(struct phy_device *phydev)
876 {
877 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
878 		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
879 					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
880 	else
881 		return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
882 					  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
883 }
884 
/* Threaded interrupt handler: acknowledge link events and drain the
 * egress timestamp FIFO.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	/* There is no need for ACK.
	 * The irq signal will be asserted until the EGR TS FIFO will be
	 * emptied.
	 */
	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_IRQ_STATUS);
	if (irq & PTP_IRQ_EGR_TS) {
		while (nxp_c45_get_hwtxts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	return ret;
}
914 
915 static int nxp_c45_soft_reset(struct phy_device *phydev)
916 {
917 	int ret;
918 
919 	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
920 			    DEVICE_CONTROL_RESET);
921 	if (ret)
922 		return ret;
923 
924 	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
925 					 VEND1_DEVICE_CONTROL, ret,
926 					 !(ret & DEVICE_CONTROL_RESET), 20000,
927 					 240000, false);
928 }
929 
930 static int nxp_c45_cable_test_start(struct phy_device *phydev)
931 {
932 	return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
933 			     CABLE_TEST_ENABLE | CABLE_TEST_START);
934 }
935 
936 static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
937 					 bool *finished)
938 {
939 	int ret;
940 	u8 cable_test_result;
941 
942 	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST);
943 	if (!(ret & CABLE_TEST_VALID)) {
944 		*finished = false;
945 		return 0;
946 	}
947 
948 	*finished = true;
949 	cable_test_result = ret & GENMASK(2, 0);
950 
951 	switch (cable_test_result) {
952 	case CABLE_TEST_OK:
953 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
954 					ETHTOOL_A_CABLE_RESULT_CODE_OK);
955 		break;
956 	case CABLE_TEST_SHORTED:
957 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
958 					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
959 		break;
960 	case CABLE_TEST_OPEN:
961 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
962 					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
963 		break;
964 	default:
965 		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
966 					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
967 	}
968 
969 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
970 			   CABLE_TEST_ENABLE);
971 
972 	return nxp_c45_start_op(phydev);
973 }
974 
975 static int nxp_c45_get_sqi(struct phy_device *phydev)
976 {
977 	int reg;
978 
979 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
980 	if (!(reg & SQI_VALID))
981 		return -EINVAL;
982 
983 	reg &= SQI_MASK;
984 
985 	return reg;
986 }
987 
988 static int nxp_c45_get_sqi_max(struct phy_device *phydev)
989 {
990 	return MAX_SQI;
991 }
992 
993 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
994 {
995 	if (delay < MIN_ID_PS) {
996 		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
997 		return -EINVAL;
998 	}
999 
1000 	if (delay > MAX_ID_PS) {
1001 		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1002 		return -EINVAL;
1003 	}
1004 
1005 	return 0;
1006 }
1007 
1008 static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1009 {
1010 	/* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1011 	 * To avoid floating point operations we'll multiply by 10
1012 	 * and get 1 decimal point precision.
1013 	 */
1014 	phase_offset_raw *= 10;
1015 	phase_offset_raw -= 738;
1016 	return div_u64(phase_offset_raw, 9);
1017 }
1018 
1019 static void nxp_c45_disable_delays(struct phy_device *phydev)
1020 {
1021 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
1022 	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
1023 }
1024 
/* Program the RGMII internal delay lines according to the interface
 * mode: TX delay for RGMII_ID/RGMII_TXID, RX delay for
 * RGMII_ID/RGMII_RXID; each line is disabled otherwise. The delay
 * values (in ps) were validated and cached by nxp_c45_get_delays().
 */
static void nxp_c45_set_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u64 tx_delay = priv->tx_delay;
	u64 rx_delay = priv->rx_delay;
	u64 degree;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		/* Convert ps -> degrees -> raw register phase value. */
		degree = div_u64(tx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
				   ID_ENABLE);
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		degree = div_u64(rx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
				   ID_ENABLE);
	}
}
1052 
1053 static int nxp_c45_get_delays(struct phy_device *phydev)
1054 {
1055 	struct nxp_c45_phy *priv = phydev->priv;
1056 	int ret;
1057 
1058 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1059 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1060 		ret = device_property_read_u32(&phydev->mdio.dev,
1061 					       "tx-internal-delay-ps",
1062 					       &priv->tx_delay);
1063 		if (ret)
1064 			priv->tx_delay = DEFAULT_ID_PS;
1065 
1066 		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1067 		if (ret) {
1068 			phydev_err(phydev,
1069 				   "tx-internal-delay-ps invalid value\n");
1070 			return ret;
1071 		}
1072 	}
1073 
1074 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1075 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1076 		ret = device_property_read_u32(&phydev->mdio.dev,
1077 					       "rx-internal-delay-ps",
1078 					       &priv->rx_delay);
1079 		if (ret)
1080 			priv->rx_delay = DEFAULT_ID_PS;
1081 
1082 		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1083 		if (ret) {
1084 			phydev_err(phydev,
1085 				   "rx-internal-delay-ps invalid value\n");
1086 			return ret;
1087 		}
1088 	}
1089 
1090 	return 0;
1091 }
1092 
1093 static int nxp_c45_set_phy_mode(struct phy_device *phydev)
1094 {
1095 	int ret;
1096 
1097 	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
1098 	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);
1099 
1100 	switch (phydev->interface) {
1101 	case PHY_INTERFACE_MODE_RGMII:
1102 		if (!(ret & RGMII_ABILITY)) {
1103 			phydev_err(phydev, "rgmii mode not supported\n");
1104 			return -EINVAL;
1105 		}
1106 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1107 			      MII_BASIC_CONFIG_RGMII);
1108 		nxp_c45_disable_delays(phydev);
1109 		break;
1110 	case PHY_INTERFACE_MODE_RGMII_ID:
1111 	case PHY_INTERFACE_MODE_RGMII_TXID:
1112 	case PHY_INTERFACE_MODE_RGMII_RXID:
1113 		if (!(ret & RGMII_ID_ABILITY)) {
1114 			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
1115 			return -EINVAL;
1116 		}
1117 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1118 			      MII_BASIC_CONFIG_RGMII);
1119 		ret = nxp_c45_get_delays(phydev);
1120 		if (ret)
1121 			return ret;
1122 
1123 		nxp_c45_set_delays(phydev);
1124 		break;
1125 	case PHY_INTERFACE_MODE_MII:
1126 		if (!(ret & MII_ABILITY)) {
1127 			phydev_err(phydev, "mii mode not supported\n");
1128 			return -EINVAL;
1129 		}
1130 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1131 			      MII_BASIC_CONFIG_MII);
1132 		break;
1133 	case PHY_INTERFACE_MODE_REVMII:
1134 		if (!(ret & REVMII_ABILITY)) {
1135 			phydev_err(phydev, "rev-mii mode not supported\n");
1136 			return -EINVAL;
1137 		}
1138 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1139 			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
1140 		break;
1141 	case PHY_INTERFACE_MODE_RMII:
1142 		if (!(ret & RMII_ABILITY)) {
1143 			phydev_err(phydev, "rmii mode not supported\n");
1144 			return -EINVAL;
1145 		}
1146 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1147 			      MII_BASIC_CONFIG_RMII);
1148 		break;
1149 	case PHY_INTERFACE_MODE_SGMII:
1150 		if (!(ret & SGMII_ABILITY)) {
1151 			phydev_err(phydev, "sgmii mode not supported\n");
1152 			return -EINVAL;
1153 		}
1154 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
1155 			      MII_BASIC_CONFIG_SGMII);
1156 		break;
1157 	case PHY_INTERFACE_MODE_INTERNAL:
1158 		break;
1159 	default:
1160 		return -EINVAL;
1161 	}
1162 
1163 	return 0;
1164 }
1165 
/* One-time PHY configuration: enable config access, apply errata
 * workarounds, enable statistics counters, select the MAC interface
 * mode, set up PTP timestamping basics, and start normal operation.
 * The sequence is order-dependent; do not reorder the writes.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	/* Configuration registers are only writable after this. */
	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	/* Let the PHY bring itself up automatically. */
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	/* Enable the hardware statistics counters exposed via get_stats. */
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* 100BASE-T1 master/slave is fixed-speed; no autonegotiation. */
	phydev->autoneg = AUTONEG_DISABLE;

	/* PTP setup: clock period for 100BASE-T1, free-running LTC,
	 * RX timestamp insertion mode, and the PTP port function.
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK_PERIOD,
		      PTP_CLK_PERIOD_100BT1);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_HW_LTC_LOCK_CTRL,
			   HW_LTC_LOCK_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);

	return nxp_c45_start_op(phydev);
}
1213 
1214 static int nxp_c45_probe(struct phy_device *phydev)
1215 {
1216 	struct nxp_c45_phy *priv;
1217 	int ptp_ability;
1218 	int ret = 0;
1219 
1220 	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
1221 	if (!priv)
1222 		return -ENOMEM;
1223 
1224 	skb_queue_head_init(&priv->tx_queue);
1225 	skb_queue_head_init(&priv->rx_queue);
1226 
1227 	priv->phydev = phydev;
1228 
1229 	phydev->priv = priv;
1230 
1231 	mutex_init(&priv->ptp_lock);
1232 
1233 	ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1234 				   VEND1_PORT_ABILITIES);
1235 	ptp_ability = !!(ptp_ability & PTP_ABILITY);
1236 	if (!ptp_ability) {
1237 		phydev_dbg(phydev, "the phy does not support PTP");
1238 		goto no_ptp_support;
1239 	}
1240 
1241 	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
1242 	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
1243 		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
1244 		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
1245 		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
1246 		priv->mii_ts.ts_info = nxp_c45_ts_info;
1247 		phydev->mii_ts = &priv->mii_ts;
1248 		ret = nxp_c45_init_ptp_clock(priv);
1249 	} else {
1250 		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
1251 	}
1252 
1253 no_ptp_support:
1254 
1255 	return ret;
1256 }
1257 
/* Driver teardown: unregister the PTP clock (if probe created one)
 * BEFORE purging the timestamp skb queues, so no clock callback can
 * touch the queues while they are being freed.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	/* ptp_clock is NULL when PTP was unsupported or disabled. */
	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
}
1268 
/* phylib driver table: one entry, the TJA1103. */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.features		= PHY_BASIC_T1_FEATURES,
		/* Lifecycle */
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		/* Interrupts */
		.config_intr		= nxp_c45_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		/* Link state and power management */
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		/* Statistics and diagnostics */
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
};
1294 
/* Register the driver table with phylib (generates module init/exit). */
module_phy_driver(nxp_c45_driver);

/* MDIO device IDs for module autoloading. */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");
1307