// SPDX-License-Identifier: GPL-2.0
/* NXP C45 PHY driver
 * Copyright (C) 2021 NXP
 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
 */

#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/processor.h>
#include <linux/property.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>

#define PHY_ID_TJA_1103			0x001BB010

#define PMAPMD_B100T1_PMAPMD_CTL	0x0834
#define B100T1_PMAPMD_CONFIG_EN		BIT(15)
#define B100T1_PMAPMD_MASTER		BIT(14)
#define MASTER_MODE			(B100T1_PMAPMD_CONFIG_EN | \
					 B100T1_PMAPMD_MASTER)
#define SLAVE_MODE			(B100T1_PMAPMD_CONFIG_EN)

#define VEND1_DEVICE_CONTROL		0x0040
#define DEVICE_CONTROL_RESET		BIT(15)
#define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
#define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)

#define VEND1_PHY_IRQ_ACK		0x80A0
#define VEND1_PHY_IRQ_EN		0x80A1
#define VEND1_PHY_IRQ_STATUS		0x80A2
#define PHY_IRQ_LINK_EVENT		BIT(1)

#define VEND1_PHY_CONTROL		0x8100
#define PHY_CONFIG_EN			BIT(14)
#define PHY_START_OP			BIT(0)

#define VEND1_PHY_CONFIG		0x8108
#define PHY_CONFIG_AUTO			BIT(0)

#define VEND1_SIGNAL_QUALITY		0x8320
#define SQI_VALID			BIT(14)
#define SQI_MASK			GENMASK(2, 0)
#define MAX_SQI				SQI_MASK

#define VEND1_CABLE_TEST		0x8330
#define CABLE_TEST_ENABLE		BIT(15)
#define CABLE_TEST_START		BIT(14)
#define CABLE_TEST_VALID		BIT(13)
#define CABLE_TEST_OK			0x00
#define CABLE_TEST_SHORTED		0x01
#define CABLE_TEST_OPEN			0x02
#define CABLE_TEST_UNKNOWN		0x07

#define VEND1_PORT_CONTROL		0x8040
#define PORT_CONTROL_EN			BIT(14)

#define VEND1_PORT_ABILITIES		0x8046
#define PTP_ABILITY			BIT(3)

#define VEND1_PORT_INFRA_CONTROL	0xAC00
#define PORT_INFRA_CONTROL_EN		BIT(14)

#define VEND1_RXID			0xAFCC
#define VEND1_TXID			0xAFCD
#define ID_ENABLE			BIT(15)

#define VEND1_ABILITIES			0xAFC4
#define RGMII_ID_ABILITY		BIT(15)
#define RGMII_ABILITY			BIT(14)
#define RMII_ABILITY			BIT(10)
#define REVMII_ABILITY			BIT(9)
#define MII_ABILITY			BIT(8)
#define SGMII_ABILITY			BIT(0)

#define VEND1_MII_BASIC_CONFIG		0xAFC6
#define MII_BASIC_CONFIG_REV		BIT(8)
#define MII_BASIC_CONFIG_SGMII		0x9
#define MII_BASIC_CONFIG_RGMII		0x7
#define MII_BASIC_CONFIG_RMII		0x5
#define MII_BASIC_CONFIG_MII		0x4

#define VEND1_SYMBOL_ERROR_COUNTER	0x8350
#define VEND1_LINK_DROP_COUNTER		0x8352
#define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
#define VEND1_R_GOOD_FRAME_CNT		0xA950
#define VEND1_R_BAD_FRAME_CNT		0xA952
#define VEND1_R_RXER_FRAME_CNT		0xA954
#define VEND1_RX_PREAMBLE_COUNT		0xAFCE
#define VEND1_TX_PREAMBLE_COUNT		0xAFCF
#define VEND1_RX_IPG_LENGTH		0xAFD0
#define VEND1_TX_IPG_LENGTH		0xAFD1
#define COUNTER_EN			BIT(15)

#define VEND1_LTC_LOAD_CTRL		0x1105
#define READ_LTC			BIT(2)
#define LOAD_LTC			BIT(0)

#define VEND1_LTC_WR_NSEC_0		0x1106
#define VEND1_LTC_WR_NSEC_1		0x1107
#define VEND1_LTC_WR_SEC_0		0x1108
#define VEND1_LTC_WR_SEC_1		0x1109

#define VEND1_LTC_RD_NSEC_0		0x110A
#define VEND1_LTC_RD_NSEC_1		0x110B
#define VEND1_LTC_RD_SEC_0		0x110C
#define VEND1_LTC_RD_SEC_1		0x110D

#define VEND1_RATE_ADJ_SUBNS_0		0x110F
#define VEND1_RATE_ADJ_SUBNS_1		0x1110
#define CLK_RATE_ADJ_LD			BIT(15)
#define CLK_RATE_ADJ_DIR		BIT(14)

#define VEND1_HW_LTC_LOCK_CTRL		0x1115
#define HW_LTC_LOCK_EN			BIT(0)

#define VEND1_PTP_IRQ_EN		0x1131
#define VEND1_PTP_IRQ_STATUS		0x1132
#define PTP_IRQ_EGR_TS			BIT(0)

#define VEND1_RX_TS_INSRT_CTRL		0x114D
#define RX_TS_INSRT_MODE2		0x02
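/* In this insertion mode the PHY stores the raw 32-bit RX timestamp
 * (2 bits of seconds, 30 bits of nanoseconds) in the reserved2 field
 * of the PTP header, where the aux worker picks it up.
 */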

#define VEND1_EGR_RING_DATA_0		0x114E
#define VEND1_EGR_RING_DATA_1_SEQ_ID	0x114F
#define VEND1_EGR_RING_DATA_2_NSEC_15_0	0x1150
#define VEND1_EGR_RING_DATA_3		0x1151
#define VEND1_EGR_RING_CTRL		0x1154

#define RING_DATA_0_DOMAIN_NUMBER	GENMASK(7, 0)
#define RING_DATA_0_MSG_TYPE		GENMASK(11, 8)
#define RING_DATA_0_SEC_4_2		GENMASK(14, 12)
#define RING_DATA_0_TS_VALID		BIT(15)

#define RING_DATA_3_NSEC_29_16		GENMASK(13, 0)
#define RING_DATA_3_SEC_1_0		GENMASK(15, 14)
#define RING_DATA_5_SEC_16_5		GENMASK(15, 4)
#define RING_DONE			BIT(0)

#define TS_SEC_MASK			GENMASK(1, 0)
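/* Hardware timestamps carry only the two LSBs of the seconds counter;
 * the full seconds value is reconstructed against the current LTC time
 * in nxp_c45_reconstruct_ts().
 */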

#define VEND1_PORT_FUNC_ENABLES		0x8048
#define PTP_ENABLE			BIT(3)

#define VEND1_PORT_PTP_CONTROL		0x9000
#define PORT_PTP_CONTROL_BYPASS		BIT(11)

#define VEND1_PTP_CLK_PERIOD		0x1104
#define PTP_CLK_PERIOD_100BT1		15ULL

#define VEND1_EVENT_MSG_FILT		0x1148
#define EVENT_MSG_FILT_ALL		0x0F
#define EVENT_MSG_FILT_NONE		0x00

#define VEND1_TX_PIPE_DLY_NS		0x1149
#define VEND1_TX_PIPEDLY_SUBNS		0x114A
#define VEND1_RX_PIPE_DLY_NS		0x114B
#define VEND1_RX_PIPEDLY_SUBNS		0x114C

#define RGMII_PERIOD_PS			8000U
#define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
#define MIN_ID_PS			1644U
#define MAX_ID_PS			2260U
#define DEFAULT_ID_PS			2000U
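/* The RGMII internal delay is implemented as a phase shift of the 8 ns
 * (125 MHz) clock, so the requested delay is first converted to degrees.
 * The configurable window is roughly 1.6 ns to 2.3 ns around the ideal
 * 2 ns delay.
 */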

#define PPM_TO_SUBNS_INC(ppb)	div_u64(GENMASK(31, 0) * (ppb) * \
					PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
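/* Despite the name, the argument is in ppb. A ppb offset changes each
 * 15 ns clock period by ppb / 10^9 * 15 ns; the macro expresses that
 * correction as a 32-bit fixed-point fraction of a nanosecond
 * (GENMASK(31, 0) is roughly 2^32), which nxp_c45_ptp_adjfine() then
 * splits across the two RATE_ADJ_SUBNS registers.
 */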

#define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)

struct nxp_c45_skb_cb {
	struct ptp_header *header;
	unsigned int type;
};

struct nxp_c45_hwts {
	u32	nsec;
	u32	sec;
	u8	domain_number;
	u16	sequence_id;
	u8	msg_type;
};

struct nxp_c45_phy {
	struct phy_device *phydev;
	struct mii_timestamper mii_ts;
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info caps;
	struct sk_buff_head tx_queue;
	struct sk_buff_head rx_queue;
	/* used to access the PTP registers atomically */
	struct mutex ptp_lock;
	int hwts_tx;
	int hwts_rx;
	u32 tx_delay;
	u32 rx_delay;
};

struct nxp_c45_phy_stats {
	const char	*name;
	u8		mmd;
	u16		reg;
	u8		off;
	u16		mask;
};

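/* With no interrupt line wired, egress timestamps cannot be signalled
 * through the PTP IRQ, so the timestamp FIFO has to be polled from the
 * PTP aux worker instead.
 */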
static bool nxp_c45_poll_txts(struct phy_device *phydev)
{
	return phydev->irq <= 0;
}

static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
		      READ_LTC);
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_LTC_RD_NSEC_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    VEND1_LTC_RD_NSEC_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  VEND1_LTC_RD_SEC_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   VEND1_LTC_RD_SEC_1) << 16;

	return 0;
}

static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				  struct timespec64 *ts,
				  struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}

static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_NSEC_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_WR_SEC_1,
		      ts->tv_sec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_LTC_LOAD_CTRL,
		      LOAD_LTC);

	return 0;
}

static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				 const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_settime64(ptp, ts);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}

static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_RATE_ADJ_SUBNS_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}

static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	struct timespec64 now, then;

	mutex_lock(&priv->ptp_lock);
	then = ns_to_timespec64(delta);
	_nxp_c45_ptp_gettimex64(ptp, &now, NULL);
	now = timespec64_add(now, then);
	_nxp_c45_ptp_settime64(ptp, &now);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}

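/* Splice the partial seconds counter captured in the hardware timestamp
 * into the full LTC seconds value. If the low bits of the LTC are
 * already smaller than those of the timestamp, the LTC wrapped the 4 s
 * window after the timestamp was taken, so step back one window first.
 * Example: LTC sec = 9 (low bits 01), hwts sec low bits = 11 ->
 * 9 - 4 = 5, then (5 & ~3) | 3 = 7.
 */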
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= BIT(2);
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}

static bool nxp_c45_match_ts(struct ptp_header *header,
			     struct nxp_c45_hwts *hwts,
			     unsigned int type)
{
	return ntohs(header->sequence_id) == hwts->sequence_id &&
	       ptp_get_msgtype(header, type) == hwts->msg_type &&
	       header->domain_number == hwts->domain_number;
}

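/* Pop one entry from the egress timestamp FIFO. Writing RING_DONE
 * advances the ring; the popped entry holds a timestamp only if
 * TS_VALID is set.
 */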
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	hwts->domain_number = reg;
	hwts->msg_type = (reg & RING_DATA_0_MSG_TYPE) >> 8;
	hwts->sec = (reg & RING_DATA_0_SEC_4_2) >> 10;
	hwts->sequence_id = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
					 VEND1_EGR_RING_DATA_1_SEQ_ID);
	hwts->nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  VEND1_EGR_RING_DATA_2_NSEC_15_0);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_3);
	hwts->nsec |= (reg & RING_DATA_3_NSEC_29_16) << 16;
	hwts->sec |= (reg & RING_DATA_3_SEC_1_0) >> 14;

nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}

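/* Match a hardware egress timestamp against the queued skbs by sequence
 * id, message type and domain number, and complete the TX timestamp of
 * the matching skb.
 */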
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match any skb\n");
	}
}

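/* PTP aux worker: drains the egress timestamp FIFO when polling (no IRQ
 * available) and finishes RX timestamps by combining the raw value the
 * PHY stored in the PTP header's reserved2 field with the current LTC
 * time.
 */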
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool txts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		txts_valid = nxp_c45_get_hwtxts(priv, &hwts);
		if (unlikely(!txts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx_ni(skb);
	}

	return reschedule ? 1 : -1;
}

static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.do_aux_work	= nxp_c45_do_aux_work,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}

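/* TX timestamping callback: hold the skb until its hardware timestamp
 * becomes available, either through the PTP IRQ or, when polling, via
 * the aux worker scheduled here.
 */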
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}

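/* RX timestamping callback: defer delivery to the aux worker, which
 * extracts the timestamp inserted by the PHY and fills in the skb
 * hardware timestamp before passing the packet up.
 */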
static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct ptp_header *header = ptp_parse_header(skb, type);

	if (!header)
		return false;

	if (!priv->hwts_rx)
		return false;

	NXP_C45_SKB_CB(skb)->header = header;
	skb_queue_tail(&priv->rx_queue, skb);
	ptp_schedule_worker(priv->ptp_clock, 0);

	return true;
}

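/* SIOCSHWTSTAMP handler: while timestamping is enabled, event messages
 * are passed to the PTP block (bypass cleared); otherwise the block is
 * bypassed and the event message filter is emptied.
 */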
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct ifreq *ifreq)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	priv->hwts_tx = cfg.tx_type;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		priv->hwts_rx = 1;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_ALL);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_EVENT_MSG_FILT,
			      EVENT_MSG_FILT_NONE);
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_PTP_CONTROL,
				 PORT_PTP_CONTROL_BYPASS);
	}

	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_IRQ_EN, PTP_IRQ_EGR_TS);

nxp_c45_no_ptp_irq:
	return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
			   struct ethtool_ts_info *ts_info)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;
	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);

	return 0;
}

static const struct nxp_c45_phy_stats nxp_c45_hw_stats[] = {
	{ "phy_symbol_error_cnt", MDIO_MMD_VEND1,
		VEND1_SYMBOL_ERROR_COUNTER, 0, GENMASK(15, 0) },
	{ "phy_link_status_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 8, GENMASK(13, 8) },
	{ "phy_link_availability_drop_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_DROP_COUNTER, 0, GENMASK(5, 0) },
	{ "phy_link_loss_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 10, GENMASK(15, 10) },
	{ "phy_link_failure_cnt", MDIO_MMD_VEND1,
		VEND1_LINK_LOSSES_AND_FAILURES, 0, GENMASK(9, 0) },
	{ "r_good_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_GOOD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_bad_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_BAD_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "r_rxer_frame_cnt", MDIO_MMD_VEND1,
		VEND1_R_RXER_FRAME_CNT, 0, GENMASK(15, 0) },
	{ "rx_preamble_count", MDIO_MMD_VEND1,
		VEND1_RX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "tx_preamble_count", MDIO_MMD_VEND1,
		VEND1_TX_PREAMBLE_COUNT, 0, GENMASK(5, 0) },
	{ "rx_ipg_length", MDIO_MMD_VEND1,
		VEND1_RX_IPG_LENGTH, 0, GENMASK(8, 0) },
	{ "tx_ipg_length", MDIO_MMD_VEND1,
		VEND1_TX_IPG_LENGTH, 0, GENMASK(8, 0) },
};

static int nxp_c45_get_sset_count(struct phy_device *phydev)
{
	return ARRAY_SIZE(nxp_c45_hw_stats);
}

static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
		strncpy(data + i * ETH_GSTRING_LEN,
			nxp_c45_hw_stats[i].name, ETH_GSTRING_LEN);
	}
}

static void nxp_c45_get_stats(struct phy_device *phydev,
			      struct ethtool_stats *stats, u64 *data)
{
	size_t i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(nxp_c45_hw_stats); i++) {
		ret = phy_read_mmd(phydev, nxp_c45_hw_stats[i].mmd,
				   nxp_c45_hw_stats[i].reg);
		if (ret < 0) {
			data[i] = U64_MAX;
		} else {
			data[i] = ret & nxp_c45_hw_stats[i].mask;
			data[i] >>= nxp_c45_hw_stats[i].off;
		}
	}
}

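/* Enable configuration at device, port, PHY and port infrastructure
 * level; the short sleep presumably lets the global config enable take
 * effect before the per-block enables are written.
 */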
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}

static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}

static int nxp_c45_config_intr(struct phy_device *phydev)
{
	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
	else
		return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
}

static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	/* There is no need for an ACK: the IRQ signal stays asserted
	 * until the egress timestamp FIFO is emptied.
	 */
	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_IRQ_STATUS);
	if (irq & PTP_IRQ_EGR_TS) {
		while (nxp_c45_get_hwtxts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	return ret;
}

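/* The reset bit self-clears once the reset completes; poll for up to
 * 240 ms.
 */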
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}

static int nxp_c45_cable_test_start(struct phy_device *phydev)
{
	return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
			     CABLE_TEST_ENABLE | CABLE_TEST_START);
}

static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	int ret;
	u8 cable_test_result;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST);
	if (!(ret & CABLE_TEST_VALID)) {
		*finished = false;
		return 0;
	}

	*finished = true;
	cable_test_result = ret & GENMASK(2, 0);

	switch (cable_test_result) {
	case CABLE_TEST_OK:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OK);
		break;
	case CABLE_TEST_SHORTED:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
		break;
	case CABLE_TEST_OPEN:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
		break;
	default:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
	}

	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_CABLE_TEST,
			   CABLE_TEST_ENABLE);

	return nxp_c45_start_op(phydev);
}

static int nxp_c45_setup_master_slave(struct phy_device *phydev)
{
	switch (phydev->master_slave_set) {
	case MASTER_SLAVE_CFG_MASTER_FORCE:
	case MASTER_SLAVE_CFG_MASTER_PREFERRED:
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
			      MASTER_MODE);
		break;
	case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
	case MASTER_SLAVE_CFG_SLAVE_FORCE:
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL,
			      SLAVE_MODE);
		break;
	case MASTER_SLAVE_CFG_UNKNOWN:
	case MASTER_SLAVE_CFG_UNSUPPORTED:
		return 0;
	default:
		phydev_warn(phydev, "Unsupported Master/Slave mode\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nxp_c45_read_master_slave(struct phy_device *phydev)
{
	int reg;

	phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
	phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;

	reg = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_B100T1_PMAPMD_CTL);
	if (reg < 0)
		return reg;

	if (reg & B100T1_PMAPMD_MASTER) {
		phydev->master_slave_get = MASTER_SLAVE_CFG_MASTER_FORCE;
		phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
	} else {
		phydev->master_slave_get = MASTER_SLAVE_CFG_SLAVE_FORCE;
		phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
	}

	return 0;
}

static int nxp_c45_config_aneg(struct phy_device *phydev)
{
	return nxp_c45_setup_master_slave(phydev);
}

static int nxp_c45_read_status(struct phy_device *phydev)
{
	int ret;

	ret = genphy_c45_read_status(phydev);
	if (ret)
		return ret;

	ret = nxp_c45_read_master_slave(phydev);
	if (ret)
		return ret;

	return 0;
}

static int nxp_c45_get_sqi(struct phy_device *phydev)
{
	int reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
	if (!(reg & SQI_VALID))
		return -EINVAL;

	reg &= SQI_MASK;

	return reg;
}

static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}

static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
{
	if (delay < MIN_ID_PS) {
		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
		return -EINVAL;
	}

	if (delay > MAX_ID_PS) {
		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
		return -EINVAL;
	}

	return 0;
}

static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
{
	/* The delay, in degrees of phase, is 73.8 + phase_offset_raw * 0.9.
	 * To avoid floating point operations, multiply by 10 and work with
	 * one decimal place of precision.
	 */
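	/* Worked example: the default 2000 ps delay is
	 * 2000 / PS_PER_DEGREE = 2000 / 22 = 90 degrees, which maps to
	 * the raw value (90 * 10 - 738) / 9 = 18.
	 */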
	phase_offset_raw *= 10;
	phase_offset_raw -= 738;
	return div_u64(phase_offset_raw, 9);
}

static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}

static void nxp_c45_set_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u64 tx_delay = priv->tx_delay;
	u64 rx_delay = priv->rx_delay;
	u64 degree;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		degree = div_u64(tx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
				   ID_ENABLE);
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		degree = div_u64(rx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
				   ID_ENABLE);
	}
}

static int nxp_c45_get_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	int ret;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "tx-internal-delay-ps",
					       &priv->tx_delay);
		if (ret)
			priv->tx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
		if (ret) {
			phydev_err(phydev,
				   "invalid tx-internal-delay-ps value\n");
			return ret;
		}
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "rx-internal-delay-ps",
					       &priv->rx_delay);
		if (ret)
			priv->rx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
		if (ret) {
			phydev_err(phydev,
				   "invalid rx-internal-delay-ps value\n");
			return ret;
		}
	}

	return 0;
}

static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RMII);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	phydev->autoneg = AUTONEG_DISABLE;

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PTP_CLK_PERIOD,
		      PTP_CLK_PERIOD_100BT1);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_HW_LTC_LOCK_CTRL,
			   HW_LTC_LOCK_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);

	return nxp_c45_start_op(phydev);
}

static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	int ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	mutex_init(&priv->ptp_lock);

	ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_ABILITIES);
	ptp_ability = !!(ptp_ability & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_info(phydev, "the phy does not support PTP\n");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		ret = nxp_c45_init_ptp_clock(priv);
	} else {
		phydev_dbg(phydev, "PTP support not enabled even though the phy supports it\n");
	}

no_ptp_support:

	return ret;
}

static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.features		= PHY_BASIC_T1_FEATURES,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= nxp_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= nxp_c45_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= nxp_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
	},
};

module_phy_driver(nxp_c45_driver);

static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");