1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Driver for Microsemi VSC85xx PHYs - timestamping and PHC support
4  *
5  * Authors: Quentin Schulz & Antoine Tenart
6  * License: Dual MIT/GPL
7  * Copyright (c) 2020 Microsemi Corporation
8  */
9 
10 #include <linux/gpio/consumer.h>
11 #include <linux/ip.h>
12 #include <linux/net_tstamp.h>
13 #include <linux/mii.h>
14 #include <linux/phy.h>
15 #include <linux/ptp_classify.h>
16 #include <linux/ptp_clock_kernel.h>
17 #include <linux/udp.h>
18 #include <asm/unaligned.h>
19 
20 #include "mscc.h"
21 #include "mscc_ptp.h"
22 
23 /* Two PHYs share the same 1588 processor, which has to be configured entirely
24  * through the base PHY of this processor.
25  */
26 /* phydev->mdio.bus->mdio_lock must be held when calling this function */
27 static int phy_ts_base_write(struct phy_device *phydev, u32 regnum, u16 val)
28 {
29 	struct vsc8531_private *priv = phydev->priv;
30 
31 	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
32 	return __mdiobus_write(phydev->mdio.bus, priv->ts_base_addr, regnum,
33 			       val);
34 }
35 
36 /* phydev->mdio.bus->mdio_lock must be held when calling this function */
37 static int phy_ts_base_read(struct phy_device *phydev, u32 regnum)
38 {
39 	struct vsc8531_private *priv = phydev->priv;
40 
41 	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
42 	return __mdiobus_read(phydev->mdio.bus, priv->ts_base_addr, regnum);
43 }
44 
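/* Hardware targets addressable through BIU_BLK_ID() in the shared 1588 block.
 * Judging from the CSR helpers below, engine/processor 0 serves the base PHY
 * of the pair and engine/processor 1 serves the other PHY; engines 2 appear
 * unused by this driver.
 */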
45 enum ts_blk_hw {
46 	INGRESS_ENGINE_0,
47 	EGRESS_ENGINE_0,
48 	INGRESS_ENGINE_1,
49 	EGRESS_ENGINE_1,
50 	INGRESS_ENGINE_2,
51 	EGRESS_ENGINE_2,
52 	PROCESSOR_0,
53 	PROCESSOR_1,
54 };
55 
56 enum ts_blk {
57 	INGRESS,
58 	EGRESS,
59 	PROCESSOR,
60 };
61 
62 static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk,
63 			       u16 addr)
64 {
65 	struct vsc8531_private *priv = phydev->priv;
66 	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
67 	u32 val, cnt = 0;
68 	enum ts_blk_hw blk_hw;
69 
70 	switch (blk) {
71 	case INGRESS:
72 		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
73 		break;
74 	case EGRESS:
75 		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
76 		break;
77 	case PROCESSOR:
78 	default:
79 		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
80 		break;
81 	}
82 
83 	phy_lock_mdio_bus(phydev);
84 
85 	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);
86 
87 	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
88 			  BIU_ADDR_READ | BIU_BLK_ID(blk_hw) |
89 			  BIU_CSR_ADDR(addr));
90 
91 	do {
92 		val = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
93 	} while (!(val & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);
94 
95 	val = phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_MSB);
96 	val <<= 16;
97 	val |= phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_LSB);
98 
99 	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
100 
101 	phy_unlock_mdio_bus(phydev);
102 
103 	return val;
104 }
105 
106 static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk,
107 				 u16 addr, u32 val)
108 {
109 	struct vsc8531_private *priv = phydev->priv;
110 	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
111 	u32 reg, bypass, cnt = 0, lower = val & 0xffff, upper = val >> 16;
112 	bool cond = (addr == MSCC_PHY_PTP_LTC_CTRL ||
113 		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_MASK ||
114 		     addr == MSCC_PHY_1588_VSC85XX_INT_MASK ||
115 		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_STATUS ||
116 		     addr == MSCC_PHY_1588_VSC85XX_INT_STATUS) &&
117 		    blk == PROCESSOR;
118 	enum ts_blk_hw blk_hw;
119 
120 	switch (blk) {
121 	case INGRESS:
122 		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
123 		break;
124 	case EGRESS:
125 		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
126 		break;
127 	case PROCESSOR:
128 	default:
129 		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
130 		break;
131 	}
132 
133 	phy_lock_mdio_bus(phydev);
134 
135 	bypass = phy_ts_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);
136 
137 	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);
138 
139 	if (!cond || upper)
140 		phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper);
141 
142 	phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower);
143 
144 	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
145 			  BIU_ADDR_WRITE | BIU_BLK_ID(blk_hw) |
146 			  BIU_CSR_ADDR(addr));
147 
148 	do {
149 		reg = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
150 	} while (!(reg & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);
151 
152 	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
153 
154 	if (cond && upper)
155 		phy_ts_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, bypass);
156 
157 	phy_unlock_mdio_bus(phydev);
158 }
159 
160 /* Pick bytes from PTP header */
161 #define PTP_HEADER_TRNSP_MSG		26
162 #define PTP_HEADER_DOMAIN_NUM		25
163 #define PTP_HEADER_BYTE_8_31(x)		(31 - (x))
164 #define MAC_ADDRESS_BYTE(x)		((x) + (35 - ETH_ALEN + 1))
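/* These values are the analyzer's byte-select indexes used to build the
 * 16-byte frame signature: PTP_HEADER_BYTE_8_31() presumably maps a PTP
 * header byte offset (8..31) to its selector value, and MAC_ADDRESS_BYTE()
 * selects a byte of the destination MAC address.
 */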
165 
166 static int vsc85xx_ts_fsb_init(struct phy_device *phydev)
167 {
168 	u8 sig_sel[16] = {};
169 	signed char i, pos = 0;
170 
171 	/* Seq ID is 2B long and starts at 30th byte */
172 	for (i = 1; i >= 0; i--)
173 		sig_sel[pos++] = PTP_HEADER_BYTE_8_31(30 + i);
174 
175 	/* DomainNum */
176 	sig_sel[pos++] = PTP_HEADER_DOMAIN_NUM;
177 
178 	/* MsgType */
179 	sig_sel[pos++] = PTP_HEADER_TRNSP_MSG;
180 
181 	/* MAC address is 6B long */
182 	for (i = ETH_ALEN - 1; i >= 0; i--)
183 		sig_sel[pos++] = MAC_ADDRESS_BYTE(i);
184 
185 	/* Fill the last bytes of the signature to reach a 16B signature */
186 	for (; pos < ARRAY_SIZE(sig_sel); pos++)
187 		sig_sel[pos] = PTP_HEADER_TRNSP_MSG;
188 
189 	for (i = 0; i <= 2; i++) {
190 		u32 val = 0;
191 
192 		for (pos = i * 5 + 4; pos >= i * 5; pos--)
193 			val = (val << 6) | sig_sel[pos];
194 
195 		vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(i),
196 				     val);
197 	}
198 
199 	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(3),
200 			     sig_sel[15]);
201 
202 	return 0;
203 }
204 
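/* Egress/ingress latency compensation values, with and without MACsec.
 * vsc85xx_ts_set_latencies() indexes them as 0 = 1000Mbps, 1 = 100Mbps and
 * 2 = 10Mbps (copper); the trailing fiber entries are not selected by the
 * current speed-to-index mapping.
 */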
205 static const u32 vsc85xx_egr_latency[] = {
206 	/* Copper Egress */
207 	1272, /* 1000Mbps */
208 	12516, /* 100Mbps */
209 	125444, /* 10Mbps */
210 	/* Fiber Egress */
211 	1277, /* 1000Mbps */
212 	12537, /* 100Mbps */
213 };
214 
215 static const u32 vsc85xx_egr_latency_macsec[] = {
216 	/* Copper Egress ON */
217 	3496, /* 1000Mbps */
218 	34760, /* 100Mbps */
219 	347844, /* 10Mbps */
220 	/* Fiber Egress ON */
221 	3502, /* 1000Mbps */
222 	34780, /* 100Mbps */
223 };
224 
225 static const u32 vsc85xx_ingr_latency[] = {
226 	/* Copper Ingress */
227 	208, /* 1000Mbps */
228 	304, /* 100Mbps */
229 	2023, /* 10Mbps */
230 	/* Fiber Ingress */
231 	98, /* 1000Mbps */
232 	197, /* 100Mbps */
233 };
234 
235 static const u32 vsc85xx_ingr_latency_macsec[] = {
236 	/* Copper Ingress */
237 	2408, /* 1000Mbps */
238 	22300, /* 100Mbps */
239 	222009, /* 10Mbps */
240 	/* Fiber Ingress */
241 	2299, /* 1000Mbps */
242 	22192, /* 100Mbps */
243 };
244 
245 static void vsc85xx_ts_set_latencies(struct phy_device *phydev)
246 {
247 	u32 val, ingr_latency, egr_latency;
248 	u8 idx;
249 
250 	/* No need to set latencies of packets if the PHY is not connected */
251 	if (!phydev->link)
252 		return;
253 
254 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_STALL_LATENCY,
255 			     STALL_EGR_LATENCY(phydev->speed));
256 
257 	switch (phydev->speed) {
258 	case SPEED_100:
259 		idx = 1;
260 		break;
261 	case SPEED_1000:
262 		idx = 0;
263 		break;
264 	default:
265 		idx = 2;
266 		break;
267 	}
268 
269 	ingr_latency = IS_ENABLED(CONFIG_MACSEC) ?
270 		vsc85xx_ingr_latency_macsec[idx] : vsc85xx_ingr_latency[idx];
271 	egr_latency = IS_ENABLED(CONFIG_MACSEC) ?
272 		vsc85xx_egr_latency_macsec[idx] : vsc85xx_egr_latency[idx];
273 
274 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_LOCAL_LATENCY,
275 			     PTP_INGR_LOCAL_LATENCY(ingr_latency));
276 
277 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
278 				  MSCC_PHY_PTP_INGR_TSP_CTRL);
279 	val |= PHY_PTP_INGR_TSP_CTRL_LOAD_DELAYS;
280 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
281 			     val);
282 
283 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_LOCAL_LATENCY,
284 			     PTP_EGR_LOCAL_LATENCY(egr_latency));
285 
286 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
287 	val |= PHY_PTP_EGR_TSP_CTRL_LOAD_DELAYS;
288 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
289 }
290 
291 static int vsc85xx_ts_disable_flows(struct phy_device *phydev, enum ts_blk blk)
292 {
293 	u8 i;
294 
295 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP, 0);
296 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
297 			     IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2));
298 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_NXT_COMP, 0);
299 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_UDP_CHKSUM,
300 			     IP2_NXT_PROT_UDP_CHKSUM_WIDTH(2));
301 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_MPLS_COMP_NXT_COMP, 0);
302 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, 0);
303 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH2_NTX_PROT, 0);
304 
305 	for (i = 0; i < COMP_MAX_FLOWS; i++) {
306 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(i),
307 				     IP1_FLOW_VALID_CH0 | IP1_FLOW_VALID_CH1);
308 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_FLOW_ENA(i),
309 				     IP2_FLOW_VALID_CH0 | IP2_FLOW_VALID_CH1);
310 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(i),
311 				     ETH1_FLOW_VALID_CH0 | ETH1_FLOW_VALID_CH1);
312 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH2_FLOW_ENA(i),
313 				     ETH2_FLOW_VALID_CH0 | ETH2_FLOW_VALID_CH1);
314 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_MPLS_FLOW_CTRL(i),
315 				     MPLS_FLOW_VALID_CH0 | MPLS_FLOW_VALID_CH1);
316 
317 		if (i >= PTP_COMP_MAX_FLOWS)
318 			continue;
319 
320 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i), 0);
321 		vsc85xx_ts_write_csr(phydev, blk,
322 				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), 0);
323 		vsc85xx_ts_write_csr(phydev, blk,
324 				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i), 0);
325 		vsc85xx_ts_write_csr(phydev, blk,
326 				     MSCC_ANA_PTP_FLOW_MASK_LOWER(i), 0);
327 		vsc85xx_ts_write_csr(phydev, blk,
328 				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i), 0);
329 		vsc85xx_ts_write_csr(phydev, blk,
330 				     MSCC_ANA_PTP_FLOW_MATCH_LOWER(i), 0);
331 		vsc85xx_ts_write_csr(phydev, blk,
332 				     MSCC_ANA_PTP_FLOW_PTP_ACTION(i), 0);
333 		vsc85xx_ts_write_csr(phydev, blk,
334 				     MSCC_ANA_PTP_FLOW_PTP_ACTION2(i), 0);
335 		vsc85xx_ts_write_csr(phydev, blk,
336 				     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(i), 0);
337 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_OAM_PTP_FLOW_ENA(i),
338 				     0);
339 	}
340 
341 	return 0;
342 }
343 
344 static int vsc85xx_ts_eth_cmp1_sig(struct phy_device *phydev)
345 {
346 	u32 val;
347 
348 	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT);
349 	val &= ~ANA_ETH1_NTX_PROT_SIG_OFF_MASK;
350 	val |= ANA_ETH1_NTX_PROT_SIG_OFF(0);
351 	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT, val);
352 
353 	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG);
354 	val &= ~ANA_FSB_ADDR_FROM_BLOCK_SEL_MASK;
355 	val |= ANA_FSB_ADDR_FROM_ETH1;
356 	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG, val);
357 
358 	return 0;
359 }
360 
361 static struct vsc85xx_ptphdr *get_ptp_header_l4(struct sk_buff *skb,
362 						struct iphdr *iphdr,
363 						struct udphdr *udphdr)
364 {
365 	if (iphdr->version != 4 || iphdr->protocol != IPPROTO_UDP)
366 		return NULL;
367 
368 	return (struct vsc85xx_ptphdr *)(((unsigned char *)udphdr) + UDP_HLEN);
369 }
370 
371 static struct vsc85xx_ptphdr *get_ptp_header_tx(struct sk_buff *skb)
372 {
373 	struct ethhdr *ethhdr = eth_hdr(skb);
374 	struct udphdr *udphdr;
375 	struct iphdr *iphdr;
376 
377 	if (ethhdr->h_proto == htons(ETH_P_1588))
378 		return (struct vsc85xx_ptphdr *)(((unsigned char *)ethhdr) +
379 						 skb_mac_header_len(skb));
380 
381 	if (ethhdr->h_proto != htons(ETH_P_IP))
382 		return NULL;
383 
384 	iphdr = ip_hdr(skb);
385 	udphdr = udp_hdr(skb);
386 
387 	return get_ptp_header_l4(skb, iphdr, udphdr);
388 }
389 
390 static struct vsc85xx_ptphdr *get_ptp_header_rx(struct sk_buff *skb,
391 						enum hwtstamp_rx_filters rx_filter)
392 {
393 	struct udphdr *udphdr;
394 	struct iphdr *iphdr;
395 
396 	if (rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT)
397 		return (struct vsc85xx_ptphdr *)skb->data;
398 
399 	iphdr = (struct iphdr *)skb->data;
400 	udphdr = (struct udphdr *)(skb->data + iphdr->ihl * 4);
401 
402 	return get_ptp_header_l4(skb, iphdr, udphdr);
403 }
404 
405 static int get_sig(struct sk_buff *skb, u8 *sig)
406 {
407 	struct vsc85xx_ptphdr *ptphdr = get_ptp_header_tx(skb);
408 	struct ethhdr *ethhdr = eth_hdr(skb);
409 	unsigned int i;
410 
411 	if (!ptphdr)
412 		return -EOPNOTSUPP;
413 
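	/* Build the same 16-byte signature that vsc85xx_ts_fsb_init() programs
	 * into the analyzer: sequence ID, domain number, message type, the
	 * destination MAC, then padding with the message type.
	 */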
414 	sig[0] = (__force u16)ptphdr->seq_id >> 8;
415 	sig[1] = (__force u16)ptphdr->seq_id & GENMASK(7, 0);
416 	sig[2] = ptphdr->domain;
417 	sig[3] = ptphdr->tsmt & GENMASK(3, 0);
418 
419 	memcpy(&sig[4], ethhdr->h_dest, ETH_ALEN);
420 
421 	/* Fill the last bytes of the signature to reach a 16B signature */
422 	for (i = 10; i < 16; i++)
423 		sig[i] = ptphdr->tsmt & GENMASK(3, 0);
424 
425 	return 0;
426 }
427 
428 static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
429 {
430 	struct skb_shared_hwtstamps shhwtstamps;
431 	struct vsc85xx_ts_fifo fifo;
432 	struct sk_buff *skb;
433 	u8 skb_sig[16], *p;
434 	int i, len;
435 	u32 reg;
436 
437 	memset(&fifo, 0, sizeof(fifo));
438 	p = (u8 *)&fifo;
439 
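	/* Each FIFO entry is 26 bytes: 16 bits from word 0 plus 32 bits from
	 * each of words 1..6, filled byte by byte into struct vsc85xx_ts_fifo,
	 * which presumably mirrors the hardware entry layout (timestamp and
	 * 16-byte signature) defined in mscc_ptp.h.
	 */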
440 	reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
441 				  MSCC_PHY_PTP_EGR_TS_FIFO(0));
442 	if (reg & PTP_EGR_TS_FIFO_EMPTY)
443 		return;
444 
445 	*p++ = reg & 0xff;
446 	*p++ = (reg >> 8) & 0xff;
447 
448 	/* Read the current FIFO item. Reading FIFO6 pops the next one. */
449 	for (i = 1; i < 7; i++) {
450 		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
451 					  MSCC_PHY_PTP_EGR_TS_FIFO(i));
452 		*p++ = reg & 0xff;
453 		*p++ = (reg >> 8) & 0xff;
454 		*p++ = (reg >> 16) & 0xff;
455 		*p++ = (reg >> 24) & 0xff;
456 	}
457 
458 	len = skb_queue_len(&ptp->tx_queue);
459 	if (len < 1)
460 		return;
461 
462 	while (len--) {
463 		skb = __skb_dequeue(&ptp->tx_queue);
464 		if (!skb)
465 			return;
466 
467 		/* We can't get a signature for this packet and never will,
468 		 * so free it.
469 		 */
470 		if (get_sig(skb, skb_sig) < 0) {
471 			kfree_skb(skb);
472 			continue;
473 		}
474 
475 		/* Check if we found the signature we were looking for. */
476 		if (!memcmp(skb_sig, fifo.sig, sizeof(fifo.sig))) {
477 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
478 			shhwtstamps.hwtstamp = ktime_set(fifo.secs, fifo.ns);
479 			skb_complete_tx_timestamp(skb, &shhwtstamps);
480 
481 			return;
482 		}
483 
484 		/* The signature is valid but does not match the FIFO entry
485 		 * being processed right now; re-queue the skb so it can be
486 		 * matched against a later FIFO entry.
487 		 */
488 		__skb_queue_tail(&ptp->tx_queue, skb);
489 	}
490 }
491 
492 static void vsc85xx_get_tx_ts(struct vsc85xx_ptp *ptp)
493 {
494 	u32 reg;
495 
496 	do {
497 		vsc85xx_dequeue_skb(ptp);
498 
499 		/* If other timestamps are available in the FIFO, process them. */
500 		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
501 					  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
502 	} while (PTP_EGR_FIFO_LEVEL_LAST_READ(reg) > 1);
503 }
504 
505 static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk)
506 {
507 	struct vsc8531_private *vsc8531 = phydev->priv;
508 	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
509 	static const u8 msgs[] = {
510 		PTP_MSGTYPE_SYNC,
511 		PTP_MSGTYPE_DELAY_REQ
512 	};
513 	u32 val;
514 	u8 i;
515 
516 	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
517 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
518 				     base ? PTP_FLOW_VALID_CH0 :
519 				     PTP_FLOW_VALID_CH1);
520 
521 		val = vsc85xx_ts_read_csr(phydev, blk,
522 					  MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i));
523 		val &= ~PTP_FLOW_DOMAIN_RANGE_ENA;
524 		vsc85xx_ts_write_csr(phydev, blk,
525 				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), val);
526 
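		/* Match only on the PTP messageType, which appears to sit in
		 * the top byte of the upper match word, hence the << 24 shift
		 * below.
		 */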
527 		vsc85xx_ts_write_csr(phydev, blk,
528 				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i),
529 				     msgs[i] << 24);
530 
531 		vsc85xx_ts_write_csr(phydev, blk,
532 				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i),
533 				     PTP_FLOW_MSG_TYPE_MASK);
534 	}
535 
536 	return 0;
537 }
538 
539 static int vsc85xx_eth_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
540 {
541 	struct vsc8531_private *vsc8531 = phydev->priv;
542 	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
543 	u32 val;
544 
545 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NXT_PROT_TAG, 0);
546 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT_VLAN_TPID,
547 			     ANA_ETH1_NTX_PROT_VLAN_TPID(ETH_P_8021AD));
548 
549 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0),
550 			     base ? ETH1_FLOW_VALID_CH0 : ETH1_FLOW_VALID_CH1);
551 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
552 			     ANA_ETH1_FLOW_MATCH_VLAN_TAG2);
553 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
554 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), 0);
555 	vsc85xx_ts_write_csr(phydev, blk,
556 			     MSCC_ANA_ETH1_FLOW_VLAN_RANGE_I_TAG(0), 0);
557 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_VLAN_TAG1(0), 0);
558 	vsc85xx_ts_write_csr(phydev, blk,
559 			     MSCC_ANA_ETH1_FLOW_VLAN_TAG2_I_TAG(0), 0);
560 
561 	val = vsc85xx_ts_read_csr(phydev, blk,
562 				  MSCC_ANA_ETH1_FLOW_MATCH_MODE(0));
563 	val &= ~ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK;
564 	val |= ANA_ETH1_FLOW_MATCH_VLAN_VERIFY;
565 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
566 			     val);
567 
568 	return 0;
569 }
570 
571 static int vsc85xx_ip_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
572 {
573 	struct vsc8531_private *vsc8531 = phydev->priv;
574 	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
575 	u32 val;
576 
577 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_UPPER,
578 			     PTP_EV_PORT);
579 	/* Match on dest port only, ignore src */
580 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_UPPER,
581 			     0xffff);
582 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_LOWER,
583 			     0);
584 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_LOWER, 0);
585 
586 	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
587 	val &= ~IP1_FLOW_ENA_CHANNEL_MASK_MASK;
588 	val |= base ? IP1_FLOW_VALID_CH0 : IP1_FLOW_VALID_CH1;
589 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);
590 
591 	/* Match all IPs */
592 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER(0), 0);
593 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER(0), 0);
594 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER_MID(0),
595 			     0);
596 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER_MID(0),
597 			     0);
598 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER_MID(0),
599 			     0);
600 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER_MID(0),
601 			     0);
602 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER(0), 0);
603 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER(0), 0);
604 
605 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_IP_CHKSUM_SEL, 0);
606 
607 	return 0;
608 }
609 
610 static int vsc85xx_adjfine(struct ptp_clock_info *info, long scaled_ppm)
611 {
612 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
613 	struct phy_device *phydev = ptp->phydev;
614 	struct vsc8531_private *priv = phydev->priv;
615 	u64 adj = 0;
616 	u32 val;
617 
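	/* The LTC auto-adjust adds or subtracts 1 ns every 'adj' ns, so the
	 * rollover period is adj = 65536 * 1e6 / |scaled_ppm| ns (scaled_ppm
	 * is ppm in 16.16 fixed point). For example, scaled_ppm = 65536
	 * (1 ppm) gives adj = 1e6 ns, i.e. one 1 ns correction per
	 * millisecond. Below 66 scaled_ppm the period would exceed the ~1 s
	 * the hardware can presumably hold, so the request is ignored.
	 */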
618 	if (abs(scaled_ppm) < 66 || abs(scaled_ppm) > 65536UL * 1000000UL)
619 		return 0;
620 
621 	adj = div64_u64(1000000ULL * 65536ULL, abs(scaled_ppm));
622 	if (adj > 1000000000L)
623 		adj = 1000000000L;
624 
625 	val = PTP_AUTO_ADJ_NS_ROLLOVER(adj);
626 	val |= scaled_ppm > 0 ? PTP_AUTO_ADJ_ADD_1NS : PTP_AUTO_ADJ_SUB_1NS;
627 
628 	mutex_lock(&priv->phc_lock);
629 
630 	/* Write the adjustment period, in nanoseconds, to the auto-adjust register. */
631 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_AUTO_ADJ,
632 			     val);
633 
634 	/* Latch the new value; the AUTO_ADJ_UPDATE bit is cleared to 0 after the write operation. */
635 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
636 	val |= PTP_LTC_CTRL_AUTO_ADJ_UPDATE;
637 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
638 
639 	mutex_unlock(&priv->phc_lock);
640 
641 	return 0;
642 }
643 
644 static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
645 {
646 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
647 	struct phy_device *phydev = ptp->phydev;
648 	struct vsc85xx_shared_private *shared =
649 		(struct vsc85xx_shared_private *)phydev->shared->priv;
650 	struct vsc8531_private *priv = phydev->priv;
651 	u32 val;
652 
653 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
654 	val |= PTP_LTC_CTRL_SAVE_ENA;
655 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
656 
657 	/* Local Time Counter (LTC) is put in SAVE* regs on rising edge of
658 	 * LOAD_SAVE pin.
659 	 */
660 	mutex_lock(&shared->gpio_lock);
661 	gpiod_set_value(priv->load_save, 1);
662 
663 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
664 				  MSCC_PHY_PTP_LTC_SAVED_SEC_MSB);
665 
666 	ts->tv_sec = ((time64_t)val) << 32;
667 
668 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
669 				  MSCC_PHY_PTP_LTC_SAVED_SEC_LSB);
670 	ts->tv_sec += val;
671 
672 	ts->tv_nsec = vsc85xx_ts_read_csr(phydev, PROCESSOR,
673 					  MSCC_PHY_PTP_LTC_SAVED_NS);
674 
675 	gpiod_set_value(priv->load_save, 0);
676 	mutex_unlock(&shared->gpio_lock);
677 
678 	return 0;
679 }
680 
681 static int vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
682 {
683 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
684 	struct phy_device *phydev = ptp->phydev;
685 	struct vsc8531_private *priv = phydev->priv;
686 
687 	mutex_lock(&priv->phc_lock);
688 	__vsc85xx_gettime(info, ts);
689 	mutex_unlock(&priv->phc_lock);
690 
691 	return 0;
692 }
693 
694 static int __vsc85xx_settime(struct ptp_clock_info *info,
695 			     const struct timespec64 *ts)
696 {
697 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
698 	struct phy_device *phydev = ptp->phydev;
699 	struct vsc85xx_shared_private *shared =
700 		(struct vsc85xx_shared_private *)phydev->shared->priv;
701 	struct vsc8531_private *priv = phydev->priv;
702 	u32 val;
703 
704 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB,
705 			     PTP_LTC_LOAD_SEC_MSB(ts->tv_sec));
706 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB,
707 			     PTP_LTC_LOAD_SEC_LSB(ts->tv_sec));
708 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_NS,
709 			     PTP_LTC_LOAD_NS(ts->tv_nsec));
710 
711 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
712 	val |= PTP_LTC_CTRL_LOAD_ENA;
713 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
714 
715 	/* Local Time Counter (LTC) is set from LOAD* regs on rising edge of
716 	 * LOAD_SAVE pin.
717 	 */
718 	mutex_lock(&shared->gpio_lock);
719 	gpiod_set_value(priv->load_save, 1);
720 
721 	val &= ~PTP_LTC_CTRL_LOAD_ENA;
722 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
723 
724 	gpiod_set_value(priv->load_save, 0);
725 	mutex_unlock(&shared->gpio_lock);
726 
727 	return 0;
728 }
729 
730 static int vsc85xx_settime(struct ptp_clock_info *info,
731 			   const struct timespec64 *ts)
732 {
733 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
734 	struct phy_device *phydev = ptp->phydev;
735 	struct vsc8531_private *priv = phydev->priv;
736 
737 	mutex_lock(&priv->phc_lock);
738 	__vsc85xx_settime(info, ts);
739 	mutex_unlock(&priv->phc_lock);
740 
741 	return 0;
742 }
743 
744 static int vsc85xx_adjtime(struct ptp_clock_info *info, s64 delta)
745 {
746 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
747 	struct phy_device *phydev = ptp->phydev;
748 	struct vsc8531_private *priv = phydev->priv;
749 	u32 val;
750 
751 	/* Can't recover that big of an offset. Let's set the time directly. */
752 	if (abs(delta) >= NSEC_PER_SEC) {
753 		struct timespec64 ts;
754 		u64 now;
755 
756 		mutex_lock(&priv->phc_lock);
757 
758 		__vsc85xx_gettime(info, &ts);
759 		now = ktime_to_ns(timespec64_to_ktime(ts));
760 		ts = ns_to_timespec64(now + delta);
761 		__vsc85xx_settime(info, &ts);
762 
763 		mutex_unlock(&priv->phc_lock);
764 
765 		return 0;
766 	}
767 
768 	mutex_lock(&priv->phc_lock);
769 
770 	val = PTP_LTC_OFFSET_VAL(abs(delta)) | PTP_LTC_OFFSET_ADJ;
771 	if (delta > 0)
772 		val |= PTP_LTC_OFFSET_ADD;
773 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_OFFSET, val);
774 
775 	mutex_unlock(&priv->phc_lock);
776 
777 	return 0;
778 }
779 
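/* The analyzer chains comparators per engine: the Ethernet comparator hands
 * frames either to the PTP/OAM comparator (PTP over L2) or to the IP/UDP
 * comparator, which in turn hands them to the PTP/OAM comparator (PTP over
 * UDP/IPv4), as configured in vsc85xx_ts_engine_init(). The two helpers below
 * select the next comparator for each stage.
 */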
780 static int vsc85xx_eth1_next_comp(struct phy_device *phydev, enum ts_blk blk,
781 				  u32 next_comp, u32 etype)
782 {
783 	u32 val;
784 
785 	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT);
786 	val &= ~ANA_ETH1_NTX_PROT_COMPARATOR_MASK;
787 	val |= next_comp;
788 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, val);
789 
790 	val = ANA_ETH1_NXT_PROT_ETYPE_MATCH(etype) |
791 		ANA_ETH1_NXT_PROT_ETYPE_MATCH_ENA;
792 	vsc85xx_ts_write_csr(phydev, blk,
793 			     MSCC_PHY_ANA_ETH1_NXT_PROT_ETYPE_MATCH, val);
794 
795 	return 0;
796 }
797 
798 static int vsc85xx_ip1_next_comp(struct phy_device *phydev, enum ts_blk blk,
799 				 u32 next_comp, u32 header)
800 {
801 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP,
802 			     ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR(header) |
803 			     next_comp);
804 
805 	return 0;
806 }
807 
808 static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev, enum ts_blk blk, u8 flow, enum ptp_cmd cmd)
809 {
810 	u32 val;
811 
812 	/* Check non-zero reserved field */
813 	val = PTP_FLOW_PTP_0_FIELD_PTP_FRAME | PTP_FLOW_PTP_0_FIELD_RSVRD_CHECK;
814 	vsc85xx_ts_write_csr(phydev, blk,
815 			     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(flow), val);
816 
817 	val = PTP_FLOW_PTP_ACTION_CORR_OFFSET(8) |
818 	      PTP_FLOW_PTP_ACTION_TIME_OFFSET(8) |
819 	      PTP_FLOW_PTP_ACTION_PTP_CMD(cmd == PTP_SAVE_IN_TS_FIFO ?
820 					  PTP_NOP : cmd);
821 	if (cmd == PTP_SAVE_IN_TS_FIFO)
822 		val |= PTP_FLOW_PTP_ACTION_SAVE_LOCAL_TIME;
823 	else if (cmd == PTP_WRITE_NS)
824 		val |= PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_UPDATE |
825 		       PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET(6);
826 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_PTP_ACTION(flow),
827 			     val);
828 
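	/* The offsets used here appear to be byte offsets within the PTP
	 * message: the correctionField starts at offset 8 of the common
	 * header, and the 10-byte originTimestamp of a Sync frame starts at
	 * offset 34, right after the 34-byte common header.
	 */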
829 	if (cmd == PTP_WRITE_1588)
830 		/* Rewrite timestamp directly in frame */
831 		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(34) |
832 		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(10);
833 	else if (cmd == PTP_SAVE_IN_TS_FIFO)
834 		/* no rewrite */
835 		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(0) |
836 		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(0);
837 	else
838 		/* Write in reserved field */
839 		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(16) |
840 		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(4);
841 	vsc85xx_ts_write_csr(phydev, blk,
842 			     MSCC_ANA_PTP_FLOW_PTP_ACTION2(flow), val);
843 
844 	return 0;
845 }
846 
847 static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk,
848 			    bool one_step, bool enable)
849 {
850 	static const u8 msgs[] = {
851 		PTP_MSGTYPE_SYNC,
852 		PTP_MSGTYPE_DELAY_REQ
853 	};
854 	u32 val;
855 	u8 i;
856 
857 	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
858 		if (blk == INGRESS)
859 			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
860 						   PTP_WRITE_NS);
861 		else if (msgs[i] == PTP_MSGTYPE_SYNC && one_step)
862 			/* One-step: the Sync timestamp is written into the frame itself */
863 			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
864 						   PTP_WRITE_1588);
865 		else
866 			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
867 						   PTP_SAVE_IN_TS_FIFO);
868 
869 		val = vsc85xx_ts_read_csr(phydev, blk,
870 					  MSCC_ANA_PTP_FLOW_ENA(i));
871 		val &= ~PTP_FLOW_ENA;
872 		if (enable)
873 			val |= PTP_FLOW_ENA;
874 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
875 				     val);
876 	}
877 
878 	return 0;
879 }
880 
881 static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
882 			     bool enable)
883 {
884 	struct vsc8531_private *vsc8531 = phydev->priv;
885 	u32 val = ANA_ETH1_FLOW_ADDR_MATCH2_DEST;
886 
887 	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
888 		/* PTP over Ethernet multicast address for SYNC and DELAY msg */
889 		u8 ptp_multicast[6] = {0x01, 0x1b, 0x19, 0x00, 0x00, 0x00};
890 
891 		val |= ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR |
892 		       get_unaligned_be16(&ptp_multicast[4]);
893 		vsc85xx_ts_write_csr(phydev, blk,
894 				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
895 		vsc85xx_ts_write_csr(phydev, blk,
896 				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0),
897 				     get_unaligned_be32(ptp_multicast));
898 	} else {
899 		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
900 		vsc85xx_ts_write_csr(phydev, blk,
901 				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
902 		vsc85xx_ts_write_csr(phydev, blk,
903 				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
904 	}
905 
906 	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0));
907 	val &= ~ETH1_FLOW_ENA;
908 	if (enable)
909 		val |= ETH1_FLOW_ENA;
910 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0), val);
911 
912 	return 0;
913 }
914 
915 static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
916 			    bool enable)
917 {
918 	u32 val;
919 
920 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP1_MODE,
921 			     ANA_IP1_NXT_PROT_IPV4 |
922 			     ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV4);
923 
924 	/* Matching UDP protocol number */
925 	val = ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK(0xff) |
926 	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH(IPPROTO_UDP) |
927 	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF(9);
928 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP_MATCH1,
929 			     val);
930 
931 	/* End of the IP header, start of the next protocol (UDP) */
932 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_OFFSET2,
933 			     ANA_IP1_NXT_PROT_OFFSET2(20));
934 
935 	val = vsc85xx_ts_read_csr(phydev, blk,
936 				  MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM);
937 	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK |
938 		 IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK);
939 	val |= IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2);
940 
941 	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_UPDATE |
942 		 IP1_NXT_PROT_UDP_CHKSUM_CLEAR);
943 	/* UDP checksum offset in IPv4 packet
944 	 * according to: https://tools.ietf.org/html/rfc768
945 	 */
946 	val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26);
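	/* Rewriting the PTP reserved field would stale the UDP checksum;
	 * clearing the checksum to zero (legal for UDP over IPv4) is
	 * presumably why CHKSUM_CLEAR is requested when timestamping is
	 * enabled, rather than updating it.
	 */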
947 	if (enable)
948 		val |= IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
949 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
950 			     val);
951 
952 	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
953 	val &= ~(IP1_FLOW_MATCH_ADDR_MASK | IP1_FLOW_ENA);
954 	val |= IP1_FLOW_MATCH_DEST_SRC_ADDR;
955 	if (enable)
956 		val |= IP1_FLOW_ENA;
957 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);
958 
959 	return 0;
960 }
961 
962 static int vsc85xx_ts_engine_init(struct phy_device *phydev, bool one_step)
963 {
964 	struct vsc8531_private *vsc8531 = phydev->priv;
965 	bool ptp_l4, base = phydev->mdio.addr == vsc8531->ts_base_addr;
966 	u8 eng_id = base ? 0 : 1;
967 	u32 val;
968 
969 	ptp_l4 = vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
970 
971 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
972 				  MSCC_PHY_PTP_ANALYZER_MODE);
973 	/* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */
974 	val &= ~(PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id)) |
975 		 PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id)));
976 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
977 			     val);
978 
979 	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
980 		vsc85xx_eth1_next_comp(phydev, INGRESS,
981 				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
982 		vsc85xx_eth1_next_comp(phydev, EGRESS,
983 				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
984 	} else {
985 		vsc85xx_eth1_next_comp(phydev, INGRESS,
986 				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
987 				       ETH_P_IP);
988 		vsc85xx_eth1_next_comp(phydev, EGRESS,
989 				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
990 				       ETH_P_IP);
991 		/* Header length of IPv4 + UDP */
992 		vsc85xx_ip1_next_comp(phydev, INGRESS,
993 				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
994 		vsc85xx_ip1_next_comp(phydev, EGRESS,
995 				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
996 	}
997 
998 	vsc85xx_eth1_conf(phydev, INGRESS,
999 			  vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
1000 	vsc85xx_ip1_conf(phydev, INGRESS,
1001 			 ptp_l4 && vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
1002 	vsc85xx_ptp_conf(phydev, INGRESS, one_step,
1003 			 vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
1004 
1005 	vsc85xx_eth1_conf(phydev, EGRESS,
1006 			  vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
1007 	vsc85xx_ip1_conf(phydev, EGRESS,
1008 			 ptp_l4 && vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
1009 	vsc85xx_ptp_conf(phydev, EGRESS, one_step,
1010 			 vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
1011 
1012 	val &= ~PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
1013 	if (vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF)
1014 		val |= PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
1015 
1016 	val &= ~PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
1017 	if (vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE)
1018 		val |= PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
1019 
1020 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
1021 			     val);
1022 
1023 	return 0;
1024 }
1025 
1026 void vsc85xx_link_change_notify(struct phy_device *phydev)
1027 {
1028 	struct vsc8531_private *priv = phydev->priv;
1029 
1030 	mutex_lock(&priv->ts_lock);
1031 	vsc85xx_ts_set_latencies(phydev);
1032 	mutex_unlock(&priv->ts_lock);
1033 }
1034 
1035 static void vsc85xx_ts_reset_fifo(struct phy_device *phydev)
1036 {
1037 	u32 val;
1038 
1039 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1040 				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
1041 	val |= PTP_EGR_TS_FIFO_RESET;
1042 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
1043 			     val);
1044 
1045 	val &= ~PTP_EGR_TS_FIFO_RESET;
1046 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
1047 			     val);
1048 }
1049 
1050 static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
1051 {
1052 	struct vsc8531_private *vsc8531 =
1053 		container_of(mii_ts, struct vsc8531_private, mii_ts);
1054 	struct phy_device *phydev = vsc8531->ptp->phydev;
1055 	struct hwtstamp_config cfg;
1056 	bool one_step = false;
1057 	u32 val;
1058 
1059 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1060 		return -EFAULT;
1061 
1062 	switch (cfg.tx_type) {
1063 	case HWTSTAMP_TX_ONESTEP_SYNC:
1064 		one_step = true;
1065 		break;
1066 	case HWTSTAMP_TX_ON:
1067 		break;
1068 	case HWTSTAMP_TX_OFF:
1069 		break;
1070 	default:
1071 		return -ERANGE;
1072 	}
1073 
1074 	vsc8531->ptp->tx_type = cfg.tx_type;
1075 
1076 	switch (cfg.rx_filter) {
1077 	case HWTSTAMP_FILTER_NONE:
1078 		break;
1079 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1080 		/* ETH->IP->UDP->PTP */
1081 		break;
1082 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1083 		/* ETH->PTP */
1084 		break;
1085 	default:
1086 		return -ERANGE;
1087 	}
1088 
1089 	vsc8531->ptp->rx_filter = cfg.rx_filter;
1090 
1091 	mutex_lock(&vsc8531->ts_lock);
1092 
1093 	__skb_queue_purge(&vsc8531->ptp->tx_queue);
1094 	__skb_queue_head_init(&vsc8531->ptp->tx_queue);
1095 
1096 	/* Disable predictor while configuring the 1588 block */
1097 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1098 				  MSCC_PHY_PTP_INGR_PREDICTOR);
1099 	val &= ~PTP_INGR_PREDICTOR_EN;
1100 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
1101 			     val);
1102 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1103 				  MSCC_PHY_PTP_EGR_PREDICTOR);
1104 	val &= ~PTP_EGR_PREDICTOR_EN;
1105 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
1106 			     val);
1107 
1108 	/* Bypass egress or ingress blocks if timestamping isn't used */
1109 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
1110 	val &= ~(PTP_IFACE_CTRL_EGR_BYPASS | PTP_IFACE_CTRL_INGR_BYPASS);
1111 	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
1112 		val |= PTP_IFACE_CTRL_EGR_BYPASS;
1113 	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE)
1114 		val |= PTP_IFACE_CTRL_INGR_BYPASS;
1115 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
1116 
1117 	/* Reset the FIFO so that it's empty after reconfiguration */
1118 	vsc85xx_ts_reset_fifo(phydev);
1119 
1120 	vsc85xx_ts_engine_init(phydev, one_step);
1121 
1122 	/* Re-enable predictors now */
1123 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1124 				  MSCC_PHY_PTP_INGR_PREDICTOR);
1125 	val |= PTP_INGR_PREDICTOR_EN;
1126 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
1127 			     val);
1128 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1129 				  MSCC_PHY_PTP_EGR_PREDICTOR);
1130 	val |= PTP_EGR_PREDICTOR_EN;
1131 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
1132 			     val);
1133 
1134 	vsc8531->ptp->configured = 1;
1135 	mutex_unlock(&vsc8531->ts_lock);
1136 
1137 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1138 }
1139 
1140 static int vsc85xx_ts_info(struct mii_timestamper *mii_ts,
1141 			   struct ethtool_ts_info *info)
1142 {
1143 	struct vsc8531_private *vsc8531 =
1144 		container_of(mii_ts, struct vsc8531_private, mii_ts);
1145 
1146 	info->phc_index = ptp_clock_index(vsc8531->ptp->ptp_clock);
1147 	info->so_timestamping =
1148 		SOF_TIMESTAMPING_TX_HARDWARE |
1149 		SOF_TIMESTAMPING_RX_HARDWARE |
1150 		SOF_TIMESTAMPING_RAW_HARDWARE;
1151 	info->tx_types =
1152 		(1 << HWTSTAMP_TX_OFF) |
1153 		(1 << HWTSTAMP_TX_ON) |
1154 		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
1155 	info->rx_filters =
1156 		(1 << HWTSTAMP_FILTER_NONE) |
1157 		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1158 		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
1159 
1160 	return 0;
1161 }
1162 
1163 static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
1164 			     struct sk_buff *skb, int type)
1165 {
1166 	struct vsc8531_private *vsc8531 =
1167 		container_of(mii_ts, struct vsc8531_private, mii_ts);
1168 
1169 	if (!vsc8531->ptp->configured)
1170 		goto out;
1171 
1172 	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
1173 		goto out;
1174 
1175 	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
1176 		if (ptp_msg_is_sync(skb, type))
1177 			goto out;
1178 
1179 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1180 
1181 	mutex_lock(&vsc8531->ts_lock);
1182 	__skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
1183 	mutex_unlock(&vsc8531->ts_lock);
1184 	return;
1185 
1186 out:
1187 	kfree_skb(skb);
1188 }
1189 
1190 static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
1191 			     struct sk_buff *skb, int type)
1192 {
1193 	struct vsc8531_private *vsc8531 =
1194 		container_of(mii_ts, struct vsc8531_private, mii_ts);
1195 	struct skb_shared_hwtstamps *shhwtstamps = NULL;
1196 	struct vsc85xx_ptphdr *ptphdr;
1197 	struct timespec64 ts;
1198 	unsigned long ns;
1199 
1200 	if (!vsc8531->ptp->configured)
1201 		return false;
1202 
1203 	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE ||
1204 	    type == PTP_CLASS_NONE)
1205 		return false;
1206 
1207 	vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
1208 
1209 	ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
1210 	if (!ptphdr)
1211 		return false;
1212 
1213 	shhwtstamps = skb_hwtstamps(skb);
1214 	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
1215 
1216 	ns = ntohl(ptphdr->rsrvd2);
1217 
1218 	/* nsec is in the reserved field; if it exceeds the PHC nsec read above, the seconds counter wrapped, so go back one second */
1219 	if (ts.tv_nsec < ns)
1220 		ts.tv_sec--;
1221 
1222 	shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
1223 	netif_rx(skb);
1224 
1225 	return true;
1226 }
1227 
1228 static const struct ptp_clock_info vsc85xx_clk_caps = {
1229 	.owner		= THIS_MODULE,
1230 	.name		= "VSC85xx timer",
1231 	.max_adj	= S32_MAX,
1232 	.n_alarm	= 0,
1233 	.n_pins		= 0,
1234 	.n_ext_ts	= 0,
1235 	.n_per_out	= 0,
1236 	.pps		= 0,
1237 	.adjtime        = &vsc85xx_adjtime,
1238 	.adjfine	= &vsc85xx_adjfine,
1239 	.gettime64	= &vsc85xx_gettime,
1240 	.settime64	= &vsc85xx_settime,
1241 };
1242 
1243 static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
1244 {
1245 	struct vsc8531_private *vsc8531 = phydev->priv;
1246 
1247 	if (vsc8531->ts_base_addr != phydev->mdio.addr) {
1248 		struct mdio_device *dev;
1249 
1250 		dev = phydev->mdio.bus->mdio_map[vsc8531->ts_base_addr];
1251 		phydev = container_of(dev, struct phy_device, mdio);
1252 
1253 		return phydev->priv;
1254 	}
1255 
1256 	return vsc8531;
1257 }
1258 
1259 static bool vsc8584_is_1588_input_clk_configured(struct phy_device *phydev)
1260 {
1261 	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);
1262 
1263 	return vsc8531->input_clk_init;
1264 }
1265 
1266 static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
1267 {
1268 	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);
1269 
1270 	vsc8531->input_clk_init = true;
1271 }
1272 
1273 static int __vsc8584_init_ptp(struct phy_device *phydev)
1274 {
1275 	struct vsc8531_private *vsc8531 = phydev->priv;
1276 	static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
1277 	static const u8  ltc_seq_a[] = { 8, 6, 5, 4, 2 };
1278 	u32 val;
1279 
1280 	if (!vsc8584_is_1588_input_clk_configured(phydev)) {
1281 		phy_lock_mdio_bus(phydev);
1282 
1283 		/* 1588_DIFF_INPUT_CLK configuration: Use an external clock for
1284 		 * the LTC, as per 3.13.29 in the VSC8584 datasheet.
1285 		 */
1286 		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
1287 				  MSCC_PHY_PAGE_1588);
1288 		phy_ts_base_write(phydev, 29, 0x7ae0);
1289 		phy_ts_base_write(phydev, 30, 0xb71c);
1290 		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
1291 				  MSCC_PHY_PAGE_STANDARD);
1292 
1293 		phy_unlock_mdio_bus(phydev);
1294 
1295 		vsc8584_set_input_clk_configured(phydev);
1296 	}
1297 
1298 	/* Disable predictor before configuring the 1588 block */
1299 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1300 				  MSCC_PHY_PTP_INGR_PREDICTOR);
1301 	val &= ~PTP_INGR_PREDICTOR_EN;
1302 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
1303 			     val);
1304 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1305 				  MSCC_PHY_PTP_EGR_PREDICTOR);
1306 	val &= ~PTP_EGR_PREDICTOR_EN;
1307 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
1308 			     val);
1309 
1310 	/* By default, the internal clock of fixed rate 250MHz is used */
1311 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
1312 	val &= ~PTP_LTC_CTRL_CLK_SEL_MASK;
1313 	val |= PTP_LTC_CTRL_CLK_SEL_INTERNAL_250;
1314 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
1315 
1316 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE);
1317 	val &= ~PTP_LTC_SEQUENCE_A_MASK;
1318 	val |= PTP_LTC_SEQUENCE_A(ltc_seq_a[PHC_CLK_250MHZ]);
1319 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE, val);
1320 
1321 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ);
1322 	val &= ~(PTP_LTC_SEQ_ERR_MASK | PTP_LTC_SEQ_ADD_SUB);
1323 	if (ltc_seq_e[PHC_CLK_250MHZ])
1324 		val |= PTP_LTC_SEQ_ADD_SUB;
1325 	val |= PTP_LTC_SEQ_ERR(ltc_seq_e[PHC_CLK_250MHZ]);
1326 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ, val);
1327 
1328 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_1PPS_WIDTH_ADJ,
1329 			     PPS_WIDTH_ADJ);
1330 
1331 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_DELAY_FIFO,
1332 			     IS_ENABLED(CONFIG_MACSEC) ?
1333 			     PTP_INGR_DELAY_FIFO_DEPTH_MACSEC :
1334 			     PTP_INGR_DELAY_FIFO_DEPTH_DEFAULT);
1335 
1336 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_DELAY_FIFO,
1337 			     IS_ENABLED(CONFIG_MACSEC) ?
1338 			     PTP_EGR_DELAY_FIFO_DEPTH_MACSEC :
1339 			     PTP_EGR_DELAY_FIFO_DEPTH_DEFAULT);
1340 
1341 	/* Enable n-phase sampler for Viper Rev-B */
1342 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1343 				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1344 	val &= ~(PTP_ACCUR_PPS_OUT_BYPASS | PTP_ACCUR_PPS_IN_BYPASS |
1345 		 PTP_ACCUR_EGR_SOF_BYPASS | PTP_ACCUR_INGR_SOF_BYPASS |
1346 		 PTP_ACCUR_LOAD_SAVE_BYPASS);
1347 	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
1348 	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
1349 	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
1350 	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
1351 	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
1352 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1353 			     val);
1354 
1355 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1356 				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1357 	val |= PTP_ACCUR_CALIB_TRIGG;
1358 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1359 			     val);
1360 
1361 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1362 				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1363 	val &= ~PTP_ACCUR_CALIB_TRIGG;
1364 	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
1365 	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
1366 	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
1367 	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
1368 	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
1369 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1370 			     val);
1371 
1372 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1373 				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1374 	val |= PTP_ACCUR_CALIB_TRIGG;
1375 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1376 			     val);
1377 
1378 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1379 				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1380 	val &= ~PTP_ACCUR_CALIB_TRIGG;
1381 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1382 			     val);
1383 
1384 	/* Do not access FIFO via SI */
1385 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1386 				  MSCC_PHY_PTP_TSTAMP_FIFO_SI);
1387 	val &= ~PTP_TSTAMP_FIFO_SI_EN;
1388 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_TSTAMP_FIFO_SI,
1389 			     val);
1390 
1391 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1392 				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
1393 	val &= ~PTP_INGR_REWRITER_REDUCE_PREAMBLE;
1394 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
1395 			     val);
1396 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1397 				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
1398 	val &= ~PTP_EGR_REWRITER_REDUCE_PREAMBLE;
1399 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
1400 			     val);
1401 
1402 	/* Put the flag that indicates the frame has been modified in bit 7 */
1403 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1404 				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
1405 	val |= PTP_INGR_REWRITER_FLAG_BIT_OFF(7) | PTP_INGR_REWRITER_FLAG_VAL;
1406 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
1407 			     val);
1408 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1409 				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
1410 	val |= PTP_EGR_REWRITER_FLAG_BIT_OFF(7);
1411 	val &= ~PTP_EGR_REWRITER_FLAG_VAL;
1412 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
1413 			     val);
1414 
1415 	/* 30-bit mode for the RX timestamp: only the nanoseconds are kept in
1416 	 * the reserved field.
1417 	 */
1418 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1419 				  MSCC_PHY_PTP_INGR_TSP_CTRL);
1420 	val |= PHY_PTP_INGR_TSP_CTRL_FRACT_NS;
1421 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
1422 			     val);
1423 
1424 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
1425 	val |= PHY_PTP_EGR_TSP_CTRL_FRACT_NS;
1426 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
1427 
1428 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1429 				  MSCC_PHY_PTP_SERIAL_TOD_IFACE);
1430 	val |= PTP_SERIAL_TOD_IFACE_LS_AUTO_CLR;
1431 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_SERIAL_TOD_IFACE,
1432 			     val);
1433 
1434 	vsc85xx_ts_fsb_init(phydev);
1435 
1436 	/* Set the Egress timestamp FIFO configuration and status register */
1437 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1438 				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
1439 	val &= ~(PTP_EGR_TS_FIFO_SIG_BYTES_MASK | PTP_EGR_TS_FIFO_THRESH_MASK);
1440 	/* 16 bytes for the signature, 10 for the timestamp in the TS FIFO */
1441 	val |= PTP_EGR_TS_FIFO_SIG_BYTES(16) | PTP_EGR_TS_FIFO_THRESH(7);
1442 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
1443 			     val);
1444 
1445 	vsc85xx_ts_reset_fifo(phydev);
1446 
1447 	val = PTP_IFACE_CTRL_CLK_ENA;
1448 	if (!IS_ENABLED(CONFIG_MACSEC))
1449 		val |= PTP_IFACE_CTRL_GMII_PROT;
1450 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
1451 
1452 	vsc85xx_ts_set_latencies(phydev);
1453 
1454 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_VERSION_CODE);
1455 
1456 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
1457 	val |= PTP_IFACE_CTRL_EGR_BYPASS;
1458 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
1459 
1460 	vsc85xx_ts_disable_flows(phydev, EGRESS);
1461 	vsc85xx_ts_disable_flows(phydev, INGRESS);
1462 
1463 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1464 				  MSCC_PHY_PTP_ANALYZER_MODE);
1465 	/* Disable INGRESS and EGRESS so the engines can be reconfigured */
1466 	val &= ~(PTP_ANALYZER_MODE_EGR_ENA_MASK |
1467 		 PTP_ANALYZER_MODE_INGR_ENA_MASK |
1468 		 PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK |
1469 		 PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK);
1470 	/* Strict flow matching: packets must match flows at the same index
1471 	 * in all enabled comparators (except PTP).
1472 	 */
1473 	val |= PTP_ANA_SPLIT_ENCAP_FLOW | PTP_ANA_INGR_ENCAP_FLOW_MODE(0x7) |
1474 	       PTP_ANA_EGR_ENCAP_FLOW_MODE(0x7);
1475 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
1476 			     val);
1477 
1478 	/* Initialize the comparators for ingress and egress flows:
1479 	 * - The Ethernet comparator.
1480 	 * - The IP comparator.
1481 	 * - The PTP comparator.
1482 	 */
1483 	vsc85xx_eth_cmp1_init(phydev, INGRESS);
1484 	vsc85xx_ip_cmp1_init(phydev, INGRESS);
1485 	vsc85xx_ptp_cmp_init(phydev, INGRESS);
1486 	vsc85xx_eth_cmp1_init(phydev, EGRESS);
1487 	vsc85xx_ip_cmp1_init(phydev, EGRESS);
1488 	vsc85xx_ptp_cmp_init(phydev, EGRESS);
1489 
1490 	vsc85xx_ts_eth_cmp1_sig(phydev);
1491 
1492 	vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
1493 	vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
1494 	vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
1495 	vsc8531->mii_ts.ts_info  = vsc85xx_ts_info;
1496 	phydev->mii_ts = &vsc8531->mii_ts;
1497 
1498 	memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
1499 
1500 	vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
1501 						     &phydev->mdio.dev);
1502 	return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
1503 }
1504 
1505 void vsc8584_config_ts_intr(struct phy_device *phydev)
1506 {
1507 	struct vsc8531_private *priv = phydev->priv;
1508 
1509 	mutex_lock(&priv->ts_lock);
1510 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_1588_VSC85XX_INT_MASK,
1511 			     VSC85XX_1588_INT_MASK_MASK);
1512 	mutex_unlock(&priv->ts_lock);
1513 }
1514 
1515 int vsc8584_ptp_init(struct phy_device *phydev)
1516 {
1517 	switch (phydev->phy_id & phydev->drv->phy_id_mask) {
1518 	case PHY_ID_VSC8572:
1519 	case PHY_ID_VSC8574:
1520 	case PHY_ID_VSC8575:
1521 	case PHY_ID_VSC8582:
1522 	case PHY_ID_VSC8584:
1523 		return __vsc8584_init_ptp(phydev);
1524 	}
1525 
1526 	return 0;
1527 }
1528 
1529 irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
1530 {
1531 	struct vsc8531_private *priv = phydev->priv;
1532 	int rc;
1533 
1534 	mutex_lock(&priv->ts_lock);
1535 	rc = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1536 				 MSCC_PHY_1588_VSC85XX_INT_STATUS);
1537 	/* Ack the PTP interrupt */
1538 	vsc85xx_ts_write_csr(phydev, PROCESSOR,
1539 			     MSCC_PHY_1588_VSC85XX_INT_STATUS, rc);
1540 
1541 	if (!(rc & VSC85XX_1588_INT_MASK_MASK)) {
1542 		mutex_unlock(&priv->ts_lock);
1543 		return IRQ_NONE;
1544 	}
1545 
1546 	if (rc & VSC85XX_1588_INT_FIFO_ADD) {
1547 		vsc85xx_get_tx_ts(priv->ptp);
1548 	} else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) {
1549 		__skb_queue_purge(&priv->ptp->tx_queue);
1550 		vsc85xx_ts_reset_fifo(phydev);
1551 	}
1552 
1553 	mutex_unlock(&priv->ts_lock);
1554 	return IRQ_HANDLED;
1555 }
1556 
1557 int vsc8584_ptp_probe(struct phy_device *phydev)
1558 {
1559 	struct vsc8531_private *vsc8531 = phydev->priv;
1560 
1561 	vsc8531->ptp = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531->ptp),
1562 				    GFP_KERNEL);
1563 	if (!vsc8531->ptp)
1564 		return -ENOMEM;
1565 
1566 	mutex_init(&vsc8531->phc_lock);
1567 	mutex_init(&vsc8531->ts_lock);
1568 
1569 	/* Retrieve the shared load/save GPIO. Request it as non-exclusive as
1570 	 * the same GPIO can be requested by all the PHYs of the same package.
1571 	 * This GPIO must be used with the gpio_lock taken (the lock is shared
1572 	 * between all PHYs).
1573 	 */
1574 	vsc8531->load_save = devm_gpiod_get_optional(&phydev->mdio.dev, "load-save",
1575 						     GPIOD_FLAGS_BIT_NONEXCLUSIVE |
1576 						     GPIOD_OUT_LOW);
1577 	if (IS_ERR(vsc8531->load_save)) {
1578 		phydev_err(phydev, "Can't get load-save GPIO (%ld)\n",
1579 			   PTR_ERR(vsc8531->load_save));
1580 		return PTR_ERR(vsc8531->load_save);
1581 	}
1582 
1583 	vsc8531->ptp->phydev = phydev;
1584 
1585 	return 0;
1586 }
1587 
1588 int vsc8584_ptp_probe_once(struct phy_device *phydev)
1589 {
1590 	struct vsc85xx_shared_private *shared =
1591 		(struct vsc85xx_shared_private *)phydev->shared->priv;
1592 
1593 	/* Initialize shared GPIO lock */
1594 	mutex_init(&shared->gpio_lock);
1595 
1596 	return 0;
1597 }
1598