1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3 * Copyright (C) 2021 NXP
4 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5 */
6
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/phy.h>
14 #include <linux/processor.h>
15 #include <linux/property.h>
16 #include <linux/ptp_classify.h>
17 #include <linux/ptp_clock_kernel.h>
18 #include <linux/net_tstamp.h>
19
20 #define PHY_ID_TJA_1103 0x001BB010
21 #define PHY_ID_TJA_1120 0x001BB031
22
23 #define VEND1_DEVICE_CONTROL 0x0040
24 #define DEVICE_CONTROL_RESET BIT(15)
25 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
26 #define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13)
27
28 #define VEND1_DEVICE_CONFIG 0x0048
29
30 #define TJA1120_VEND1_EXT_TS_MODE 0x1012
31
32 #define TJA1120_GLOBAL_INFRA_IRQ_ACK 0x2C08
33 #define TJA1120_GLOBAL_INFRA_IRQ_EN 0x2C0A
34 #define TJA1120_GLOBAL_INFRA_IRQ_STATUS 0x2C0C
35 #define TJA1120_DEV_BOOT_DONE BIT(1)
36
37 #define TJA1120_VEND1_PTP_TRIG_DATA_S 0x1070
38
39 #define TJA1120_EGRESS_TS_DATA_S 0x9060
40 #define TJA1120_EGRESS_TS_END 0x9067
41 #define TJA1120_TS_VALID BIT(0)
42 #define TJA1120_MORE_TS BIT(15)
43
44 #define VEND1_PHY_IRQ_ACK 0x80A0
45 #define VEND1_PHY_IRQ_EN 0x80A1
46 #define VEND1_PHY_IRQ_STATUS 0x80A2
47 #define PHY_IRQ_LINK_EVENT BIT(1)
48
49 #define VEND1_ALWAYS_ACCESSIBLE 0x801F
50 #define FUSA_PASS BIT(4)
51
52 #define VEND1_PHY_CONTROL 0x8100
53 #define PHY_CONFIG_EN BIT(14)
54 #define PHY_START_OP BIT(0)
55
56 #define VEND1_PHY_CONFIG 0x8108
57 #define PHY_CONFIG_AUTO BIT(0)
58
59 #define TJA1120_EPHY_RESETS 0x810A
60 #define EPHY_PCS_RESET BIT(3)
61
62 #define VEND1_SIGNAL_QUALITY 0x8320
63 #define SQI_VALID BIT(14)
64 #define SQI_MASK GENMASK(2, 0)
65 #define MAX_SQI SQI_MASK
66
67 #define CABLE_TEST_ENABLE BIT(15)
68 #define CABLE_TEST_START BIT(14)
69 #define CABLE_TEST_OK 0x00
70 #define CABLE_TEST_SHORTED 0x01
71 #define CABLE_TEST_OPEN 0x02
72 #define CABLE_TEST_UNKNOWN 0x07
73
74 #define VEND1_PORT_CONTROL 0x8040
75 #define PORT_CONTROL_EN BIT(14)
76
77 #define VEND1_PORT_ABILITIES 0x8046
78 #define PTP_ABILITY BIT(3)
79
80 #define VEND1_PORT_FUNC_IRQ_EN 0x807A
81 #define PTP_IRQS BIT(3)
82
83 #define VEND1_PTP_IRQ_ACK 0x9008
84 #define EGR_TS_IRQ BIT(1)
85
86 #define VEND1_PORT_INFRA_CONTROL 0xAC00
87 #define PORT_INFRA_CONTROL_EN BIT(14)
88
89 #define VEND1_RXID 0xAFCC
90 #define VEND1_TXID 0xAFCD
91 #define ID_ENABLE BIT(15)
92
93 #define VEND1_ABILITIES 0xAFC4
94 #define RGMII_ID_ABILITY BIT(15)
95 #define RGMII_ABILITY BIT(14)
96 #define RMII_ABILITY BIT(10)
97 #define REVMII_ABILITY BIT(9)
98 #define MII_ABILITY BIT(8)
99 #define SGMII_ABILITY BIT(0)
100
101 #define VEND1_MII_BASIC_CONFIG 0xAFC6
102 #define MII_BASIC_CONFIG_REV BIT(4)
103 #define MII_BASIC_CONFIG_SGMII 0x9
104 #define MII_BASIC_CONFIG_RGMII 0x7
105 #define MII_BASIC_CONFIG_RMII 0x5
106 #define MII_BASIC_CONFIG_MII 0x4
107
108 #define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351
109 #define EXTENDED_CNT_EN BIT(15)
110 #define VEND1_MONITOR_STATUS 0xAC80
111 #define MONITOR_RESET BIT(15)
112 #define VEND1_MONITOR_CONFIG 0xAC86
113 #define LOST_FRAMES_CNT_EN BIT(9)
114 #define ALL_FRAMES_CNT_EN BIT(8)
115
116 #define VEND1_SYMBOL_ERROR_COUNTER 0x8350
117 #define VEND1_LINK_DROP_COUNTER 0x8352
118 #define VEND1_LINK_LOSSES_AND_FAILURES 0x8353
119 #define VEND1_RX_PREAMBLE_COUNT 0xAFCE
120 #define VEND1_TX_PREAMBLE_COUNT 0xAFCF
121 #define VEND1_RX_IPG_LENGTH 0xAFD0
122 #define VEND1_TX_IPG_LENGTH 0xAFD1
123 #define COUNTER_EN BIT(15)
124
125 #define VEND1_PTP_CONFIG 0x1102
126 #define EXT_TRG_EDGE BIT(1)
127
128 #define TJA1120_SYNC_TRIG_FILTER 0x1010
129 #define PTP_TRIG_RISE_TS BIT(3)
130 #define PTP_TRIG_FALLING_TS BIT(2)
131
132 #define CLK_RATE_ADJ_LD BIT(15)
133 #define CLK_RATE_ADJ_DIR BIT(14)
134
135 #define VEND1_RX_TS_INSRT_CTRL 0x114D
136 #define TJA1103_RX_TS_INSRT_MODE2 0x02
137
138 #define TJA1120_RX_TS_INSRT_CTRL 0x9012
139 #define TJA1120_RX_TS_INSRT_EN BIT(15)
140 #define TJA1120_TS_INSRT_MODE BIT(4)
141
142 #define VEND1_EGR_RING_DATA_0 0x114E
143 #define VEND1_EGR_RING_CTRL 0x1154
144
145 #define RING_DATA_0_TS_VALID BIT(15)
146
147 #define RING_DONE BIT(0)
148
149 #define TS_SEC_MASK GENMASK(1, 0)
150
151 #define VEND1_PORT_FUNC_ENABLES 0x8048
152 #define PTP_ENABLE BIT(3)
153 #define PHY_TEST_ENABLE BIT(0)
154
155 #define VEND1_PORT_PTP_CONTROL 0x9000
156 #define PORT_PTP_CONTROL_BYPASS BIT(11)
157
158 #define PTP_CLK_PERIOD_100BT1 15ULL
159 #define PTP_CLK_PERIOD_1000BT1 8ULL
160
161 #define EVENT_MSG_FILT_ALL 0x0F
162 #define EVENT_MSG_FILT_NONE 0x00
163
164 #define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40
165 #define GPIO_FUNC_EN BIT(15)
166 #define GPIO_FUNC_PTP BIT(6)
167 #define GPIO_SIGNAL_PTP_TRIGGER 0x01
168 #define GPIO_SIGNAL_PPS_OUT 0x12
169 #define GPIO_DISABLE 0
170 #define GPIO_PPS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
171 GPIO_SIGNAL_PPS_OUT)
172 #define GPIO_EXTTS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \
173 GPIO_SIGNAL_PTP_TRIGGER)
174
175 #define RGMII_PERIOD_PS 8000U
176 #define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360)
177 #define MIN_ID_PS 1644U
178 #define MAX_ID_PS 2260U
179 #define DEFAULT_ID_PS 2000U
180
181 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
182 (ppb) * (ptp_clk_period), NSEC_PER_SEC)
183
184 #define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb)
185
186 struct nxp_c45_phy;
187
/* Per-skb state kept in skb->cb while a PTP frame waits in the driver's
 * tx/rx queues for its hardware timestamp.
 */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* parsed PTP header within the skb */
	unsigned int type;		/* PTP classification as passed to the
					 * mii_ts rx/txtstamp callbacks
					 */
};
192
193 #define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size) \
194 ((struct nxp_c45_reg_field) { \
195 .reg = _reg, \
196 .devad = _devad, \
197 .offset = _offset, \
198 .size = _size, \
199 })
200
/* Description of a bit field inside an MMD register: register address,
 * MMD device address, bit offset and width. Accessed through the
 * nxp_c45_{read,write,set,clear}_reg_field() helpers.
 */
struct nxp_c45_reg_field {
	u16 reg;	/* register address within the MMD */
	u8  devad;	/* MMD device address */
	u8  offset;	/* LSB position of the field */
	u8  size;	/* field width in bits; 0 is invalid */
};
207
/* One hardware egress timestamp as read from the PHY, together with the
 * PTP header fields used to match it against a queued skb.
 * Note: hw seconds are truncated (see TS_SEC_MASK / nxp_c45_reconstruct_ts()).
 */
struct nxp_c45_hwts {
	u32 nsec;		/* nanoseconds part of the timestamp */
	u32 sec;		/* low bits of the seconds counter */
	u8  domain_number;	/* PTP domain of the timestamped frame */
	u16 sequence_id;	/* PTP sequence id of the timestamped frame */
	u8  msg_type;		/* PTP message type of the timestamped frame */
};
215
/* Per-chip register layout. TJA1103 and TJA1120 provide the same PTP
 * feature set at different register addresses/bit positions; each chip's
 * nxp_c45_phy_data supplies one of these so the common code stays generic.
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;	/* latch LTC for reading */
	struct nxp_c45_reg_field ltc_write;	/* commit written LTC value */
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;
	struct nxp_c45_reg_field sec_4_2;
	struct nxp_c45_reg_field nsec_15_0;
	struct nxp_c45_reg_field nsec_29_16;

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
261
/* One ethtool statistics entry: display name plus the register field
 * holding the counter value.
 */
struct nxp_c45_phy_stats {
	const char *name;
	const struct nxp_c45_reg_field counter;
};
266
/* Chip-specific constants and operations, selected via driver_data on the
 * PHY ID match (TJA1103 vs TJA1120).
 */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;	/* chip register layout */
	const struct nxp_c45_phy_stats *stats;	/* ethtool counters table */
	int n_stats;				/* entries in @stats */
	u8 ptp_clk_period;	/* PTP clock period; presumably in ns, see
				 * the PTP_CLK_PERIOD_* defines — confirm
				 */
	bool ext_ts_both_edges;	/* HW can timestamp both trigger edges */
	bool ack_ptp_irq;	/* PTP IRQ needs an explicit ack */
	void (*counters_enable)(struct phy_device *phydev);
	/* Fetch one egress timestamp; returns false if none is valid. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	/* Fetch the latest external trigger timestamp. */
	bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
	/* Chip-specific extra IRQ handling hook. */
	void (*nmi_handler)(struct phy_device *phydev,
			    irqreturn_t *irq_status);
};
283
/* Driver-private state, one instance per PHY device. */
struct nxp_c45_phy {
	const struct nxp_c45_phy_data *phy_data;	/* chip ops/constants */
	struct phy_device *phydev;
	struct mii_timestamper mii_ts;	/* PTP timestamping hooks */
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info caps;
	struct sk_buff_head tx_queue;	/* skbs awaiting egress timestamps */
	struct sk_buff_head rx_queue;	/* skbs awaiting rx ts reconstruction */
	/* used to access the PTP registers atomic */
	struct mutex ptp_lock;
	int hwts_tx;			/* HWTSTAMP_TX_* mode */
	int hwts_rx;			/* rx timestamping enabled flag */
	u32 tx_delay;			/* TX internal delay, picoseconds */
	u32 rx_delay;			/* RX internal delay, picoseconds */
	struct timespec64 extts_ts;	/* last reported ext trigger ts */
	int extts_index;		/* pin index of the active ext ts */
	bool extts;			/* ext ts polling active */
};
302
303 static const
nxp_c45_get_data(struct phy_device * phydev)304 struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
305 {
306 return phydev->drv->driver_data;
307 }
308
309 static const
nxp_c45_get_regmap(struct phy_device * phydev)310 struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
311 {
312 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
313
314 return phy_data->regmap;
315 }
316
nxp_c45_read_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)317 static int nxp_c45_read_reg_field(struct phy_device *phydev,
318 const struct nxp_c45_reg_field *reg_field)
319 {
320 u16 mask;
321 int ret;
322
323 if (reg_field->size == 0) {
324 phydev_err(phydev, "Trying to read a reg field of size 0.\n");
325 return -EINVAL;
326 }
327
328 ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
329 if (ret < 0)
330 return ret;
331
332 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
333 GENMASK(reg_field->offset + reg_field->size - 1,
334 reg_field->offset);
335 ret &= mask;
336 ret >>= reg_field->offset;
337
338 return ret;
339 }
340
/* Write @val into @reg_field via read-modify-write. Returns the
 * phy_modify_mmd_changed() result (negative errno on failure).
 */
static int nxp_c45_write_reg_field(struct phy_device *phydev,
				   const struct nxp_c45_reg_field *reg_field,
				   u16 val)
{
	u16 mask;

	if (!reg_field->size) {
		phydev_err(phydev, "Trying to write a reg field of size 0.\n");
		return -EINVAL;
	}

	if (reg_field->size == 1)
		mask = BIT(reg_field->offset);
	else
		mask = GENMASK(reg_field->offset + reg_field->size - 1,
			       reg_field->offset);

	return phy_modify_mmd_changed(phydev, reg_field->devad,
				      reg_field->reg, mask,
				      val << reg_field->offset);
}
361
nxp_c45_set_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)362 static int nxp_c45_set_reg_field(struct phy_device *phydev,
363 const struct nxp_c45_reg_field *reg_field)
364 {
365 if (reg_field->size != 1) {
366 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
367 return -EINVAL;
368 }
369
370 return nxp_c45_write_reg_field(phydev, reg_field, 1);
371 }
372
nxp_c45_clear_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)373 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
374 const struct nxp_c45_reg_field *reg_field)
375 {
376 if (reg_field->size != 1) {
377 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
378 return -EINVAL;
379 }
380
381 return nxp_c45_write_reg_field(phydev, reg_field, 0);
382 }
383
nxp_c45_poll_txts(struct phy_device * phydev)384 static bool nxp_c45_poll_txts(struct phy_device *phydev)
385 {
386 return phydev->irq <= 0;
387 }
388
/* Read the PHY's local time counter (LTC). Lockless variant: the caller
 * must hold priv->ptp_lock. @sts is accepted but unused here.
 *
 * Writing the ltc_read field presumably latches the running counter into
 * the four read registers, which are then read 16 bits at a time — keep
 * the latch-then-read order intact.
 */
static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
	/* Assemble nsec and sec from low/high 16-bit halves. */
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_nsec_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    regmap->vend1_ltc_rd_nsec_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  regmap->vend1_ltc_rd_sec_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_sec_1) << 16;

	return 0;
}
408
nxp_c45_ptp_gettimex64(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)409 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
410 struct timespec64 *ts,
411 struct ptp_system_timestamp *sts)
412 {
413 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
414
415 mutex_lock(&priv->ptp_lock);
416 _nxp_c45_ptp_gettimex64(ptp, ts, sts);
417 mutex_unlock(&priv->ptp_lock);
418
419 return 0;
420 }
421
/* Set the PHY's local time counter. Lockless variant: the caller must
 * hold priv->ptp_lock.
 *
 * The value is staged in the four 16-bit write registers and only takes
 * effect when the ltc_write field is set afterwards — keep that ordering.
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
		      ts->tv_sec >> 16);
	/* Commit the staged value to the LTC. */
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);

	return 0;
}
440
nxp_c45_ptp_settime64(struct ptp_clock_info * ptp,const struct timespec64 * ts)441 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
442 const struct timespec64 *ts)
443 {
444 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
445
446 mutex_lock(&priv->ptp_lock);
447 _nxp_c45_ptp_settime64(ptp, ts);
448 mutex_unlock(&priv->ptp_lock);
449
450 return 0;
451 }
452
/* ptp_clock_info::adjfine — adjust the LTC rate.
 *
 * The hardware takes an unsigned sub-ns increment plus a direction bit,
 * so the signed ppb value is split into magnitude and sign. The low
 * 16 bits are written first; the write to the _1 register carries
 * CLK_RATE_ADJ_LD (and optionally CLK_RATE_ADJ_DIR), which applies the
 * whole adjustment — keep the write order.
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	subns_inc_val |= CLK_RATE_ADJ_LD;	/* load the new rate */
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
483
/* ptp_clock_info::adjtime — shift the LTC by @delta nanoseconds using a
 * read-modify-write of the counter under ptp_lock.
 */
static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	struct timespec64 offset = ns_to_timespec64(delta);
	struct timespec64 ltc;

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_gettimex64(ptp, &ltc, NULL);
	ltc = timespec64_add(ltc, offset);
	_nxp_c45_ptp_settime64(ptp, &ltc);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
498
/* Rebuild a full timestamp from a hardware timestamp that carries only
 * the low TS_SEC_MASK bits (2 bits) of the seconds counter.
 *
 * @ts holds the current LTC time on entry. If the current low second
 * bits are already smaller than the hardware's, the counter wrapped
 * between the event and this read, so step back one wrap period before
 * splicing in the hardware's low bits.
 */
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
508
nxp_c45_match_ts(struct ptp_header * header,struct nxp_c45_hwts * hwts,unsigned int type)509 static bool nxp_c45_match_ts(struct ptp_header *header,
510 struct nxp_c45_hwts *hwts,
511 unsigned int type)
512 {
513 return ntohs(header->sequence_id) == hwts->sequence_id &&
514 ptp_get_msgtype(header, type) == hwts->msg_type &&
515 header->domain_number == hwts->domain_number;
516 }
517
/* Read the external trigger timestamp from the four 16-bit data
 * registers, then write RING_DONE to release the entry so the hardware
 * can present the next one. Always reports success.
 */
static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       regmap->vend1_ext_trg_data_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_3) << 16;
	/* Acknowledge the entry after the data has been consumed. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_ext_trg_ctrl, RING_DONE);

	return true;
}
536
tja1120_extts_is_valid(struct phy_device * phydev)537 static bool tja1120_extts_is_valid(struct phy_device *phydev)
538 {
539 bool valid;
540 int reg;
541
542 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
543 TJA1120_VEND1_PTP_TRIG_DATA_S);
544 valid = !!(reg & TJA1120_TS_VALID);
545
546 return valid;
547 }
548
/* TJA1120 variant of get_extts with an errata workaround: on engineering
 * samples a timestamp may sit in the FIFO without being presented in the
 * buffer, so if the buffer is invalid but MORE_TS is set, pop the FIFO
 * (RING_DONE) and re-check before giving up.
 * Returns true and fills @extts when a valid timestamp was read.
 */
static bool tja1120_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   regmap->vend1_ext_trg_ctrl);
	more_ts = !!(reg & TJA1120_MORE_TS);

	valid = tja1120_extts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_extts_out;

		/* Bug workaround for TJA1120 engineering samples: move the new
		 * timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      regmap->vend1_ext_trg_ctrl, RING_DONE);
		valid = tja1120_extts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_extts_out;
	}

	nxp_c45_get_extts(priv, extts);
tja1120_get_extts_out:
	return valid;
}
581
/* Read one egress timestamp from the chip's register fields into @hwts.
 * The matching fields (domain, msg type, sequence id) are read alongside
 * the split nsec/sec values; sec carries only 5 low bits (1_0 plus 4_2).
 * Caller must ensure a valid entry is exposed (see the get_hwtxts ops).
 */
static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
				   struct nxp_c45_hwts *hwts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;

	hwts->domain_number =
		nxp_c45_read_reg_field(phydev, &regmap->domain_number);
	hwts->msg_type =
		nxp_c45_read_reg_field(phydev, &regmap->msg_type);
	hwts->sequence_id =
		nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
	hwts->nsec =
		nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
	hwts->nsec |=
		nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
	hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
	hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;
}
601
/* TJA1103 get_egressts op: pop the next egress timestamp ring entry
 * (write RING_DONE first, which presents the entry in the data
 * registers), then read it if the VALID bit is set.
 * Returns true and fills @hwts on success. Takes ptp_lock.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	nxp_c45_read_egress_ts(priv, hwts);
nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
621
tja1120_egress_ts_is_valid(struct phy_device * phydev)622 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
623 {
624 bool valid;
625 u16 reg;
626
627 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
628 valid = !!(reg & TJA1120_TS_VALID);
629
630 return valid;
631 }
632
/* TJA1120 get_egressts op, with the engineering-sample errata workaround:
 * when the buffer is invalid but MORE_TS indicates queued entries, pop
 * the FIFO and re-check. After a successful read, the VALID bit is
 * cleared to release the buffer. Returns true and fills @hwts on
 * success. Takes ptp_lock.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	/* Release the buffer for the next timestamp. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
665
/* Deliver one hardware egress timestamp to the skb it belongs to.
 *
 * The tx queue is walked under its spinlock to find the skb whose PTP
 * header matches @txts; the match is unlinked inside the lock and
 * completed outside it. The truncated hardware seconds are expanded
 * against the current LTC time (nxp_c45_reconstruct_ts()) before the
 * timestamp is reported via skb_complete_tx_timestamp().
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
700
/* PTP auxiliary worker. Three duties:
 *  1. In polling mode (no IRQ), drain hardware egress timestamps and
 *     match them to queued tx skbs.
 *  2. Complete queued rx skbs: the hardware stored a truncated
 *     timestamp in the PTP header's reserved2 field (2 bits of seconds,
 *     30 bits of nanoseconds), which is expanded against the LTC.
 *  3. While external timestamping is active, poll for new trigger
 *     timestamps and emit PTP_CLOCK_EXTTS events on change.
 *
 * Return value follows do_aux_work semantics: negative means no
 * reschedule, non-negative is the delay before the next invocation.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool ts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		ts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!ts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		/* Truncated rx timestamp was inserted into reserved2:
		 * bits 31:30 = seconds, bits 29:0 = nanoseconds.
		 */
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		/* Scrub the in-band timestamp before passing the frame up. */
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		ts_valid = data->get_extts(priv, &ts);
		/* Only report a trigger timestamp that changed since the
		 * last poll to avoid duplicate events.
		 */
		if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
752
/* Program the function configuration register of GPIO @pin. */
static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
				int pin, u16 pin_cfg)
{
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
}
761
/* Enable/disable the periodic output (PPS) on the requested pin.
 *
 * The hardware only generates a fixed 1 Hz pulse aligned to the seconds
 * counter; period must be exactly 1 s, start time must be 0, and the
 * only supported phase values are 0 and 500 ms (implemented by flipping
 * the pulse polarity). Returns 0 on success or a negative errno.
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	int pin;

	/* PTP_PEROUT_PHASE is the only supported flag. */
	if (perout->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_enable);
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_polarity);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		/* A 500 ms phase is realized by inverting the polarity. */
		if (perout->phase.nsec == 0)
			nxp_c45_clear_reg_field(priv->phydev,
						&regmap->pps_polarity);
		else
			nxp_c45_set_reg_field(priv->phydev,
					      &regmap->pps_polarity);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);

	return 0;
}
822
nxp_c45_set_rising_or_falling(struct phy_device * phydev,struct ptp_extts_request * extts)823 static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
824 struct ptp_extts_request *extts)
825 {
826 if (extts->flags & PTP_RISING_EDGE)
827 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
828 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
829
830 if (extts->flags & PTP_FALLING_EDGE)
831 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
832 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
833 }
834
nxp_c45_set_rising_and_falling(struct phy_device * phydev,struct ptp_extts_request * extts)835 static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
836 struct ptp_extts_request *extts)
837 {
838 /* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
839 * this case external ts will be enabled on rising edge.
840 */
841 if (extts->flags & PTP_RISING_EDGE ||
842 extts->flags == PTP_ENABLE_FEATURE)
843 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
844 TJA1120_SYNC_TRIG_FILTER,
845 PTP_TRIG_RISE_TS);
846 else
847 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
848 TJA1120_SYNC_TRIG_FILTER,
849 PTP_TRIG_RISE_TS);
850
851 if (extts->flags & PTP_FALLING_EDGE)
852 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
853 TJA1120_SYNC_TRIG_FILTER,
854 PTP_TRIG_FALLING_TS);
855 else
856 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
857 TJA1120_SYNC_TRIG_FILTER,
858 PTP_TRIG_FALLING_TS);
859 }
860
/* Enable/disable external trigger timestamping on the requested pin.
 *
 * Rejects unsupported flags and, on chips that cannot timestamp both
 * edges, the rising+falling combination. On enable, programs the edge
 * selection, routes the pin to the PTP trigger function and kicks the
 * aux worker, which polls for trigger timestamps (priv->extts).
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			     PTP_RISING_EDGE |
			     PTP_FALLING_EDGE |
			     PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE) &&
	    !data->ext_ts_both_edges)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	if (data->ext_ts_both_edges)
		nxp_c45_set_rising_and_falling(priv->phydev, extts);
	else
		nxp_c45_set_rising_or_falling(priv->phydev, extts);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Start polling for trigger timestamps immediately. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
902
nxp_c45_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * req,int on)903 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
904 struct ptp_clock_request *req, int on)
905 {
906 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
907
908 switch (req->type) {
909 case PTP_CLK_REQ_EXTTS:
910 return nxp_c45_extts_enable(priv, &req->extts, on);
911 case PTP_CLK_REQ_PEROUT:
912 return nxp_c45_perout_enable(priv, &req->perout, on);
913 default:
914 return -EOPNOTSUPP;
915 }
916 }
917
/* Twelve general-purpose pins; their PTP function (PEROUT/EXTTS) is
 * assigned at runtime through the ptp_clock pin_config interface and
 * validated by nxp_c45_ptp_verify_pin().
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
932
nxp_c45_ptp_verify_pin(struct ptp_clock_info * ptp,unsigned int pin,enum ptp_pin_function func,unsigned int chan)933 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
934 enum ptp_pin_function func, unsigned int chan)
935 {
936 if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
937 return -EINVAL;
938
939 switch (func) {
940 case PTP_PF_NONE:
941 case PTP_PF_PEROUT:
942 case PTP_PF_EXTTS:
943 break;
944 default:
945 return -EOPNOTSUPP;
946 }
947
948 return 0;
949 }
950
/* Fill in the ptp_clock_info capabilities and register the PTP clock.
 * Returns 0 on success or a negative errno; ptp_clock_register() may
 * also return NULL (treated here as -ENOMEM) — e.g. when PTP clock
 * support is not available.
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
981
/* mii_timestamper::txtstamp — take ownership of an outgoing PTP frame
 * clone. With tx timestamping on, the parsed header is cached in the cb
 * and the skb queued until its hardware timestamp arrives (in polling
 * mode the aux worker is kicked to fetch it). Otherwise the clone is
 * dropped.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
1003
nxp_c45_rxtstamp(struct mii_timestamper * mii_ts,struct sk_buff * skb,int type)1004 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
1005 struct sk_buff *skb, int type)
1006 {
1007 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1008 mii_ts);
1009 struct ptp_header *header = ptp_parse_header(skb, type);
1010
1011 if (!header)
1012 return false;
1013
1014 if (!priv->hwts_rx)
1015 return false;
1016
1017 NXP_C45_SKB_CB(skb)->header = header;
1018 skb_queue_tail(&priv->rx_queue, skb);
1019 ptp_schedule_worker(priv->ptp_clock, 0);
1020
1021 return true;
1022 }
1023
nxp_c45_hwtstamp(struct mii_timestamper * mii_ts,struct ifreq * ifreq)1024 static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
1025 struct ifreq *ifreq)
1026 {
1027 struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1028 mii_ts);
1029 struct phy_device *phydev = priv->phydev;
1030 const struct nxp_c45_phy_data *data;
1031 struct hwtstamp_config cfg;
1032
1033 if (copy_from_user(&cfg, ifreq->ifr_data, sizeof(cfg)))
1034 return -EFAULT;
1035
1036 if (cfg.tx_type < 0 || cfg.tx_type > HWTSTAMP_TX_ON)
1037 return -ERANGE;
1038
1039 data = nxp_c45_get_data(phydev);
1040 priv->hwts_tx = cfg.tx_type;
1041
1042 switch (cfg.rx_filter) {
1043 case HWTSTAMP_FILTER_NONE:
1044 priv->hwts_rx = 0;
1045 break;
1046 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1047 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1048 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1049 priv->hwts_rx = 1;
1050 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1051 break;
1052 default:
1053 return -ERANGE;
1054 }
1055
1056 if (priv->hwts_rx || priv->hwts_tx) {
1057 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1058 data->regmap->vend1_event_msg_filt,
1059 EVENT_MSG_FILT_ALL);
1060 data->ptp_enable(phydev, true);
1061 } else {
1062 phy_write_mmd(phydev, MDIO_MMD_VEND1,
1063 data->regmap->vend1_event_msg_filt,
1064 EVENT_MSG_FILT_NONE);
1065 data->ptp_enable(phydev, false);
1066 }
1067
1068 if (nxp_c45_poll_txts(priv->phydev))
1069 goto nxp_c45_no_ptp_irq;
1070
1071 if (priv->hwts_tx)
1072 nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
1073 else
1074 nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);
1075
1076 nxp_c45_no_ptp_irq:
1077 return copy_to_user(ifreq->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1078 }
1079
/* Report hardware timestamping capabilities to ethtool (ethtool -T). */
static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
			   struct ethtool_ts_info *ts_info)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				   SOF_TIMESTAMPING_RX_HARDWARE |
				   SOF_TIMESTAMPING_RAW_HARDWARE;
	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	/* Only L2 PTPv2 traffic is timestamped; nxp_c45_hwtstamp() folds
	 * the SYNC/DELAY_REQ filters into L2_EVENT.
	 */
	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);

	return 0;
}
1098
/* ethtool counters present on every supported PHY. Each entry names a
 * register field: (register, devad, bit offset, field width in bits).
 */
static const struct nxp_c45_phy_stats common_hw_stats[] = {
	{ "phy_link_status_drop_cnt",
	  NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
	{ "phy_link_availability_drop_cnt",
	  NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
	{ "phy_link_loss_cnt",
	  NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
	{ "phy_link_failure_cnt",
	  NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
	{ "phy_symbol_error_cnt",
	  NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
};
1111
/* TJA1103-specific ethtool counters, appended after common_hw_stats. */
static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
	{ "rx_preamble_count",
	  NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
	{ "tx_preamble_count",
	  NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
	{ "rx_ipg_length",
	  NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
	{ "tx_ipg_length",
	  NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
};
1122
/* TJA1120-specific ethtool counters, appended after common_hw_stats.
 * The "_xtd" entries hold the upper bits of the matching 16-bit counter.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
	  NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
	  NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
	  NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
	  NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
	  NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1143
nxp_c45_get_sset_count(struct phy_device * phydev)1144 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1145 {
1146 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1147
1148 return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1149 }
1150
/* ethtool stats names: common counter names first, then the
 * PHY-specific ones, each in an ETH_GSTRING_LEN slot.
 */
static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
{
	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
	size_t n_common = ARRAY_SIZE(common_hw_stats);
	size_t count = nxp_c45_get_sset_count(phydev);
	const char *name;
	size_t i;

	for (i = 0; i < count; i++) {
		if (i < n_common)
			name = common_hw_stats[i].name;
		else
			name = phy_data->stats[i - n_common].name;

		strscpy(data + i * ETH_GSTRING_LEN, name, ETH_GSTRING_LEN);
	}
}
1169
/* ethtool stats values: read each counter field, in the same order as
 * nxp_c45_get_strings(); U64_MAX marks a failed register read.
 */
static void nxp_c45_get_stats(struct phy_device *phydev,
			      struct ethtool_stats *stats, u64 *data)
{
	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
	size_t n_common = ARRAY_SIZE(common_hw_stats);
	size_t count = nxp_c45_get_sset_count(phydev);
	const struct nxp_c45_reg_field *field;
	size_t i;
	int val;

	for (i = 0; i < count; i++) {
		if (i < n_common)
			field = &common_hw_stats[i].counter;
		else
			field = &phy_data->stats[i - n_common].counter;

		val = nxp_c45_read_reg_field(phydev, field);
		data[i] = val < 0 ? U64_MAX : (u64)val;
	}
}
1195
/* Unlock the configuration registers, then enable the port, PHY and
 * port-infra blocks. The global unlock must settle before the
 * per-block enables are written.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
1212
/* Leave configuration mode and start normal PHY operation. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1218
nxp_c45_config_intr(struct phy_device * phydev)1219 static int nxp_c45_config_intr(struct phy_device *phydev)
1220 {
1221 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1222 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1223 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1224 else
1225 return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1226 VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1227 }
1228
/* TJA1103 interrupt setup: acknowledge any stale FUSA event, then
 * configure the common link-event IRQ.
 */
static int tja1103_config_intr(struct phy_device *phydev)
{
	int ret;

	/* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
			    FUSA_PASS);
	if (ret)
		return ret;

	return nxp_c45_config_intr(phydev);
}
1241
tja1120_config_intr(struct phy_device * phydev)1242 static int tja1120_config_intr(struct phy_device *phydev)
1243 {
1244 int ret;
1245
1246 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
1247 ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1248 TJA1120_GLOBAL_INFRA_IRQ_EN,
1249 TJA1120_DEV_BOOT_DONE);
1250 else
1251 ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1252 TJA1120_GLOBAL_INFRA_IRQ_EN,
1253 TJA1120_DEV_BOOT_DONE);
1254 if (ret)
1255 return ret;
1256
1257 return nxp_c45_config_intr(phydev);
1258 }
1259
/* Threaded IRQ handler: services link events, drains the egress
 * timestamp FIFO and finally lets the chip-specific handler look at the
 * non-maskable sources.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	/* NOTE(review): a failed MDIO read yields a negative errno whose
	 * bit pattern is tested against the IRQ bits below - assumed
	 * harmless (spurious ack at worst); confirm.
	 */
	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		/* Ack the event, then let phylib re-read the link state. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp,
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		/* Drain every pending egress timestamp and match each one
		 * to a queued TX skb.
		 */
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	data->nmi_handler(phydev, &ret);

	return ret;
}
1295
/* Trigger a device reset and poll (every 20ms, up to 240ms) until the
 * self-clearing RESET bit drops.
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	/* 'ret' doubles as the polled register value inside the macro. */
	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1310
nxp_c45_cable_test_start(struct phy_device * phydev)1311 static int nxp_c45_cable_test_start(struct phy_device *phydev)
1312 {
1313 const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
1314
1315 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1316 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
1317 return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
1318 CABLE_TEST_ENABLE | CABLE_TEST_START);
1319 }
1320
/* Poll a running cable test. Once the result is valid, report it over
 * ethtool netlink, leave test mode and restart normal operation.
 * (Also repairs the mojibake "&reg" -> "(R)" corruption of the two
 * &regmap-> field references.)
 */
static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
	int ret;
	u8 cable_test_result;

	ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
	if (!ret) {
		/* Result not ready yet; keep polling. */
		*finished = false;
		return 0;
	}

	*finished = true;
	cable_test_result = nxp_c45_read_reg_field(phydev,
						   &regmap->cable_test_result);

	switch (cable_test_result) {
	case CABLE_TEST_OK:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OK);
		break;
	case CABLE_TEST_SHORTED:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
		break;
	case CABLE_TEST_OPEN:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
		break;
	default:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
	}

	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
			   CABLE_TEST_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);

	return nxp_c45_start_op(phydev);
}
1363
nxp_c45_get_sqi(struct phy_device * phydev)1364 static int nxp_c45_get_sqi(struct phy_device *phydev)
1365 {
1366 int reg;
1367
1368 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1369 if (!(reg & SQI_VALID))
1370 return -EINVAL;
1371
1372 reg &= SQI_MASK;
1373
1374 return reg;
1375 }
1376
static void tja1120_link_change_notify(struct phy_device *phydev)
{
	/* Bug workaround for TJA1120 engineering samples: fix egress
	 * timestamps lost after link recovery by pulsing the PCS reset
	 * whenever the link goes down.
	 */
	if (phydev->state == PHY_NOLINK) {
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
	}
}
1389
/* Highest SQI value the hardware can report (3-bit field). */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1394
/* Validate an RGMII internal delay (picoseconds) against the supported
 * [MIN_ID_PS, MAX_ID_PS] range. Returns 0 if in range, -EINVAL
 * otherwise (with an explanatory error message).
 */
static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
{
	if (delay >= MIN_ID_PS && delay <= MAX_ID_PS)
		return 0;

	if (delay < MIN_ID_PS)
		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
	else
		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);

	return -EINVAL;
}
1409
/* Enable the common link-drop counter, then the chip-specific ones. */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
1419
/* Common PTP bring-up: program the LTC clock period, unlock the LTC,
 * then run the chip-specific PTP init.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1431
/* Convert an RGMII internal delay expressed in degrees of phase into
 * the raw phase-offset value programmed into VEND1_TXID/VEND1_RXID.
 * The hardware delay is 73.8 + raw * 0.9 degrees, so
 * raw = (degree - 73.8) / 0.9; the math is done in tenths of a degree
 * to keep one decimal of precision without floating point.
 * NOTE(review): assumes degree >= 74 - presumably guaranteed by the
 * MIN_ID_PS check in nxp_c45_check_delay(); otherwise the unsigned
 * subtraction would wrap. Confirm.
 */
static u64 nxp_c45_get_phase_shift(u64 degree)
{
	degree *= 10;
	degree -= 738;
	return div_u64(degree, 9);
}
1442
/* Turn off both RGMII internal delay lines (TX and RX). */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
1448
/* Program the RGMII internal delay lines from priv->tx_delay /
 * priv->rx_delay (picoseconds), enabling each line only for the
 * interface modes that request it.
 */
static void nxp_c45_set_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u64 tx_delay = priv->tx_delay;
	u64 rx_delay = priv->rx_delay;
	u64 degree;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		/* ps -> degrees -> raw hardware phase-shift encoding */
		degree = div_u64(tx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
				   ID_ENABLE);
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		degree = div_u64(rx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
				   ID_ENABLE);
	}
}
1476
nxp_c45_get_delays(struct phy_device * phydev)1477 static int nxp_c45_get_delays(struct phy_device *phydev)
1478 {
1479 struct nxp_c45_phy *priv = phydev->priv;
1480 int ret;
1481
1482 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1483 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1484 ret = device_property_read_u32(&phydev->mdio.dev,
1485 "tx-internal-delay-ps",
1486 &priv->tx_delay);
1487 if (ret)
1488 priv->tx_delay = DEFAULT_ID_PS;
1489
1490 ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1491 if (ret) {
1492 phydev_err(phydev,
1493 "tx-internal-delay-ps invalid value\n");
1494 return ret;
1495 }
1496 }
1497
1498 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1499 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1500 ret = device_property_read_u32(&phydev->mdio.dev,
1501 "rx-internal-delay-ps",
1502 &priv->rx_delay);
1503 if (ret)
1504 priv->rx_delay = DEFAULT_ID_PS;
1505
1506 ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1507 if (ret) {
1508 phydev_err(phydev,
1509 "rx-internal-delay-ps invalid value\n");
1510 return ret;
1511 }
1512 }
1513
1514 return 0;
1515 }
1516
/* Configure the MAC-side interface (MII/RMII/RGMII[-ID]/rev-MII/SGMII)
 * after checking the PHY advertises the requested ability. Returns
 * -EINVAL for unsupported or unknown interface modes.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	int ret;

	/* 'ret' first holds the abilities register, later an errno. */
	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Plain RGMII: both internal delay lines off. */
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Fetch, validate and program the requested delays. */
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RMII);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		/* Nothing to program for an internal link. */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1589
/* Full device bring-up: unlock configuration, apply workarounds, set
 * the MAC interface mode, enable counters and PTP, and start operation.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* 100/1000BASE-T1 has no autonegotiation. */
	phydev->autoneg = AUTONEG_DISABLE;

	nxp_c45_counters_enable(phydev);
	nxp_c45_ptp_init(phydev);

	return nxp_c45_start_op(phydev);
}
1620
/* Report supported link modes: PMA abilities plus TP and MII bits. */
static int nxp_c45_get_features(struct phy_device *phydev)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);

	return genphy_c45_pma_read_abilities(phydev);
}
1628
nxp_c45_probe(struct phy_device * phydev)1629 static int nxp_c45_probe(struct phy_device *phydev)
1630 {
1631 struct nxp_c45_phy *priv;
1632 int ptp_ability;
1633 int ret = 0;
1634
1635 priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
1636 if (!priv)
1637 return -ENOMEM;
1638
1639 skb_queue_head_init(&priv->tx_queue);
1640 skb_queue_head_init(&priv->rx_queue);
1641
1642 priv->phydev = phydev;
1643
1644 phydev->priv = priv;
1645
1646 mutex_init(&priv->ptp_lock);
1647
1648 ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1649 VEND1_PORT_ABILITIES);
1650 ptp_ability = !!(ptp_ability & PTP_ABILITY);
1651 if (!ptp_ability) {
1652 phydev_dbg(phydev, "the phy does not support PTP");
1653 goto no_ptp_support;
1654 }
1655
1656 if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
1657 IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
1658 priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
1659 priv->mii_ts.txtstamp = nxp_c45_txtstamp;
1660 priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
1661 priv->mii_ts.ts_info = nxp_c45_ts_info;
1662 phydev->mii_ts = &priv->mii_ts;
1663 ret = nxp_c45_init_ptp_clock(priv);
1664 } else {
1665 phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
1666 }
1667
1668 no_ptp_support:
1669
1670 return ret;
1671 }
1672
/* Undo probe: unregister the PHC (if one was created) and free any
 * skbs still waiting for timestamps.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
}
1683
/* Enable the TJA1103-specific preamble and IPG-length counters. */
static void tja1103_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);
}
1695
/* TJA1103 PTP setup: select the RX timestamp insertion mode and enable
 * the PTP port function.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1703
/* TJA1103: PTP is enabled by taking the port PTP block out of bypass
 * and disabled by putting it back in.
 */
static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (enable) {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
		return;
	}

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_PTP_CONTROL,
			 PORT_PTP_CONTROL_BYPASS);
}
1715
/* TJA1103 non-maskable IRQ source: acknowledge a pending FUSA (self
 * test pass) event and mark the interrupt as handled.
 */
static void tja1103_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_ALWAYS_ACCESSIBLE);
	if (ret & FUSA_PASS) {
		/* Write-one-to-clear acknowledgment. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      VEND1_ALWAYS_ACCESSIBLE,
			      FUSA_PASS);
		*irq_status = IRQ_HANDLED;
	}
}
1730
/* TJA1103 vendor register layout. PTP/LTC registers live in the 0x11xx
 * range; the cable-test engine is at 0x8330.
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period = 0x1104,
	.vend1_event_msg_filt = 0x1148,
	.pps_enable =
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity =
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl =
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read =
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write =
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0 = 0x1106,
	.vend1_ltc_wr_nsec_1 = 0x1107,
	.vend1_ltc_wr_sec_0 = 0x1108,
	.vend1_ltc_wr_sec_1 = 0x1109,
	.vend1_ltc_rd_nsec_0 = 0x110A,
	.vend1_ltc_rd_nsec_1 = 0x110B,
	.vend1_ltc_rd_sec_0 = 0x110C,
	.vend1_ltc_rd_sec_1 = 0x110D,
	.vend1_rate_adj_subns_0 = 0x110F,
	.vend1_rate_adj_subns_1 = 0x1110,
	.irq_egr_ts_en =
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status =
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	/* Egress timestamp FIFO entry fields. */
	.domain_number =
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type =
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id =
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0 =
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2 =
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0 =
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16 =
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0 = 0x1121,
	.vend1_ext_trg_data_1 = 0x1122,
	.vend1_ext_trg_data_2 = 0x1123,
	.vend1_ext_trg_data_3 = 0x1124,
	.vend1_ext_trg_ctrl = 0x1126,
	.cable_test = 0x8330,
	.cable_test_valid =
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result =
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1783
/* TJA1103 chip description: 100BASE-T1 clock period, polled/self-clear
 * egress TS IRQ, and the TJA1103 helper callbacks.
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	.ack_ptp_irq = false,
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.get_extts = nxp_c45_get_extts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
	.nmi_handler = tja1103_nmi_handler,
};
1798
/* Enable the TJA1120-specific counters: extended symbol-error counter
 * and the frame/lost-frame monitors (reset first).
 */
static void tja1120_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
			 EXTENDED_CNT_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
			 MONITOR_RESET);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
}
1808
/* TJA1120 PTP setup: enable RX timestamp insertion, select the
 * timestamp mode for external timestamps, and enable PTP in the device
 * configuration.
 */
static void tja1120_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
		      TJA1120_TS_INSRT_MODE);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
			 PTP_ENABLE);
}
1818
/* TJA1120: PTP is gated directly by the PTP_ENABLE port function bit. */
static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (!enable) {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_FUNC_ENABLES,
				   PTP_ENABLE);
		return;
	}

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1830
/* TJA1120 non-maskable IRQ source: acknowledge a pending device
 * boot-done event and mark the interrupt as handled.
 */
static void tja1120_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   TJA1120_GLOBAL_INFRA_IRQ_STATUS);
	if (ret & TJA1120_DEV_BOOT_DONE) {
		/* Write-one-to-clear acknowledgment. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_GLOBAL_INFRA_IRQ_ACK,
			      TJA1120_DEV_BOOT_DONE);
		*irq_status = IRQ_HANDLED;
	}
}
1845
/* TJA1120 vendor register layout. LTC registers live in the 0x10xx
 * range, egress timestamp FIFO around 0x906x, cable test at 0x8360.
 */
static const struct nxp_c45_regmap tja1120_regmap = {
	.vend1_ptp_clk_period = 0x1020,
	.vend1_event_msg_filt = 0x9010,
	.pps_enable =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl =
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	.ltc_read =
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write =
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	.vend1_ltc_wr_nsec_0 = 0x1040,
	.vend1_ltc_wr_nsec_1 = 0x1041,
	.vend1_ltc_wr_sec_0 = 0x1042,
	.vend1_ltc_wr_sec_1 = 0x1043,
	.vend1_ltc_rd_nsec_0 = 0x1048,
	.vend1_ltc_rd_nsec_1 = 0x1049,
	.vend1_ltc_rd_sec_0 = 0x104A,
	.vend1_ltc_rd_sec_1 = 0x104B,
	.vend1_rate_adj_subns_0 = 0x1030,
	.vend1_rate_adj_subns_1 = 0x1031,
	.irq_egr_ts_en =
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status =
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	/* Egress timestamp FIFO entry fields. */
	.domain_number =
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type =
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id =
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0 =
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2 =
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0 =
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16 =
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0 = 0x1071,
	.vend1_ext_trg_data_1 = 0x1072,
	.vend1_ext_trg_data_2 = 0x1073,
	.vend1_ext_trg_data_3 = 0x1074,
	.vend1_ext_trg_ctrl = 0x1075,
	.cable_test = 0x8360,
	.cable_test_valid =
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result =
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
1898
/* TJA1120 chip description: 1000BASE-T1 clock period, explicit egress
 * TS IRQ acknowledgment, external timestamps on both edges, and the
 * TJA1120 helper callbacks.
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	.ext_ts_both_edges = true,
	.ack_ptp_irq = true,
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.get_extts = tja1120_get_extts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
	.nmi_handler = tja1120_nmi_handler,
};
1913
/* phylib driver table: one entry per supported PHY model. The entries
 * differ only in driver_data, config_intr and (for TJA1120) the
 * link_change_notify workaround.
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
		.name			= "NXP C45 TJA1120",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
};
1965
module_phy_driver(nxp_c45_driver);

/* MDIO device ID table used for module autoloading. */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");
1979