// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018 NXP
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/packing.h>
#include "sja1105.h"

#define SJA1105_SIZE_CGU_CMD	4
#define SJA1110_BASE_MCSS_CLK	SJA1110_CGU_ADDR(0x70)
#define SJA1110_BASE_TIMER_CLK	SJA1110_CGU_ADDR(0x74)

/* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
struct sja1105_cfg_pad_mii {
	u64 d32_os;
	u64 d32_ih;
	u64 d32_ipud;
	u64 d10_ih;
	u64 d10_os;
	u64 d10_ipud;
	u64 ctrl_os;
	u64 ctrl_ih;
	u64 ctrl_ipud;
	u64 clk_os;
	u64 clk_ih;
	u64 clk_ipud;
};

struct sja1105_cfg_pad_mii_id {
	u64 rxc_stable_ovr;
	u64 rxc_delay;
	u64 rxc_bypass;
	u64 rxc_pd;
	u64 txc_stable_ovr;
	u64 txc_delay;
	u64 txc_bypass;
	u64 txc_pd;
};

/* UM10944 Table 82.
 * IDIV_0_C to IDIV_4_C control registers
 * (addr. 10000Bh to 10000Fh)
 */
struct sja1105_cgu_idiv {
	u64 clksrc;
	u64 autoblock;
	u64 idiv;
	u64 pd;
};

/* PLL_1_C control register
 *
 * SJA1105 E/T: UM10944 Table 81 (address 10000Ah)
 * SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah)
 */
struct sja1105_cgu_pll_ctrl {
	u64 pllclksrc;
	u64 msel;
	u64 autoblock;
	u64 psel;
	u64 direct;
	u64 fbsel;
	u64 bypass;
	u64 pd;
};

struct sja1110_cgu_outclk {
	u64 clksrc;
	u64 autoblock;
	u64 pd;
};

enum {
	CLKSRC_MII0_TX_CLK	= 0x00,
	CLKSRC_MII0_RX_CLK	= 0x01,
	CLKSRC_MII1_TX_CLK	= 0x02,
	CLKSRC_MII1_RX_CLK	= 0x03,
	CLKSRC_MII2_TX_CLK	= 0x04,
	CLKSRC_MII2_RX_CLK	= 0x05,
	CLKSRC_MII3_TX_CLK	= 0x06,
	CLKSRC_MII3_RX_CLK	= 0x07,
	CLKSRC_MII4_TX_CLK	= 0x08,
	CLKSRC_MII4_RX_CLK	= 0x09,
	CLKSRC_PLL0		= 0x0B,
	CLKSRC_PLL1		= 0x0E,
	CLKSRC_IDIV0		= 0x11,
	CLKSRC_IDIV1		= 0x12,
	CLKSRC_IDIV2		= 0x13,
	CLKSRC_IDIV3		= 0x14,
	CLKSRC_IDIV4		= 0x15,
};

/* UM10944 Table 83.
 * MIIx clock control registers 1 to 30
 * (addresses 100013h to 100035h)
 */
struct sja1105_cgu_mii_ctrl {
	u64 clksrc;
	u64 autoblock;
	u64 pd;
};

static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv,
				     enum packing_op op)
{
	const int size = 4;

	sja1105_packing(buf, &idiv->clksrc,    28, 24, size, op);
	sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op);
	sja1105_packing(buf, &idiv->idiv,       5,  2, size, op);
	sja1105_packing(buf, &idiv->pd,         0,  0, size, op);
}

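/* The IDIV_x dividers run off the 25 MHz base clock. A factor of 1 keeps
 * 25 MHz (for 100 Mbps operation) and a factor of 10 yields 2.5 MHz (for
 * 10 Mbps). When the divider is not needed, it is powered down instead.
 */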
static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
				   bool enabled, int factor)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct device *dev = priv->ds->dev;
	struct sja1105_cgu_idiv idiv;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

	if (regs->cgu_idiv[port] == SJA1105_RSV_ADDR)
		return 0;

	if (enabled && factor != 1 && factor != 10) {
		dev_err(dev, "idiv factor must be 1 or 10\n");
		return -ERANGE;
	}

	/* Payload for packed_buf */
	idiv.clksrc    = 0x0A;            /* 25MHz */
	idiv.autoblock = 1;               /* Block clk automatically */
	idiv.idiv      = factor - 1;      /* Divide by 1 or 10 */
	idiv.pd        = enabled ? 0 : 1; /* Power down? */
	sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static void
sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd,
				enum packing_op op)
{
	const int size = 4;

	sja1105_packing(buf, &cmd->clksrc,    28, 24, size, op);
	sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
	sja1105_packing(buf, &cmd->pd,         0,  0, size, op);
}

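/* The MII_TX_CLK_n sink feeds the MAC's transmit logic. In MAC role the
 * clock is recovered from the external TX_CLK pin (driven by the attached
 * PHY); in PHY role the switch must generate it itself, so IDIV_n is used.
 */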
static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
					 int port, sja1105_mii_role_t role)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl mii_tx_clk;
	const int mac_clk_sources[] = {
		CLKSRC_MII0_TX_CLK,
		CLKSRC_MII1_TX_CLK,
		CLKSRC_MII2_TX_CLK,
		CLKSRC_MII3_TX_CLK,
		CLKSRC_MII4_TX_CLK,
	};
	const int phy_clk_sources[] = {
		CLKSRC_IDIV0,
		CLKSRC_IDIV1,
		CLKSRC_IDIV2,
		CLKSRC_IDIV3,
		CLKSRC_IDIV4,
	};
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	int clksrc;

	if (regs->mii_tx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	if (role == XMII_MAC)
		clksrc = mac_clk_sources[port];
	else
		clksrc = phy_clk_sources[port];

	/* Payload for packed_buf */
	mii_tx_clk.clksrc    = clksrc;
	mii_tx_clk.autoblock = 1;  /* Autoblock clk while changing clksrc */
	mii_tx_clk.pd        = 0;  /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl mii_rx_clk;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	const int clk_sources[] = {
		CLKSRC_MII0_RX_CLK,
		CLKSRC_MII1_RX_CLK,
		CLKSRC_MII2_RX_CLK,
		CLKSRC_MII3_RX_CLK,
		CLKSRC_MII4_RX_CLK,
	};

	if (regs->mii_rx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload for packed_buf */
	mii_rx_clk.clksrc    = clk_sources[port];
	mii_rx_clk.autoblock = 1;  /* Autoblock clk while changing clksrc */
	mii_rx_clk.pd        = 0;  /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

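/* When the switch port acts as a PHY, it must also drive the external
 * TX_CLK pin itself, so the EXT_TX_CLK_n sink is sourced from IDIV_n.
 */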
static int
sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	const int clk_sources[] = {
		CLKSRC_IDIV0,
		CLKSRC_IDIV1,
		CLKSRC_IDIV2,
		CLKSRC_IDIV3,
		CLKSRC_IDIV4,
	};

	if (regs->mii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload for packed_buf */
	mii_ext_tx_clk.clksrc    = clk_sources[port];
	mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
	mii_ext_tx_clk.pd        = 0; /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	const int clk_sources[] = {
		CLKSRC_IDIV0,
		CLKSRC_IDIV1,
		CLKSRC_IDIV2,
		CLKSRC_IDIV3,
		CLKSRC_IDIV4,
	};

	if (regs->mii_ext_rx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload for packed_buf */
	mii_ext_rx_clk.clksrc    = clk_sources[port];
	mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
	mii_ext_rx_clk.pd        = 0; /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
				      sja1105_mii_role_t role)
{
	struct device *dev = priv->ds->dev;
	int rc;

	dev_dbg(dev, "Configuring MII-%s clocking\n",
		(role == XMII_MAC) ? "MAC" : "PHY");
	/* If role is MAC, disable IDIV
	 * If role is PHY, enable IDIV and configure for 1/1 divider
	 */
	rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1);
	if (rc < 0)
		return rc;

	/* Configure CLKSRC of MII_TX_CLK_n
	 *   * If role is MAC, select TX_CLK_n
	 *   * If role is PHY, select IDIV_n
	 */
	rc = sja1105_cgu_mii_tx_clk_config(priv, port, role);
	if (rc < 0)
		return rc;

	/* Configure CLKSRC of MII_RX_CLK_n
	 * Select RX_CLK_n
	 */
	rc = sja1105_cgu_mii_rx_clk_config(priv, port);
	if (rc < 0)
		return rc;

	if (role == XMII_PHY) {
		/* Per MII spec, the PHY (which is us) drives the TX_CLK pin */

		/* Configure CLKSRC of EXT_TX_CLK_n
		 * Select IDIV_n
		 */
		rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port);
		if (rc < 0)
			return rc;

		/* Configure CLKSRC of EXT_RX_CLK_n
		 * Select IDIV_n
		 */
		rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port);
		if (rc < 0)
			return rc;
	}
	return 0;
}

static void
sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
				enum packing_op op)
{
	const int size = 4;

	sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op);
	sja1105_packing(buf, &cmd->msel,      23, 16, size, op);
	sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
	sja1105_packing(buf, &cmd->psel,       9,  8, size, op);
	sja1105_packing(buf, &cmd->direct,     7,  7, size, op);
	sja1105_packing(buf, &cmd->fbsel,      6,  6, size, op);
	sja1105_packing(buf, &cmd->bypass,     1,  1, size, op);
	sja1105_packing(buf, &cmd->pd,         0,  0, size, op);
}

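/* For RGMII, the TX clock is 125 MHz at 1000 Mbps and is sourced from PLL0;
 * at lower speeds it is taken from the port's IDIV (25 MHz or 2.5 MHz).
 */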
static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
					   int port, u64 speed)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl txc;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	int clksrc;

	if (regs->rgmii_tx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
		clksrc = CLKSRC_PLL0;
	} else {
		int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
				     CLKSRC_IDIV3, CLKSRC_IDIV4};
		clksrc = clk_sources[port];
	}

	/* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */
	txc.clksrc = clksrc;
	/* Autoblock clk while changing clksrc */
	txc.autoblock = 1;
	/* Power Down off => enabled */
	txc.pd = 0;
	sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

/* AGU */
static void
sja1105_cfg_pad_mii_packing(void *buf, struct sja1105_cfg_pad_mii *cmd,
			    enum packing_op op)
{
	const int size = 4;

	sja1105_packing(buf, &cmd->d32_os,   28, 27, size, op);
	sja1105_packing(buf, &cmd->d32_ih,   26, 26, size, op);
	sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
	sja1105_packing(buf, &cmd->d10_os,   20, 19, size, op);
	sja1105_packing(buf, &cmd->d10_ih,   18, 18, size, op);
	sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
	sja1105_packing(buf, &cmd->ctrl_os,  12, 11, size, op);
	sja1105_packing(buf, &cmd->ctrl_ih,  10, 10, size, op);
	sja1105_packing(buf, &cmd->ctrl_ipud, 9,  8, size, op);
	sja1105_packing(buf, &cmd->clk_os,    4,  3, size, op);
	sja1105_packing(buf, &cmd->clk_ih,    2,  2, size, op);
	sja1105_packing(buf, &cmd->clk_ipud,  1,  0, size, op);
}

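/* Configure the RGMII TX pads: select the high noise/high speed output stage
 * for the TXD, TX_CTL/TX_ER and TX_CLK lines, and keep the input stages at
 * their plain-input defaults.
 */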
static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
					   int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cfg_pad_mii pad_mii_tx = {0};
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

	if (regs->pad_mii_tx[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload */
	pad_mii_tx.d32_os    = 3; /* TXD[3:2] output stage: */
				  /*          high noise/high speed */
	pad_mii_tx.d10_os    = 3; /* TXD[1:0] output stage: */
				  /*          high noise/high speed */
	pad_mii_tx.d32_ipud  = 2; /* TXD[3:2] input stage: */
				  /*          plain input (default) */
	pad_mii_tx.d10_ipud  = 2; /* TXD[1:0] input stage: */
				  /*          plain input (default) */
	pad_mii_tx.ctrl_os   = 3; /* TX_CTL / TX_ER output stage */
	pad_mii_tx.ctrl_ipud = 2; /* TX_CTL / TX_ER input stage (default) */
	pad_mii_tx.clk_os    = 3; /* TX_CLK output stage */
	pad_mii_tx.clk_ih    = 0; /* TX_CLK input hysteresis (default) */
	pad_mii_tx.clk_ipud  = 2; /* TX_CLK input stage (default) */
	sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_tx, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cfg_pad_mii pad_mii_rx = {0};
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

	if (regs->pad_mii_rx[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload */
	pad_mii_rx.d32_ih    = 0; /* RXD[3:2] input stage hysteresis: */
				  /*          non-Schmitt (default) */
	pad_mii_rx.d32_ipud  = 2; /* RXD[3:2] input weak pull-up/down */
				  /*          plain input (default) */
	pad_mii_rx.d10_ih    = 0; /* RXD[1:0] input stage hysteresis: */
				  /*          non-Schmitt (default) */
	pad_mii_rx.d10_ipud  = 2; /* RXD[1:0] input weak pull-up/down */
				  /*          plain input (default) */
	pad_mii_rx.ctrl_ih   = 0; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
				  /* input stage hysteresis: */
				  /* non-Schmitt (default) */
	pad_mii_rx.ctrl_ipud = 3; /* RX_DV/CRS_DV/RX_CTL and RX_ER */
				  /* input stage weak pull-up/down: */
				  /* pull-down */
	pad_mii_rx.clk_os    = 2; /* RX_CLK/RXC output stage: */
				  /* medium noise/fast speed (default) */
	pad_mii_rx.clk_ih    = 0; /* RX_CLK/RXC input hysteresis: */
				  /* non-Schmitt (default) */
	pad_mii_rx.clk_ipud  = 2; /* RX_CLK/RXC input pull-up/down: */
				  /* plain input (default) */
	sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_rx, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_rx[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

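/* CFG_PAD_MIIx_ID register: controls the tunable delay lines used for the
 * RGMII internal delays (per-direction delay value, bypass and power-down).
 * The layout below is for SJA1105; the SJA1110 variant follows.
 */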
static void
sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
			       enum packing_op op)
{
	const int size = SJA1105_SIZE_CGU_CMD;

	sja1105_packing(buf, &cmd->rxc_stable_ovr, 15, 15, size, op);
	sja1105_packing(buf, &cmd->rxc_delay,      14, 10, size, op);
	sja1105_packing(buf, &cmd->rxc_bypass,      9,  9, size, op);
	sja1105_packing(buf, &cmd->rxc_pd,          8,  8, size, op);
	sja1105_packing(buf, &cmd->txc_stable_ovr,  7,  7, size, op);
	sja1105_packing(buf, &cmd->txc_delay,       6,  2, size, op);
	sja1105_packing(buf, &cmd->txc_bypass,      1,  1, size, op);
	sja1105_packing(buf, &cmd->txc_pd,          0,  0, size, op);
}

static void
sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
			       enum packing_op op)
{
	const int size = SJA1105_SIZE_CGU_CMD;
	u64 range = 4;

	/* Fields RXC_RANGE and TXC_RANGE select the input frequency range:
	 * 0 = 2.5MHz
	 * 1 = 25MHz
	 * 2 = 50MHz
	 * 3 = 125MHz
	 * 4 = Automatically determined by port speed.
	 * There's no point in defining a structure different than the one for
	 * SJA1105, so just hardcode the frequency range to automatic, just as
	 * before.
	 */
	sja1105_packing(buf, &cmd->rxc_stable_ovr, 26, 26, size, op);
	sja1105_packing(buf, &cmd->rxc_delay,      25, 21, size, op);
	sja1105_packing(buf, &range,               20, 18, size, op);
	sja1105_packing(buf, &cmd->rxc_bypass,     17, 17, size, op);
	sja1105_packing(buf, &cmd->rxc_pd,         16, 16, size, op);
	sja1105_packing(buf, &cmd->txc_stable_ovr, 10, 10, size, op);
	sja1105_packing(buf, &cmd->txc_delay,       9,  5, size, op);
	sja1105_packing(buf, &range,                4,  2, size, op);
	sja1105_packing(buf, &cmd->txc_bypass,      1,  1, size, op);
	sja1105_packing(buf, &cmd->txc_pd,          0,  0, size, op);
}

/* The RGMII delay setup procedure is 2-step and gets called upon each
 * .phylink_mac_config. Both of these facts are deliberate.
 * The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
 * with recovering from a frequency change of the link partner's RGMII clock.
 * The easiest way to recover from this is to temporarily power down the TDL,
 * as it will re-lock at the new frequency afterwards.
 */
int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
{
	const struct sja1105_private *priv = ctx;
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
	int rx_delay = priv->rgmii_rx_delay_ps[port];
	int tx_delay = priv->rgmii_tx_delay_ps[port];
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	int rc;

	if (rx_delay)
		pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
	if (tx_delay)
		pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);

	/* Stage 1: Turn the RGMII delay lines off. */
	pad_mii_id.rxc_bypass = 1;
	pad_mii_id.rxc_pd = 1;
	pad_mii_id.txc_bypass = 1;
	pad_mii_id.txc_pd = 1;
	sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

	rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
			      packed_buf, SJA1105_SIZE_CGU_CMD);
	if (rc < 0)
		return rc;

	/* Stage 2: Turn the RGMII delay lines on. */
	if (rx_delay) {
		pad_mii_id.rxc_bypass = 0;
		pad_mii_id.rxc_pd = 0;
	}
	if (tx_delay) {
		pad_mii_id.txc_bypass = 0;
		pad_mii_id.txc_pd = 0;
	}
	sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

int sja1110_setup_rgmii_delay(const void *ctx, int port)
{
	const struct sja1105_private *priv = ctx;
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
	int rx_delay = priv->rgmii_rx_delay_ps[port];
	int tx_delay = priv->rgmii_tx_delay_ps[port];
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

	pad_mii_id.rxc_pd = 1;
	pad_mii_id.txc_pd = 1;

	if (rx_delay) {
		pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
		/* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
		pad_mii_id.rxc_bypass = 1;
		pad_mii_id.rxc_pd = 0;
	}

	if (tx_delay) {
		pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
		pad_mii_id.txc_bypass = 1;
		pad_mii_id.txc_pd = 0;
	}

	sja1110_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

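/* Full RGMII clocking setup for one port: program IDIV according to the
 * port speed, select the RGMII TX clock source, configure the TX pad
 * drive strength, and finally apply the internal delays (where supported).
 */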
static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
					sja1105_mii_role_t role)
{
	struct device *dev = priv->ds->dev;
	struct sja1105_mac_config_entry *mac;
	u64 speed;
	int rc;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
	speed = mac[port].speed;

	dev_dbg(dev, "Configuring port %d RGMII at speed %lldMbps\n",
		port, speed);

	if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
		/* 1000Mbps, IDIV disabled (125 MHz) */
		rc = sja1105_cgu_idiv_config(priv, port, false, 1);
	} else if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) {
		/* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
		rc = sja1105_cgu_idiv_config(priv, port, true, 1);
	} else if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) {
		/* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
		rc = sja1105_cgu_idiv_config(priv, port, true, 10);
	} else if (speed == priv->info->port_speed[SJA1105_SPEED_AUTO]) {
		/* Skip CGU configuration if there is no speed available
		 * (e.g. link is not established yet)
		 */
		dev_dbg(dev, "Speed not available, skipping CGU config\n");
		return 0;
	} else {
		rc = -EINVAL;
	}

	if (rc < 0) {
		dev_err(dev, "Failed to configure idiv\n");
		return rc;
	}
	rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed);
	if (rc < 0) {
		dev_err(dev, "Failed to configure RGMII Tx clock\n");
		return rc;
	}
	rc = sja1105_rgmii_cfg_pad_tx_config(priv, port);
	if (rc < 0) {
		dev_err(dev, "Failed to configure Tx pad registers\n");
		return rc;
	}

	if (!priv->info->setup_rgmii_delay)
		return 0;

	return priv->info->setup_rgmii_delay(priv, port);
}

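/* RMII REF_CLK: source the internal reference clock sink from the port's
 * external TX_CLK pin, which carries the 50 MHz RMII reference.
 */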
static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
					   int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl ref_clk;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	const int clk_sources[] = {
		CLKSRC_MII0_TX_CLK,
		CLKSRC_MII1_TX_CLK,
		CLKSRC_MII2_TX_CLK,
		CLKSRC_MII3_TX_CLK,
		CLKSRC_MII4_TX_CLK,
	};

	if (regs->rmii_ref_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload for packed_buf */
	ref_clk.clksrc    = clk_sources[port];
	ref_clk.autoblock = 1;      /* Autoblock clk while changing clksrc */
	ref_clk.pd        = 0;      /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
{
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cgu_mii_ctrl ext_tx_clk;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

	if (regs->rmii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
		return 0;

	/* Payload for packed_buf */
	ext_tx_clk.clksrc    = CLKSRC_PLL1;
	ext_tx_clk.autoblock = 1;   /* Autoblock clk while changing clksrc */
	ext_tx_clk.pd        = 0;   /* Power Down off => enabled */
	sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}

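/* In RMII MAC role, PLL1 generates the 50 MHz reference clock, which is then
 * driven out on the port's EXT_TX_CLK pin.
 */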
static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	struct sja1105_cgu_pll_ctrl pll = {0};
	struct device *dev = priv->ds->dev;
	int rc;

	if (regs->rmii_pll1 == SJA1105_RSV_ADDR)
		return 0;

	/* PLL1 must be enabled and output 50 MHz.
	 * This is done by first writing 0x0A010941 to
	 * the PLL_1_C register and then deasserting
	 * power down (PD) with 0x0A010940.
	 */

	/* Step 1: PLL1 setup for 50 MHz */
	pll.pllclksrc = 0xA;
	pll.msel      = 0x1;
	pll.autoblock = 0x1;
	pll.psel      = 0x1;
	pll.direct    = 0x0;
	pll.fbsel     = 0x1;
	pll.bypass    = 0x0;
	pll.pd        = 0x1;
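	/* With the bit layout in sja1105_cgu_pll_control_packing(), these
	 * field values pack to exactly 0x0A010941, and to 0x0A010940 once
	 * PD is cleared in step 2 below.
	 */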

	sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
	rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
			      SJA1105_SIZE_CGU_CMD);
	if (rc < 0) {
		dev_err(dev, "failed to configure PLL1 for 50MHz\n");
		return rc;
	}

	/* Step 2: Enable PLL1 */
	pll.pd = 0x0;

	sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
	rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
			      SJA1105_SIZE_CGU_CMD);
	if (rc < 0) {
		dev_err(dev, "failed to enable PLL1\n");
		return rc;
	}
	return rc;
}

static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port,
				       sja1105_mii_role_t role)
{
	struct device *dev = priv->ds->dev;
	int rc;

	dev_dbg(dev, "Configuring RMII-%s clocking\n",
		(role == XMII_MAC) ? "MAC" : "PHY");
	/* AH1601.pdf chapter 2.5.1. Sources */
	if (role == XMII_MAC) {
		/* Configure and enable PLL1 for 50 MHz output */
		rc = sja1105_cgu_rmii_pll_config(priv);
		if (rc < 0)
			return rc;
	}
	/* Disable IDIV for this port */
	rc = sja1105_cgu_idiv_config(priv, port, false, 1);
	if (rc < 0)
		return rc;
	/* Source to sink mappings */
	rc = sja1105_cgu_rmii_ref_clk_config(priv, port);
	if (rc < 0)
		return rc;
	if (role == XMII_MAC) {
		rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port);
		if (rc < 0)
			return rc;
	}
	return 0;
}

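/* Set up the CGU and AGU for one port, based on the xMII mode and MAC/PHY
 * role from the static config. SGMII needs no CGU configuration; all modes
 * finish by pulling down the RX control pad inputs.
 */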
int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
{
	struct sja1105_xmii_params_entry *mii;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_mii_role_t role;
	int rc;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* RGMII etc */
	phy_mode = mii->xmii_mode[port];
	/* MAC or PHY, for applicable types (not RGMII) */
	role = mii->phy_mac[port];

	switch (phy_mode) {
	case XMII_MODE_MII:
		rc = sja1105_mii_clocking_setup(priv, port, role);
		break;
	case XMII_MODE_RMII:
		rc = sja1105_rmii_clocking_setup(priv, port, role);
		break;
	case XMII_MODE_RGMII:
		rc = sja1105_rgmii_clocking_setup(priv, port, role);
		break;
	case XMII_MODE_SGMII:
		/* Nothing to do in the CGU for SGMII */
		rc = 0;
		break;
	default:
		dev_err(dev, "Invalid interface mode specified: %d\n",
			phy_mode);
		return -EINVAL;
	}
	if (rc) {
		dev_err(dev, "Clocking setup for port %d failed: %d\n",
			port, rc);
		return rc;
	}

	/* Internally pull down the RX_DV/CRS_DV/RX_CTL and RX_ER inputs */
	return sja1105_cfg_pad_rx_config(priv, port);
}

int sja1105_clocking_setup(struct sja1105_private *priv)
{
	struct dsa_switch *ds = priv->ds;
	int port, rc;

	for (port = 0; port < ds->num_ports; port++) {
		rc = sja1105_clocking_setup_port(priv, port);
		if (rc < 0)
			return rc;
	}
	return 0;
}

static void
sja1110_cgu_outclk_packing(void *buf, struct sja1110_cgu_outclk *outclk,
			   enum packing_op op)
{
	const int size = 4;

	sja1105_packing(buf, &outclk->clksrc,    27, 24, size, op);
	sja1105_packing(buf, &outclk->autoblock, 11, 11, size, op);
	sja1105_packing(buf, &outclk->pd,         0,  0, size, op);
}

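/* Gate off the SJA1110's embedded microcontroller: first power down its
 * watchdog timer clock, then the MCSS clock itself.
 */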
int sja1110_disable_microcontroller(struct sja1105_private *priv)
{
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	struct sja1110_cgu_outclk outclk_6_c = {
		.clksrc = 0x3,
		.pd = true,
	};
	struct sja1110_cgu_outclk outclk_7_c = {
		.clksrc = 0x5,
		.pd = true,
	};
	int rc;

	/* Power down the BASE_TIMER_CLK to disable the watchdog timer */
	sja1110_cgu_outclk_packing(packed_buf, &outclk_7_c, PACK);

	rc = sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_TIMER_CLK,
			      packed_buf, SJA1105_SIZE_CGU_CMD);
	if (rc)
		return rc;

	/* Power down the BASE_MCSS_CLOCK to gate the microcontroller off */
	sja1110_cgu_outclk_packing(packed_buf, &outclk_6_c, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_MCSS_CLK,
				packed_buf, SJA1105_SIZE_CGU_CMD);
}