1 // SPDX-License-Identifier: BSD-3-Clause 2 /* Copyright (c) 2016-2018, NXP Semiconductors 3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 4 */ 5 #include <linux/packing.h> 6 #include "sja1105.h" 7 8 #define SJA1105_SIZE_CGU_CMD 4 9 #define SJA1110_BASE_TIMER_CLK SJA1110_CGU_ADDR(0x74) 10 11 /* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */ 12 struct sja1105_cfg_pad_mii { 13 u64 d32_os; 14 u64 d32_ih; 15 u64 d32_ipud; 16 u64 d10_ih; 17 u64 d10_os; 18 u64 d10_ipud; 19 u64 ctrl_os; 20 u64 ctrl_ih; 21 u64 ctrl_ipud; 22 u64 clk_os; 23 u64 clk_ih; 24 u64 clk_ipud; 25 }; 26 27 struct sja1105_cfg_pad_mii_id { 28 u64 rxc_stable_ovr; 29 u64 rxc_delay; 30 u64 rxc_bypass; 31 u64 rxc_pd; 32 u64 txc_stable_ovr; 33 u64 txc_delay; 34 u64 txc_bypass; 35 u64 txc_pd; 36 }; 37 38 /* UM10944 Table 82. 39 * IDIV_0_C to IDIV_4_C control registers 40 * (addr. 10000Bh to 10000Fh) 41 */ 42 struct sja1105_cgu_idiv { 43 u64 clksrc; 44 u64 autoblock; 45 u64 idiv; 46 u64 pd; 47 }; 48 49 /* PLL_1_C control register 50 * 51 * SJA1105 E/T: UM10944 Table 81 (address 10000Ah) 52 * SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah) 53 */ 54 struct sja1105_cgu_pll_ctrl { 55 u64 pllclksrc; 56 u64 msel; 57 u64 autoblock; 58 u64 psel; 59 u64 direct; 60 u64 fbsel; 61 u64 bypass; 62 u64 pd; 63 }; 64 65 struct sja1110_cgu_outclk { 66 u64 clksrc; 67 u64 autoblock; 68 u64 pd; 69 }; 70 71 enum { 72 CLKSRC_MII0_TX_CLK = 0x00, 73 CLKSRC_MII0_RX_CLK = 0x01, 74 CLKSRC_MII1_TX_CLK = 0x02, 75 CLKSRC_MII1_RX_CLK = 0x03, 76 CLKSRC_MII2_TX_CLK = 0x04, 77 CLKSRC_MII2_RX_CLK = 0x05, 78 CLKSRC_MII3_TX_CLK = 0x06, 79 CLKSRC_MII3_RX_CLK = 0x07, 80 CLKSRC_MII4_TX_CLK = 0x08, 81 CLKSRC_MII4_RX_CLK = 0x09, 82 CLKSRC_PLL0 = 0x0B, 83 CLKSRC_PLL1 = 0x0E, 84 CLKSRC_IDIV0 = 0x11, 85 CLKSRC_IDIV1 = 0x12, 86 CLKSRC_IDIV2 = 0x13, 87 CLKSRC_IDIV3 = 0x14, 88 CLKSRC_IDIV4 = 0x15, 89 }; 90 91 /* UM10944 Table 83. 92 * MIIx clock control registers 1 to 30 93 * (addresses 100013h to 100035h) 94 */ 95 struct sja1105_cgu_mii_ctrl { 96 u64 clksrc; 97 u64 autoblock; 98 u64 pd; 99 }; 100 101 static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv, 102 enum packing_op op) 103 { 104 const int size = 4; 105 106 sja1105_packing(buf, &idiv->clksrc, 28, 24, size, op); 107 sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op); 108 sja1105_packing(buf, &idiv->idiv, 5, 2, size, op); 109 sja1105_packing(buf, &idiv->pd, 0, 0, size, op); 110 } 111 112 static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port, 113 bool enabled, int factor) 114 { 115 const struct sja1105_regs *regs = priv->info->regs; 116 struct device *dev = priv->ds->dev; 117 struct sja1105_cgu_idiv idiv; 118 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 119 120 if (regs->cgu_idiv[port] == SJA1105_RSV_ADDR) 121 return 0; 122 123 if (enabled && factor != 1 && factor != 10) { 124 dev_err(dev, "idiv factor must be 1 or 10\n"); 125 return -ERANGE; 126 } 127 128 /* Payload for packed_buf */ 129 idiv.clksrc = 0x0A; /* 25MHz */ 130 idiv.autoblock = 1; /* Block clk automatically */ 131 idiv.idiv = factor - 1; /* Divide by 1 or 10 */ 132 idiv.pd = enabled ? 0 : 1; /* Power down? */ 133 sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK); 134 135 return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port], 136 packed_buf, SJA1105_SIZE_CGU_CMD); 137 } 138 139 static void 140 sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd, 141 enum packing_op op) 142 { 143 const int size = 4; 144 145 sja1105_packing(buf, &cmd->clksrc, 28, 24, size, op); 146 sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op); 147 sja1105_packing(buf, &cmd->pd, 0, 0, size, op); 148 } 149 150 static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv, 151 int port, sja1105_mii_role_t role) 152 { 153 const struct sja1105_regs *regs = priv->info->regs; 154 struct sja1105_cgu_mii_ctrl mii_tx_clk; 155 const int mac_clk_sources[] = { 156 CLKSRC_MII0_TX_CLK, 157 CLKSRC_MII1_TX_CLK, 158 CLKSRC_MII2_TX_CLK, 159 CLKSRC_MII3_TX_CLK, 160 CLKSRC_MII4_TX_CLK, 161 }; 162 const int phy_clk_sources[] = { 163 CLKSRC_IDIV0, 164 CLKSRC_IDIV1, 165 CLKSRC_IDIV2, 166 CLKSRC_IDIV3, 167 CLKSRC_IDIV4, 168 }; 169 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 170 int clksrc; 171 172 if (regs->mii_tx_clk[port] == SJA1105_RSV_ADDR) 173 return 0; 174 175 if (role == XMII_MAC) 176 clksrc = mac_clk_sources[port]; 177 else 178 clksrc = phy_clk_sources[port]; 179 180 /* Payload for packed_buf */ 181 mii_tx_clk.clksrc = clksrc; 182 mii_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ 183 mii_tx_clk.pd = 0; /* Power Down off => enabled */ 184 sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK); 185 186 return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port], 187 packed_buf, SJA1105_SIZE_CGU_CMD); 188 } 189 190 static int 191 sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port) 192 { 193 const struct sja1105_regs *regs = priv->info->regs; 194 struct sja1105_cgu_mii_ctrl mii_rx_clk; 195 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 196 const int clk_sources[] = { 197 CLKSRC_MII0_RX_CLK, 198 CLKSRC_MII1_RX_CLK, 199 CLKSRC_MII2_RX_CLK, 200 CLKSRC_MII3_RX_CLK, 201 CLKSRC_MII4_RX_CLK, 202 }; 203 204 if (regs->mii_rx_clk[port] == SJA1105_RSV_ADDR) 205 return 0; 206 207 /* Payload for packed_buf */ 208 mii_rx_clk.clksrc = clk_sources[port]; 209 mii_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ 210 mii_rx_clk.pd = 0; /* Power Down off => enabled */ 211 sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK); 212 213 return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port], 214 packed_buf, SJA1105_SIZE_CGU_CMD); 215 } 216 217 static int 218 sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port) 219 { 220 const struct sja1105_regs *regs = priv->info->regs; 221 struct sja1105_cgu_mii_ctrl mii_ext_tx_clk; 222 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 223 const int clk_sources[] = { 224 CLKSRC_IDIV0, 225 CLKSRC_IDIV1, 226 CLKSRC_IDIV2, 227 CLKSRC_IDIV3, 228 CLKSRC_IDIV4, 229 }; 230 231 if (regs->mii_ext_tx_clk[port] == SJA1105_RSV_ADDR) 232 return 0; 233 234 /* Payload for packed_buf */ 235 mii_ext_tx_clk.clksrc = clk_sources[port]; 236 mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ 237 mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */ 238 sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK); 239 240 return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port], 241 packed_buf, SJA1105_SIZE_CGU_CMD); 242 } 243 244 static int 245 sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port) 246 { 247 const struct sja1105_regs *regs = priv->info->regs; 248 struct sja1105_cgu_mii_ctrl mii_ext_rx_clk; 249 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 250 const int clk_sources[] = { 251 CLKSRC_IDIV0, 252 CLKSRC_IDIV1, 253 CLKSRC_IDIV2, 254 CLKSRC_IDIV3, 255 CLKSRC_IDIV4, 256 }; 257 258 if (regs->mii_ext_rx_clk[port] == SJA1105_RSV_ADDR) 259 return 0; 260 261 /* Payload for packed_buf */ 262 mii_ext_rx_clk.clksrc = clk_sources[port]; 263 mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ 264 mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */ 265 sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK); 266 267 return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port], 268 packed_buf, SJA1105_SIZE_CGU_CMD); 269 } 270 271 static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port, 272 sja1105_mii_role_t role) 273 { 274 struct device *dev = priv->ds->dev; 275 int rc; 276 277 dev_dbg(dev, "Configuring MII-%s clocking\n", 278 (role == XMII_MAC) ? "MAC" : "PHY"); 279 /* If role is MAC, disable IDIV 280 * If role is PHY, enable IDIV and configure for 1/1 divider 281 */ 282 rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1); 283 if (rc < 0) 284 return rc; 285 286 /* Configure CLKSRC of MII_TX_CLK_n 287 * * If role is MAC, select TX_CLK_n 288 * * If role is PHY, select IDIV_n 289 */ 290 rc = sja1105_cgu_mii_tx_clk_config(priv, port, role); 291 if (rc < 0) 292 return rc; 293 294 /* Configure CLKSRC of MII_RX_CLK_n 295 * Select RX_CLK_n 296 */ 297 rc = sja1105_cgu_mii_rx_clk_config(priv, port); 298 if (rc < 0) 299 return rc; 300 301 if (role == XMII_PHY) { 302 /* Per MII spec, the PHY (which is us) drives the TX_CLK pin */ 303 304 /* Configure CLKSRC of EXT_TX_CLK_n 305 * Select IDIV_n 306 */ 307 rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port); 308 if (rc < 0) 309 return rc; 310 311 /* Configure CLKSRC of EXT_RX_CLK_n 312 * Select IDIV_n 313 */ 314 rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port); 315 if (rc < 0) 316 return rc; 317 } 318 return 0; 319 } 320 321 static void 322 sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd, 323 enum packing_op op) 324 { 325 const int size = 4; 326 327 sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op); 328 sja1105_packing(buf, &cmd->msel, 23, 16, size, op); 329 sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op); 330 sja1105_packing(buf, &cmd->psel, 9, 8, size, op); 331 sja1105_packing(buf, &cmd->direct, 7, 7, size, op); 332 sja1105_packing(buf, &cmd->fbsel, 6, 6, size, op); 333 sja1105_packing(buf, &cmd->bypass, 1, 1, size, op); 334 sja1105_packing(buf, &cmd->pd, 0, 0, size, op); 335 } 336 337 static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv, 338 int port, u64 speed) 339 { 340 const struct sja1105_regs *regs = priv->info->regs; 341 struct sja1105_cgu_mii_ctrl txc; 342 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 343 int clksrc; 344 345 if (regs->rgmii_tx_clk[port] == SJA1105_RSV_ADDR) 346 return 0; 347 348 if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) { 349 clksrc = CLKSRC_PLL0; 350 } else { 351 int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2, 352 CLKSRC_IDIV3, CLKSRC_IDIV4}; 353 clksrc = clk_sources[port]; 354 } 355 356 /* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */ 357 txc.clksrc = clksrc; 358 /* Autoblock clk while changing clksrc */ 359 txc.autoblock = 1; 360 /* Power Down off => enabled */ 361 txc.pd = 0; 362 sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK); 363 364 return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port], 365 packed_buf, SJA1105_SIZE_CGU_CMD); 366 } 367 368 /* AGU */ 369 static void 370 sja1105_cfg_pad_mii_packing(void *buf, struct sja1105_cfg_pad_mii *cmd, 371 enum packing_op op) 372 { 373 const int size = 4; 374 375 sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op); 376 sja1105_packing(buf, &cmd->d32_ih, 26, 26, size, op); 377 sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op); 378 sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op); 379 sja1105_packing(buf, &cmd->d10_ih, 18, 18, size, op); 380 sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op); 381 sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op); 382 sja1105_packing(buf, &cmd->ctrl_ih, 10, 10, size, op); 383 sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op); 384 sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op); 385 sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op); 386 sja1105_packing(buf, &cmd->clk_ipud, 1, 0, size, op); 387 } 388 389 static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv, 390 int port) 391 { 392 const struct sja1105_regs *regs = priv->info->regs; 393 struct sja1105_cfg_pad_mii pad_mii_tx = {0}; 394 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 395 396 if (regs->pad_mii_tx[port] == SJA1105_RSV_ADDR) 397 return 0; 398 399 /* Payload */ 400 pad_mii_tx.d32_os = 3; /* TXD[3:2] output stage: */ 401 /* high noise/high speed */ 402 pad_mii_tx.d10_os = 3; /* TXD[1:0] output stage: */ 403 /* high noise/high speed */ 404 pad_mii_tx.d32_ipud = 2; /* TXD[3:2] input stage: */ 405 /* plain input (default) */ 406 pad_mii_tx.d10_ipud = 2; /* TXD[1:0] input stage: */ 407 /* plain input (default) */ 408 pad_mii_tx.ctrl_os = 3; /* TX_CTL / TX_ER output stage */ 409 pad_mii_tx.ctrl_ipud = 2; /* TX_CTL / TX_ER input stage (default) */ 410 pad_mii_tx.clk_os = 3; /* TX_CLK output stage */ 411 pad_mii_tx.clk_ih = 0; /* TX_CLK input hysteresis (default) */ 412 pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */ 413 sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_tx, PACK); 414 415 return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port], 416 packed_buf, SJA1105_SIZE_CGU_CMD); 417 } 418 419 static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port) 420 { 421 const struct sja1105_regs *regs = priv->info->regs; 422 struct sja1105_cfg_pad_mii pad_mii_rx = {0}; 423 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 424 425 if (regs->pad_mii_rx[port] == SJA1105_RSV_ADDR) 426 return 0; 427 428 /* Payload */ 429 pad_mii_rx.d32_ih = 0; /* RXD[3:2] input stage hysteresis: */ 430 /* non-Schmitt (default) */ 431 pad_mii_rx.d32_ipud = 2; /* RXD[3:2] input weak pull-up/down */ 432 /* plain input (default) */ 433 pad_mii_rx.d10_ih = 0; /* RXD[1:0] input stage hysteresis: */ 434 /* non-Schmitt (default) */ 435 pad_mii_rx.d10_ipud = 2; /* RXD[1:0] input weak pull-up/down */ 436 /* plain input (default) */ 437 pad_mii_rx.ctrl_ih = 0; /* RX_DV/CRS_DV/RX_CTL and RX_ER */ 438 /* input stage hysteresis: */ 439 /* non-Schmitt (default) */ 440 pad_mii_rx.ctrl_ipud = 3; /* RX_DV/CRS_DV/RX_CTL and RX_ER */ 441 /* input stage weak pull-up/down: */ 442 /* pull-down */ 443 pad_mii_rx.clk_os = 2; /* RX_CLK/RXC output stage: */ 444 /* medium noise/fast speed (default) */ 445 pad_mii_rx.clk_ih = 0; /* RX_CLK/RXC input hysteresis: */ 446 /* non-Schmitt (default) */ 447 pad_mii_rx.clk_ipud = 2; /* RX_CLK/RXC input pull-up/down: */ 448 /* plain input (default) */ 449 sja1105_cfg_pad_mii_packing(packed_buf, &pad_mii_rx, PACK); 450 451 return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_rx[port], 452 packed_buf, SJA1105_SIZE_CGU_CMD); 453 } 454 455 static void 456 sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd, 457 enum packing_op op) 458 { 459 const int size = SJA1105_SIZE_CGU_CMD; 460 461 sja1105_packing(buf, &cmd->rxc_stable_ovr, 15, 15, size, op); 462 sja1105_packing(buf, &cmd->rxc_delay, 14, 10, size, op); 463 sja1105_packing(buf, &cmd->rxc_bypass, 9, 9, size, op); 464 sja1105_packing(buf, &cmd->rxc_pd, 8, 8, size, op); 465 sja1105_packing(buf, &cmd->txc_stable_ovr, 7, 7, size, op); 466 sja1105_packing(buf, &cmd->txc_delay, 6, 2, size, op); 467 sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op); 468 sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op); 469 } 470 471 static void 472 sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd, 473 enum packing_op op) 474 { 475 const int size = SJA1105_SIZE_CGU_CMD; 476 u64 range = 4; 477 478 /* Fields RXC_RANGE and TXC_RANGE select the input frequency range: 479 * 0 = 2.5MHz 480 * 1 = 25MHz 481 * 2 = 50MHz 482 * 3 = 125MHz 483 * 4 = Automatically determined by port speed. 484 * There's no point in defining a structure different than the one for 485 * SJA1105, so just hardcode the frequency range to automatic, just as 486 * before. 487 */ 488 sja1105_packing(buf, &cmd->rxc_stable_ovr, 26, 26, size, op); 489 sja1105_packing(buf, &cmd->rxc_delay, 25, 21, size, op); 490 sja1105_packing(buf, &range, 20, 18, size, op); 491 sja1105_packing(buf, &cmd->rxc_bypass, 17, 17, size, op); 492 sja1105_packing(buf, &cmd->rxc_pd, 16, 16, size, op); 493 sja1105_packing(buf, &cmd->txc_stable_ovr, 10, 10, size, op); 494 sja1105_packing(buf, &cmd->txc_delay, 9, 5, size, op); 495 sja1105_packing(buf, &range, 4, 2, size, op); 496 sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op); 497 sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op); 498 } 499 500 /* Valid range in degrees is an integer between 73.8 and 101.7 */ 501 static u64 sja1105_rgmii_delay(u64 phase) 502 { 503 /* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9. 504 * To avoid floating point operations we'll multiply by 10 505 * and get 1 decimal point precision. 506 */ 507 phase *= 10; 508 return (phase - 738) / 9; 509 } 510 511 /* The RGMII delay setup procedure is 2-step and gets called upon each 512 * .phylink_mac_config. Both are strategic. 513 * The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues 514 * with recovering from a frequency change of the link partner's RGMII clock. 515 * The easiest way to recover from this is to temporarily power down the TDL, 516 * as it will re-lock at the new frequency afterwards. 517 */ 518 int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port) 519 { 520 const struct sja1105_private *priv = ctx; 521 const struct sja1105_regs *regs = priv->info->regs; 522 struct sja1105_cfg_pad_mii_id pad_mii_id = {0}; 523 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 524 int rc; 525 526 if (priv->rgmii_rx_delay[port]) 527 pad_mii_id.rxc_delay = sja1105_rgmii_delay(90); 528 if (priv->rgmii_tx_delay[port]) 529 pad_mii_id.txc_delay = sja1105_rgmii_delay(90); 530 531 /* Stage 1: Turn the RGMII delay lines off. */ 532 pad_mii_id.rxc_bypass = 1; 533 pad_mii_id.rxc_pd = 1; 534 pad_mii_id.txc_bypass = 1; 535 pad_mii_id.txc_pd = 1; 536 sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK); 537 538 rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port], 539 packed_buf, SJA1105_SIZE_CGU_CMD); 540 if (rc < 0) 541 return rc; 542 543 /* Stage 2: Turn the RGMII delay lines on. */ 544 if (priv->rgmii_rx_delay[port]) { 545 pad_mii_id.rxc_bypass = 0; 546 pad_mii_id.rxc_pd = 0; 547 } 548 if (priv->rgmii_tx_delay[port]) { 549 pad_mii_id.txc_bypass = 0; 550 pad_mii_id.txc_pd = 0; 551 } 552 sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK); 553 554 return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port], 555 packed_buf, SJA1105_SIZE_CGU_CMD); 556 } 557 558 int sja1110_setup_rgmii_delay(const void *ctx, int port) 559 { 560 const struct sja1105_private *priv = ctx; 561 const struct sja1105_regs *regs = priv->info->regs; 562 struct sja1105_cfg_pad_mii_id pad_mii_id = {0}; 563 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 564 565 pad_mii_id.rxc_pd = 1; 566 pad_mii_id.txc_pd = 1; 567 568 if (priv->rgmii_rx_delay[port]) { 569 pad_mii_id.rxc_delay = sja1105_rgmii_delay(90); 570 /* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */ 571 pad_mii_id.rxc_bypass = 1; 572 pad_mii_id.rxc_pd = 0; 573 } 574 575 if (priv->rgmii_tx_delay[port]) { 576 pad_mii_id.txc_delay = sja1105_rgmii_delay(90); 577 pad_mii_id.txc_bypass = 1; 578 pad_mii_id.txc_pd = 0; 579 } 580 581 sja1110_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK); 582 583 return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port], 584 packed_buf, SJA1105_SIZE_CGU_CMD); 585 } 586 587 static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port, 588 sja1105_mii_role_t role) 589 { 590 struct device *dev = priv->ds->dev; 591 struct sja1105_mac_config_entry *mac; 592 u64 speed; 593 int rc; 594 595 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; 596 speed = mac[port].speed; 597 598 dev_dbg(dev, "Configuring port %d RGMII at speed %lldMbps\n", 599 port, speed); 600 601 if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) { 602 /* 1000Mbps, IDIV disabled (125 MHz) */ 603 rc = sja1105_cgu_idiv_config(priv, port, false, 1); 604 } else if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) { 605 /* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */ 606 rc = sja1105_cgu_idiv_config(priv, port, true, 1); 607 } else if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) { 608 /* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */ 609 rc = sja1105_cgu_idiv_config(priv, port, true, 10); 610 } else if (speed == priv->info->port_speed[SJA1105_SPEED_AUTO]) { 611 /* Skip CGU configuration if there is no speed available 612 * (e.g. link is not established yet) 613 */ 614 dev_dbg(dev, "Speed not available, skipping CGU config\n"); 615 return 0; 616 } else { 617 rc = -EINVAL; 618 } 619 620 if (rc < 0) { 621 dev_err(dev, "Failed to configure idiv\n"); 622 return rc; 623 } 624 rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed); 625 if (rc < 0) { 626 dev_err(dev, "Failed to configure RGMII Tx clock\n"); 627 return rc; 628 } 629 rc = sja1105_rgmii_cfg_pad_tx_config(priv, port); 630 if (rc < 0) { 631 dev_err(dev, "Failed to configure Tx pad registers\n"); 632 return rc; 633 } 634 635 if (!priv->info->setup_rgmii_delay) 636 return 0; 637 638 return priv->info->setup_rgmii_delay(priv, port); 639 } 640 641 static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv, 642 int port) 643 { 644 const struct sja1105_regs *regs = priv->info->regs; 645 struct sja1105_cgu_mii_ctrl ref_clk; 646 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 647 const int clk_sources[] = { 648 CLKSRC_MII0_TX_CLK, 649 CLKSRC_MII1_TX_CLK, 650 CLKSRC_MII2_TX_CLK, 651 CLKSRC_MII3_TX_CLK, 652 CLKSRC_MII4_TX_CLK, 653 }; 654 655 if (regs->rmii_ref_clk[port] == SJA1105_RSV_ADDR) 656 return 0; 657 658 /* Payload for packed_buf */ 659 ref_clk.clksrc = clk_sources[port]; 660 ref_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ 661 ref_clk.pd = 0; /* Power Down off => enabled */ 662 sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK); 663 664 return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port], 665 packed_buf, SJA1105_SIZE_CGU_CMD); 666 } 667 668 static int 669 sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port) 670 { 671 const struct sja1105_regs *regs = priv->info->regs; 672 struct sja1105_cgu_mii_ctrl ext_tx_clk; 673 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 674 675 if (regs->rmii_ext_tx_clk[port] == SJA1105_RSV_ADDR) 676 return 0; 677 678 /* Payload for packed_buf */ 679 ext_tx_clk.clksrc = CLKSRC_PLL1; 680 ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */ 681 ext_tx_clk.pd = 0; /* Power Down off => enabled */ 682 sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK); 683 684 return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port], 685 packed_buf, SJA1105_SIZE_CGU_CMD); 686 } 687 688 static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv) 689 { 690 const struct sja1105_regs *regs = priv->info->regs; 691 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 692 struct sja1105_cgu_pll_ctrl pll = {0}; 693 struct device *dev = priv->ds->dev; 694 int rc; 695 696 if (regs->rmii_pll1 == SJA1105_RSV_ADDR) 697 return 0; 698 699 /* PLL1 must be enabled and output 50 Mhz. 700 * This is done by writing first 0x0A010941 to 701 * the PLL_1_C register and then deasserting 702 * power down (PD) 0x0A010940. 703 */ 704 705 /* Step 1: PLL1 setup for 50Mhz */ 706 pll.pllclksrc = 0xA; 707 pll.msel = 0x1; 708 pll.autoblock = 0x1; 709 pll.psel = 0x1; 710 pll.direct = 0x0; 711 pll.fbsel = 0x1; 712 pll.bypass = 0x0; 713 pll.pd = 0x1; 714 715 sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK); 716 rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf, 717 SJA1105_SIZE_CGU_CMD); 718 if (rc < 0) { 719 dev_err(dev, "failed to configure PLL1 for 50MHz\n"); 720 return rc; 721 } 722 723 /* Step 2: Enable PLL1 */ 724 pll.pd = 0x0; 725 726 sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK); 727 rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf, 728 SJA1105_SIZE_CGU_CMD); 729 if (rc < 0) { 730 dev_err(dev, "failed to enable PLL1\n"); 731 return rc; 732 } 733 return rc; 734 } 735 736 static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port, 737 sja1105_mii_role_t role) 738 { 739 struct device *dev = priv->ds->dev; 740 int rc; 741 742 dev_dbg(dev, "Configuring RMII-%s clocking\n", 743 (role == XMII_MAC) ? "MAC" : "PHY"); 744 /* AH1601.pdf chapter 2.5.1. Sources */ 745 if (role == XMII_MAC) { 746 /* Configure and enable PLL1 for 50Mhz output */ 747 rc = sja1105_cgu_rmii_pll_config(priv); 748 if (rc < 0) 749 return rc; 750 } 751 /* Disable IDIV for this port */ 752 rc = sja1105_cgu_idiv_config(priv, port, false, 1); 753 if (rc < 0) 754 return rc; 755 /* Source to sink mappings */ 756 rc = sja1105_cgu_rmii_ref_clk_config(priv, port); 757 if (rc < 0) 758 return rc; 759 if (role == XMII_MAC) { 760 rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port); 761 if (rc < 0) 762 return rc; 763 } 764 return 0; 765 } 766 767 int sja1105_clocking_setup_port(struct sja1105_private *priv, int port) 768 { 769 struct sja1105_xmii_params_entry *mii; 770 struct device *dev = priv->ds->dev; 771 sja1105_phy_interface_t phy_mode; 772 sja1105_mii_role_t role; 773 int rc; 774 775 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries; 776 777 /* RGMII etc */ 778 phy_mode = mii->xmii_mode[port]; 779 /* MAC or PHY, for applicable types (not RGMII) */ 780 role = mii->phy_mac[port]; 781 782 switch (phy_mode) { 783 case XMII_MODE_MII: 784 rc = sja1105_mii_clocking_setup(priv, port, role); 785 break; 786 case XMII_MODE_RMII: 787 rc = sja1105_rmii_clocking_setup(priv, port, role); 788 break; 789 case XMII_MODE_RGMII: 790 rc = sja1105_rgmii_clocking_setup(priv, port, role); 791 break; 792 case XMII_MODE_SGMII: 793 /* Nothing to do in the CGU for SGMII */ 794 rc = 0; 795 break; 796 default: 797 dev_err(dev, "Invalid interface mode specified: %d\n", 798 phy_mode); 799 return -EINVAL; 800 } 801 if (rc) { 802 dev_err(dev, "Clocking setup for port %d failed: %d\n", 803 port, rc); 804 return rc; 805 } 806 807 /* Internally pull down the RX_DV/CRS_DV/RX_CTL and RX_ER inputs */ 808 return sja1105_cfg_pad_rx_config(priv, port); 809 } 810 811 int sja1105_clocking_setup(struct sja1105_private *priv) 812 { 813 struct dsa_switch *ds = priv->ds; 814 int port, rc; 815 816 for (port = 0; port < ds->num_ports; port++) { 817 rc = sja1105_clocking_setup_port(priv, port); 818 if (rc < 0) 819 return rc; 820 } 821 return 0; 822 } 823 824 static void 825 sja1110_cgu_outclk_packing(void *buf, struct sja1110_cgu_outclk *outclk, 826 enum packing_op op) 827 { 828 const int size = 4; 829 830 sja1105_packing(buf, &outclk->clksrc, 27, 24, size, op); 831 sja1105_packing(buf, &outclk->autoblock, 11, 11, size, op); 832 sja1105_packing(buf, &outclk->pd, 0, 0, size, op); 833 } 834 835 /* Power down the BASE_TIMER_CLK in order to disable the watchdog */ 836 int sja1110_clocking_setup(struct sja1105_private *priv) 837 { 838 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0}; 839 struct sja1110_cgu_outclk outclk_7_c = { 840 .clksrc = 0x5, 841 .pd = true, 842 }; 843 844 sja1110_cgu_outclk_packing(packed_buf, &outclk_7_c, PACK); 845 846 return sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_TIMER_CLK, 847 packed_buf, SJA1105_SIZE_CGU_CMD); 848 } 849