/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}

static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}
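
/* Tear down a ring configured by xgene_enet_setup_ring(): for CPU-owned
 * rings, clear the ring's bit in CSR_RING_NE_INT_MODE (set during setup),
 * then clear the ring id and ring state registers.
 */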
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}

void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	struct rtnl_link_stats64 *stats = &pdata->stats;

	switch (status) {
	case INGRESS_CRC:
		stats->rx_crc_errors++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		stats->rx_errors++;
		break;
	case INGRESS_TRUNC_FRAME:
		stats->rx_frame_errors++;
		break;
	case INGRESS_PKT_LEN:
		stats->rx_length_errors++;
		break;
	case INGRESS_PKT_UNDER:
		stats->rx_frame_errors++;
		break;
	case INGRESS_FIFO_OVERRUN:
		stats->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
			       u32 reg, u16 data)
{
	u32 addr = 0, wr_data = 0;
	u32 done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);

	PHY_CONTROL_SET(&wr_data, data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT write failed\n");
		return -EBUSY;
	}

	return 0;
}

static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
			      u8 phy_id, u32 reg)
{
	u32 addr = 0;
	u32 data, done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT read failed\n");
		return -EBUSY;
	}

	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);

	return data;
}

static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value, mc2;
	u32 intf_ctl, rgmii;
	u32 icm0, icm2;

	xgene_gmac_reset(pdata);

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl |= ENET_GHD_MODE;
		CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);

	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}
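
/* Configure classifier engine (CLE) bypass so that all ingress frames are
 * steered directly to the given destination ring, drawing buffers from the
 * given buffer pool.
 */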
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb;
	u32 fpsel;

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	u32 val;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (!IS_ERR(pdata->clk)) {
		clk_prepare_enable(pdata->clk);
		clk_disable_unprepare(pdata->clk);
		clk_prepare_enable(pdata->clk);
		xgene_enet_ecc_init(pdata);
	}
	xgene_enet_config_ring_if_assoc(pdata);

	/* Enable auto-incr for scanning */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
	val |= SCAN_AUTO_INCR;
	MGMT_CLOCK_SEL_SET(&val, 1);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);

	return 0;
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	if (!IS_ERR(pdata->clk))
		clk_disable_unprepare(pdata->clk);
}

static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct xgene_enet_pdata *pdata = bus->priv;
	u32 val;

	val = xgene_mii_phy_read(pdata, mii_id, regnum);
	netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);

	return val;
}

static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 val)
{
	struct xgene_enet_pdata *pdata = bus->priv;

	netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);
	return xgene_mii_phy_write(pdata, mii_id, regnum, val);
}

static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = pdata->phy_dev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			xgene_gmac_init(pdata);
			xgene_gmac_rx_enable(pdata);
			xgene_gmac_tx_enable(pdata);
			phy_print_status(phydev);
		}
	} else {
		xgene_gmac_rx_disable(pdata);
		xgene_gmac_tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

static int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *phy_np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
		if (!phy_np) {
			netdev_dbg(ndev, "No phy-handle found in DT\n");
			return -ENODEV;
		}
		pdata->phy_dev = of_phy_find_device(phy_np);
	}

	phy_dev = pdata->phy_dev;

	if (!phy_dev ||
	    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
			       pdata->phy_mode)) {
		netdev_err(ndev, "Could not connect to PHY\n");
		return -ENODEV;
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~SUPPORTED_10baseT_Half &
			      ~SUPPORTED_100baseT_Half &
			      ~SUPPORTED_1000baseT_Half;
	phy_dev->advertising = phy_dev->supported;

	return 0;
}

static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	int ret;
	u32 phy_id;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_id);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_id);
	if (ret)
		return -EINVAL;

	phy = get_phy_device(mdio, phy_id, false);
	if (!phy || IS_ERR(phy))
		return -EIO;

	ret = phy_device_register(phy);
	if (ret)
		phy_device_free(phy);
	else
		pdata->phy_dev = phy;

	return ret;
}

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_enet_mdio_read;
	mdio_bus->write = xgene_enet_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = pdata;
	mdio_bus->parent = &ndev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	if (pdata->phy_dev)
		phy_disconnect(pdata->phy_dev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_mac_addr = xgene_gmac_set_mac_addr,
};

struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
};