/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}
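/* Mark the ring as a regular work queue or a buffer pool, as encoded in
 * its ring ID; buffer pool rings are additionally placed in BUFPOOL_MODE.
 */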
static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < NUM_RING_CONFIG; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG);
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				     struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}
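/* Reverse xgene_enet_setup_ring(): for CPU-owned rings, clear this ring's
 * bit in CSR_RING_NE_INT_MODE, then invalidate the ring ID and zero out
 * the ring state.
 */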
void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	struct rtnl_link_stats64 *stats = &pdata->stats;

	switch (status) {
	case INGRESS_CRC:
		stats->rx_crc_errors++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		stats->rx_errors++;
		break;
	case INGRESS_TRUNC_FRAME:
		stats->rx_frame_errors++;
		break;
	case INGRESS_PKT_LEN:
		stats->rx_length_errors++;
		break;
	case INGRESS_PKT_UNDER:
		stats->rx_frame_errors++;
		break;
	case INGRESS_FIFO_OVERRUN:
		stats->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}
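/* Indirect register read: post the target address, issue a read command,
 * then poll the command-done register (up to ~10us) before fetching the
 * data and clearing the command.
 */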
static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
			       u32 reg, u16 data)
{
	u32 addr = 0, wr_data = 0;
	u32 done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);

	PHY_CONTROL_SET(&wr_data, data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT write failed\n");
		return -EBUSY;
	}

	return 0;
}

static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
			      u8 phy_id, u32 reg)
{
	u32 addr = 0;
	u32 data, done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT read failed\n");
		return -EBUSY;
	}

	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);

	return data;
}

static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}
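/* Pulse SOFT_RESET1 in MAC_CONFIG_1 to soft-reset the MAC, then clear it
 * to bring the MAC back out of reset.
 */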
static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value, mc2;
	u32 intf_ctl, rgmii;
	u32 icm0, icm2;

	xgene_gmac_reset(pdata);

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl |= ENET_GHD_MODE;
		CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);

	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}
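/* Program the classifier (CLE) bypass registers so that ingress frames
 * are steered straight to dst_ring_num, drawing buffers from the free
 * pool selected by bufpool_id.
 */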
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb;
	u32 fpsel;

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	u32 val;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (pdata->clk) {
		clk_prepare_enable(pdata->clk);
		clk_disable_unprepare(pdata->clk);
		clk_prepare_enable(pdata->clk);
		xgene_enet_ecc_init(pdata);
	}
	xgene_enet_config_ring_if_assoc(pdata);

	/* Enable auto-incr for scanning */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
	val |= SCAN_AUTO_INCR;
	MGMT_CLOCK_SEL_SET(&val, 1);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);

	return 0;
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	clk_disable_unprepare(pdata->clk);
}

static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct xgene_enet_pdata *pdata = bus->priv;
	u32 val;

	val = xgene_mii_phy_read(pdata, mii_id, regnum);
	netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);

	return val;
}

static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 val)
{
	struct xgene_enet_pdata *pdata = bus->priv;

	netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);

	return xgene_mii_phy_write(pdata, mii_id, regnum, val);
}

static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = pdata->phy_dev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			xgene_gmac_init(pdata);
			xgene_gmac_rx_enable(pdata);
			xgene_gmac_tx_enable(pdata);
			phy_print_status(phydev);
		}
	} else {
		xgene_gmac_rx_disable(pdata);
		xgene_gmac_tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}
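/* Find the PHY via the "phy-handle" DT property (on the non-DT path,
 * pdata->phy_dev was populated by xgene_mdiobus_register()) and connect
 * it with xgene_enet_adjust_link as the link-state callback; half-duplex
 * modes are not supported, so mask them out of the advertised set.
 */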
static int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *phy_np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
		if (!phy_np) {
			netdev_dbg(ndev, "No phy-handle found in DT\n");
			return -ENODEV;
		}
		pdata->phy_dev = of_phy_find_device(phy_np);
	}

	phy_dev = pdata->phy_dev;

	if (!phy_dev ||
	    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
			       pdata->phy_mode)) {
		netdev_err(ndev, "Could not connect to PHY\n");
		return -ENODEV;
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~SUPPORTED_10baseT_Half &
			      ~SUPPORTED_100baseT_Half &
			      ~SUPPORTED_1000baseT_Half;
	phy_dev->advertising = phy_dev->supported;

	return 0;
}

static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	int ret;
	u32 phy_id;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_id);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_id);
	if (ret)
		return -EINVAL;

	phy = get_phy_device(mdio, phy_id, true);
	if (!phy || IS_ERR(phy))
		return -EIO;

	ret = phy_device_register(phy);
	if (ret)
		phy_device_free(phy);
	else
		pdata->phy_dev = phy;

	return ret;
}

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_enet_mdio_read;
	mdio_bus->write = xgene_enet_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = pdata;
	mdio_bus->parent = &ndev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_mac_addr = xgene_gmac_set_mac_addr,
};

struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};