/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}
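/*
 * Editorial note, added for clarity (not in the original source):
 * ring->state[] is a software shadow of the queue manager's per-ring
 * configuration words; xgene_enet_write_ring_state() below flushes it to
 * hardware.  The ring base address is programmed 256-byte aligned (hence
 * the "addr >>= 8" above), with the low bits landing in word 2 (RINGADDRL)
 * and the remaining high bits in word 3 (RINGADDRH).
 */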
static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}
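/*
 * Editorial note, added for clarity: teardown mirrors
 * xgene_enet_setup_ring() above.  For CPU-owned rings, the empty/non-empty
 * interrupt-mode bit set during setup is cleared first, then the ring ID
 * association and the ring state words are wiped.
 */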
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}

static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
{
	u32 data = 0x7777;

	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
}

void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	switch (status) {
	case INGRESS_CRC:
		ring->rx_crc_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		ring->rx_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_TRUNC_FRAME:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_LEN:
		ring->rx_length_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_UNDER:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_FIFO_OVERRUN:
		ring->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}
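/*
 * Editorial note, added for clarity: the MCX MAC block is reached through
 * a four-register indirect window rather than a flat memory mapping.  The
 * sequence implemented above is: write the target register offset to ADDR,
 * the payload to WRITE, kick the transaction by writing XGENE_ENET_WR_CMD
 * to COMMAND, poll COMMAND_DONE (at most ~10 us here), then clear COMMAND
 * for the next access.  The read path (xgene_enet_rd_indirect() below) is
 * symmetric.  Typical usage:
 *
 *	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
 */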
static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
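/*
 * Editorial note, added for clarity: RGMII clocks the MAC at 2.5 MHz for
 * 10 Mbit/s, 25 MHz for 100 Mbit/s and 125 MHz for 1 Gbit/s.  On DT
 * platforms the rate is set on the parent clock directly; on ACPI
 * platforms the equivalent switch is delegated to the firmware's S10,
 * S100 and S1G methods.
 */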
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		struct clk *parent = clk_get_parent(pdata->clk);

		switch (pdata->phy_speed) {
		case SPEED_10:
			clk_set_rate(parent, 2500000);
			break;
		case SPEED_100:
			clk_set_rate(parent, 25000000);
			break;
		default:
			clk_set_rate(parent, 125000000);
			break;
		}
	}
#ifdef CONFIG_ACPI
	else {
		switch (pdata->phy_speed) {
		case SPEED_10:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S10", NULL, NULL);
			break;
		case SPEED_100:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S100", NULL, NULL);
			break;
		default:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S1G", NULL, NULL);
			break;
		}
	}
#endif
}

static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 icm0, icm2, mc2;
	u32 intf_ctl, rgmii, value;

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 0);
		if (dev->of_node) {
			CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
			CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
		}
		rgmii |= CFG_SPEED_1250;

		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
	xgene_enet_configure_clock(pdata);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
}

static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value;

	if (!pdata->mdio_driver)
		xgene_gmac_reset(pdata);

	xgene_gmac_set_speed(pdata);
	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}
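/*
 * Editorial note, added for clarity: the port must be told which work
 * queues and free pools it may use.  The all-ones writes below associate
 * every QM and QMLite queue with this port; the association registers
 * live in this port's own ring-interface CSR block
 * (pdata->eth_ring_if_addr), so the blanket enable affects only this port.
 */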
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}

static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb;
	u32 fpsel;

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (pdata->mdio_driver) {
		xgene_enet_config_ring_if_assoc(pdata);
		return 0;
	}

	if (dev->of_node) {
		clk_prepare_enable(pdata->clk);
		udelay(5);
		clk_disable_unprepare(pdata->clk);
		udelay(5);
		clk_prepare_enable(pdata->clk);
		udelay(5);
	} else {
#ifdef CONFIG_ACPI
		if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_RST", NULL, NULL);
		} else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
					   "_INI")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_INI", NULL, NULL);
		}
#endif
	}

	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);

	return 0;
}

static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, val, data;

	val = xgene_enet_ring_bufnum(ring->id);

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(val - 0x20);
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(val);
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}
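/*
 * Editorial note, added for clarity: buffer-pool buffer numbers start at
 * 0x20, hence the "- 0x20" adjustments in xgene_enet_clear() above,
 * xgene_enet_cle_bypass() and xgene_gport_shutdown() below when indexing
 * per-pool reset/select bits, while work queues index from 0.
 */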
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	u32 pb, val;
	int i;

	pb = 0;
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i]->buf_pool;

		val = xgene_enet_ring_bufnum(ring->id);
		pb |= BIT(val - 0x20);
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);

	pb = 0;
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];

		val = xgene_enet_ring_bufnum(ring->id);
		pb |= BIT(val);
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);

	if (dev->of_node) {
		if (!IS_ERR(pdata->clk))
			clk_disable_unprepare(pdata->clk);
	}
}

static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	struct phy_device *phydev = pdata->phy_dev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			mac_ops->set_speed(pdata);
			mac_ops->rx_enable(pdata);
			mac_ops->tx_enable(pdata);
			phy_print_status(phydev);
		}
	} else {
		mac_ops->rx_disable(pdata);
		mac_ops->tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

#ifdef CONFIG_ACPI
static struct acpi_device *acpi_phy_find_device(struct device *dev)
{
	struct acpi_reference_args args;
	struct fwnode_handle *fw_node;
	int status;

	fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
	status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
						  &args);
	if (ACPI_FAILURE(status)) {
		dev_dbg(dev, "No matching phy in ACPI table\n");
		return NULL;
	}

	return args.adev;
}
#endif

int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;
	int i;

	if (dev->of_node) {
		for (i = 0; i < 2; i++) {
			np = of_parse_phandle(dev->of_node, "phy-handle", i);
			if (np)
				break;
		}

		if (!np) {
			netdev_dbg(ndev, "No phy-handle found in DT\n");
			return -ENODEV;
		}

		phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link,
					 0, pdata->phy_mode);
		of_node_put(np);
		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}

		pdata->phy_dev = phy_dev;
	} else {
#ifdef CONFIG_ACPI
		struct acpi_device *adev = acpi_phy_find_device(dev);

		if (adev)
			pdata->phy_dev = adev->driver_data;

		phy_dev = pdata->phy_dev;

		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
#else
		return -ENODEV;
#endif
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~SUPPORTED_10baseT_Half &
			      ~SUPPORTED_100baseT_Half &
			      ~SUPPORTED_1000baseT_Half;
	phy_dev->advertising = phy_dev->supported;

	return 0;
}
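/*
 * Editorial note, added for clarity: MDIO bus registration takes one of
 * two paths.  With device tree, the "apm,xgene-mdio" child node is handed
 * to of_mdiobus_register(), which probes the PHYs described there.  With
 * ACPI, auto-probing is masked off and the single PHY named by the
 * "phy-channel" (or "phy-addr") property is registered explicitly.
 */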
static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	u32 phy_addr;
	int ret;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
	if (ret)
		return -EINVAL;

	phy = xgene_enet_phy_register(mdio, phy_addr);
	if (!phy)
		return -EIO;

	pdata->phy_dev = phy;

	return ret;
}

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_mdio_rgmii_read;
	mdio_bus->write = xgene_mdio_rgmii_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
	mdio_bus->parent = &pdata->pdev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
{
	if (pdata->phy_dev)
		phy_disconnect(pdata->phy_dev);
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	if (pdata->phy_dev)
		phy_disconnect(pdata->phy_dev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

const struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_speed = xgene_gmac_set_speed,
	.set_mac_addr = xgene_gmac_set_mac_addr,
};

const struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
	.coalesce = xgene_enet_setup_coalescing,
};