// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"

/* Program the basic MAC configuration (forced speed when hw->ps is set)
 * and enable the default set of GMAC/PCS/FPE interrupts.
 */
static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value |= GMAC_CORE_INIT;

	if (hw->ps) {
		/* Power-save/forced mode: enable TX and force the speed
		 * bits to the configured value.
		 */
		value |= GMAC_CONFIG_TE;

		value &= hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	/* Enable FPE interrupt */
	/* NOTE(review): mask-then-shift — any non-zero FPESEL bit in
	 * HW_FEATURE3 enables the IRQ; the shift amount presumably matches
	 * the FPESEL bit position — confirm against dwmac4.h.
	 */
	if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
		value |= GMAC_INT_FPE_EN;

	writel(value, ioaddr + GMAC_INT_EN);

	/* Timestamp IRQ enabled by default: prepare the waitqueue used to
	 * serialize timestamp register access.
	 */
	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
		init_waitqueue_head(&priv->tstamp_busy_wait);
}

/* Enable an RX queue in either AVB or DCB mode (or disable it for any
 * other mode value).
 */
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

/* Set the priority mask for an RX queue. Queues 0-3 live in RXQ_CTRL2,
 * queues 4-7 in RXQ_CTRL3 (queue index is rebased for the high register).
 */
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
		 GMAC_RXQCTRL_PSRQX_MASK(queue);
	writel(value, ioaddr + base_register);
}

/* Set the priority mapping for a TX queue. Queues 0-3 live in
 * TXQ_PRTY_MAP0, queues 4-7 in TXQ_PRTY_MAP1.
 */
static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
		 GMAC_TXQCTRL_PSTQX_MASK(queue);

	writel(value, ioaddr + base_register);
}

/* Route a packet class (AV control, PTP, DCB control, untagged,
 * multicast/broadcast) to the given RX queue. 'packet' is 1-based and
 * indexes route_possibilities.
 */
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

/* Select the MTL RX arbitration algorithm (strict or weighted strict
 * priority). Unknown values leave the field cleared.
 */
static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

/* Select the MTL TX scheduling algorithm (WRR/WFQ/DWRR/SP). Unknown
 * values leave the field cleared.
 */
static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

/* Program the scheduling weight for a TX queue (the same register field
 * carries the CBS idle slope, see dwmac4_config_cbs()).
 */
static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

/* Map an MTL RX queue to a DMA channel. Queues 0-3 are programmed in
 * MTL_RXQ_DMA_MAP0 and 4-7 in MTL_RXQ_DMA_MAP1; queues 0 and 4 use a
 * dedicated mask/field macro.
 */
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4)
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
	else
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);

	if (queue == 0 || queue == 4) {
		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
	} else if (queue > 4) {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
	} else {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	}

	if (queue < 4)
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	else
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
}

/* Configure Credit-Based Shaper (IEEE 802.1Qav) parameters for an AVB
 * TX queue: AV algorithm + credit control, send/idle slopes and the
 * high/low credit limits.
 */
static void dwmac4_config_cbs(struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

	/* configure send slope */
	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

	/* configure low credit */
	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}

/* Dump the first GMAC_REG_NUM 32-bit MAC registers for ethtool. */
static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

/* Enable/disable RX checksum offload per hw->rx_csum; returns whether
 * the IPC bit actually stuck (it is read back after the write).
 */
static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

/* Program the PMT register for Wake-on-LAN (magic packet and/or global
 * unicast). Writing 0 disables power-down/WOL.
 */
static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}

/* Write a MAC address into perfect-filter entry reg_n. */
static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 const unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

/* Read back the MAC address stored in perfect-filter entry reg_n. */
static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

/* Enter EEE/LPI mode, optionally gating the TX clock while in LPI. */
static void dwmac4_set_eee_mode(struct mac_device_info *hw,
				bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Enable the link status receive on RGMII, SGMII or SMII
	 * receive path and instruct the transmit to enter in LPI
	 * state.
	 */
	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (en_tx_lpi_clockgating)
		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

/* Leave EEE/LPI mode by clearing the LPI enable/auto bits. */
static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

/* Reflect the PHY link state into the LPI PLS (PHY Link Status) bit. */
static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= GMAC4_LPI_CTRL_STATUS_PLS;
	else
		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

/* Program the LPI entry timer (clamped to STMMAC_ET_MAX) and toggle the
 * LPI auto-entry (LPIATE) bit depending on whether 'et' is non-zero.
 */
static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = et & STMMAC_ET_MAX;
	int regval;

	/* Program LPI entry timer value into register */
	writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);

	/* Enable/disable LPI entry timer */
	regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (et)
		regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
	else
		regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;

	writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

/* Pack the LS/TW EEE timers into the LPI timer control register. */
static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *  status from PHY should be ok before transmitting
	 *  the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *  after it has stopped transmitting the LPI pattern.
	 */
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

/* Program the single (non-extended) VLAN tag filter with 'vid'; a VID of
 * 0 effectively disables the filter (ETV stays set).
 */
static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 val;

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~GMAC_VLAN_TAG_VID;
	val |= GMAC_VLAN_TAG_ETV | vid;

	writel(val, ioaddr + GMAC_VLAN_TAG);
}

/* Write one entry of the extended VLAN filter table via the indirect
 * VLAN_TAG_DATA/VLAN_TAG mechanism, polling the OB (operation busy) bit.
 * Returns 0 on success, -EINVAL for a bad index, -EBUSY on timeout.
 */
static int dwmac4_write_vlan_filter(struct net_device *dev,
				    struct mac_device_info *hw,
				    u8 index, u32 data)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int i, timeout = 10;
	u32 val;

	if (index >= hw->num_vlan)
		return -EINVAL;

	writel(data, ioaddr + GMAC_VLAN_TAG_DATA);

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
		GMAC_VLAN_TAG_CTRL_CT |
		GMAC_VLAN_TAG_CTRL_OB);
	val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;

	writel(val, ioaddr + GMAC_VLAN_TAG);

	for (i = 0; i < timeout; i++) {
		val = readl(ioaddr + GMAC_VLAN_TAG);
		if (!(val & GMAC_VLAN_TAG_CTRL_OB))
			return 0;
		udelay(1);
	}

	netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");

	return -EBUSY;
}

/* Install a VLAN ID into the hardware RX filter. Single-entry hardware
 * uses the VLAN_TAG register directly; multi-entry hardware picks a free
 * slot in the extended table (reusing an exact match if present).
 */
static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int index = -1;
	u32 val = 0;
	int i, ret;

	if (vid > 4095)
		return -EINVAL;

	if (hw->promisc) {
		netdev_err(dev,
			   "Adding VLAN in promisc mode not supported\n");
		return -EPERM;
	}

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		/* For single VLAN filter, VID 0 means VLAN promiscuous */
		if (vid == 0) {
			netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
			return -EPERM;
		}

		if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
			netdev_err(dev, "Only single VLAN ID supported\n");
			return -EPERM;
		}

		hw->vlan_filter[0] = vid;
		dwmac4_write_single_vlan(dev, vid);

		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;

	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] == val)
			return 0;
		else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
			index = i;
	}

	if (index == -1) {
		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
			   hw->num_vlan);
		return -EPERM;
	}

	ret = dwmac4_write_vlan_filter(dev, hw, index, val);

	if (!ret)
		hw->vlan_filter[index] = val;

	return ret;
}

/* Remove a VLAN ID from the hardware RX filter (single or extended),
 * clearing the matching shadow entries in hw->vlan_filter[].
 */
static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int i, ret = 0;

	if (hw->promisc) {
		netdev_err(dev,
			   "Deleting VLAN in promisc mode not supported\n");
		return -EPERM;
	}

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
			hw->vlan_filter[0] = 0;
			dwmac4_write_single_vlan(dev, 0);
		}
		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
			ret = dwmac4_write_vlan_filter(dev, hw, i, 0);

			if (!ret)
				hw->vlan_filter[i] = 0;
			else
				return ret;
		}
	}

	return ret;
}

/* Temporarily bypass VLAN filtering for promiscuous mode: disable each
 * enabled filter entry in hardware (shadow copies in hw->vlan_filter[]
 * are kept so dwmac4_restore_hw_vlan_rx_fltr() can re-enable them) and
 * turn off VLAN hash matching if a hash table is programmed.
 */
static void dwmac4_vlan_promisc_enable(struct net_device *dev,
				       struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, 0);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		if (value & GMAC_VLAN_VTHM) {
			value &= ~GMAC_VLAN_VTHM;
			writel(value, ioaddr + GMAC_VLAN_TAG);
		}
	}
}

/* Re-program the hardware VLAN filters from the hw->vlan_filter[] shadow
 * copies after leaving promiscuous mode, and re-enable hash matching if
 * a hash table is programmed.
 */
static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
					   struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i];
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		value |= GMAC_VLAN_VTHM;
		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

/* Program the RX packet filter from the netdev state: promiscuous mode
 * (with optional VLAN-fail queueing), all-multi, multicast hash filter,
 * unicast perfect filters and VLAN filtering; also toggles the software
 * VLAN promisc bookkeeping (hw->promisc).
 */
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int numhashregs = (hw->multicast_filter_bins >> 5);
	int mcbitslog2 = hw->mcast_bits_log2;
	unsigned int value;
	u32 mc_filter[8];
	int i;

	memset(mc_filter, 0, sizeof(mc_filter));

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value &= ~GMAC_PACKET_FILTER_HMC;
	value &= ~GMAC_PACKET_FILTER_HPF;
	value &= ~GMAC_PACKET_FILTER_PCF;
	value &= ~GMAC_PACKET_FILTER_PM;
	value &= ~GMAC_PACKET_FILTER_PR;
	value &= ~GMAC_PACKET_FILTER_RA;
	if (dev->flags & IFF_PROMISC) {
		/* VLAN Tag Filter Fail Packets Queuing */
		if (hw->vlan_fail_q_en) {
			value = readl(ioaddr + GMAC_RXQ_CTRL4);
			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
			value |= GMAC_RXQCTRL_VFFQE |
				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
			writel(value, ioaddr + GMAC_RXQ_CTRL4);
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
		} else {
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
		}

	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multi */
		value |= GMAC_PACKET_FILTER_PM;
		/* Set all the bits of the HASH tab */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value |= GMAC_PACKET_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* The upper n bits of the calculated CRC are used to
			 * index the contents of the hash table. The number of
			 * bits used depends on the hardware configuration
			 * selected at core configuration time.
			 */
			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
					ETH_ALEN)) >> (32 - mcbitslog2);
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
		}
	}

	for (i = 0; i < numhashregs; i++)
		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

	value |= GMAC_PACKET_FILTER_HPF;

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		/* Clear the remaining perfect-filter entries */
		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	/* VLAN filtering */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		value |= GMAC_PACKET_FILTER_VTFE;

	writel(value, ioaddr + GMAC_PACKET_FILTER);

	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
		if (!hw->promisc) {
			hw->promisc = 1;
			dwmac4_vlan_promisc_enable(dev, hw);
		}
	} else {
		if (hw->promisc) {
			hw->promisc = 0;
			dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
		}
	}
}

/* Configure RX and per-TX-queue flow control (pause frames); the pause
 * time is only programmed in full duplex.
 */
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	}
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}

/* PCS helpers: thin wrappers around the common stmmac PCS code with the
 * GMAC4 PCS register base.
 */
static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
/* Decode the in-band PHY interface status (link, speed, duplex) from the
 * control/status register and update the extra stats accordingly.
 */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

/* Handle the MTL interrupt for one queue/channel; clears and reports an
 * RX overflow if present. Returns CORE_IRQ_MTL_RX_OVERFLOW or 0.
 */
static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

/* Top-level MAC interrupt handler: accounts MMC/PMT/LPI events in the
 * extra stats, dispatches PCS and PHY-status interrupts, and returns a
 * CORE_IRQ_* bitmask describing LPI transitions.
 */
static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if ((intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}

/* Collect MTL per-queue and MAC debug counters into the extra stats,
 * decoding the TX/RX FIFO read-controller and fill-level state fields.
 */
static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			/* NOTE(review): RXFSTS field extracted with
			 * RRCSTS_SHIFT — presumably the two shift values
			 * coincide; confirm against dwmac4.h.
			 */
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			     >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

/* Enable/disable the MAC-level loopback (LM) bit in the config register. */
static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (enable)
		value |= GMAC_CONFIG_LM;
	else
		value &= ~GMAC_CONFIG_LM;

	writel(value, ioaddr + GMAC_CONFIG);
}

/* Program VLAN hash and/or perfect matching in the VLAN_TAG register.
 * Priority: a non-zero hash enables hash matching; otherwise a non-zero
 * perfect_match programs a perfect filter; otherwise all VLAN matching
 * bits are cleared. is_double enables double-VLAN (S-tag) handling.
 */
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				    __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);

	value = readl(ioaddr + GMAC_VLAN_TAG);

	if (hash) {
		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + GMAC_VLAN_TAG);
	} else if (perfect_match) {
		/* Deliberately shadows the outer 'value': the perfect-match
		 * path rebuilds the register from scratch.
		 */
		u32 value = GMAC_VLAN_ETV;

		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
	} else {
		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
		value &= ~GMAC_VLAN_DOVLTC;
		value &= ~GMAC_VLAN_VID;

		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

/* Configure the Source Address Replacement/Insertion Control field. */
static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value &= ~GMAC_CONFIG_SARC;
	value |= val << GMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + GMAC_CONFIG);
}

/* Enable TX VLAN tag insertion of the given type via the VLAN inclusion
 * register.
 */
static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_VLAN_INCL);
	value |= GMAC_VLAN_VLTI;
	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~GMAC_VLAN_VLC;
	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
	writel(value, ioaddr + GMAC_VLAN_INCL);
}

/* Program the ARP offload target address and enable/disable the ARPEN
 * bit in the MAC configuration.
 */
static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
				   u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + GMAC_ARP_ADDR);

	value = readl(ioaddr + GMAC_CONFIG);
	if (en)
		value |= GMAC_CONFIG_ARPEN;
	else
		value &= ~GMAC_CONFIG_ARPEN;
	writel(value, ioaddr + GMAC_CONFIG);
}

/* Configure one L3 (IPv4/IPv6) address filter: SA or DA match, optional
 * inverse matching. Writing 0 to the control register at the end
 * disables the filter when !en. Always returns 0.
 */
static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= GMAC_L3PEN0;
		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	} else {
		value &= ~GMAC_L3PEN0;
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
	} else {
		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
	}

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

/* Configure one L4 (TCP/UDP) port filter: source or destination port
 * match, optional inverse matching. Writing 0 to the control register at
 * the end disables the filter when !en. Always returns 0.
 */
static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool udp, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
	if (udp) {
		value |= GMAC_L4PEN0;
	} else {
		value &= ~GMAC_L4PEN0;
	}

	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
	if (sa) {
		value |= GMAC_L4SPM0;
		if (inv)
			value |= GMAC_L4SPIM0;
	} else {
		value |= GMAC_L4DPM0;
		if (inv)
			value |= GMAC_L4DPIM0;
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		value = match & GMAC_L4SP0;
	} else {
		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
	}

	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

/* Callback table for the base DWMAC 4.00 core. */
const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

/* Callback table for DWMAC 4.10: adds dwmac5 flexible PPS, EST and FPE
 * support on top of the 4.00 callbacks.
 */
const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.est_irq_status = dwmac5_est_irq_status,
	.fpe_configure = dwmac5_fpe_configure,
	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
	.fpe_irq_status = dwmac5_fpe_irq_status,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

/* Callback table for DWMAC 5.10 (continues beyond this chunk). */
const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
.pcs_get_adv_lp = dwmac4_get_adv_lp, 1295 .debug = dwmac4_debug, 1296 .set_filter = dwmac4_set_filter, 1297 .safety_feat_config = dwmac5_safety_feat_config, 1298 .safety_feat_irq_status = dwmac5_safety_feat_irq_status, 1299 .safety_feat_dump = dwmac5_safety_feat_dump, 1300 .rxp_config = dwmac5_rxp_config, 1301 .flex_pps_config = dwmac5_flex_pps_config, 1302 .set_mac_loopback = dwmac4_set_mac_loopback, 1303 .update_vlan_hash = dwmac4_update_vlan_hash, 1304 .sarc_configure = dwmac4_sarc_configure, 1305 .enable_vlan = dwmac4_enable_vlan, 1306 .set_arp_offload = dwmac4_set_arp_offload, 1307 .config_l3_filter = dwmac4_config_l3_filter, 1308 .config_l4_filter = dwmac4_config_l4_filter, 1309 .est_configure = dwmac5_est_configure, 1310 .est_irq_status = dwmac5_est_irq_status, 1311 .fpe_configure = dwmac5_fpe_configure, 1312 .fpe_send_mpacket = dwmac5_fpe_send_mpacket, 1313 .fpe_irq_status = dwmac5_fpe_irq_status, 1314 .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr, 1315 .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr, 1316 .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr, 1317 }; 1318 1319 static u32 dwmac4_get_num_vlan(void __iomem *ioaddr) 1320 { 1321 u32 val, num_vlan; 1322 1323 val = readl(ioaddr + GMAC_HW_FEATURE3); 1324 switch (val & GMAC_HW_FEAT_NRVF) { 1325 case 0: 1326 num_vlan = 1; 1327 break; 1328 case 1: 1329 num_vlan = 4; 1330 break; 1331 case 2: 1332 num_vlan = 8; 1333 break; 1334 case 3: 1335 num_vlan = 16; 1336 break; 1337 case 4: 1338 num_vlan = 24; 1339 break; 1340 case 5: 1341 num_vlan = 32; 1342 break; 1343 default: 1344 num_vlan = 1; 1345 } 1346 1347 return num_vlan; 1348 } 1349 1350 int dwmac4_setup(struct stmmac_priv *priv) 1351 { 1352 struct mac_device_info *mac = priv->hw; 1353 1354 dev_info(priv->device, "\tDWMAC4/5\n"); 1355 1356 priv->dev->priv_flags |= IFF_UNICAST_FLT; 1357 mac->pcsr = priv->ioaddr; 1358 mac->multicast_filter_bins = priv->plat->multicast_filter_bins; 1359 mac->unicast_filter_entries = 
priv->plat->unicast_filter_entries; 1360 mac->mcast_bits_log2 = 0; 1361 1362 if (mac->multicast_filter_bins) 1363 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); 1364 1365 mac->link.duplex = GMAC_CONFIG_DM; 1366 mac->link.speed10 = GMAC_CONFIG_PS; 1367 mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS; 1368 mac->link.speed1000 = 0; 1369 mac->link.speed2500 = GMAC_CONFIG_FES; 1370 mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS; 1371 mac->mii.addr = GMAC_MDIO_ADDR; 1372 mac->mii.data = GMAC_MDIO_DATA; 1373 mac->mii.addr_shift = 21; 1374 mac->mii.addr_mask = GENMASK(25, 21); 1375 mac->mii.reg_shift = 16; 1376 mac->mii.reg_mask = GENMASK(20, 16); 1377 mac->mii.clk_csr_shift = 8; 1378 mac->mii.clk_csr_mask = GENMASK(11, 8); 1379 mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr); 1380 1381 return 0; 1382 } 1383