gianfar.c (75bf465f0bc33e9b776a46d6a1b9b990f5fb7c37) | gianfar.c (7d993c5f86aa308b00c2fd420fe5208da18125e2) |
---|---|
1// SPDX-License-Identifier: GPL-2.0-or-later 2/* drivers/net/ethernet/freescale/gianfar.c 3 * 4 * Gianfar Ethernet Driver 5 * This driver is designed for the non-CPM ethernet controllers 6 * on the 85xx and 83xx family of integrated processors 7 * Based on 8260_io/fcc_enet.c 8 * --- 91 unchanged lines hidden (view full) --- 100#include <linux/of_net.h> 101 102#include "gianfar.h" 103 104#define TX_TIMEOUT (5*HZ) 105 106const char gfar_driver_version[] = "2.0"; 107 | 1// SPDX-License-Identifier: GPL-2.0-or-later 2/* drivers/net/ethernet/freescale/gianfar.c 3 * 4 * Gianfar Ethernet Driver 5 * This driver is designed for the non-CPM ethernet controllers 6 * on the 85xx and 83xx family of integrated processors 7 * Based on 8260_io/fcc_enet.c 8 * --- 91 unchanged lines hidden (view full) --- 100#include <linux/of_net.h> 101 102#include "gianfar.h" 103 104#define TX_TIMEOUT (5*HZ) 105 106const char gfar_driver_version[] = "2.0"; 107 |
108static int gfar_enet_open(struct net_device *dev); 109static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); 110static void gfar_reset_task(struct work_struct *work); 111static void gfar_timeout(struct net_device *dev); 112static int gfar_close(struct net_device *dev); 113static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, 114 int alloc_cnt); 115static int gfar_set_mac_address(struct net_device *dev); 116static int gfar_change_mtu(struct net_device *dev, int new_mtu); 117static irqreturn_t gfar_error(int irq, void *dev_id); 118static irqreturn_t gfar_transmit(int irq, void *dev_id); 119static irqreturn_t gfar_interrupt(int irq, void *dev_id); 120static void adjust_link(struct net_device *dev); 121static noinline void gfar_update_link_state(struct gfar_private *priv); 122static int init_phy(struct net_device *dev); 123static int gfar_probe(struct platform_device *ofdev); 124static int gfar_remove(struct platform_device *ofdev); 125static void free_skb_resources(struct gfar_private *priv); 126static void gfar_set_multi(struct net_device *dev); 127static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 128static void gfar_configure_serdes(struct net_device *dev); 129static int gfar_poll_rx(struct napi_struct *napi, int budget); 130static int gfar_poll_tx(struct napi_struct *napi, int budget); 131static int gfar_poll_rx_sq(struct napi_struct *napi, int budget); 132static int gfar_poll_tx_sq(struct napi_struct *napi, int budget); 133#ifdef CONFIG_NET_POLL_CONTROLLER 134static void gfar_netpoll(struct net_device *dev); 135#endif 136int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); 137static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); 138static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb); 139static void gfar_halt_nodisable(struct gfar_private *priv); 140static void gfar_clear_exact_match(struct net_device *dev); 141static void gfar_set_mac_for_addr(struct net_device *dev, int num, 142 const u8 *addr); 143static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 144 | |
145MODULE_AUTHOR("Freescale Semiconductor, Inc"); 146MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 147MODULE_LICENSE("GPL"); 148 149static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 150 dma_addr_t buf) 151{ 152 u32 lstatus; --- 4 unchanged lines hidden (view full) --- 157 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) 158 lstatus |= BD_LFLAG(RXBD_WRAP); 159 160 gfar_wmb(); 161 162 bdp->lstatus = cpu_to_be32(lstatus); 163} 164 | 108MODULE_AUTHOR("Freescale Semiconductor, Inc"); 109MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 110MODULE_LICENSE("GPL"); 111 112static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 113 dma_addr_t buf) 114{ 115 u32 lstatus; --- 4 unchanged lines hidden (view full) --- 120 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) 121 lstatus |= BD_LFLAG(RXBD_WRAP); 122 123 gfar_wmb(); 124 125 bdp->lstatus = cpu_to_be32(lstatus); 126} 127 |
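A note on gfar_init_rxbdp() above: the descriptor body (including the buffer address passed in) is filled in first, and only then, behind gfar_wmb(), is the lstatus word stored that hands the descriptor to the controller, so the DMA engine can never observe a half-initialised descriptor. The sketch below is a stand-alone illustration of that publish pattern under simplified assumptions — fake_rxbd, the flag bit positions and the C11 release fence are stand-ins, not the driver's real rxbd8 layout, RXBD_* flags or gfar_wmb().

```c
#include <stdint.h>
#include <stdatomic.h>

/* Simplified stand-in for struct rxbd8: status/length word + buffer pointer. */
struct fake_rxbd {
	volatile uint32_t lstatus;
	volatile uint32_t bufPtr;
};

#define FAKE_RXBD_EMPTY (1u << 31)	/* illustrative bit, not the real RXBD_EMPTY */
#define FAKE_RXBD_WRAP  (1u << 29)	/* illustrative bit, not the real RXBD_WRAP  */

/* Hand one descriptor to "hardware": fill the buffer pointer, then publish
 * the status word last, behind a release barrier (gfar_wmb() in the driver).
 */
static void publish_rx_bd(struct fake_rxbd *bdp, uint32_t buf, int is_last)
{
	uint32_t lstatus = FAKE_RXBD_EMPTY;

	if (is_last)
		lstatus |= FAKE_RXBD_WRAP;

	bdp->bufPtr = buf;
	atomic_thread_fence(memory_order_release);	/* order bufPtr before lstatus */
	bdp->lstatus = lstatus;
}

int main(void)
{
	struct fake_rxbd ring[4];
	unsigned int i;

	for (i = 0; i < 4; i++)
		publish_rx_bd(&ring[i], 0x1000 + i * 0x800, i == 3);
	return 0;
}
```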
165static void gfar_init_bds(struct net_device *ndev) 166{ 167 struct gfar_private *priv = netdev_priv(ndev); 168 struct gfar __iomem *regs = priv->gfargrp[0].regs; 169 struct gfar_priv_tx_q *tx_queue = NULL; 170 struct gfar_priv_rx_q *rx_queue = NULL; 171 struct txbd8 *txbdp; 172 u32 __iomem *rfbptr; 173 int i, j; 174 175 for (i = 0; i < priv->num_tx_queues; i++) { 176 tx_queue = priv->tx_queue[i]; 177 /* Initialize some variables in our dev structure */ 178 tx_queue->num_txbdfree = tx_queue->tx_ring_size; 179 tx_queue->dirty_tx = tx_queue->tx_bd_base; 180 tx_queue->cur_tx = tx_queue->tx_bd_base; 181 tx_queue->skb_curtx = 0; 182 tx_queue->skb_dirtytx = 0; 183 184 /* Initialize Transmit Descriptor Ring */ 185 txbdp = tx_queue->tx_bd_base; 186 for (j = 0; j < tx_queue->tx_ring_size; j++) { 187 txbdp->lstatus = 0; 188 txbdp->bufPtr = 0; 189 txbdp++; 190 } 191 192 /* Set the last descriptor in the ring to indicate wrap */ 193 txbdp--; 194 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | 195 TXBD_WRAP); 196 } 197 198 rfbptr = ®s->rfbptr0; 199 for (i = 0; i < priv->num_rx_queues; i++) { 200 rx_queue = priv->rx_queue[i]; 201 202 rx_queue->next_to_clean = 0; 203 rx_queue->next_to_use = 0; 204 rx_queue->next_to_alloc = 0; 205 206 /* make sure next_to_clean != next_to_use after this 207 * by leaving at least 1 unused descriptor 208 */ 209 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue)); 210 211 rx_queue->rfbptr = rfbptr; 212 rfbptr += 2; 213 } 214} 215 216static int gfar_alloc_skb_resources(struct net_device *ndev) 217{ 218 void *vaddr; 219 dma_addr_t addr; 220 int i, j; 221 struct gfar_private *priv = netdev_priv(ndev); 222 struct device *dev = priv->dev; 223 struct gfar_priv_tx_q *tx_queue = NULL; 224 struct gfar_priv_rx_q *rx_queue = NULL; 225 226 priv->total_tx_ring_size = 0; 227 for (i = 0; i < priv->num_tx_queues; i++) 228 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; 229 230 priv->total_rx_ring_size = 0; 231 for (i = 0; i < priv->num_rx_queues; i++) 232 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; 233 234 /* Allocate memory for the buffer descriptors */ 235 vaddr = dma_alloc_coherent(dev, 236 (priv->total_tx_ring_size * 237 sizeof(struct txbd8)) + 238 (priv->total_rx_ring_size * 239 sizeof(struct rxbd8)), 240 &addr, GFP_KERNEL); 241 if (!vaddr) 242 return -ENOMEM; 243 244 for (i = 0; i < priv->num_tx_queues; i++) { 245 tx_queue = priv->tx_queue[i]; 246 tx_queue->tx_bd_base = vaddr; 247 tx_queue->tx_bd_dma_base = addr; 248 tx_queue->dev = ndev; 249 /* enet DMA only understands physical addresses */ 250 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; 251 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; 252 } 253 254 /* Start the rx descriptor ring where the tx ring leaves off */ 255 for (i = 0; i < priv->num_rx_queues; i++) { 256 rx_queue = priv->rx_queue[i]; 257 rx_queue->rx_bd_base = vaddr; 258 rx_queue->rx_bd_dma_base = addr; 259 rx_queue->ndev = ndev; 260 rx_queue->dev = dev; 261 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; 262 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; 263 } 264 265 /* Setup the skbuff rings */ 266 for (i = 0; i < priv->num_tx_queues; i++) { 267 tx_queue = priv->tx_queue[i]; 268 tx_queue->tx_skbuff = 269 kmalloc_array(tx_queue->tx_ring_size, 270 sizeof(*tx_queue->tx_skbuff), 271 GFP_KERNEL); 272 if (!tx_queue->tx_skbuff) 273 goto cleanup; 274 275 for (j = 0; j < tx_queue->tx_ring_size; j++) 276 tx_queue->tx_skbuff[j] = NULL; 277 } 278 279 for (i = 0; i < priv->num_rx_queues; i++) { 280 
rx_queue = priv->rx_queue[i]; 281 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, 282 sizeof(*rx_queue->rx_buff), 283 GFP_KERNEL); 284 if (!rx_queue->rx_buff) 285 goto cleanup; 286 } 287 288 gfar_init_bds(ndev); 289 290 return 0; 291 292cleanup: 293 free_skb_resources(priv); 294 return -ENOMEM; 295} 296 | |
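gfar_alloc_skb_resources() above makes a single dma_alloc_coherent() call for all descriptor rings and then carves it up: every Tx ring back to back, then every Rx ring ("Start the rx descriptor ring where the tx ring leaves off"). The toy calculation below only illustrates that layout; the 8-byte descriptor size and the queue/ring counts are assumptions for the example, not values taken from this driver's configuration.

```c
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	const size_t bd_size = 8;	/* assumed size of txbd8/rxbd8 */
	const size_t tx_rings = 2, rx_rings = 2, ring_len = 256;	/* example values */
	size_t offset = 0, i;

	for (i = 0; i < tx_rings; i++) {
		printf("tx ring %zu starts at offset %zu\n", i, offset);
		offset += ring_len * bd_size;
	}
	for (i = 0; i < rx_rings; i++) {
		printf("rx ring %zu starts at offset %zu\n", i, offset);
		offset += ring_len * bd_size;
	}
	printf("total coherent allocation: %zu bytes\n", offset);
	return 0;
}
```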
297static void gfar_init_tx_rx_base(struct gfar_private *priv) 298{ 299 struct gfar __iomem *regs = priv->gfargrp[0].regs; 300 u32 __iomem *baddr; 301 int i; 302 303 baddr = ®s->tbase0; 304 for (i = 0; i < priv->num_tx_queues; i++) { --- 167 unchanged lines hidden (view full) --- 472 } 473 474 dev->stats.tx_bytes = tx_bytes; 475 dev->stats.tx_packets = tx_packets; 476 477 return &dev->stats; 478} 479 | 128static void gfar_init_tx_rx_base(struct gfar_private *priv) 129{ 130 struct gfar __iomem *regs = priv->gfargrp[0].regs; 131 u32 __iomem *baddr; 132 int i; 133 134 baddr = ®s->tbase0; 135 for (i = 0; i < priv->num_tx_queues; i++) { --- 167 unchanged lines hidden (view full) --- 303 } 304 305 dev->stats.tx_bytes = tx_bytes; 306 dev->stats.tx_packets = tx_packets; 307 308 return &dev->stats; 309} 310 |
311/* Set the appropriate hash bit for the given addr */ 312/* The algorithm works like so: 313 * 1) Take the Destination Address (ie the multicast address), and 314 * do a CRC on it (little endian), and reverse the bits of the 315 * result. 316 * 2) Use the 8 most significant bits as a hash into a 256-entry 317 * table. The table is controlled through 8 32-bit registers: 318 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is 319 * gaddr7. This means that the 3 most significant bits in the 320 * hash index which gaddr register to use, and the 5 other bits 321 * indicate which bit (assuming an IBM numbering scheme, which 322 * for PowerPC (tm) is usually the case) in the register holds 323 * the entry. 324 */ 325static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) 326{ 327 u32 tempval; 328 struct gfar_private *priv = netdev_priv(dev); 329 u32 result = ether_crc(ETH_ALEN, addr); 330 int width = priv->hash_width; 331 u8 whichbit = (result >> (32 - width)) & 0x1f; 332 u8 whichreg = result >> (32 - width + 5); 333 u32 value = (1 << (31-whichbit)); 334 335 tempval = gfar_read(priv->hash_regs[whichreg]); 336 tempval |= value; 337 gfar_write(priv->hash_regs[whichreg], tempval); 338} 339 340/* There are multiple MAC Address register pairs on some controllers 341 * This function sets the numth pair to a given address 342 */ 343static void gfar_set_mac_for_addr(struct net_device *dev, int num, 344 const u8 *addr) 345{ 346 struct gfar_private *priv = netdev_priv(dev); 347 struct gfar __iomem *regs = priv->gfargrp[0].regs; 348 u32 tempval; 349 u32 __iomem *macptr = ®s->macstnaddr1; 350 351 macptr += num*2; 352 353 /* For a station address of 0x12345678ABCD in transmission 354 * order (BE), MACnADDR1 is set to 0xCDAB7856 and 355 * MACnADDR2 is set to 0x34120000. 356 */ 357 tempval = (addr[5] << 24) | (addr[4] << 16) | 358 (addr[3] << 8) | addr[2]; 359 360 gfar_write(macptr, tempval); 361 362 tempval = (addr[1] << 24) | (addr[0] << 16); 363 364 gfar_write(macptr+1, tempval); 365} 366 |
|
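The two helpers above lend themselves to a quick offline check. The stand-alone sketch below (user space, purely illustrative; the sample address and the made-up CRC value are not from the driver) reproduces the MACnADDR1/MACnADDR2 byte-swizzling of gfar_set_mac_for_addr() and the register/bit split of gfar_set_hash_for_addr(). The CRC itself is the bit-reversed little-endian CRC-32 that ether_crc() returns, as the comment above describes; with the extended hash the width is 9 (16 hash registers, igaddr0-7 followed by gaddr0-7), otherwise it is 8 and only gaddr0-7 are used — see gfar_init_addr_hash_table() further down.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same packing as gfar_set_mac_for_addr(): the last four address bytes land
 * in MACnADDR1 in reversed order, the first two in the upper half of MACnADDR2.
 */
static void pack_station_addr(const uint8_t *addr, uint32_t *a1, uint32_t *a2)
{
	*a1 = ((uint32_t)addr[5] << 24) | ((uint32_t)addr[4] << 16) |
	      ((uint32_t)addr[3] << 8)  |  addr[2];
	*a2 = ((uint32_t)addr[1] << 24) | ((uint32_t)addr[0] << 16);
}

/* Same split as gfar_set_hash_for_addr(): the top "width" bits of the CRC
 * select a hash register and a bit within it.  "crc" stands in for what
 * ether_crc() would return in the driver.
 */
static void hash_split(uint32_t crc, int width, unsigned int *reg, unsigned int *bit)
{
	*bit = (crc >> (32 - width)) & 0x1f;
	*reg = crc >> (32 - width + 5);
}

int main(void)
{
	const uint8_t addr[6] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD };
	uint32_t a1, a2;
	unsigned int reg, bit;

	pack_station_addr(addr, &a1, &a2);
	/* Matches the worked example in the comment above: 0xCDAB7856 / 0x34120000 */
	printf("MACnADDR1=0x%08" PRIX32 " MACnADDR2=0x%08" PRIX32 "\n", a1, a2);

	hash_split(0xDEADBEEF /* made-up CRC value */, 9, &reg, &bit);
	printf("hash register %u, bit %u counted from the MSB\n", reg, bit);
	return 0;
}
```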
480static int gfar_set_mac_addr(struct net_device *dev, void *p) 481{ 482 eth_mac_addr(dev, p); 483 484 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 485 486 return 0; 487} 488 | 367static int gfar_set_mac_addr(struct net_device *dev, void *p) 368{ 369 eth_mac_addr(dev, p); 370 371 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 372 373 return 0; 374} 375 |
489static const struct net_device_ops gfar_netdev_ops = { 490 .ndo_open = gfar_enet_open, 491 .ndo_start_xmit = gfar_start_xmit, 492 .ndo_stop = gfar_close, 493 .ndo_change_mtu = gfar_change_mtu, 494 .ndo_set_features = gfar_set_features, 495 .ndo_set_rx_mode = gfar_set_multi, 496 .ndo_tx_timeout = gfar_timeout, 497 .ndo_do_ioctl = gfar_ioctl, 498 .ndo_get_stats = gfar_get_stats, 499 .ndo_change_carrier = fixed_phy_change_carrier, 500 .ndo_set_mac_address = gfar_set_mac_addr, 501 .ndo_validate_addr = eth_validate_addr, 502#ifdef CONFIG_NET_POLL_CONTROLLER 503 .ndo_poll_controller = gfar_netpoll, 504#endif 505}; 506 | |
507static void gfar_ints_disable(struct gfar_private *priv) 508{ 509 int i; 510 for (i = 0; i < priv->num_grps; i++) { 511 struct gfar __iomem *regs = priv->gfargrp[i].regs; 512 /* Clear IEVENT */ 513 gfar_write(®s->ievent, IEVENT_INIT_CLEAR); 514 --- 203 unchanged lines hidden (view full) --- 718 719 for_each_available_child_of_node(np, child) 720 if (of_node_name_eq(child, "queue-group")) 721 num++; 722 723 return num; 724} 725 | 376static void gfar_ints_disable(struct gfar_private *priv) 377{ 378 int i; 379 for (i = 0; i < priv->num_grps; i++) { 380 struct gfar __iomem *regs = priv->gfargrp[i].regs; 381 /* Clear IEVENT */ 382 gfar_write(®s->ievent, IEVENT_INIT_CLEAR); 383 --- 203 unchanged lines hidden (view full) --- 587 588 for_each_available_child_of_node(np, child) 589 if (of_node_name_eq(child, "queue-group")) 590 num++; 591 592 return num; 593} 594 |
595/* Reads the controller's registers to determine what interface 596 * connects it to the PHY. 597 */ 598static phy_interface_t gfar_get_interface(struct net_device *dev) 599{ 600 struct gfar_private *priv = netdev_priv(dev); 601 struct gfar __iomem *regs = priv->gfargrp[0].regs; 602 u32 ecntrl; 603 604 ecntrl = gfar_read(®s->ecntrl); 605 606 if (ecntrl & ECNTRL_SGMII_MODE) 607 return PHY_INTERFACE_MODE_SGMII; 608 609 if (ecntrl & ECNTRL_TBI_MODE) { 610 if (ecntrl & ECNTRL_REDUCED_MODE) 611 return PHY_INTERFACE_MODE_RTBI; 612 else 613 return PHY_INTERFACE_MODE_TBI; 614 } 615 616 if (ecntrl & ECNTRL_REDUCED_MODE) { 617 if (ecntrl & ECNTRL_REDUCED_MII_MODE) { 618 return PHY_INTERFACE_MODE_RMII; 619 } 620 else { 621 phy_interface_t interface = priv->interface; 622 623 /* This isn't autodetected right now, so it must 624 * be set by the device tree or platform code. 625 */ 626 if (interface == PHY_INTERFACE_MODE_RGMII_ID) 627 return PHY_INTERFACE_MODE_RGMII_ID; 628 629 return PHY_INTERFACE_MODE_RGMII; 630 } 631 } 632 633 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) 634 return PHY_INTERFACE_MODE_GMII; 635 636 return PHY_INTERFACE_MODE_MII; 637} 638 |
|
726static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) 727{ 728 const char *model; 729 const char *ctype; 730 const void *mac_addr; 731 int err = 0, i; 732 struct net_device *dev = NULL; 733 struct gfar_private *priv = NULL; --- 192 unchanged lines hidden (view full) --- 926rx_alloc_failed: 927 gfar_free_rx_queues(priv); 928tx_alloc_failed: 929 gfar_free_tx_queues(priv); 930 free_gfar_dev(priv); 931 return err; 932} 933 | 639static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) 640{ 641 const char *model; 642 const char *ctype; 643 const void *mac_addr; 644 int err = 0, i; 645 struct net_device *dev = NULL; 646 struct gfar_private *priv = NULL; --- 192 unchanged lines hidden (view full) --- 839rx_alloc_failed: 840 gfar_free_rx_queues(priv); 841tx_alloc_failed: 842 gfar_free_tx_queues(priv); 843 free_gfar_dev(priv); 844 return err; 845} 846 |
934static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) 935{ 936 struct hwtstamp_config config; 937 struct gfar_private *priv = netdev_priv(netdev); 938 939 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 940 return -EFAULT; 941 942 /* reserved for future extensions */ 943 if (config.flags) 944 return -EINVAL; 945 946 switch (config.tx_type) { 947 case HWTSTAMP_TX_OFF: 948 priv->hwts_tx_en = 0; 949 break; 950 case HWTSTAMP_TX_ON: 951 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 952 return -ERANGE; 953 priv->hwts_tx_en = 1; 954 break; 955 default: 956 return -ERANGE; 957 } 958 959 switch (config.rx_filter) { 960 case HWTSTAMP_FILTER_NONE: 961 if (priv->hwts_rx_en) { 962 priv->hwts_rx_en = 0; 963 reset_gfar(netdev); 964 } 965 break; 966 default: 967 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 968 return -ERANGE; 969 if (!priv->hwts_rx_en) { 970 priv->hwts_rx_en = 1; 971 reset_gfar(netdev); 972 } 973 config.rx_filter = HWTSTAMP_FILTER_ALL; 974 break; 975 } 976 977 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 978 -EFAULT : 0; 979} 980 981static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) 982{ 983 struct hwtstamp_config config; 984 struct gfar_private *priv = netdev_priv(netdev); 985 986 config.flags = 0; 987 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; 988 config.rx_filter = (priv->hwts_rx_en ? 989 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); 990 991 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 992 -EFAULT : 0; 993} 994 995static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 996{ 997 struct phy_device *phydev = dev->phydev; 998 999 if (!netif_running(dev)) 1000 return -EINVAL; 1001 1002 if (cmd == SIOCSHWTSTAMP) 1003 return gfar_hwtstamp_set(dev, rq); 1004 if (cmd == SIOCGHWTSTAMP) 1005 return gfar_hwtstamp_get(dev, rq); 1006 1007 if (!phydev) 1008 return -ENODEV; 1009 1010 return phy_mii_ioctl(phydev, rq, cmd); 1011} 1012 | |
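gfar_hwtstamp_set()/gfar_hwtstamp_get() above implement the standard SIOCSHWTSTAMP/SIOCGHWTSTAMP interface, so they are normally exercised from user space rather than called directly. A minimal sketch of that side follows; "eth0" is a placeholder interface name, the program needs CAP_NET_ADMIN, and on hardware without the FSL_GIANFAR_DEV_HAS_TIMER capability the request fails with -ERANGE, exactly as the code above shows. Note also that any rx_filter other than NONE is upgraded to HWTSTAMP_FILTER_ALL and echoed back to the caller.

```c
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.flags = 0,				/* must be 0, see gfar_hwtstamp_set() */
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,	/* gianfar grants ALL for any non-NONE filter */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder interface */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter granted by the driver: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}
```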
1013static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, 1014 u32 class) 1015{ 1016 u32 rqfpr = FPR_FILER_MASK; 1017 u32 rqfcr = 0x0; 1018 1019 rqfar--; 1020 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; --- 107 unchanged lines hidden (view full) --- 1128 __gfar_detect_errata_83xx(priv); 1129#endif 1130 1131 if (priv->errata) 1132 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 1133 priv->errata); 1134} 1135 | 847static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, 848 u32 class) 849{ 850 u32 rqfpr = FPR_FILER_MASK; 851 u32 rqfcr = 0x0; 852 853 rqfar--; 854 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; --- 107 unchanged lines hidden (view full) --- 962 __gfar_detect_errata_83xx(priv); 963#endif 964 965 if (priv->errata) 966 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 967 priv->errata); 968} 969 |
1136void gfar_mac_reset(struct gfar_private *priv) 1137{ 1138 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1139 u32 tempval; 1140 1141 /* Reset MAC layer */ 1142 gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); 1143 1144 /* We need to delay at least 3 TX clocks */ 1145 udelay(3); 1146 1147 /* the soft reset bit is not self-resetting, so we need to 1148 * clear it before resuming normal operation 1149 */ 1150 gfar_write(®s->maccfg1, 0); 1151 1152 udelay(3); 1153 1154 gfar_rx_offload_en(priv); 1155 1156 /* Initialize the max receive frame/buffer lengths */ 1157 gfar_write(®s->maxfrm, GFAR_JUMBO_FRAME_SIZE); 1158 gfar_write(®s->mrblr, GFAR_RXB_SIZE); 1159 1160 /* Initialize the Minimum Frame Length Register */ 1161 gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); 1162 1163 /* Initialize MACCFG2. */ 1164 tempval = MACCFG2_INIT_SETTINGS; 1165 1166 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 1167 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1, 1168 * and by checking RxBD[LG] and discarding larger than MAXFRM. 1169 */ 1170 if (gfar_has_errata(priv, GFAR_ERRATA_74)) 1171 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; 1172 1173 gfar_write(®s->maccfg2, tempval); 1174 1175 /* Clear mac addr hash registers */ 1176 gfar_write(®s->igaddr0, 0); 1177 gfar_write(®s->igaddr1, 0); 1178 gfar_write(®s->igaddr2, 0); 1179 gfar_write(®s->igaddr3, 0); 1180 gfar_write(®s->igaddr4, 0); 1181 gfar_write(®s->igaddr5, 0); 1182 gfar_write(®s->igaddr6, 0); 1183 gfar_write(®s->igaddr7, 0); 1184 1185 gfar_write(®s->gaddr0, 0); 1186 gfar_write(®s->gaddr1, 0); 1187 gfar_write(®s->gaddr2, 0); 1188 gfar_write(®s->gaddr3, 0); 1189 gfar_write(®s->gaddr4, 0); 1190 gfar_write(®s->gaddr5, 0); 1191 gfar_write(®s->gaddr6, 0); 1192 gfar_write(®s->gaddr7, 0); 1193 1194 if (priv->extended_hash) 1195 gfar_clear_exact_match(priv->ndev); 1196 1197 gfar_mac_rx_config(priv); 1198 1199 gfar_mac_tx_config(priv); 1200 1201 gfar_set_mac_address(priv->ndev); 1202 1203 gfar_set_multi(priv->ndev); 1204 1205 /* clear ievent and imask before configuring coalescing */ 1206 gfar_ints_disable(priv); 1207 1208 /* Configure the coalescing support */ 1209 gfar_configure_coalescing_all(priv); 1210} 1211 1212static void gfar_hw_init(struct gfar_private *priv) 1213{ 1214 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1215 u32 attrs; 1216 1217 /* Stop the DMA engine now, in case it was running before 1218 * (The firmware could have used it, and left it running). 
1219 */ 1220 gfar_halt(priv); 1221 1222 gfar_mac_reset(priv); 1223 1224 /* Zero out the rmon mib registers if it has them */ 1225 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 1226 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); 1227 1228 /* Mask off the CAM interrupts */ 1229 gfar_write(®s->rmon.cam1, 0xffffffff); 1230 gfar_write(®s->rmon.cam2, 0xffffffff); 1231 } 1232 1233 /* Initialize ECNTRL */ 1234 gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); 1235 1236 /* Set the extraction length and index */ 1237 attrs = ATTRELI_EL(priv->rx_stash_size) | 1238 ATTRELI_EI(priv->rx_stash_index); 1239 1240 gfar_write(®s->attreli, attrs); 1241 1242 /* Start with defaults, and add stashing 1243 * depending on driver parameters 1244 */ 1245 attrs = ATTR_INIT_SETTINGS; 1246 1247 if (priv->bd_stash_en) 1248 attrs |= ATTR_BDSTASH; 1249 1250 if (priv->rx_stash_size != 0) 1251 attrs |= ATTR_BUFSTASH; 1252 1253 gfar_write(®s->attr, attrs); 1254 1255 /* FIFO configs */ 1256 gfar_write(®s->fifo_tx_thr, DEFAULT_FIFO_TX_THR); 1257 gfar_write(®s->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); 1258 gfar_write(®s->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); 1259 1260 /* Program the interrupt steering regs, only for MG devices */ 1261 if (priv->num_grps > 1) 1262 gfar_write_isrg(priv); 1263} 1264 | |
1265static void gfar_init_addr_hash_table(struct gfar_private *priv) 1266{ 1267 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1268 1269 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 1270 priv->extended_hash = 1; 1271 priv->hash_width = 9; 1272 --- 24 unchanged lines hidden (view full) --- 1297 priv->hash_regs[3] = ®s->gaddr3; 1298 priv->hash_regs[4] = ®s->gaddr4; 1299 priv->hash_regs[5] = ®s->gaddr5; 1300 priv->hash_regs[6] = ®s->gaddr6; 1301 priv->hash_regs[7] = ®s->gaddr7; 1302 } 1303} 1304 | 970static void gfar_init_addr_hash_table(struct gfar_private *priv) 971{ 972 struct gfar __iomem *regs = priv->gfargrp[0].regs; 973 974 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 975 priv->extended_hash = 1; 976 priv->hash_width = 9; 977 --- 24 unchanged lines hidden (view full) --- 1002 priv->hash_regs[3] = ®s->gaddr3; 1003 priv->hash_regs[4] = ®s->gaddr4; 1004 priv->hash_regs[5] = ®s->gaddr5; 1005 priv->hash_regs[6] = ®s->gaddr6; 1006 priv->hash_regs[7] = ®s->gaddr7; 1007 } 1008} 1009 |
1305/* Set up the ethernet device structure, private data, 1306 * and anything else we need before we start 1307 */ 1308static int gfar_probe(struct platform_device *ofdev) 1309{ 1310 struct device_node *np = ofdev->dev.of_node; 1311 struct net_device *dev = NULL; 1312 struct gfar_private *priv = NULL; 1313 int err = 0, i; 1314 1315 err = gfar_of_init(ofdev, &dev); 1316 1317 if (err) 1318 return err; 1319 1320 priv = netdev_priv(dev); 1321 priv->ndev = dev; 1322 priv->ofdev = ofdev; 1323 priv->dev = &ofdev->dev; 1324 SET_NETDEV_DEV(dev, &ofdev->dev); 1325 1326 INIT_WORK(&priv->reset_task, gfar_reset_task); 1327 1328 platform_set_drvdata(ofdev, priv); 1329 1330 gfar_detect_errata(priv); 1331 1332 /* Set the dev->base_addr to the gfar reg region */ 1333 dev->base_addr = (unsigned long) priv->gfargrp[0].regs; 1334 1335 /* Fill in the dev structure */ 1336 dev->watchdog_timeo = TX_TIMEOUT; 1337 /* MTU range: 50 - 9586 */ 1338 dev->mtu = 1500; 1339 dev->min_mtu = 50; 1340 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN; 1341 dev->netdev_ops = &gfar_netdev_ops; 1342 dev->ethtool_ops = &gfar_ethtool_ops; 1343 1344 /* Register for napi ...We are registering NAPI for each grp */ 1345 for (i = 0; i < priv->num_grps; i++) { 1346 if (priv->poll_mode == GFAR_SQ_POLLING) { 1347 netif_napi_add(dev, &priv->gfargrp[i].napi_rx, 1348 gfar_poll_rx_sq, GFAR_DEV_WEIGHT); 1349 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, 1350 gfar_poll_tx_sq, 2); 1351 } else { 1352 netif_napi_add(dev, &priv->gfargrp[i].napi_rx, 1353 gfar_poll_rx, GFAR_DEV_WEIGHT); 1354 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, 1355 gfar_poll_tx, 2); 1356 } 1357 } 1358 1359 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1360 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1361 NETIF_F_RXCSUM; 1362 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | 1363 NETIF_F_RXCSUM | NETIF_F_HIGHDMA; 1364 } 1365 1366 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1367 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 1368 NETIF_F_HW_VLAN_CTAG_RX; 1369 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1370 } 1371 1372 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1373 1374 gfar_init_addr_hash_table(priv); 1375 1376 /* Insert receive time stamps into padding alignment bytes, and 1377 * plus 2 bytes padding to ensure the cpu alignment. 1378 */ 1379 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1380 priv->padding = 8 + DEFAULT_PADDING; 1381 1382 if (dev->features & NETIF_F_IP_CSUM || 1383 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 1384 dev->needed_headroom = GMAC_FCB_LEN; 1385 1386 /* Initializing some of the rx/tx queue level parameters */ 1387 for (i = 0; i < priv->num_tx_queues; i++) { 1388 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; 1389 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; 1390 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; 1391 priv->tx_queue[i]->txic = DEFAULT_TXIC; 1392 } 1393 1394 for (i = 0; i < priv->num_rx_queues; i++) { 1395 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; 1396 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; 1397 priv->rx_queue[i]->rxic = DEFAULT_RXIC; 1398 } 1399 1400 /* Always enable rx filer if available */ 1401 priv->rx_filer_enable = 1402 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 
1 : 0; 1403 /* Enable most messages by default */ 1404 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1405 /* use pritority h/w tx queue scheduling for single queue devices */ 1406 if (priv->num_tx_queues == 1) 1407 priv->prio_sched_en = 1; 1408 1409 set_bit(GFAR_DOWN, &priv->state); 1410 1411 gfar_hw_init(priv); 1412 1413 /* Carrier starts down, phylib will bring it up */ 1414 netif_carrier_off(dev); 1415 1416 err = register_netdev(dev); 1417 1418 if (err) { 1419 pr_err("%s: Cannot register net device, aborting\n", dev->name); 1420 goto register_fail; 1421 } 1422 1423 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) 1424 priv->wol_supported |= GFAR_WOL_MAGIC; 1425 1426 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && 1427 priv->rx_filer_enable) 1428 priv->wol_supported |= GFAR_WOL_FILER_UCAST; 1429 1430 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); 1431 1432 /* fill out IRQ number and name fields */ 1433 for (i = 0; i < priv->num_grps; i++) { 1434 struct gfar_priv_grp *grp = &priv->gfargrp[i]; 1435 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1436 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", 1437 dev->name, "_g", '0' + i, "_tx"); 1438 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", 1439 dev->name, "_g", '0' + i, "_rx"); 1440 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", 1441 dev->name, "_g", '0' + i, "_er"); 1442 } else 1443 strcpy(gfar_irq(grp, TX)->name, dev->name); 1444 } 1445 1446 /* Initialize the filer table */ 1447 gfar_init_filer_table(priv); 1448 1449 /* Print out the device info */ 1450 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 1451 1452 /* Even more device info helps when determining which kernel 1453 * provided which set of benchmarks. 1454 */ 1455 netdev_info(dev, "Running with NAPI enabled\n"); 1456 for (i = 0; i < priv->num_rx_queues; i++) 1457 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", 1458 i, priv->rx_queue[i]->rx_ring_size); 1459 for (i = 0; i < priv->num_tx_queues; i++) 1460 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", 1461 i, priv->tx_queue[i]->tx_ring_size); 1462 1463 return 0; 1464 1465register_fail: 1466 if (of_phy_is_fixed_link(np)) 1467 of_phy_deregister_fixed_link(np); 1468 unmap_group_regs(priv); 1469 gfar_free_rx_queues(priv); 1470 gfar_free_tx_queues(priv); 1471 of_node_put(priv->phy_node); 1472 of_node_put(priv->tbi_node); 1473 free_gfar_dev(priv); 1474 return err; 1475} 1476 1477static int gfar_remove(struct platform_device *ofdev) 1478{ 1479 struct gfar_private *priv = platform_get_drvdata(ofdev); 1480 struct device_node *np = ofdev->dev.of_node; 1481 1482 of_node_put(priv->phy_node); 1483 of_node_put(priv->tbi_node); 1484 1485 unregister_netdev(priv->ndev); 1486 1487 if (of_phy_is_fixed_link(np)) 1488 of_phy_deregister_fixed_link(np); 1489 1490 unmap_group_regs(priv); 1491 gfar_free_rx_queues(priv); 1492 gfar_free_tx_queues(priv); 1493 free_gfar_dev(priv); 1494 1495 return 0; 1496} 1497 1498#ifdef CONFIG_PM 1499 1500static void __gfar_filer_disable(struct gfar_private *priv) 1501{ 1502 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1503 u32 temp; 1504 1505 temp = gfar_read(®s->rctrl); 1506 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT); 1507 gfar_write(®s->rctrl, temp); 1508} 1509 1510static void __gfar_filer_enable(struct gfar_private *priv) 1511{ 1512 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1513 u32 temp; 1514 1515 temp = gfar_read(®s->rctrl); 1516 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; 1517 gfar_write(®s->rctrl, temp); 1518} 1519 1520/* Filer rules implementing wol 
capabilities */ 1521static void gfar_filer_config_wol(struct gfar_private *priv) 1522{ 1523 unsigned int i; 1524 u32 rqfcr; 1525 1526 __gfar_filer_disable(priv); 1527 1528 /* clear the filer table, reject any packet by default */ 1529 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH; 1530 for (i = 0; i <= MAX_FILER_IDX; i++) 1531 gfar_write_filer(priv, i, rqfcr, 0); 1532 1533 i = 0; 1534 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { 1535 /* unicast packet, accept it */ 1536 struct net_device *ndev = priv->ndev; 1537 /* get the default rx queue index */ 1538 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; 1539 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | 1540 (ndev->dev_addr[1] << 8) | 1541 ndev->dev_addr[2]; 1542 1543 rqfcr = (qindex << 10) | RQFCR_AND | 1544 RQFCR_CMP_EXACT | RQFCR_PID_DAH; 1545 1546 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); 1547 1548 dest_mac_addr = (ndev->dev_addr[3] << 16) | 1549 (ndev->dev_addr[4] << 8) | 1550 ndev->dev_addr[5]; 1551 rqfcr = (qindex << 10) | RQFCR_GPI | 1552 RQFCR_CMP_EXACT | RQFCR_PID_DAL; 1553 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); 1554 } 1555 1556 __gfar_filer_enable(priv); 1557} 1558 1559static void gfar_filer_restore_table(struct gfar_private *priv) 1560{ 1561 u32 rqfcr, rqfpr; 1562 unsigned int i; 1563 1564 __gfar_filer_disable(priv); 1565 1566 for (i = 0; i <= MAX_FILER_IDX; i++) { 1567 rqfcr = priv->ftp_rqfcr[i]; 1568 rqfpr = priv->ftp_rqfpr[i]; 1569 gfar_write_filer(priv, i, rqfcr, rqfpr); 1570 } 1571 1572 __gfar_filer_enable(priv); 1573} 1574 1575/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */ 1576static void gfar_start_wol_filer(struct gfar_private *priv) 1577{ 1578 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1579 u32 tempval; 1580 int i = 0; 1581 1582 /* Enable Rx hw queues */ 1583 gfar_write(®s->rqueue, priv->rqueue); 1584 1585 /* Initialize DMACTRL to have WWR and WOP */ 1586 tempval = gfar_read(®s->dmactrl); 1587 tempval |= DMACTRL_INIT_SETTINGS; 1588 gfar_write(®s->dmactrl, tempval); 1589 1590 /* Make sure we aren't stopped */ 1591 tempval = gfar_read(®s->dmactrl); 1592 tempval &= ~DMACTRL_GRS; 1593 gfar_write(®s->dmactrl, tempval); 1594 1595 for (i = 0; i < priv->num_grps; i++) { 1596 regs = priv->gfargrp[i].regs; 1597 /* Clear RHLT, so that the DMA starts polling now */ 1598 gfar_write(®s->rstat, priv->gfargrp[i].rstat); 1599 /* enable the Filer General Purpose Interrupt */ 1600 gfar_write(®s->imask, IMASK_FGPI); 1601 } 1602 1603 /* Enable Rx DMA */ 1604 tempval = gfar_read(®s->maccfg1); 1605 tempval |= MACCFG1_RX_EN; 1606 gfar_write(®s->maccfg1, tempval); 1607} 1608 1609static int gfar_suspend(struct device *dev) 1610{ 1611 struct gfar_private *priv = dev_get_drvdata(dev); 1612 struct net_device *ndev = priv->ndev; 1613 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1614 u32 tempval; 1615 u16 wol = priv->wol_opts; 1616 1617 if (!netif_running(ndev)) 1618 return 0; 1619 1620 disable_napi(priv); 1621 netif_tx_lock(ndev); 1622 netif_device_detach(ndev); 1623 netif_tx_unlock(ndev); 1624 1625 gfar_halt(priv); 1626 1627 if (wol & GFAR_WOL_MAGIC) { 1628 /* Enable interrupt on Magic Packet */ 1629 gfar_write(®s->imask, IMASK_MAG); 1630 1631 /* Enable Magic Packet mode */ 1632 tempval = gfar_read(®s->maccfg2); 1633 tempval |= MACCFG2_MPEN; 1634 gfar_write(®s->maccfg2, tempval); 1635 1636 /* re-enable the Rx block */ 1637 tempval = gfar_read(®s->maccfg1); 1638 tempval |= MACCFG1_RX_EN; 1639 gfar_write(®s->maccfg1, tempval); 1640 1641 } else if (wol & GFAR_WOL_FILER_UCAST) { 1642 
gfar_filer_config_wol(priv); 1643 gfar_start_wol_filer(priv); 1644 1645 } else { 1646 phy_stop(ndev->phydev); 1647 } 1648 1649 return 0; 1650} 1651 1652static int gfar_resume(struct device *dev) 1653{ 1654 struct gfar_private *priv = dev_get_drvdata(dev); 1655 struct net_device *ndev = priv->ndev; 1656 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1657 u32 tempval; 1658 u16 wol = priv->wol_opts; 1659 1660 if (!netif_running(ndev)) 1661 return 0; 1662 1663 if (wol & GFAR_WOL_MAGIC) { 1664 /* Disable Magic Packet mode */ 1665 tempval = gfar_read(®s->maccfg2); 1666 tempval &= ~MACCFG2_MPEN; 1667 gfar_write(®s->maccfg2, tempval); 1668 1669 } else if (wol & GFAR_WOL_FILER_UCAST) { 1670 /* need to stop rx only, tx is already down */ 1671 gfar_halt(priv); 1672 gfar_filer_restore_table(priv); 1673 1674 } else { 1675 phy_start(ndev->phydev); 1676 } 1677 1678 gfar_start(priv); 1679 1680 netif_device_attach(ndev); 1681 enable_napi(priv); 1682 1683 return 0; 1684} 1685 1686static int gfar_restore(struct device *dev) 1687{ 1688 struct gfar_private *priv = dev_get_drvdata(dev); 1689 struct net_device *ndev = priv->ndev; 1690 1691 if (!netif_running(ndev)) { 1692 netif_device_attach(ndev); 1693 1694 return 0; 1695 } 1696 1697 gfar_init_bds(ndev); 1698 1699 gfar_mac_reset(priv); 1700 1701 gfar_init_tx_rx_base(priv); 1702 1703 gfar_start(priv); 1704 1705 priv->oldlink = 0; 1706 priv->oldspeed = 0; 1707 priv->oldduplex = -1; 1708 1709 if (ndev->phydev) 1710 phy_start(ndev->phydev); 1711 1712 netif_device_attach(ndev); 1713 enable_napi(priv); 1714 1715 return 0; 1716} 1717 1718static const struct dev_pm_ops gfar_pm_ops = { 1719 .suspend = gfar_suspend, 1720 .resume = gfar_resume, 1721 .freeze = gfar_suspend, 1722 .thaw = gfar_resume, 1723 .restore = gfar_restore, 1724}; 1725 1726#define GFAR_PM_OPS (&gfar_pm_ops) 1727 1728#else 1729 1730#define GFAR_PM_OPS NULL 1731 1732#endif 1733 1734/* Reads the controller's registers to determine what interface 1735 * connects it to the PHY. 1736 */ 1737static phy_interface_t gfar_get_interface(struct net_device *dev) 1738{ 1739 struct gfar_private *priv = netdev_priv(dev); 1740 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1741 u32 ecntrl; 1742 1743 ecntrl = gfar_read(®s->ecntrl); 1744 1745 if (ecntrl & ECNTRL_SGMII_MODE) 1746 return PHY_INTERFACE_MODE_SGMII; 1747 1748 if (ecntrl & ECNTRL_TBI_MODE) { 1749 if (ecntrl & ECNTRL_REDUCED_MODE) 1750 return PHY_INTERFACE_MODE_RTBI; 1751 else 1752 return PHY_INTERFACE_MODE_TBI; 1753 } 1754 1755 if (ecntrl & ECNTRL_REDUCED_MODE) { 1756 if (ecntrl & ECNTRL_REDUCED_MII_MODE) { 1757 return PHY_INTERFACE_MODE_RMII; 1758 } 1759 else { 1760 phy_interface_t interface = priv->interface; 1761 1762 /* This isn't autodetected right now, so it must 1763 * be set by the device tree or platform code. 1764 */ 1765 if (interface == PHY_INTERFACE_MODE_RGMII_ID) 1766 return PHY_INTERFACE_MODE_RGMII_ID; 1767 1768 return PHY_INTERFACE_MODE_RGMII; 1769 } 1770 } 1771 1772 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) 1773 return PHY_INTERFACE_MODE_GMII; 1774 1775 return PHY_INTERFACE_MODE_MII; 1776} 1777 1778 1779/* Initializes driver's PHY state, and attaches to the PHY. 1780 * Returns 0 on success. 
1781 */ 1782static int init_phy(struct net_device *dev) 1783{ 1784 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 1785 struct gfar_private *priv = netdev_priv(dev); 1786 phy_interface_t interface; 1787 struct phy_device *phydev; 1788 struct ethtool_eee edata; 1789 1790 linkmode_set_bit_array(phy_10_100_features_array, 1791 ARRAY_SIZE(phy_10_100_features_array), 1792 mask); 1793 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); 1794 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); 1795 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) 1796 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask); 1797 1798 priv->oldlink = 0; 1799 priv->oldspeed = 0; 1800 priv->oldduplex = -1; 1801 1802 interface = gfar_get_interface(dev); 1803 1804 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, 1805 interface); 1806 if (!phydev) { 1807 dev_err(&dev->dev, "could not attach to PHY\n"); 1808 return -ENODEV; 1809 } 1810 1811 if (interface == PHY_INTERFACE_MODE_SGMII) 1812 gfar_configure_serdes(dev); 1813 1814 /* Remove any features not supported by the controller */ 1815 linkmode_and(phydev->supported, phydev->supported, mask); 1816 linkmode_copy(phydev->advertising, phydev->supported); 1817 1818 /* Add support for flow control */ 1819 phy_support_asym_pause(phydev); 1820 1821 /* disable EEE autoneg, EEE not supported by eTSEC */ 1822 memset(&edata, 0, sizeof(struct ethtool_eee)); 1823 phy_ethtool_set_eee(phydev, &edata); 1824 1825 return 0; 1826} 1827 1828/* Initialize TBI PHY interface for communicating with the 1829 * SERDES lynx PHY on the chip. We communicate with this PHY 1830 * through the MDIO bus on each controller, treating it as a 1831 * "normal" PHY at the address found in the TBIPA register. We assume 1832 * that the TBIPA register is valid. Either the MDIO bus code will set 1833 * it to a value that doesn't conflict with other PHYs on the bus, or the 1834 * value doesn't matter, as there are no other PHYs on the bus. 1835 */ 1836static void gfar_configure_serdes(struct net_device *dev) 1837{ 1838 struct gfar_private *priv = netdev_priv(dev); 1839 struct phy_device *tbiphy; 1840 1841 if (!priv->tbi_node) { 1842 dev_warn(&dev->dev, "error: SGMII mode requires that the " 1843 "device tree specify a tbi-handle\n"); 1844 return; 1845 } 1846 1847 tbiphy = of_phy_find_device(priv->tbi_node); 1848 if (!tbiphy) { 1849 dev_err(&dev->dev, "error: Could not get TBI device\n"); 1850 return; 1851 } 1852 1853 /* If the link is already up, we must already be ok, and don't need to 1854 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured 1855 * everything for us? Resetting it takes the link down and requires 1856 * several seconds for it to come back. 1857 */ 1858 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { 1859 put_device(&tbiphy->mdio.dev); 1860 return; 1861 } 1862 1863 /* Single clk mode, mii mode off(for serdes communication) */ 1864 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); 1865 1866 phy_write(tbiphy, MII_ADVERTISE, 1867 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 1868 ADVERTISE_1000XPSE_ASYM); 1869 1870 phy_write(tbiphy, MII_BMCR, 1871 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | 1872 BMCR_SPEED1000); 1873 1874 put_device(&tbiphy->mdio.dev); 1875} 1876 | |
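The wake-on-LAN setup in gfar_filer_config_wol() above matches the station address with two chained filer rules: an exact match on the DAH property (the first three address bytes) combined via RQFCR_AND with an exact match on DAL (the last three bytes) that raises the filer general-purpose interrupt. The sketch below only shows how a MAC address splits into those two 24-bit property words; the sample address is arbitrary.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* DAH/DAL property values as built in gfar_filer_config_wol(): DAH carries
 * dev_addr[0..2], DAL carries dev_addr[3..5], each packed into the low
 * 24 bits of the filer property word.
 */
static void wol_filer_props(const uint8_t *addr, uint32_t *dah, uint32_t *dal)
{
	*dah = ((uint32_t)addr[0] << 16) | ((uint32_t)addr[1] << 8) | addr[2];
	*dal = ((uint32_t)addr[3] << 16) | ((uint32_t)addr[4] << 8) | addr[5];
}

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x04, 0x9F, 0x01, 0x02, 0x03 }; /* arbitrary */
	uint32_t dah, dal;

	wol_filer_props(addr, &dah, &dal);
	printf("RQFCR_PID_DAH property: 0x%06" PRIX32 "\n", dah); /* 0x00049F */
	printf("RQFCR_PID_DAL property: 0x%06" PRIX32 "\n", dal); /* 0x010203 */
	return 0;
}
```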
1877static int __gfar_is_rx_idle(struct gfar_private *priv) 1878{ 1879 u32 res; 1880 1881 /* Normaly TSEC should not hang on GRS commands, so we should 1882 * actually wait for IEVENT_GRSC flag. 1883 */ 1884 if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) --- 59 unchanged lines hidden (view full) --- 1944 gfar_halt_nodisable(priv); 1945 1946 /* Disable Rx/Tx DMA */ 1947 tempval = gfar_read(®s->maccfg1); 1948 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 1949 gfar_write(®s->maccfg1, tempval); 1950} 1951 | 1010static int __gfar_is_rx_idle(struct gfar_private *priv) 1011{ 1012 u32 res; 1013 1014 /* Normaly TSEC should not hang on GRS commands, so we should 1015 * actually wait for IEVENT_GRSC flag. 1016 */ 1017 if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) --- 59 unchanged lines hidden (view full) --- 1077 gfar_halt_nodisable(priv); 1078 1079 /* Disable Rx/Tx DMA */ 1080 tempval = gfar_read(®s->maccfg1); 1081 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 1082 gfar_write(®s->maccfg1, tempval); 1083} 1084 |
1952void stop_gfar(struct net_device *dev) 1953{ 1954 struct gfar_private *priv = netdev_priv(dev); 1955 1956 netif_tx_stop_all_queues(dev); 1957 1958 smp_mb__before_atomic(); 1959 set_bit(GFAR_DOWN, &priv->state); 1960 smp_mb__after_atomic(); 1961 1962 disable_napi(priv); 1963 1964 /* disable ints and gracefully shut down Rx/Tx DMA */ 1965 gfar_halt(priv); 1966 1967 phy_stop(dev->phydev); 1968 1969 free_skb_resources(priv); 1970} 1971 | |
1972static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) 1973{ 1974 struct txbd8 *txbdp; 1975 struct gfar_private *priv = netdev_priv(tx_queue->dev); 1976 int i, j; 1977 1978 txbdp = tx_queue->tx_bd_base; 1979 --- 20 unchanged lines hidden (view full) --- 2000} 2001 2002static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) 2003{ 2004 int i; 2005 2006 struct rxbd8 *rxbdp = rx_queue->rx_bd_base; 2007 | 1085static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) 1086{ 1087 struct txbd8 *txbdp; 1088 struct gfar_private *priv = netdev_priv(tx_queue->dev); 1089 int i, j; 1090 1091 txbdp = tx_queue->tx_bd_base; 1092 --- 20 unchanged lines hidden (view full) --- 1113} 1114 1115static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) 1116{ 1117 int i; 1118 1119 struct rxbd8 *rxbdp = rx_queue->rx_bd_base; 1120 |
2008 if (rx_queue->skb) 2009 dev_kfree_skb(rx_queue->skb); | 1121 dev_kfree_skb(rx_queue->skb); |
2010 2011 for (i = 0; i < rx_queue->rx_ring_size; i++) { 2012 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; 2013 2014 rxbdp->lstatus = 0; 2015 rxbdp->bufPtr = 0; 2016 rxbdp++; 2017 --- 39 unchanged lines hidden (view full) --- 2057 2058 dma_free_coherent(priv->dev, 2059 sizeof(struct txbd8) * priv->total_tx_ring_size + 2060 sizeof(struct rxbd8) * priv->total_rx_ring_size, 2061 priv->tx_queue[0]->tx_bd_base, 2062 priv->tx_queue[0]->tx_bd_dma_base); 2063} 2064 | 1122 1123 for (i = 0; i < rx_queue->rx_ring_size; i++) { 1124 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; 1125 1126 rxbdp->lstatus = 0; 1127 rxbdp->bufPtr = 0; 1128 rxbdp++; 1129 --- 39 unchanged lines hidden (view full) --- 1169 1170 dma_free_coherent(priv->dev, 1171 sizeof(struct txbd8) * priv->total_tx_ring_size + 1172 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1173 priv->tx_queue[0]->tx_bd_base, 1174 priv->tx_queue[0]->tx_bd_dma_base); 1175} 1176 |
1177void stop_gfar(struct net_device *dev) 1178{ 1179 struct gfar_private *priv = netdev_priv(dev); 1180 1181 netif_tx_stop_all_queues(dev); 1182 1183 smp_mb__before_atomic(); 1184 set_bit(GFAR_DOWN, &priv->state); 1185 smp_mb__after_atomic(); 1186 1187 disable_napi(priv); 1188 1189 /* disable ints and gracefully shut down Rx/Tx DMA */ 1190 gfar_halt(priv); 1191 1192 phy_stop(dev->phydev); 1193 1194 free_skb_resources(priv); 1195} 1196 |
|
2065void gfar_start(struct gfar_private *priv) 2066{ 2067 struct gfar __iomem *regs = priv->gfargrp[0].regs; 2068 u32 tempval; 2069 int i = 0; 2070 2071 /* Enable Rx/Tx hw queues */ 2072 gfar_write(®s->rqueue, priv->rqueue); --- 21 unchanged lines hidden (view full) --- 2094 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); 2095 gfar_write(®s->maccfg1, tempval); 2096 2097 gfar_ints_enable(priv); 2098 2099 netif_trans_update(priv->ndev); /* prevent tx timeout */ 2100} 2101 | 1197void gfar_start(struct gfar_private *priv) 1198{ 1199 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1200 u32 tempval; 1201 int i = 0; 1202 1203 /* Enable Rx/Tx hw queues */ 1204 gfar_write(®s->rqueue, priv->rqueue); --- 21 unchanged lines hidden (view full) --- 1226 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); 1227 gfar_write(®s->maccfg1, tempval); 1228 1229 gfar_ints_enable(priv); 1230 1231 netif_trans_update(priv->ndev); /* prevent tx timeout */ 1232} 1233 |
2102static void free_grp_irqs(struct gfar_priv_grp *grp) | 1234static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) |
2103{ | 1235{ |
2104 free_irq(gfar_irq(grp, TX)->irq, grp); 2105 free_irq(gfar_irq(grp, RX)->irq, grp); 2106 free_irq(gfar_irq(grp, ER)->irq, grp); | 1236 struct page *page; 1237 dma_addr_t addr; 1238 1239 page = dev_alloc_page(); 1240 if (unlikely(!page)) 1241 return false; 1242 1243 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); 1244 if (unlikely(dma_mapping_error(rxq->dev, addr))) { 1245 __free_page(page); 1246 1247 return false; 1248 } 1249 1250 rxb->dma = addr; 1251 rxb->page = page; 1252 rxb->page_offset = 0; 1253 1254 return true; |
2107} 2108 | 1255} 1256 |
2109static int register_grp_irqs(struct gfar_priv_grp *grp) | 1257static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) |
2110{ | 1258{ |
2111 struct gfar_private *priv = grp->priv; 2112 struct net_device *dev = priv->ndev; 2113 int err; | 1259 struct gfar_private *priv = netdev_priv(rx_queue->ndev); 1260 struct gfar_extra_stats *estats = &priv->extra_stats; |
2114 | 1261 |
2115 /* If the device has multiple interrupts, register for 2116 * them. Otherwise, only register for the one 2117 */ 2118 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2119 /* Install our interrupt handlers for Error, 2120 * Transmit, and Receive 2121 */ 2122 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, 2123 gfar_irq(grp, ER)->name, grp); 2124 if (err < 0) { 2125 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2126 gfar_irq(grp, ER)->irq); | 1262 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); 1263 atomic64_inc(&estats->rx_alloc_err); 1264} |
2127 | 1265 |
2128 goto err_irq_fail; | 1266static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, 1267 int alloc_cnt) 1268{ 1269 struct rxbd8 *bdp; 1270 struct gfar_rx_buff *rxb; 1271 int i; 1272 1273 i = rx_queue->next_to_use; 1274 bdp = &rx_queue->rx_bd_base[i]; 1275 rxb = &rx_queue->rx_buff[i]; 1276 1277 while (alloc_cnt--) { 1278 /* try reuse page */ 1279 if (unlikely(!rxb->page)) { 1280 if (unlikely(!gfar_new_page(rx_queue, rxb))) { 1281 gfar_rx_alloc_err(rx_queue); 1282 break; 1283 } |
2129 } | 1284 } |
2130 enable_irq_wake(gfar_irq(grp, ER)->irq); | |
2131 | 1285 |
2132 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, 2133 gfar_irq(grp, TX)->name, grp); 2134 if (err < 0) { 2135 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2136 gfar_irq(grp, TX)->irq); 2137 goto tx_irq_fail; | 1286 /* Setup the new RxBD */ 1287 gfar_init_rxbdp(rx_queue, bdp, 1288 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); 1289 1290 /* Update to the next pointer */ 1291 bdp++; 1292 rxb++; 1293 1294 if (unlikely(++i == rx_queue->rx_ring_size)) { 1295 i = 0; 1296 bdp = rx_queue->rx_bd_base; 1297 rxb = rx_queue->rx_buff; |
2138 } | 1298 } |
2139 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, 2140 gfar_irq(grp, RX)->name, grp); 2141 if (err < 0) { 2142 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2143 gfar_irq(grp, RX)->irq); 2144 goto rx_irq_fail; 2145 } 2146 enable_irq_wake(gfar_irq(grp, RX)->irq); | 1299 } |
2147 | 1300 |
2148 } else { 2149 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, 2150 gfar_irq(grp, TX)->name, grp); 2151 if (err < 0) { 2152 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2153 gfar_irq(grp, TX)->irq); 2154 goto err_irq_fail; | 1301 rx_queue->next_to_use = i; 1302 rx_queue->next_to_alloc = i; 1303} 1304 1305static void gfar_init_bds(struct net_device *ndev) 1306{ 1307 struct gfar_private *priv = netdev_priv(ndev); 1308 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1309 struct gfar_priv_tx_q *tx_queue = NULL; 1310 struct gfar_priv_rx_q *rx_queue = NULL; 1311 struct txbd8 *txbdp; 1312 u32 __iomem *rfbptr; 1313 int i, j; 1314 1315 for (i = 0; i < priv->num_tx_queues; i++) { 1316 tx_queue = priv->tx_queue[i]; 1317 /* Initialize some variables in our dev structure */ 1318 tx_queue->num_txbdfree = tx_queue->tx_ring_size; 1319 tx_queue->dirty_tx = tx_queue->tx_bd_base; 1320 tx_queue->cur_tx = tx_queue->tx_bd_base; 1321 tx_queue->skb_curtx = 0; 1322 tx_queue->skb_dirtytx = 0; 1323 1324 /* Initialize Transmit Descriptor Ring */ 1325 txbdp = tx_queue->tx_bd_base; 1326 for (j = 0; j < tx_queue->tx_ring_size; j++) { 1327 txbdp->lstatus = 0; 1328 txbdp->bufPtr = 0; 1329 txbdp++; |
2155 } | 1330 } |
2156 enable_irq_wake(gfar_irq(grp, TX)->irq); | 1331 1332 /* Set the last descriptor in the ring to indicate wrap */ 1333 txbdp--; 1334 txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | 1335 TXBD_WRAP); |
2157 } 2158 | 1336 } 1337 |
2159 return 0; | 1338 rfbptr = ®s->rfbptr0; 1339 for (i = 0; i < priv->num_rx_queues; i++) { 1340 rx_queue = priv->rx_queue[i]; |
2160 | 1341 |
2161rx_irq_fail: 2162 free_irq(gfar_irq(grp, TX)->irq, grp); 2163tx_irq_fail: 2164 free_irq(gfar_irq(grp, ER)->irq, grp); 2165err_irq_fail: 2166 return err; | 1342 rx_queue->next_to_clean = 0; 1343 rx_queue->next_to_use = 0; 1344 rx_queue->next_to_alloc = 0; |
2167 | 1345 |
1346 /* make sure next_to_clean != next_to_use after this 1347 * by leaving at least 1 unused descriptor 1348 */ 1349 gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue)); 1350 1351 rx_queue->rfbptr = rfbptr; 1352 rfbptr += 2; 1353 } |
|
2168} 2169 | 1354} 1355 |
2170static void gfar_free_irq(struct gfar_private *priv) | 1356static int gfar_alloc_skb_resources(struct net_device *ndev) |
2171{ | 1357{ |
2172 int i; | 1358 void *vaddr; 1359 dma_addr_t addr; 1360 int i, j; 1361 struct gfar_private *priv = netdev_priv(ndev); 1362 struct device *dev = priv->dev; 1363 struct gfar_priv_tx_q *tx_queue = NULL; 1364 struct gfar_priv_rx_q *rx_queue = NULL; |
2173 | 1365 |
2174 /* Free the IRQs */ 2175 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2176 for (i = 0; i < priv->num_grps; i++) 2177 free_grp_irqs(&priv->gfargrp[i]); 2178 } else { 2179 for (i = 0; i < priv->num_grps; i++) 2180 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, 2181 &priv->gfargrp[i]); | 1366 priv->total_tx_ring_size = 0; 1367 for (i = 0; i < priv->num_tx_queues; i++) 1368 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; 1369 1370 priv->total_rx_ring_size = 0; 1371 for (i = 0; i < priv->num_rx_queues; i++) 1372 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; 1373 1374 /* Allocate memory for the buffer descriptors */ 1375 vaddr = dma_alloc_coherent(dev, 1376 (priv->total_tx_ring_size * 1377 sizeof(struct txbd8)) + 1378 (priv->total_rx_ring_size * 1379 sizeof(struct rxbd8)), 1380 &addr, GFP_KERNEL); 1381 if (!vaddr) 1382 return -ENOMEM; 1383 1384 for (i = 0; i < priv->num_tx_queues; i++) { 1385 tx_queue = priv->tx_queue[i]; 1386 tx_queue->tx_bd_base = vaddr; 1387 tx_queue->tx_bd_dma_base = addr; 1388 tx_queue->dev = ndev; 1389 /* enet DMA only understands physical addresses */ 1390 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; 1391 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; |
2182 } | 1392 } |
2183} | |
2184 | 1393 |
2185static int gfar_request_irq(struct gfar_private *priv) 2186{ 2187 int err, i, j; | 1394 /* Start the rx descriptor ring where the tx ring leaves off */ 1395 for (i = 0; i < priv->num_rx_queues; i++) { 1396 rx_queue = priv->rx_queue[i]; 1397 rx_queue->rx_bd_base = vaddr; 1398 rx_queue->rx_bd_dma_base = addr; 1399 rx_queue->ndev = ndev; 1400 rx_queue->dev = dev; 1401 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; 1402 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; 1403 } |
2188 | 1404 |
2189 for (i = 0; i < priv->num_grps; i++) { 2190 err = register_grp_irqs(&priv->gfargrp[i]); 2191 if (err) { 2192 for (j = 0; j < i; j++) 2193 free_grp_irqs(&priv->gfargrp[j]); 2194 return err; 2195 } | 1405 /* Setup the skbuff rings */ 1406 for (i = 0; i < priv->num_tx_queues; i++) { 1407 tx_queue = priv->tx_queue[i]; 1408 tx_queue->tx_skbuff = 1409 kmalloc_array(tx_queue->tx_ring_size, 1410 sizeof(*tx_queue->tx_skbuff), 1411 GFP_KERNEL); 1412 if (!tx_queue->tx_skbuff) 1413 goto cleanup; 1414 1415 for (j = 0; j < tx_queue->tx_ring_size; j++) 1416 tx_queue->tx_skbuff[j] = NULL; |
2196 } 2197 | 1417 } 1418 |
1419 for (i = 0; i < priv->num_rx_queues; i++) { 1420 rx_queue = priv->rx_queue[i]; 1421 rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, 1422 sizeof(*rx_queue->rx_buff), 1423 GFP_KERNEL); 1424 if (!rx_queue->rx_buff) 1425 goto cleanup; 1426 } 1427 1428 gfar_init_bds(ndev); 1429 |
|
2198 return 0; | 1430 return 0; |
1431 1432cleanup: 1433 free_skb_resources(priv); 1434 return -ENOMEM; |
|
2199} 2200 2201/* Bring the controller up and running */ 2202int startup_gfar(struct net_device *ndev) 2203{ 2204 struct gfar_private *priv = netdev_priv(ndev); 2205 int err; 2206 --- 21 unchanged lines hidden (view full) --- 2228 2229 enable_napi(priv); 2230 2231 netif_tx_wake_all_queues(ndev); 2232 2233 return 0; 2234} 2235 | 1435} 1436 1437/* Bring the controller up and running */ 1438int startup_gfar(struct net_device *ndev) 1439{ 1440 struct gfar_private *priv = netdev_priv(ndev); 1441 int err; 1442 --- 21 unchanged lines hidden (view full) --- 1464 1465 enable_napi(priv); 1466 1467 netif_tx_wake_all_queues(ndev); 1468 1469 return 0; 1470} 1471 |
2236/* Called when something needs to use the ethernet device 2237 * Returns 0 for success. | 1472static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) 1473{ 1474 struct net_device *ndev = priv->ndev; 1475 struct phy_device *phydev = ndev->phydev; 1476 u32 val = 0; 1477 1478 if (!phydev->duplex) 1479 return val; 1480 1481 if (!priv->pause_aneg_en) { 1482 if (priv->tx_pause_en) 1483 val |= MACCFG1_TX_FLOW; 1484 if (priv->rx_pause_en) 1485 val |= MACCFG1_RX_FLOW; 1486 } else { 1487 u16 lcl_adv, rmt_adv; 1488 u8 flowctrl; 1489 /* get link partner capabilities */ 1490 rmt_adv = 0; 1491 if (phydev->pause) 1492 rmt_adv = LPA_PAUSE_CAP; 1493 if (phydev->asym_pause) 1494 rmt_adv |= LPA_PAUSE_ASYM; 1495 1496 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); 1497 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 1498 if (flowctrl & FLOW_CTRL_TX) 1499 val |= MACCFG1_TX_FLOW; 1500 if (flowctrl & FLOW_CTRL_RX) 1501 val |= MACCFG1_RX_FLOW; 1502 } 1503 1504 return val; 1505} 1506 1507static noinline void gfar_update_link_state(struct gfar_private *priv) 1508{ 1509 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1510 struct net_device *ndev = priv->ndev; 1511 struct phy_device *phydev = ndev->phydev; 1512 struct gfar_priv_rx_q *rx_queue = NULL; 1513 int i; 1514 1515 if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) 1516 return; 1517 1518 if (phydev->link) { 1519 u32 tempval1 = gfar_read(®s->maccfg1); 1520 u32 tempval = gfar_read(®s->maccfg2); 1521 u32 ecntrl = gfar_read(®s->ecntrl); 1522 u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW); 1523 1524 if (phydev->duplex != priv->oldduplex) { 1525 if (!(phydev->duplex)) 1526 tempval &= ~(MACCFG2_FULL_DUPLEX); 1527 else 1528 tempval |= MACCFG2_FULL_DUPLEX; 1529 1530 priv->oldduplex = phydev->duplex; 1531 } 1532 1533 if (phydev->speed != priv->oldspeed) { 1534 switch (phydev->speed) { 1535 case 1000: 1536 tempval = 1537 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); 1538 1539 ecntrl &= ~(ECNTRL_R100); 1540 break; 1541 case 100: 1542 case 10: 1543 tempval = 1544 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 1545 1546 /* Reduced mode distinguishes 1547 * between 10 and 100 1548 */ 1549 if (phydev->speed == SPEED_100) 1550 ecntrl |= ECNTRL_R100; 1551 else 1552 ecntrl &= ~(ECNTRL_R100); 1553 break; 1554 default: 1555 netif_warn(priv, link, priv->ndev, 1556 "Ack! Speed (%d) is not 10/100/1000!\n", 1557 phydev->speed); 1558 break; 1559 } 1560 1561 priv->oldspeed = phydev->speed; 1562 } 1563 1564 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 1565 tempval1 |= gfar_get_flowctrl_cfg(priv); 1566 1567 /* Turn last free buffer recording on */ 1568 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { 1569 for (i = 0; i < priv->num_rx_queues; i++) { 1570 u32 bdp_dma; 1571 1572 rx_queue = priv->rx_queue[i]; 1573 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); 1574 gfar_write(rx_queue->rfbptr, bdp_dma); 1575 } 1576 1577 priv->tx_actual_en = 1; 1578 } 1579 1580 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) 1581 priv->tx_actual_en = 0; 1582 1583 gfar_write(®s->maccfg1, tempval1); 1584 gfar_write(®s->maccfg2, tempval); 1585 gfar_write(®s->ecntrl, ecntrl); 1586 1587 if (!priv->oldlink) 1588 priv->oldlink = 1; 1589 1590 } else if (priv->oldlink) { 1591 priv->oldlink = 0; 1592 priv->oldspeed = 0; 1593 priv->oldduplex = -1; 1594 } 1595 1596 if (netif_msg_link(priv)) 1597 phy_print_status(phydev); 1598} 1599 1600/* Called every time the controller might need to be made 1601 * aware of new link state. 
The PHY code conveys this 1602 * information through variables in the phydev structure, and this 1603 * function converts those variables into the appropriate 1604 * register values, and can bring down the device if needed. |
2238 */ | 1605 */ |
2239static int gfar_enet_open(struct net_device *dev) | 1606static void adjust_link(struct net_device *dev) |
2240{ 2241 struct gfar_private *priv = netdev_priv(dev); | 1607{ 1608 struct gfar_private *priv = netdev_priv(dev); |
2242 int err; | 1609 struct phy_device *phydev = dev->phydev; |
2243 | 1610 |
2244 err = init_phy(dev); 2245 if (err) 2246 return err; | 1611 if (unlikely(phydev->link != priv->oldlink || 1612 (phydev->link && (phydev->duplex != priv->oldduplex || 1613 phydev->speed != priv->oldspeed)))) 1614 gfar_update_link_state(priv); 1615} |
2247 | 1616 |
2248 err = gfar_request_irq(priv); 2249 if (err) 2250 return err; | 1617/* Initialize TBI PHY interface for communicating with the 1618 * SERDES lynx PHY on the chip. We communicate with this PHY 1619 * through the MDIO bus on each controller, treating it as a 1620 * "normal" PHY at the address found in the TBIPA register. We assume 1621 * that the TBIPA register is valid. Either the MDIO bus code will set 1622 * it to a value that doesn't conflict with other PHYs on the bus, or the 1623 * value doesn't matter, as there are no other PHYs on the bus. 1624 */ 1625static void gfar_configure_serdes(struct net_device *dev) 1626{ 1627 struct gfar_private *priv = netdev_priv(dev); 1628 struct phy_device *tbiphy; |
2251 | 1629 |
2252 err = startup_gfar(dev); 2253 if (err) 2254 return err; | 1630 if (!priv->tbi_node) { 1631 dev_warn(&dev->dev, "error: SGMII mode requires that the " 1632 "device tree specify a tbi-handle\n"); 1633 return; 1634 } |
2255 | 1635 |
2256 return err; | 1636 tbiphy = of_phy_find_device(priv->tbi_node); 1637 if (!tbiphy) { 1638 dev_err(&dev->dev, "error: Could not get TBI device\n"); 1639 return; 1640 } 1641 1642 /* If the link is already up, we must already be ok, and don't need to 1643 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured 1644 * everything for us? Resetting it takes the link down and requires 1645 * several seconds for it to come back. 1646 */ 1647 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { 1648 put_device(&tbiphy->mdio.dev); 1649 return; 1650 } 1651 1652 /* Single clk mode, mii mode off(for serdes communication) */ 1653 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); 1654 1655 phy_write(tbiphy, MII_ADVERTISE, 1656 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 1657 ADVERTISE_1000XPSE_ASYM); 1658 1659 phy_write(tbiphy, MII_BMCR, 1660 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | 1661 BMCR_SPEED1000); 1662 1663 put_device(&tbiphy->mdio.dev); |
2257} 2258 | 1664} 1665 |
1666/* Initializes driver's PHY state, and attaches to the PHY. 1667 * Returns 0 on success. 1668 */ 1669static int init_phy(struct net_device *dev) 1670{ 1671 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 1672 struct gfar_private *priv = netdev_priv(dev); 1673 phy_interface_t interface; 1674 struct phy_device *phydev; 1675 struct ethtool_eee edata; 1676 1677 linkmode_set_bit_array(phy_10_100_features_array, 1678 ARRAY_SIZE(phy_10_100_features_array), 1679 mask); 1680 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); 1681 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); 1682 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) 1683 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask); 1684 1685 priv->oldlink = 0; 1686 priv->oldspeed = 0; 1687 priv->oldduplex = -1; 1688 1689 interface = gfar_get_interface(dev); 1690 1691 phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, 1692 interface); 1693 if (!phydev) { 1694 dev_err(&dev->dev, "could not attach to PHY\n"); 1695 return -ENODEV; 1696 } 1697 1698 if (interface == PHY_INTERFACE_MODE_SGMII) 1699 gfar_configure_serdes(dev); 1700 1701 /* Remove any features not supported by the controller */ 1702 linkmode_and(phydev->supported, phydev->supported, mask); 1703 linkmode_copy(phydev->advertising, phydev->supported); 1704 1705 /* Add support for flow control */ 1706 phy_support_asym_pause(phydev); 1707 1708 /* disable EEE autoneg, EEE not supported by eTSEC */ 1709 memset(&edata, 0, sizeof(struct ethtool_eee)); 1710 phy_ethtool_set_eee(phydev, &edata); 1711 1712 return 0; 1713} 1714 |
|
2259static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) 2260{ 2261 struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN); 2262 2263 memset(fcb, 0, GMAC_FCB_LEN); 2264 2265 return fcb; 2266} --- 312 unchanged lines hidden (view full) --- 2579 DMA_TO_DEVICE); 2580 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2581 } 2582 gfar_wmb(); 2583 dev_kfree_skb_any(skb); 2584 return NETDEV_TX_OK; 2585} 2586 | 1715static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) 1716{ 1717 struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN); 1718 1719 memset(fcb, 0, GMAC_FCB_LEN); 1720 1721 return fcb; 1722} --- 312 unchanged lines hidden (view full) --- 2035 DMA_TO_DEVICE); 2036 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2037 } 2038 gfar_wmb(); 2039 dev_kfree_skb_any(skb); 2040 return NETDEV_TX_OK; 2041} 2042 |
2587/* Stops the kernel queue, and halts the controller */ 2588static int gfar_close(struct net_device *dev) 2589{ 2590 struct gfar_private *priv = netdev_priv(dev); 2591 2592 cancel_work_sync(&priv->reset_task); 2593 stop_gfar(dev); 2594 2595 /* Disconnect from the PHY */ 2596 phy_disconnect(dev->phydev); 2597 2598 gfar_free_irq(priv); 2599 2600 return 0; 2601} 2602 | |
2603/* Changes the mac address if the controller is not running. */ 2604static int gfar_set_mac_address(struct net_device *dev) 2605{ 2606 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 2607 2608 return 0; 2609} 2610 --- 45 unchanged lines hidden (view full) --- 2656static void gfar_timeout(struct net_device *dev) 2657{ 2658 struct gfar_private *priv = netdev_priv(dev); 2659 2660 dev->stats.tx_errors++; 2661 schedule_work(&priv->reset_task); 2662} 2663 | 2043/* Changes the mac address if the controller is not running. */ 2044static int gfar_set_mac_address(struct net_device *dev) 2045{ 2046 gfar_set_mac_for_addr(dev, 0, dev->dev_addr); 2047 2048 return 0; 2049} 2050 --- 45 unchanged lines hidden (view full) --- 2096static void gfar_timeout(struct net_device *dev) 2097{ 2098 struct gfar_private *priv = netdev_priv(dev); 2099 2100 dev->stats.tx_errors++; 2101 schedule_work(&priv->reset_task); 2102} 2103 |
2104static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) 2105{ 2106 struct hwtstamp_config config; 2107 struct gfar_private *priv = netdev_priv(netdev); 2108 2109 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2110 return -EFAULT; 2111 2112 /* reserved for future extensions */ 2113 if (config.flags) 2114 return -EINVAL; 2115 2116 switch (config.tx_type) { 2117 case HWTSTAMP_TX_OFF: 2118 priv->hwts_tx_en = 0; 2119 break; 2120 case HWTSTAMP_TX_ON: 2121 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 2122 return -ERANGE; 2123 priv->hwts_tx_en = 1; 2124 break; 2125 default: 2126 return -ERANGE; 2127 } 2128 2129 switch (config.rx_filter) { 2130 case HWTSTAMP_FILTER_NONE: 2131 if (priv->hwts_rx_en) { 2132 priv->hwts_rx_en = 0; 2133 reset_gfar(netdev); 2134 } 2135 break; 2136 default: 2137 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 2138 return -ERANGE; 2139 if (!priv->hwts_rx_en) { 2140 priv->hwts_rx_en = 1; 2141 reset_gfar(netdev); 2142 } 2143 config.rx_filter = HWTSTAMP_FILTER_ALL; 2144 break; 2145 } 2146 2147 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 2148 -EFAULT : 0; 2149} 2150 2151static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) 2152{ 2153 struct hwtstamp_config config; 2154 struct gfar_private *priv = netdev_priv(netdev); 2155 2156 config.flags = 0; 2157 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; 2158 config.rx_filter = (priv->hwts_rx_en ? 2159 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); 2160 2161 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 2162 -EFAULT : 0; 2163} 2164 2165static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2166{ 2167 struct phy_device *phydev = dev->phydev; 2168 2169 if (!netif_running(dev)) 2170 return -EINVAL; 2171 2172 if (cmd == SIOCSHWTSTAMP) 2173 return gfar_hwtstamp_set(dev, rq); 2174 if (cmd == SIOCGHWTSTAMP) 2175 return gfar_hwtstamp_get(dev, rq); 2176 2177 if (!phydev) 2178 return -ENODEV; 2179 2180 return phy_mii_ioctl(phydev, rq, cmd); 2181} 2182 |
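The gfar_hwtstamp_set()/gfar_hwtstamp_get() handlers above are reached from user space through the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls with a struct hwtstamp_config passed via ifr_data. A minimal user-space sketch of that call path follows; the interface name "eth0" is an assumption for illustration, and HWTSTAMP_TX_ON only succeeds on devices with the FSL_GIANFAR_DEV_HAS_TIMER capability (any non-NONE rx_filter request is granted as HWTSTAMP_FILTER_ALL, as the switch above shows).

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.flags = 0,                        /* reserved, must be zero */
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* assumed interface name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("granted rx_filter=%d tx_type=%d\n",
		       cfg.rx_filter, cfg.tx_type);

	close(fd);
	return 0;
}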
|
2664/* Interrupt Handler for Transmit complete */ 2665static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) 2666{ 2667 struct net_device *dev = tx_queue->dev; 2668 struct netdev_queue *txq; 2669 struct gfar_private *priv = netdev_priv(dev); 2670 struct txbd8 *bdp, *next = NULL; 2671 struct txbd8 *lbdp = NULL; --- 91 unchanged lines hidden (view full) --- 2763 2764 /* Update dirty indicators */ 2765 tx_queue->skb_dirtytx = skb_dirtytx; 2766 tx_queue->dirty_tx = bdp; 2767 2768 netdev_tx_completed_queue(txq, howmany, bytes_sent); 2769} 2770 | 2183/* Interrupt Handler for Transmit complete */ 2184static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) 2185{ 2186 struct net_device *dev = tx_queue->dev; 2187 struct netdev_queue *txq; 2188 struct gfar_private *priv = netdev_priv(dev); 2189 struct txbd8 *bdp, *next = NULL; 2190 struct txbd8 *lbdp = NULL; --- 91 unchanged lines hidden (view full) --- 2282 2283 /* Update dirty indicators */ 2284 tx_queue->skb_dirtytx = skb_dirtytx; 2285 tx_queue->dirty_tx = bdp; 2286 2287 netdev_tx_completed_queue(txq, howmany, bytes_sent); 2288} 2289 |
2771static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) 2772{ 2773 struct page *page; 2774 dma_addr_t addr; 2775 2776 page = dev_alloc_page(); 2777 if (unlikely(!page)) 2778 return false; 2779 2780 addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); 2781 if (unlikely(dma_mapping_error(rxq->dev, addr))) { 2782 __free_page(page); 2783 2784 return false; 2785 } 2786 2787 rxb->dma = addr; 2788 rxb->page = page; 2789 rxb->page_offset = 0; 2790 2791 return true; 2792} 2793 2794static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) 2795{ 2796 struct gfar_private *priv = netdev_priv(rx_queue->ndev); 2797 struct gfar_extra_stats *estats = &priv->extra_stats; 2798 2799 netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); 2800 atomic64_inc(&estats->rx_alloc_err); 2801} 2802 2803static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, 2804 int alloc_cnt) 2805{ 2806 struct rxbd8 *bdp; 2807 struct gfar_rx_buff *rxb; 2808 int i; 2809 2810 i = rx_queue->next_to_use; 2811 bdp = &rx_queue->rx_bd_base[i]; 2812 rxb = &rx_queue->rx_buff[i]; 2813 2814 while (alloc_cnt--) { 2815 /* try reuse page */ 2816 if (unlikely(!rxb->page)) { 2817 if (unlikely(!gfar_new_page(rx_queue, rxb))) { 2818 gfar_rx_alloc_err(rx_queue); 2819 break; 2820 } 2821 } 2822 2823 /* Setup the new RxBD */ 2824 gfar_init_rxbdp(rx_queue, bdp, 2825 rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); 2826 2827 /* Update to the next pointer */ 2828 bdp++; 2829 rxb++; 2830 2831 if (unlikely(++i == rx_queue->rx_ring_size)) { 2832 i = 0; 2833 bdp = rx_queue->rx_bd_base; 2834 rxb = rx_queue->rx_buff; 2835 } 2836 } 2837 2838 rx_queue->next_to_use = i; 2839 rx_queue->next_to_alloc = i; 2840} 2841 | |
2842static void count_errors(u32 lstatus, struct net_device *ndev) 2843{ 2844 struct gfar_private *priv = netdev_priv(ndev); 2845 struct net_device_stats *stats = &ndev->stats; 2846 struct gfar_extra_stats *estats = &priv->extra_stats; 2847 2848 /* If the packet was truncated, none of the other errors matter */ 2849 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { --- 473 unchanged lines hidden (view full) --- 3323 imask |= IMASK_TX_DEFAULT; 3324 gfar_write(®s->imask, imask); 3325 spin_unlock_irq(&gfargrp->grplock); 3326 } 3327 3328 return 0; 3329} 3330 | 2290static void count_errors(u32 lstatus, struct net_device *ndev) 2291{ 2292 struct gfar_private *priv = netdev_priv(ndev); 2293 struct net_device_stats *stats = &ndev->stats; 2294 struct gfar_extra_stats *estats = &priv->extra_stats; 2295 2296 /* If the packet was truncated, none of the other errors matter */ 2297 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { --- 473 unchanged lines hidden (view full) --- 2771 imask |= IMASK_TX_DEFAULT; 2772 gfar_write(®s->imask, imask); 2773 spin_unlock_irq(&gfargrp->grplock); 2774 } 2775 2776 return 0; 2777} 2778 |
2779/* GFAR error interrupt handler */ 2780static irqreturn_t gfar_error(int irq, void *grp_id) 2781{ 2782 struct gfar_priv_grp *gfargrp = grp_id; 2783 struct gfar __iomem *regs = gfargrp->regs; 2784 struct gfar_private *priv= gfargrp->priv; 2785 struct net_device *dev = priv->ndev; |
|
3331 | 2786 |
2787 /* Save ievent for future reference */ 2788 u32 events = gfar_read(®s->ievent); 2789 2790 /* Clear IEVENT */ 2791 gfar_write(®s->ievent, events & IEVENT_ERR_MASK); 2792 2793 /* Magic Packet is not an error. */ 2794 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 2795 (events & IEVENT_MAG)) 2796 events &= ~IEVENT_MAG; 2797 2798 /* Hmm... */ 2799 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 2800 netdev_dbg(dev, 2801 "error interrupt (ievent=0x%08x imask=0x%08x)\n", 2802 events, gfar_read(®s->imask)); 2803 2804 /* Update the error counters */ 2805 if (events & IEVENT_TXE) { 2806 dev->stats.tx_errors++; 2807 2808 if (events & IEVENT_LC) 2809 dev->stats.tx_window_errors++; 2810 if (events & IEVENT_CRL) 2811 dev->stats.tx_aborted_errors++; 2812 if (events & IEVENT_XFUN) { 2813 netif_dbg(priv, tx_err, dev, 2814 "TX FIFO underrun, packet dropped\n"); 2815 dev->stats.tx_dropped++; 2816 atomic64_inc(&priv->extra_stats.tx_underrun); 2817 2818 schedule_work(&priv->reset_task); 2819 } 2820 netif_dbg(priv, tx_err, dev, "Transmit Error\n"); 2821 } 2822 if (events & IEVENT_BSY) { 2823 dev->stats.rx_over_errors++; 2824 atomic64_inc(&priv->extra_stats.rx_bsy); 2825 2826 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", 2827 gfar_read(®s->rstat)); 2828 } 2829 if (events & IEVENT_BABR) { 2830 dev->stats.rx_errors++; 2831 atomic64_inc(&priv->extra_stats.rx_babr); 2832 2833 netif_dbg(priv, rx_err, dev, "babbling RX error\n"); 2834 } 2835 if (events & IEVENT_EBERR) { 2836 atomic64_inc(&priv->extra_stats.eberr); 2837 netif_dbg(priv, rx_err, dev, "bus error\n"); 2838 } 2839 if (events & IEVENT_RXC) 2840 netif_dbg(priv, rx_status, dev, "control frame\n"); 2841 2842 if (events & IEVENT_BABT) { 2843 atomic64_inc(&priv->extra_stats.tx_babt); 2844 netif_dbg(priv, tx_err, dev, "babbling TX error\n"); 2845 } 2846 return IRQ_HANDLED; 2847} 2848 2849/* The interrupt handler for devices with one interrupt */ 2850static irqreturn_t gfar_interrupt(int irq, void *grp_id) 2851{ 2852 struct gfar_priv_grp *gfargrp = grp_id; 2853 2854 /* Save ievent for future reference */ 2855 u32 events = gfar_read(&gfargrp->regs->ievent); 2856 2857 /* Check for reception */ 2858 if (events & IEVENT_RX_MASK) 2859 gfar_receive(irq, grp_id); 2860 2861 /* Check for transmit completion */ 2862 if (events & IEVENT_TX_MASK) 2863 gfar_transmit(irq, grp_id); 2864 2865 /* Check for errors */ 2866 if (events & IEVENT_ERR_MASK) 2867 gfar_error(irq, grp_id); 2868 2869 return IRQ_HANDLED; 2870} 2871 |
|
3332#ifdef CONFIG_NET_POLL_CONTROLLER 3333/* Polling 'interrupt' - used by things like netconsole to send skbs 3334 * without having to re-enable interrupts. It's not called while 3335 * the interrupt routine is executing. 3336 */ 3337static void gfar_netpoll(struct net_device *dev) 3338{ 3339 struct gfar_private *priv = netdev_priv(dev); --- 19 unchanged lines hidden (view full) --- 3359 disable_irq(gfar_irq(grp, TX)->irq); 3360 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); 3361 enable_irq(gfar_irq(grp, TX)->irq); 3362 } 3363 } 3364} 3365#endif 3366 | 2872#ifdef CONFIG_NET_POLL_CONTROLLER 2873/* Polling 'interrupt' - used by things like netconsole to send skbs 2874 * without having to re-enable interrupts. It's not called while 2875 * the interrupt routine is executing. 2876 */ 2877static void gfar_netpoll(struct net_device *dev) 2878{ 2879 struct gfar_private *priv = netdev_priv(dev); --- 19 unchanged lines hidden (view full) --- 2899 disable_irq(gfar_irq(grp, TX)->irq); 2900 gfar_interrupt(gfar_irq(grp, TX)->irq, grp); 2901 enable_irq(gfar_irq(grp, TX)->irq); 2902 } 2903 } 2904} 2905#endif 2906 |
3367/* The interrupt handler for devices with one interrupt */ 3368static irqreturn_t gfar_interrupt(int irq, void *grp_id) | 2907static void free_grp_irqs(struct gfar_priv_grp *grp) |
3369{ | 2908{ |
3370 struct gfar_priv_grp *gfargrp = grp_id; | 2909 free_irq(gfar_irq(grp, TX)->irq, grp); 2910 free_irq(gfar_irq(grp, RX)->irq, grp); 2911 free_irq(gfar_irq(grp, ER)->irq, grp); 2912} |
3371 | 2913 |
3372 /* Save ievent for future reference */ 3373 u32 events = gfar_read(&gfargrp->regs->ievent); | 2914static int register_grp_irqs(struct gfar_priv_grp *grp) 2915{ 2916 struct gfar_private *priv = grp->priv; 2917 struct net_device *dev = priv->ndev; 2918 int err; |
3374 | 2919 |
3375 /* Check for reception */ 3376 if (events & IEVENT_RX_MASK) 3377 gfar_receive(irq, grp_id); | 2920 /* If the device has multiple interrupts, register for 2921 * them. Otherwise, only register for the one 2922 */ 2923 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2924 /* Install our interrupt handlers for Error, 2925 * Transmit, and Receive 2926 */ 2927 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, 2928 gfar_irq(grp, ER)->name, grp); 2929 if (err < 0) { 2930 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2931 gfar_irq(grp, ER)->irq); |
3378 | 2932 |
3379 /* Check for transmit completion */ 3380 if (events & IEVENT_TX_MASK) 3381 gfar_transmit(irq, grp_id); | 2933 goto err_irq_fail; 2934 } 2935 enable_irq_wake(gfar_irq(grp, ER)->irq); |
3382 | 2936 |
3383 /* Check for errors */ 3384 if (events & IEVENT_ERR_MASK) 3385 gfar_error(irq, grp_id); | 2937 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, 2938 gfar_irq(grp, TX)->name, grp); 2939 if (err < 0) { 2940 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2941 gfar_irq(grp, TX)->irq); 2942 goto tx_irq_fail; 2943 } 2944 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, 2945 gfar_irq(grp, RX)->name, grp); 2946 if (err < 0) { 2947 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2948 gfar_irq(grp, RX)->irq); 2949 goto rx_irq_fail; 2950 } 2951 enable_irq_wake(gfar_irq(grp, RX)->irq); |
3386 | 2952 |
3387 return IRQ_HANDLED; | 2953 } else { 2954 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, 2955 gfar_irq(grp, TX)->name, grp); 2956 if (err < 0) { 2957 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2958 gfar_irq(grp, TX)->irq); 2959 goto err_irq_fail; 2960 } 2961 enable_irq_wake(gfar_irq(grp, TX)->irq); 2962 } 2963 2964 return 0; 2965 2966rx_irq_fail: 2967 free_irq(gfar_irq(grp, TX)->irq, grp); 2968tx_irq_fail: 2969 free_irq(gfar_irq(grp, ER)->irq, grp); 2970err_irq_fail: 2971 return err; 2972 |
3388} 3389 | 2973} 2974 |
3390/* Called every time the controller might need to be made 3391 * aware of new link state. The PHY code conveys this 3392 * information through variables in the phydev structure, and this 3393 * function converts those variables into the appropriate 3394 * register values, and can bring down the device if needed. | 2975static void gfar_free_irq(struct gfar_private *priv) 2976{ 2977 int i; 2978 2979 /* Free the IRQs */ 2980 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2981 for (i = 0; i < priv->num_grps; i++) 2982 free_grp_irqs(&priv->gfargrp[i]); 2983 } else { 2984 for (i = 0; i < priv->num_grps; i++) 2985 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, 2986 &priv->gfargrp[i]); 2987 } 2988} 2989 2990static int gfar_request_irq(struct gfar_private *priv) 2991{ 2992 int err, i, j; 2993 2994 for (i = 0; i < priv->num_grps; i++) { 2995 err = register_grp_irqs(&priv->gfargrp[i]); 2996 if (err) { 2997 for (j = 0; j < i; j++) 2998 free_grp_irqs(&priv->gfargrp[j]); 2999 return err; 3000 } 3001 } 3002 3003 return 0; 3004} 3005 3006/* Called when something needs to use the ethernet device 3007 * Returns 0 for success. |
3395 */ | 3008 */ |
3396static void adjust_link(struct net_device *dev) | 3009static int gfar_enet_open(struct net_device *dev) |
3397{ 3398 struct gfar_private *priv = netdev_priv(dev); | 3010{ 3011 struct gfar_private *priv = netdev_priv(dev); |
3399 struct phy_device *phydev = dev->phydev; | 3012 int err; |
3400 | 3013 |
3401 if (unlikely(phydev->link != priv->oldlink || 3402 (phydev->link && (phydev->duplex != priv->oldduplex || 3403 phydev->speed != priv->oldspeed)))) 3404 gfar_update_link_state(priv); | 3014 err = init_phy(dev); 3015 if (err) 3016 return err; 3017 3018 err = gfar_request_irq(priv); 3019 if (err) 3020 return err; 3021 3022 err = startup_gfar(dev); 3023 if (err) 3024 return err; 3025 3026 return err; |
3405} 3406 | 3027} 3028 |
3029/* Stops the kernel queue, and halts the controller */ 3030static int gfar_close(struct net_device *dev) 3031{ 3032 struct gfar_private *priv = netdev_priv(dev); 3033 3034 cancel_work_sync(&priv->reset_task); 3035 stop_gfar(dev); 3036 3037 /* Disconnect from the PHY */ 3038 phy_disconnect(dev->phydev); 3039 3040 gfar_free_irq(priv); 3041 3042 return 0; 3043} 3044 3045/* Clears each of the exact match registers to zero, so they 3046 * don't interfere with normal reception 3047 */ 3048static void gfar_clear_exact_match(struct net_device *dev) 3049{ 3050 int idx; 3051 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 3052 3053 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) 3054 gfar_set_mac_for_addr(dev, idx, zero_arr); 3055} 3056 |
|
3407/* Update the hash table based on the current list of multicast 3408 * addresses we subscribe to. Also, change the promiscuity of 3409 * the device based on the flags (this function is called 3410 * whenever dev->flags is changed 3411 */ 3412static void gfar_set_multi(struct net_device *dev) 3413{ 3414 struct netdev_hw_addr *ha; --- 75 unchanged lines hidden (view full) --- 3490 gfar_set_mac_for_addr(dev, idx, ha->addr); 3491 idx++; 3492 } else 3493 gfar_set_hash_for_addr(dev, ha->addr); 3494 } 3495 } 3496} 3497 | 3057/* Update the hash table based on the current list of multicast 3058 * addresses we subscribe to. Also, change the promiscuity of 3059 * the device based on the flags (this function is called 3060 * whenever dev->flags is changed 3061 */ 3062static void gfar_set_multi(struct net_device *dev) 3063{ 3064 struct netdev_hw_addr *ha; --- 75 unchanged lines hidden (view full) --- 3140 gfar_set_mac_for_addr(dev, idx, ha->addr); 3141 idx++; 3142 } else 3143 gfar_set_hash_for_addr(dev, ha->addr); 3144 } 3145 } 3146} 3147 |
3498 3499/* Clears each of the exact match registers to zero, so they 3500 * don't interfere with normal reception 3501 */ 3502static void gfar_clear_exact_match(struct net_device *dev) | 3148void gfar_mac_reset(struct gfar_private *priv) |
3503{ | 3149{ |
3504 int idx; 3505 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; | 3150 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3151 u32 tempval; |
3506 | 3152 |
3507 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) 3508 gfar_set_mac_for_addr(dev, idx, zero_arr); 3509} | 3153 /* Reset MAC layer */ 3154 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
3510 | 3155 |
3511/* Set the appropriate hash bit for the given addr */ 3512/* The algorithm works like so: 3513 * 1) Take the Destination Address (ie the multicast address), and 3514 * do a CRC on it (little endian), and reverse the bits of the 3515 * result. 3516 * 2) Use the 8 most significant bits as a hash into a 256-entry 3517 * table. The table is controlled through 8 32-bit registers: 3518 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is 3519 * entry 255. This means that the 3 most significant bits of the 3520 * hash index select which gaddr register to use, and the other 3521 * 5 bits indicate which bit (assuming an IBM numbering scheme, which 3522 * for PowerPC (tm) is usually the case) in the register holds 3523 * the entry. 3524 */ 3525static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) 3526{ 3527 u32 tempval; 3528 struct gfar_private *priv = netdev_priv(dev); 3529 u32 result = ether_crc(ETH_ALEN, addr); 3530 int width = priv->hash_width; 3531 u8 whichbit = (result >> (32 - width)) & 0x1f; 3532 u8 whichreg = result >> (32 - width + 5); 3533 u32 value = (1 << (31-whichbit)); | 3156 /* We need to delay at least 3 TX clocks */ 3157 udelay(3);
3534 | 3158 |
3535 tempval = gfar_read(priv->hash_regs[whichreg]); 3536 tempval |= value; 3537 gfar_write(priv->hash_regs[whichreg], tempval); 3538} | 3159 /* the soft reset bit is not self-resetting, so we need to 3160 * clear it before resuming normal operation 3161 */ 3162 gfar_write(&regs->maccfg1, 0);
3539 | 3163 |
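The hash-bit selection that the comment above gfar_set_hash_for_addr() describes (CRC of the address, top bits used as a hash index, IBM bit numbering inside the register) can be sanity-checked with a small host-side sketch. The crc value below is a stand-in for what ether_crc(ETH_ALEN, addr) would return in the kernel, and width stands in for priv->hash_width (8 for the 256-entry table described in the comment); both are assumptions for illustration, not values taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the whichreg/whichbit/value arithmetic in gfar_set_hash_for_addr() */
static void gfar_hash_index(uint32_t crc, int width,
                            unsigned *reg, unsigned *bit, uint32_t *mask)
{
	*bit = (crc >> (32 - width)) & 0x1f;  /* low 5 bits of the hash index */
	*reg = crc >> (32 - width + 5);       /* remaining high bits pick the register */
	*mask = 1u << (31 - *bit);            /* IBM numbering: bit 0 is the MSB */
}

int main(void)
{
	unsigned reg, bit;
	uint32_t mask;

	/* 0xDEADBEEF is an arbitrary example CRC, not a real address hash */
	gfar_hash_index(0xDEADBEEF, 8, &reg, &bit, &mask);
	printf("gaddr%u |= 0x%08x (bit %u)\n", reg, (unsigned)mask, bit);
	return 0;
}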
3164 udelay(3); |
|
3540 | 3165 |
3541/* There are multiple MAC Address register pairs on some controllers 3542 * This function sets the numth pair to a given address 3543 */ 3544static void gfar_set_mac_for_addr(struct net_device *dev, int num, 3545 const u8 *addr) | 3166 gfar_rx_offload_en(priv); 3167 3168 /* Initialize the max receive frame/buffer lengths */ 3169 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE); 3170 gfar_write(&regs->mrblr, GFAR_RXB_SIZE); 3171 3172 /* Initialize the Minimum Frame Length Register */ 3173 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); 3174 3175 /* Initialize MACCFG2. */ 3176 tempval = MACCFG2_INIT_SETTINGS; 3177 3178 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 3179 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1, 3180 * and by checking RxBD[LG] and discarding larger than MAXFRM. 3181 */ 3182 if (gfar_has_errata(priv, GFAR_ERRATA_74)) 3183 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; 3184 3185 gfar_write(&regs->maccfg2, tempval); 3186 3187 /* Clear mac addr hash registers */ 3188 gfar_write(&regs->igaddr0, 0); 3189 gfar_write(&regs->igaddr1, 0); 3190 gfar_write(&regs->igaddr2, 0); 3191 gfar_write(&regs->igaddr3, 0); 3192 gfar_write(&regs->igaddr4, 0); 3193 gfar_write(&regs->igaddr5, 0); 3194 gfar_write(&regs->igaddr6, 0); 3195 gfar_write(&regs->igaddr7, 0); 3196 3197 gfar_write(&regs->gaddr0, 0); 3198 gfar_write(&regs->gaddr1, 0); 3199 gfar_write(&regs->gaddr2, 0); 3200 gfar_write(&regs->gaddr3, 0); 3201 gfar_write(&regs->gaddr4, 0); 3202 gfar_write(&regs->gaddr5, 0); 3203 gfar_write(&regs->gaddr6, 0); 3204 gfar_write(&regs->gaddr7, 0); 3205 3206 if (priv->extended_hash) 3207 gfar_clear_exact_match(priv->ndev); 3208 3209 gfar_mac_rx_config(priv); 3210 3211 gfar_mac_tx_config(priv); 3212 3213 gfar_set_mac_address(priv->ndev); 3214 3215 gfar_set_multi(priv->ndev); 3216 3217 /* clear ievent and imask before configuring coalescing */ 3218 gfar_ints_disable(priv); 3219 3220 /* Configure the coalescing support */ 3221 gfar_configure_coalescing_all(priv); 3222} 3223 3224static void gfar_hw_init(struct gfar_private *priv)
3546{ | 3225{ |
3547 struct gfar_private *priv = netdev_priv(dev); | |
3548 struct gfar __iomem *regs = priv->gfargrp[0].regs; | 3226 struct gfar __iomem *regs = priv->gfargrp[0].regs; |
3549 u32 tempval; 3550 u32 __iomem *macptr = &regs->macstnaddr1; | 3227 u32 attrs;
3551 | 3228 |
3552 macptr += num*2; | 3229 /* Stop the DMA engine now, in case it was running before 3230 * (The firmware could have used it, and left it running). 3231 */ 3232 gfar_halt(priv); |
3553 | 3233 |
3554 /* For a station address of 0x12345678ABCD in transmission 3555 * order (BE), MACnADDR1 is set to 0xCDAB7856 and 3556 * MACnADDR2 is set to 0x34120000. | 3234 gfar_mac_reset(priv); 3235 3236 /* Zero out the rmon mib registers if it has them */ 3237 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 3238 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); 3239 3240 /* Mask off the CAM interrupts */ 3241 gfar_write(&regs->rmon.cam1, 0xffffffff); 3242 gfar_write(&regs->rmon.cam2, 0xffffffff); 3243 } 3244 3245 /* Initialize ECNTRL */ 3246 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); 3247 3248 /* Set the extraction length and index */ 3249 attrs = ATTRELI_EL(priv->rx_stash_size) | 3250 ATTRELI_EI(priv->rx_stash_index); 3251 3252 gfar_write(&regs->attreli, attrs); 3253 3254 /* Start with defaults, and add stashing 3255 * depending on driver parameters
3557 */ | 3256 */ |
3558 tempval = (addr[5] << 24) | (addr[4] << 16) | 3559 (addr[3] << 8) | addr[2]; | 3257 attrs = ATTR_INIT_SETTINGS; |
3560 | 3258 |
3561 gfar_write(macptr, tempval); | 3259 if (priv->bd_stash_en) 3260 attrs |= ATTR_BDSTASH; |
3562 | 3261 |
3563 tempval = (addr[1] << 24) | (addr[0] << 16); | 3262 if (priv->rx_stash_size != 0) 3263 attrs |= ATTR_BUFSTASH; |
3564 | 3264 |
3565 gfar_write(macptr+1, tempval); | 3265 gfar_write(&regs->attr, attrs); 3266 3267 /* FIFO configs */ 3268 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR); 3269 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); 3270 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); 3271 3272 /* Program the interrupt steering regs, only for MG devices */ 3273 if (priv->num_grps > 1) 3274 gfar_write_isrg(priv);
3566} 3567 | 3275} 3276 |
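The register packing described in the comment above can be reproduced with a short host-side sketch; it packs the example station address 0x12345678ABCD exactly like the two tempval assignments in gfar_set_mac_for_addr() and should print MACnADDR1=0xCDAB7856 and MACnADDR2=0x34120000.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 0x12345678ABCD in transmission (big-endian) order, as in the comment */
	const uint8_t addr[6] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD };
	uint32_t macaddr1, macaddr2;

	macaddr1 = ((uint32_t)addr[5] << 24) | ((uint32_t)addr[4] << 16) |
		   ((uint32_t)addr[3] << 8) | addr[2];
	macaddr2 = ((uint32_t)addr[1] << 24) | ((uint32_t)addr[0] << 16);

	/* Expected: MACnADDR1=0xCDAB7856 MACnADDR2=0x34120000 */
	printf("MACnADDR1=0x%08X MACnADDR2=0x%08X\n",
	       (unsigned)macaddr1, (unsigned)macaddr2);
	return 0;
}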
3568/* GFAR error interrupt handler */ 3569static irqreturn_t gfar_error(int irq, void *grp_id) | 3277static const struct net_device_ops gfar_netdev_ops = { 3278 .ndo_open = gfar_enet_open, 3279 .ndo_start_xmit = gfar_start_xmit, 3280 .ndo_stop = gfar_close, 3281 .ndo_change_mtu = gfar_change_mtu, 3282 .ndo_set_features = gfar_set_features, 3283 .ndo_set_rx_mode = gfar_set_multi, 3284 .ndo_tx_timeout = gfar_timeout, 3285 .ndo_do_ioctl = gfar_ioctl, 3286 .ndo_get_stats = gfar_get_stats, 3287 .ndo_change_carrier = fixed_phy_change_carrier, 3288 .ndo_set_mac_address = gfar_set_mac_addr, 3289 .ndo_validate_addr = eth_validate_addr, 3290#ifdef CONFIG_NET_POLL_CONTROLLER 3291 .ndo_poll_controller = gfar_netpoll, 3292#endif 3293}; 3294 3295/* Set up the ethernet device structure, private data, 3296 * and anything else we need before we start 3297 */ 3298static int gfar_probe(struct platform_device *ofdev) |
3570{ | 3299{ |
3571 struct gfar_priv_grp *gfargrp = grp_id; 3572 struct gfar __iomem *regs = gfargrp->regs; 3573 struct gfar_private *priv= gfargrp->priv; 3574 struct net_device *dev = priv->ndev; | 3300 struct device_node *np = ofdev->dev.of_node; 3301 struct net_device *dev = NULL; 3302 struct gfar_private *priv = NULL; 3303 int err = 0, i; |
3575 | 3304 |
3576 /* Save ievent for future reference */ 3577 u32 events = gfar_read(&regs->ievent); | 3305 err = gfar_of_init(ofdev, &dev);
3578 | 3306 |
3579 /* Clear IEVENT */ 3580 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK); | 3307 if (err) 3308 return err;
3581 | 3309 |
3582 /* Magic Packet is not an error. */ 3583 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 3584 (events & IEVENT_MAG)) 3585 events &= ~IEVENT_MAG; | 3310 priv = netdev_priv(dev); 3311 priv->ndev = dev; 3312 priv->ofdev = ofdev; 3313 priv->dev = &ofdev->dev; 3314 SET_NETDEV_DEV(dev, &ofdev->dev); |
3586 | 3315 |
3587 /* Hmm... */ 3588 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 3589 netdev_dbg(dev, 3590 "error interrupt (ievent=0x%08x imask=0x%08x)\n", 3591 events, gfar_read(&regs->imask)); | 3316 INIT_WORK(&priv->reset_task, gfar_reset_task);
3592 | 3317 |
3593 /* Update the error counters */ 3594 if (events & IEVENT_TXE) { 3595 dev->stats.tx_errors++; | 3318 platform_set_drvdata(ofdev, priv); |
3596 | 3319 |
3597 if (events & IEVENT_LC) 3598 dev->stats.tx_window_errors++; 3599 if (events & IEVENT_CRL) 3600 dev->stats.tx_aborted_errors++; 3601 if (events & IEVENT_XFUN) { 3602 netif_dbg(priv, tx_err, dev, 3603 "TX FIFO underrun, packet dropped\n"); 3604 dev->stats.tx_dropped++; 3605 atomic64_inc(&priv->extra_stats.tx_underrun); | 3320 gfar_detect_errata(priv); |
3606 | 3321 |
3607 schedule_work(&priv->reset_task); | 3322 /* Set the dev->base_addr to the gfar reg region */ 3323 dev->base_addr = (unsigned long) priv->gfargrp[0].regs; 3324 3325 /* Fill in the dev structure */ 3326 dev->watchdog_timeo = TX_TIMEOUT; 3327 /* MTU range: 50 - 9586 */ 3328 dev->mtu = 1500; 3329 dev->min_mtu = 50; 3330 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN; 3331 dev->netdev_ops = &gfar_netdev_ops; 3332 dev->ethtool_ops = &gfar_ethtool_ops; 3333 3334 /* Register for napi ...We are registering NAPI for each grp */ 3335 for (i = 0; i < priv->num_grps; i++) { 3336 if (priv->poll_mode == GFAR_SQ_POLLING) { 3337 netif_napi_add(dev, &priv->gfargrp[i].napi_rx, 3338 gfar_poll_rx_sq, GFAR_DEV_WEIGHT); 3339 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, 3340 gfar_poll_tx_sq, 2); 3341 } else { 3342 netif_napi_add(dev, &priv->gfargrp[i].napi_rx, 3343 gfar_poll_rx, GFAR_DEV_WEIGHT); 3344 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, 3345 gfar_poll_tx, 2); |
3608 } | 3346 } |
3609 netif_dbg(priv, tx_err, dev, "Transmit Error\n"); | |
3610 } | 3347 } |
3611 if (events & IEVENT_BSY) { 3612 dev->stats.rx_over_errors++; 3613 atomic64_inc(&priv->extra_stats.rx_bsy); | |
3614 | 3348 |
3615 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", 3616 gfar_read(&regs->rstat)); | 3349 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 3350 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 3351 NETIF_F_RXCSUM; 3352 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | 3353 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
3617 } | 3354 } |
3618 if (events & IEVENT_BABR) { 3619 dev->stats.rx_errors++; 3620 atomic64_inc(&priv->extra_stats.rx_babr); | |
3621 | 3355 |
3622 netif_dbg(priv, rx_err, dev, "babbling RX error\n"); | 3356 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 3357 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 3358 NETIF_F_HW_VLAN_CTAG_RX; 3359 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; |
3623 } | 3360 } |
3624 if (events & IEVENT_EBERR) { 3625 atomic64_inc(&priv->extra_stats.eberr); 3626 netif_dbg(priv, rx_err, dev, "bus error\n"); | 3361 3362 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 3363 3364 gfar_init_addr_hash_table(priv); 3365 3366 /* Insert receive time stamps into padding alignment bytes, and 3367 * plus 2 bytes padding to ensure the cpu alignment. 3368 */ 3369 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 3370 priv->padding = 8 + DEFAULT_PADDING; 3371 3372 if (dev->features & NETIF_F_IP_CSUM || 3373 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) 3374 dev->needed_headroom = GMAC_FCB_LEN; 3375 3376 /* Initializing some of the rx/tx queue level parameters */ 3377 for (i = 0; i < priv->num_tx_queues; i++) { 3378 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; 3379 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; 3380 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; 3381 priv->tx_queue[i]->txic = DEFAULT_TXIC; |
3627 } | 3382 } |
3628 if (events & IEVENT_RXC) 3629 netif_dbg(priv, rx_status, dev, "control frame\n"); | |
3630 | 3383 |
3631 if (events & IEVENT_BABT) { 3632 atomic64_inc(&priv->extra_stats.tx_babt); 3633 netif_dbg(priv, tx_err, dev, "babbling TX error\n"); | 3384 for (i = 0; i < priv->num_rx_queues; i++) { 3385 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; 3386 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; 3387 priv->rx_queue[i]->rxic = DEFAULT_RXIC; |
3634 } | 3388 } |
3635 return IRQ_HANDLED; | 3389 3390 /* Always enable rx filer if available */ 3391 priv->rx_filer_enable = 3392 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; 3393 /* Enable most messages by default */ 3394 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 3395 /* use pritority h/w tx queue scheduling for single queue devices */ 3396 if (priv->num_tx_queues == 1) 3397 priv->prio_sched_en = 1; 3398 3399 set_bit(GFAR_DOWN, &priv->state); 3400 3401 gfar_hw_init(priv); 3402 3403 /* Carrier starts down, phylib will bring it up */ 3404 netif_carrier_off(dev); 3405 3406 err = register_netdev(dev); 3407 3408 if (err) { 3409 pr_err("%s: Cannot register net device, aborting\n", dev->name); 3410 goto register_fail; 3411 } 3412 3413 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) 3414 priv->wol_supported |= GFAR_WOL_MAGIC; 3415 3416 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && 3417 priv->rx_filer_enable) 3418 priv->wol_supported |= GFAR_WOL_FILER_UCAST; 3419 3420 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); 3421 3422 /* fill out IRQ number and name fields */ 3423 for (i = 0; i < priv->num_grps; i++) { 3424 struct gfar_priv_grp *grp = &priv->gfargrp[i]; 3425 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 3426 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", 3427 dev->name, "_g", '0' + i, "_tx"); 3428 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", 3429 dev->name, "_g", '0' + i, "_rx"); 3430 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", 3431 dev->name, "_g", '0' + i, "_er"); 3432 } else 3433 strcpy(gfar_irq(grp, TX)->name, dev->name); 3434 } 3435 3436 /* Initialize the filer table */ 3437 gfar_init_filer_table(priv); 3438 3439 /* Print out the device info */ 3440 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 3441 3442 /* Even more device info helps when determining which kernel 3443 * provided which set of benchmarks. 3444 */ 3445 netdev_info(dev, "Running with NAPI enabled\n"); 3446 for (i = 0; i < priv->num_rx_queues; i++) 3447 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", 3448 i, priv->rx_queue[i]->rx_ring_size); 3449 for (i = 0; i < priv->num_tx_queues; i++) 3450 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", 3451 i, priv->tx_queue[i]->tx_ring_size); 3452 3453 return 0; 3454 3455register_fail: 3456 if (of_phy_is_fixed_link(np)) 3457 of_phy_deregister_fixed_link(np); 3458 unmap_group_regs(priv); 3459 gfar_free_rx_queues(priv); 3460 gfar_free_tx_queues(priv); 3461 of_node_put(priv->phy_node); 3462 of_node_put(priv->tbi_node); 3463 free_gfar_dev(priv); 3464 return err; |
3636} 3637 | 3465} 3466 |
3638static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) | 3467static int gfar_remove(struct platform_device *ofdev) |
3639{ | 3468{ |
3640 struct net_device *ndev = priv->ndev; 3641 struct phy_device *phydev = ndev->phydev; 3642 u32 val = 0; | 3469 struct gfar_private *priv = platform_get_drvdata(ofdev); 3470 struct device_node *np = ofdev->dev.of_node; |
3643 | 3471 |
3644 if (!phydev->duplex) 3645 return val; | 3472 of_node_put(priv->phy_node); 3473 of_node_put(priv->tbi_node); |
3646 | 3474 |
3647 if (!priv->pause_aneg_en) { 3648 if (priv->tx_pause_en) 3649 val |= MACCFG1_TX_FLOW; 3650 if (priv->rx_pause_en) 3651 val |= MACCFG1_RX_FLOW; 3652 } else { 3653 u16 lcl_adv, rmt_adv; 3654 u8 flowctrl; 3655 /* get link partner capabilities */ 3656 rmt_adv = 0; 3657 if (phydev->pause) 3658 rmt_adv = LPA_PAUSE_CAP; 3659 if (phydev->asym_pause) 3660 rmt_adv |= LPA_PAUSE_ASYM; | 3475 unregister_netdev(priv->ndev); |
3661 | 3476 |
3662 lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); 3663 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); 3664 if (flowctrl & FLOW_CTRL_TX) 3665 val |= MACCFG1_TX_FLOW; 3666 if (flowctrl & FLOW_CTRL_RX) 3667 val |= MACCFG1_RX_FLOW; | 3477 if (of_phy_is_fixed_link(np)) 3478 of_phy_deregister_fixed_link(np); 3479 3480 unmap_group_regs(priv); 3481 gfar_free_rx_queues(priv); 3482 gfar_free_tx_queues(priv); 3483 free_gfar_dev(priv); 3484 3485 return 0; 3486} 3487 3488#ifdef CONFIG_PM 3489 3490static void __gfar_filer_disable(struct gfar_private *priv) 3491{ 3492 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3493 u32 temp; 3494 3495 temp = gfar_read(®s->rctrl); 3496 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT); 3497 gfar_write(®s->rctrl, temp); 3498} 3499 3500static void __gfar_filer_enable(struct gfar_private *priv) 3501{ 3502 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3503 u32 temp; 3504 3505 temp = gfar_read(®s->rctrl); 3506 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; 3507 gfar_write(®s->rctrl, temp); 3508} 3509 3510/* Filer rules implementing wol capabilities */ 3511static void gfar_filer_config_wol(struct gfar_private *priv) 3512{ 3513 unsigned int i; 3514 u32 rqfcr; 3515 3516 __gfar_filer_disable(priv); 3517 3518 /* clear the filer table, reject any packet by default */ 3519 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH; 3520 for (i = 0; i <= MAX_FILER_IDX; i++) 3521 gfar_write_filer(priv, i, rqfcr, 0); 3522 3523 i = 0; 3524 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { 3525 /* unicast packet, accept it */ 3526 struct net_device *ndev = priv->ndev; 3527 /* get the default rx queue index */ 3528 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; 3529 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | 3530 (ndev->dev_addr[1] << 8) | 3531 ndev->dev_addr[2]; 3532 3533 rqfcr = (qindex << 10) | RQFCR_AND | 3534 RQFCR_CMP_EXACT | RQFCR_PID_DAH; 3535 3536 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); 3537 3538 dest_mac_addr = (ndev->dev_addr[3] << 16) | 3539 (ndev->dev_addr[4] << 8) | 3540 ndev->dev_addr[5]; 3541 rqfcr = (qindex << 10) | RQFCR_GPI | 3542 RQFCR_CMP_EXACT | RQFCR_PID_DAL; 3543 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); |
3668 } 3669 | 3544 } 3545 |
3670 return val; | 3546 __gfar_filer_enable(priv); |
3671} 3672 | 3547} 3548 |
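Both versions of gfar_get_flowctrl_cfg() resolve pause autonegotiation by feeding the local and link-partner advertisements to mii_resolve_flowctrl_fdx() and then mapping the FLOW_CTRL_TX/RX result onto MACCFG1_TX_FLOW/MACCFG1_RX_FLOW. A rough host-side restatement of that resolution rule is sketched below; the constants are local stand-ins for the kernel's LPA_PAUSE_* and FLOW_CTRL_* definitions, and the function is an approximation of the mii helper, not the kernel code itself.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's pause advertisement / result bits */
#define LPA_PAUSE_CAP   0x0400
#define LPA_PAUSE_ASYM  0x0800
#define FLOW_CTRL_TX    0x01
#define FLOW_CTRL_RX    0x02

/* Approximation of the full-duplex pause resolution (IEEE 802.3 Annex 28B)
 * that mii_resolve_flowctrl_fdx() performs for the driver.
 */
static uint8_t resolve_flowctrl_fdx(uint16_t lcl_adv, uint16_t rmt_adv)
{
	if (lcl_adv & rmt_adv & LPA_PAUSE_CAP)
		return FLOW_CTRL_TX | FLOW_CTRL_RX;  /* symmetric pause both ways */
	if (lcl_adv & rmt_adv & LPA_PAUSE_ASYM) {
		if (lcl_adv & LPA_PAUSE_CAP)
			return FLOW_CTRL_RX;         /* honor partner's pause frames */
		if (rmt_adv & LPA_PAUSE_CAP)
			return FLOW_CTRL_TX;         /* send pause frames to partner */
	}
	return 0;
}

int main(void)
{
	/* Example: we advertise asymmetric pause only, partner advertises both */
	uint8_t fc = resolve_flowctrl_fdx(LPA_PAUSE_ASYM,
					  LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

	printf("tx_flow=%d rx_flow=%d\n",
	       !!(fc & FLOW_CTRL_TX), !!(fc & FLOW_CTRL_RX));
	return 0;
}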
3673static noinline void gfar_update_link_state(struct gfar_private *priv) | 3549static void gfar_filer_restore_table(struct gfar_private *priv) |
3674{ | 3550{ |
3551 u32 rqfcr, rqfpr; 3552 unsigned int i; 3553 3554 __gfar_filer_disable(priv); 3555 3556 for (i = 0; i <= MAX_FILER_IDX; i++) { 3557 rqfcr = priv->ftp_rqfcr[i]; 3558 rqfpr = priv->ftp_rqfpr[i]; 3559 gfar_write_filer(priv, i, rqfcr, rqfpr); 3560 } 3561 3562 __gfar_filer_enable(priv); 3563} 3564 3565/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */ 3566static void gfar_start_wol_filer(struct gfar_private *priv) 3567{ |
|
3675 struct gfar __iomem *regs = priv->gfargrp[0].regs; | 3568 struct gfar __iomem *regs = priv->gfargrp[0].regs; |
3676 struct net_device *ndev = priv->ndev; 3677 struct phy_device *phydev = ndev->phydev; 3678 struct gfar_priv_rx_q *rx_queue = NULL; 3679 int i; | 3569 u32 tempval; 3570 int i = 0; |
3680 | 3571 |
3681 if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) 3682 return; | 3572 /* Enable Rx hw queues */ 3573 gfar_write(&regs->rqueue, priv->rqueue);
3683 | 3574 |
3684 if (phydev->link) { 3685 u32 tempval1 = gfar_read(&regs->maccfg1); 3686 u32 tempval = gfar_read(&regs->maccfg2); 3687 u32 ecntrl = gfar_read(&regs->ecntrl); 3688 u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW); | 3575 /* Initialize DMACTRL to have WWR and WOP */ 3576 tempval = gfar_read(&regs->dmactrl); 3577 tempval |= DMACTRL_INIT_SETTINGS; 3578 gfar_write(&regs->dmactrl, tempval);
3689 | 3579 |
3690 if (phydev->duplex != priv->oldduplex) { 3691 if (!(phydev->duplex)) 3692 tempval &= ~(MACCFG2_FULL_DUPLEX); 3693 else 3694 tempval |= MACCFG2_FULL_DUPLEX; | 3580 /* Make sure we aren't stopped */ 3581 tempval = gfar_read(&regs->dmactrl); 3582 tempval &= ~DMACTRL_GRS; 3583 gfar_write(&regs->dmactrl, tempval);
3695 | 3584 |
3696 priv->oldduplex = phydev->duplex; 3697 } | 3585 for (i = 0; i < priv->num_grps; i++) { 3586 regs = priv->gfargrp[i].regs; 3587 /* Clear RHLT, so that the DMA starts polling now */ 3588 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); 3589 /* enable the Filer General Purpose Interrupt */ 3590 gfar_write(&regs->imask, IMASK_FGPI); 3591 }
3698 | 3592 |
3699 if (phydev->speed != priv->oldspeed) { 3700 switch (phydev->speed) { 3701 case 1000: 3702 tempval = 3703 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); | 3593 /* Enable Rx DMA */ 3594 tempval = gfar_read(&regs->maccfg1); 3595 tempval |= MACCFG1_RX_EN; 3596 gfar_write(&regs->maccfg1, tempval); 3597}
3704 | 3598 |
3705 ecntrl &= ~(ECNTRL_R100); 3706 break; 3707 case 100: 3708 case 10: 3709 tempval = 3710 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); | 3599static int gfar_suspend(struct device *dev) 3600{ 3601 struct gfar_private *priv = dev_get_drvdata(dev); 3602 struct net_device *ndev = priv->ndev; 3603 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3604 u32 tempval; 3605 u16 wol = priv->wol_opts; |
3711 | 3606 |
3712 /* Reduced mode distinguishes 3713 * between 10 and 100 3714 */ 3715 if (phydev->speed == SPEED_100) 3716 ecntrl |= ECNTRL_R100; 3717 else 3718 ecntrl &= ~(ECNTRL_R100); 3719 break; 3720 default: 3721 netif_warn(priv, link, priv->ndev, 3722 "Ack! Speed (%d) is not 10/100/1000!\n", 3723 phydev->speed); 3724 break; 3725 } | 3607 if (!netif_running(ndev)) 3608 return 0; |
3726 | 3609 |
3727 priv->oldspeed = phydev->speed; 3728 } | 3610 disable_napi(priv); 3611 netif_tx_lock(ndev); 3612 netif_device_detach(ndev); 3613 netif_tx_unlock(ndev); |
3729 | 3614 |
3730 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 3731 tempval1 |= gfar_get_flowctrl_cfg(priv); | 3615 gfar_halt(priv); |
3732 | 3616 |
3733 /* Turn last free buffer recording on */ 3734 if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { 3735 for (i = 0; i < priv->num_rx_queues; i++) { 3736 u32 bdp_dma; | 3617 if (wol & GFAR_WOL_MAGIC) { 3618 /* Enable interrupt on Magic Packet */ 3619 gfar_write(&regs->imask, IMASK_MAG);
3737 | 3620 |
3738 rx_queue = priv->rx_queue[i]; 3739 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); 3740 gfar_write(rx_queue->rfbptr, bdp_dma); 3741 } | 3621 /* Enable Magic Packet mode */ 3622 tempval = gfar_read(&regs->maccfg2); 3623 tempval |= MACCFG2_MPEN; 3624 gfar_write(&regs->maccfg2, tempval);
3742 | 3625 |
3743 priv->tx_actual_en = 1; 3744 } | 3626 /* re-enable the Rx block */ 3627 tempval = gfar_read(&regs->maccfg1); 3628 tempval |= MACCFG1_RX_EN; 3629 gfar_write(&regs->maccfg1, tempval);
3745 | 3630 |
3746 if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) 3747 priv->tx_actual_en = 0; | 3631 } else if (wol & GFAR_WOL_FILER_UCAST) { 3632 gfar_filer_config_wol(priv); 3633 gfar_start_wol_filer(priv); |
3748 | 3634 |
3749 gfar_write(&regs->maccfg1, tempval1); | 3635 } else { 3636 phy_stop(ndev->phydev); 3637 } 3638 3639 return 0; 3640} 3641 3642static int gfar_resume(struct device *dev) 3643{ 3644 struct gfar_private *priv = dev_get_drvdata(dev); 3645 struct net_device *ndev = priv->ndev; 3646 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3647 u32 tempval; 3648 u16 wol = priv->wol_opts; 3649 3650 if (!netif_running(ndev)) 3651 return 0; 3652 3653 if (wol & GFAR_WOL_MAGIC) { 3654 /* Disable Magic Packet mode */ 3655 tempval = gfar_read(&regs->maccfg2); 3656 tempval &= ~MACCFG2_MPEN;
3750 gfar_write(&regs->maccfg2, tempval); | 3657 gfar_write(&regs->maccfg2, tempval);
3751 gfar_write(&regs->ecntrl, ecntrl); | 3658
3752 | 3658 |
3753 if (!priv->oldlink) 3754 priv->oldlink = 1; | 3659 } else if (wol & GFAR_WOL_FILER_UCAST) { 3660 /* need to stop rx only, tx is already down */ 3661 gfar_halt(priv); 3662 gfar_filer_restore_table(priv); |
3755 | 3663 |
3756 } else if (priv->oldlink) { 3757 priv->oldlink = 0; 3758 priv->oldspeed = 0; 3759 priv->oldduplex = -1; | 3664 } else { 3665 phy_start(ndev->phydev); |
3760 } 3761 | 3666 } 3667 |
3762 if (netif_msg_link(priv)) 3763 phy_print_status(phydev); | 3668 gfar_start(priv); 3669 3670 netif_device_attach(ndev); 3671 enable_napi(priv); 3672 3673 return 0; |
3764} 3765 | 3674} 3675 |
3676static int gfar_restore(struct device *dev) 3677{ 3678 struct gfar_private *priv = dev_get_drvdata(dev); 3679 struct net_device *ndev = priv->ndev; 3680 3681 if (!netif_running(ndev)) { 3682 netif_device_attach(ndev); 3683 3684 return 0; 3685 } 3686 3687 gfar_init_bds(ndev); 3688 3689 gfar_mac_reset(priv); 3690 3691 gfar_init_tx_rx_base(priv); 3692 3693 gfar_start(priv); 3694 3695 priv->oldlink = 0; 3696 priv->oldspeed = 0; 3697 priv->oldduplex = -1; 3698 3699 if (ndev->phydev) 3700 phy_start(ndev->phydev); 3701 3702 netif_device_attach(ndev); 3703 enable_napi(priv); 3704 3705 return 0; 3706} 3707 3708static const struct dev_pm_ops gfar_pm_ops = { 3709 .suspend = gfar_suspend, 3710 .resume = gfar_resume, 3711 .freeze = gfar_suspend, 3712 .thaw = gfar_resume, 3713 .restore = gfar_restore, 3714}; 3715 3716#define GFAR_PM_OPS (&gfar_pm_ops) 3717 3718#else 3719 3720#define GFAR_PM_OPS NULL 3721 3722#endif 3723 |
|
3766static const struct of_device_id gfar_match[] = 3767{ 3768 { 3769 .type = "network", 3770 .compatible = "gianfar", 3771 }, 3772 { 3773 .compatible = "fsl,etsec2", --- 17 unchanged lines hidden --- | 3724static const struct of_device_id gfar_match[] = 3725{ 3726 { 3727 .type = "network", 3728 .compatible = "gianfar", 3729 }, 3730 { 3731 .compatible = "fsl,etsec2", --- 17 unchanged lines hidden --- |