// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		32

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 * @value:	Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	out_be32((lp->dma_regs + reg), value);
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	for (i = 0; i < RX_BD_NUM; i++) {
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 lp->max_frm_size, DMA_FROM_DEVICE);
		dev_kfree_skb((struct sk_buff *)
			      (lp->rx_bd_v[i].sw_id_offset));
	}

	if (lp->rx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v,
				  lp->rx_bd_p);
	}
	if (lp->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v,
				  lp->tx_bd_p);
	}
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

/**
 * __axienet_device_reset - Reset an Axi DMA channel and wait for completion
 * @lp:		Pointer to the axienet_local structure
 * @offset:	Control register offset of the Axi DMA channel to reset
 */
static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
{
	u32 timeout;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
				   __func__);
			break;
		}
	}
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
				   XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (axienet_dma_bd_init(ndev)) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
	u32 emmc_reg;
	u32 link_state;
	u32 setspeed = 1;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;

	link_state = phy->speed | (phy->duplex << 1) | phy->link;
	if (lp->last_link != link_state) {
		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
			if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX)
				setspeed = 0;
		} else {
			if ((phy->speed == SPEED_1000) &&
			    (lp->phy_mode == PHY_INTERFACE_MODE_MII))
				setspeed = 0;
		}

		if (setspeed == 1) {
			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

			switch (phy->speed) {
			case SPEED_1000:
				emmc_reg |= XAE_EMMC_LINKSPD_1000;
				break;
			case SPEED_100:
				emmc_reg |= XAE_EMMC_LINKSPD_100;
				break;
			case SPEED_10:
				emmc_reg |= XAE_EMMC_LINKSPD_10;
				break;
			default:
				dev_err(&ndev->dev,
					"Speed other than 10, 100 or 1Gbps is not supported\n");
				break;
			}

			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
			lp->last_link = link_state;
			phy_print_status(phy);
		} else {
			netdev_err(ndev,
				   "Error setting Axi Ethernet mac speed\n");
		}
	}
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 *			     Axi DMA Tx channel.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_consume_skb_irq((struct sk_buff *)cur_p->app4);
		/*cur_p->phys = 0;*/
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++lp->tx_bd_ci;
		lp->tx_bd_ci %= TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;
	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	for (ii = 0; ii < num_frag; ii++) {
		++lp->tx_bd_tail;
		lp->tx_bd_tail %= TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++lp->tx_bd_tail;
	lp->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
}

/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev:	Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
		skb = (struct sk_buff *) (cur_p->sw_id_offset);
		length = cur_p->app4 & 0x0000FFFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		/*skb_checksum_none_assert(skb);*/
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			return;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32) new_skb;

		++lp->rx_bd_ci;
		lp->rx_bd_ci %= RX_BD_NUM;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(unsigned long data);

/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards.
	 */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	if (lp->phy_node) {
		phydev = of_phy_connect(lp->ndev, lp->phy_node,
					axienet_adjust_link, 0, lp->phy_mode);

		if (!phydev)
			dev_err(lp->dev, "of_phy_connect() failed\n");
		else
			phy_start(phydev);
	}

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
		     (unsigned long) lp);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (phydev)
		phy_disconnect(phydev);
	tasklet_kill(&lp->dma_err_tasklet);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_kill(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * Return: 0 on success, or a negative error code if the new MTU cannot be
 *	   applied.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev:	Pointer to net_device structure
 * @ed:		Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 * @regs:	Pointer to ethtool_regs structure
 * @ret:	Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *) ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval;
	struct axienet_local *lp = netdev_priv(ndev);

	epauseparm->autoneg = 0;
	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, -EFAULT if device is running
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	if (epauseparm->tx_pause)
		regval |= XAE_FCC_FCTX_MASK;
	else
		regval &= ~XAE_FCC_FCTX_MASK;
	if (epauseparm->rx_pause)
		regval |= XAE_FCC_FCRX_MASK;
	else
		regval &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, regval);

	return 0;
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if ((ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->rx_coalesce_usecs_irq) ||
	    (ecoalesce->rx_max_coalesced_frames_irq) ||
	    (ecoalesce->tx_coalesce_usecs) ||
	    (ecoalesce->tx_coalesce_usecs_irq) ||
	    (ecoalesce->tx_max_coalesced_frames_irq) ||
	    (ecoalesce->stats_block_coalesce_usecs) ||
	    (ecoalesce->use_adaptive_rx_coalesce) ||
	    (ecoalesce->use_adaptive_tx_coalesce) ||
	    (ecoalesce->pkt_rate_low) ||
	    (ecoalesce->rx_coalesce_usecs_low) ||
	    (ecoalesce->rx_max_coalesced_frames_low) ||
	    (ecoalesce->tx_coalesce_usecs_low) ||
	    (ecoalesce->tx_max_coalesced_frames_low) ||
	    (ecoalesce->pkt_rate_high) ||
	    (ecoalesce->rx_coalesce_usecs_high) ||
	    (ecoalesce->rx_max_coalesced_frames_high) ||
	    (ecoalesce->tx_coalesce_usecs_high) ||
	    (ecoalesce->tx_max_coalesced_frames_high) ||
	    (ecoalesce->rate_sample_interval))
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

/**
 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 * @data:	Data passed
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(unsigned long data)
{
	u32 axienet_status;
	u32 cr, i;
	int mdio_mcreg;
	struct axienet_local *lp = (struct axienet_local *) data;
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	axienet_mdio_wait_until_ready(lp);
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. So if MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards.
	 */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
		    ~XAE_MDIO_MC_MDIOEN_MASK));

	__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	axienet_mdio_wait_until_ready(lp);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->phys)
			dma_unmap_single(ndev->dev.parent, cur_p->phys,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev:	Pointer to platform device structure.
 *
 * Return: 0, on success
 *	   Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	const void *mac_addr;
	struct resource *ethres, dmares;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	/* Map device registers */
	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
		ret = PTR_ERR(lp->regs);
		goto free_netdev;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be large so that
	 * we can enable jumbo option and start supporting jumbo frames.
	 * Here we check for memory allocated for Rx/Tx in the hardware from
	 * the device-tree and accordingly set flags.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto free_netdev;
		}
	} else {
		lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
		if (lp->phy_mode < 0) {
			ret = -EINVAL;
			goto free_netdev;
		}
	}

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (!np) {
		dev_err(&pdev->dev, "could not find DMA node\n");
		ret = -ENODEV;
		goto free_netdev;
	}
	ret = of_address_to_resource(np, 0, &dmares);
	if (ret) {
		dev_err(&pdev->dev, "unable to get DMA resource\n");
		of_node_put(np);
		goto free_netdev;
	}
	lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		of_node_put(np);
		goto free_netdev;
	}
	lp->rx_irq = irq_of_parse_and_map(np, 1);
	lp->tx_irq = irq_of_parse_and_map(np, 0);
	of_node_put(np);
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto free_netdev;
	}

	/* Retrieve the MAC address */
	mac_addr = of_get_mac_address(pdev->dev.of_node);
	if (IS_ERR(mac_addr)) {
		dev_err(&pdev->dev, "could not find MAC address\n");
		ret = PTR_ERR(mac_addr);
		goto free_netdev;
	}
	axienet_set_mac_address(ndev, mac_addr);

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (lp->phy_node) {
		ret = axienet_mdio_setup(lp, pdev->dev.of_node);
		if (ret)
			dev_warn(&pdev->dev, "error registering MDIO bus\n");
	}

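	/* Everything is parsed and mapped; register the net_device last so
	 * the interface only becomes visible once setup has succeeded.
	 */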
	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto free_netdev;
	}

	return 0;

free_netdev:
	free_netdev(ndev);

	return ret;
}

static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	axienet_mdio_teardown(lp);
	unregister_netdev(ndev);

	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	free_netdev(ndev);

	return 0;
}

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.driver = {
		.name = "xilinx_axienet",
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");