/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptor counts for the Tx and Rx DMA rings - use 2^n for best performance */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		32

/* Match table for of_platform binding */
static struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * returns: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	out_be32((lp->dma_regs + reg), value);
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver stop API is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	for (i = 0; i < RX_BD_NUM; i++) {
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 lp->max_frm_size, DMA_FROM_DEVICE);
		dev_kfree_skb((struct sk_buff *)
			      (lp->rx_bd_v[i].sw_id_offset));
	}

	if (lp->rx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v,
				  lp->rx_bd_p);
	}
	if (lp->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v,
				  lp->tx_bd_p);
	}
}
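
/*
 * Both descriptor rings are circular: axienet_dma_bd_init() below points each
 * BD's "next" field at the physical address of the following BD, and the last
 * BD back at the first. Software walks the rings with plain indices that wrap
 * modulo the ring size (tx_bd_ci/tx_bd_tail for Tx, rx_bd_ci for Rx).
 */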
/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * returns: 0, on success
 *	    -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					  &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					  &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception. */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is now ready to run, but only after we write to the
	 * tail pointer register will the Tx channel start transmitting. */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_random_addr(ndev->dev_addr);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * returns: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into the net_device_ops structure entry
 * ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it. */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. Typical options are the jumbo frame option, basic VLAN option,
 * promiscuous mode option etc. This function is used to set or clear these
 * options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}
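
/**
 * __axienet_device_reset - Reset an Axi DMA channel.
 * @lp: Pointer to the axienet local structure
 * @dev: Pointer to the device structure, used for error reporting
 * @offset: Offset of the channel control register (Tx or Rx) to reset through
 *
 * Requests a reset of the Axi DMA channel selected by @offset and busy-waits
 * until the hardware clears the reset bit again, logging an error if the
 * reset does not complete within the timeout. Since the Axi DMA reset lines
 * are connected to the Axi Ethernet reset lines, this also resets the Axi
 * Ethernet core.
 */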
static void __axienet_device_reset(struct axienet_local *lp,
				   struct device *dev, off_t offset)
{
	u32 timeout;
	/* Reset Axi DMA. This would reset the Axi Ethernet core as well. The
	 * reset process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process. */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(dev, "axienet_device_reset DMA reset timeout!\n");
			break;
		}
	}
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since the Axi DMA reset
 * lines are connected to the Axi Ethernet reset lines, this in turn resets
 * the Axi Ethernet core. No separate hardware reset is done for the Axi
 * Ethernet core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU) &&
	    (lp->jumbo_support)) {
		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
				   XAE_TRL_SIZE;
		lp->options |= XAE_OPTION_JUMBO;
	}

	if (axienet_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"axienet_device_reset descriptor allocation failed\n");
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled. */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	ndev->trans_start = jiffies;
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
	u32 emmc_reg;
	u32 link_state;
	u32 setspeed = 1;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;

	link_state = phy->speed | (phy->duplex << 1) | phy->link;
	if (lp->last_link != link_state) {
		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
			if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
				setspeed = 0;
		} else {
			if ((phy->speed == SPEED_1000) &&
			    (lp->phy_type == XAE_PHY_TYPE_MII))
				setspeed = 0;
		}

		if (setspeed == 1) {
			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

			switch (phy->speed) {
			case SPEED_1000:
				emmc_reg |= XAE_EMMC_LINKSPD_1000;
				break;
			case SPEED_100:
				emmc_reg |= XAE_EMMC_LINKSPD_100;
				break;
			case SPEED_10:
				emmc_reg |= XAE_EMMC_LINKSPD_10;
				break;
			default:
				dev_err(&ndev->dev,
					"Speeds other than 10, 100 or 1000 Mbps are not supported\n");
				break;
			}

			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
			lp->last_link = link_state;
			phy_print_status(phy);
		} else {
			dev_err(&ndev->dev,
				"Error setting Axi Ethernet MAC speed\n");
		}
	}
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev: Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that the CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		/*cur_p->phys = 0;*/
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		lp->tx_bd_ci = (lp->tx_bd_ci + 1) % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;
	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * returns: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;
	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}
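
/*
 * Illustrative Tx descriptor layout for an skb with two page fragments
 * (three BDs total), as built by axienet_start_xmit() below:
 *
 *	BD[tail]	cntrl = headlen | TXSOF,     phys -> skb->data
 *	BD[tail + 1]	cntrl = frag 0 size,         phys -> fragment 0
 *	BD[tail + 2]	cntrl = frag 1 size | TXEOF, app4 = skb pointer
 *
 * Writing the tail pointer register after the last BD hands the whole chain
 * to the Axi DMA engine; axienet_start_xmit_done() later reclaims the BDs
 * once their COMPLETE status bits are set.
 */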
/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;

	return NETDEV_TX_OK;
}

/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 * BD processing.
 * @ndev: Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		skb = (struct sk_buff *) (cur_p->sw_id_offset);
		length = cur_p->app4 & 0x0000FFFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		/*skb_checksum_none_assert(skb);*/
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == __constant_htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			return;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32) new_skb;

		lp->rx_bd_ci = (lp->rx_bd_ci + 1) % RX_BD_NUM;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
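
/*
 * The two interrupt handlers below follow the same pattern: on a completion
 * or delay interrupt they run the matching BD-processing routine; on a DMA
 * error they mask both channels' interrupts and defer recovery to
 * axienet_dma_err_handler() via the dma_err_tasklet.
 */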
/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(unsigned long data);

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * returns: 0, on success.
 *	    -ENODEV, if the PHY cannot be connected to
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY
 * device. It also allocates interrupt service routines, enables the
 * interrupt lines and ISR handling. The Axi Ethernet core is reset through
 * the Axi DMA core. Buffer descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards. */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	if (lp->phy_node) {
		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     axienet_adjust_link, 0,
					     PHY_INTERFACE_MODE_GMII);
		if (!lp->phy_dev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(lp->phy_dev);
	}

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
		     (unsigned long) lp);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	tasklet_kill(&lp->dma_err_tasklet);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * returns: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_kill(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * returns: 0 on success, -EBUSY if the device is running, or -EINVAL if the
 *	    requested MTU is out of range.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;
	if (lp->jumbo_support) {
		if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	} else {
		if ((new_mtu > XAE_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled
 * prior to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
 * @ndev: Pointer to net_device structure
 * @ecmd: Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for getting PHY settings. If PHY could
 * not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to get the PHY settings.
 * Issue "ethtool ethX" under linux prompt to execute this function.
 */
static int axienet_ethtools_get_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;
	if (!phydev)
		return -ENODEV;
	return phy_ethtool_gset(phydev, ecmd);
}

/**
 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
 * @ndev: Pointer to net_device structure
 * @ecmd: Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for setting various PHY settings. If PHY
 * could not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to set the PHY.
 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
 * function.
 */
static int axienet_ethtools_set_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;
	if (!phydev)
		return -ENODEV;
	return phy_ethtool_sset(phydev, ecmd);
}

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
	ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *) ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval;
	struct axienet_local *lp = netdev_priv(ndev);
	epauseparm->autoneg = 0;
	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute
 * this function.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR
		       "%s: Please stop netif before applying configuration\n",
		       ndev->name);
		return -EFAULT;
	}

	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	if (epauseparm->tx_pause)
		regval |= XAE_FCC_FCTX_MASK;
	else
		regval &= ~XAE_FCC_FCTX_MASK;
	if (epauseparm->rx_pause)
		regval |= XAE_FCC_FCRX_MASK;
	else
		regval &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, regval);

	return 0;
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR
		       "%s: Please stop netif before applying configuration\n",
		       ndev->name);
		return -EFAULT;
	}

	if ((ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->rx_coalesce_usecs_irq) ||
	    (ecoalesce->rx_max_coalesced_frames_irq) ||
	    (ecoalesce->tx_coalesce_usecs) ||
	    (ecoalesce->tx_coalesce_usecs_irq) ||
	    (ecoalesce->tx_max_coalesced_frames_irq) ||
	    (ecoalesce->stats_block_coalesce_usecs) ||
	    (ecoalesce->use_adaptive_rx_coalesce) ||
	    (ecoalesce->use_adaptive_tx_coalesce) ||
	    (ecoalesce->pkt_rate_low) ||
	    (ecoalesce->rx_coalesce_usecs_low) ||
	    (ecoalesce->rx_max_coalesced_frames_low) ||
	    (ecoalesce->tx_coalesce_usecs_low) ||
	    (ecoalesce->tx_max_coalesced_frames_low) ||
	    (ecoalesce->pkt_rate_high) ||
	    (ecoalesce->rx_coalesce_usecs_high) ||
	    (ecoalesce->rx_max_coalesced_frames_high) ||
	    (ecoalesce->tx_coalesce_usecs_high) ||
	    (ecoalesce->tx_max_coalesced_frames_high) ||
	    (ecoalesce->rate_sample_interval))
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static struct ethtool_ops axienet_ethtool_ops = {
	.get_settings = axienet_ethtools_get_settings,
	.set_settings = axienet_ethtools_set_settings,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
};

/**
 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 * @data: Data passed
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(unsigned long data)
{
	u32 axienet_status;
	u32 cr, i;
	int mdio_mcreg;
	struct axienet_local *lp = (struct axienet_local *) data;
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	axienet_mdio_wait_until_ready(lp);
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. So if MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards. */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
		    ~XAE_MDIO_MC_MDIOEN_MASK));

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	axienet_mdio_wait_until_ready(lp);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->phys)
			dma_unmap_single(ndev->dev.parent, cur_p->phys,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception. */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is now ready to run, but only after we write to the
	 * tail pointer register will the Tx channel start transmitting. */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled. */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}
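
/*
 * A device-tree node wired up the way axienet_of_probe() below expects might
 * look roughly like this (illustrative sketch only; the node name, addresses
 * and values are made up, and the binding documentation is authoritative):
 *
 *	axi_ethernet_0: ethernet@40c00000 {
 *		compatible = "xlnx,axi-ethernet-1.00.a";
 *		local-mac-address = [00 0a 35 00 00 00];
 *		phy-handle = <&phy0>;
 *		axistream-connected = <&axi_dma_0>;
 *		xlnx,txcsum = <0x2>;
 *		xlnx,rxcsum = <0x2>;
 *		xlnx,rxmem = <0x8000>;
 *		xlnx,phy-type = <0x1>;
 *	};
 */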
/**
 * axienet_of_probe - Axi Ethernet probe function.
 * @op: Pointer to platform device structure.
 *
 * returns: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for the Axi Ethernet driver. This is called
 * before any other driver routines are invoked. It allocates and sets up
 * the Ethernet device. It parses the device tree, populates the fields of
 * axienet_local, and registers the Ethernet device.
 */
static int axienet_of_probe(struct platform_device *op)
{
	__be32 *p;
	int size, ret = 0;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	const void *addr;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	ether_setup(ndev);
	platform_set_drvdata(op, ndev);

	SET_NETDEV_DEV(ndev, &op->dev);
	ndev->flags &= ~IFF_MULTICAST;	/* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &op->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	/* Map device registers */
	lp->regs = of_iomap(op->dev.of_node, 0);
	if (!lp->regs) {
		dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
		ret = -ENOMEM;
		goto nodev;
	}
	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
	if (p) {
		switch (be32_to_cpup(p)) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
*/ 1520 ndev->features |= NETIF_F_IP_CSUM; 1521 break; 1522 default: 1523 lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; 1524 } 1525 } 1526 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL); 1527 if (p) { 1528 switch (be32_to_cpup(p)) { 1529 case 1: 1530 lp->csum_offload_on_rx_path = 1531 XAE_FEATURE_PARTIAL_RX_CSUM; 1532 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; 1533 break; 1534 case 2: 1535 lp->csum_offload_on_rx_path = 1536 XAE_FEATURE_FULL_RX_CSUM; 1537 lp->features |= XAE_FEATURE_FULL_RX_CSUM; 1538 break; 1539 default: 1540 lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; 1541 } 1542 } 1543 /* For supporting jumbo frames, the Axi Ethernet hardware must have 1544 * a larger Rx/Tx Memory. Typically, the size must be more than or 1545 * equal to 16384 bytes, so that we can enable jumbo option and start 1546 * supporting jumbo frames. Here we check for memory allocated for 1547 * Rx/Tx in the hardware from the device-tree and accordingly set 1548 * flags. */ 1549 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL); 1550 if (p) { 1551 if ((be32_to_cpup(p)) >= 0x4000) 1552 lp->jumbo_support = 1; 1553 } 1554 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type", 1555 NULL); 1556 if (p) 1557 lp->temac_type = be32_to_cpup(p); 1558 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL); 1559 if (p) 1560 lp->phy_type = be32_to_cpup(p); 1561 1562 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 1563 np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); 1564 if (!np) { 1565 dev_err(&op->dev, "could not find DMA node\n"); 1566 goto err_iounmap; 1567 } 1568 lp->dma_regs = of_iomap(np, 0); 1569 if (lp->dma_regs) { 1570 dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs); 1571 } else { 1572 dev_err(&op->dev, "unable to map DMA registers\n"); 1573 of_node_put(np); 1574 } 1575 lp->rx_irq = irq_of_parse_and_map(np, 1); 1576 lp->tx_irq = irq_of_parse_and_map(np, 0); 1577 of_node_put(np); 1578 if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { 1579 dev_err(&op->dev, "could not determine irqs\n"); 1580 ret = -ENOMEM; 1581 goto err_iounmap_2; 1582 } 1583 1584 /* Retrieve the MAC address */ 1585 addr = of_get_property(op->dev.of_node, "local-mac-address", &size); 1586 if ((!addr) || (size != 6)) { 1587 dev_err(&op->dev, "could not find MAC address\n"); 1588 ret = -ENODEV; 1589 goto err_iounmap_2; 1590 } 1591 axienet_set_mac_address(ndev, (void *) addr); 1592 1593 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 1594 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 1595 1596 lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); 1597 ret = axienet_mdio_setup(lp, op->dev.of_node); 1598 if (ret) 1599 dev_warn(&op->dev, "error registering MDIO bus\n"); 1600 1601 ret = register_netdev(lp->ndev); 1602 if (ret) { 1603 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); 1604 goto err_iounmap_2; 1605 } 1606 1607 return 0; 1608 1609 err_iounmap_2: 1610 if (lp->dma_regs) 1611 iounmap(lp->dma_regs); 1612 err_iounmap: 1613 iounmap(lp->regs); 1614 nodev: 1615 free_netdev(ndev); 1616 ndev = NULL; 1617 return ret; 1618 } 1619 1620 static int axienet_of_remove(struct platform_device *op) 1621 { 1622 struct net_device *ndev = platform_get_drvdata(op); 1623 struct axienet_local *lp = netdev_priv(ndev); 1624 1625 axienet_mdio_teardown(lp); 1626 unregister_netdev(ndev); 1627 1628 if (lp->phy_node) 1629 of_node_put(lp->phy_node); 1630 lp->phy_node = NULL; 1631 1632 iounmap(lp->regs); 1633 if (lp->dma_regs) 1634 
		iounmap(lp->dma_regs);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver axienet_of_driver = {
	.probe = axienet_of_probe,
	.remove = axienet_of_remove,
	.driver = {
		 .owner = THIS_MODULE,
		 .name = "xilinx_axienet",
		 .of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_of_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");