/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptor defines for Tx and Rx DMA - 2^n for the best performance */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		32

/* Match table for of_platform binding */
static struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};
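/*
 * Note (illustrative): axienet_setoptions() below walks this table and does
 * a read-modify-write of each referenced register, so an option that names
 * two entries, e.g. XAE_OPTION_JUMBO, ends up setting the JUM bit in both
 * the transmitter (TC) and receiver (RCW1) configuration registers.
 */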
/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * returns: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	out_be32((lp->dma_regs + reg), value);
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi
 * Ethernet driver's stop routine is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	for (i = 0; i < RX_BD_NUM; i++) {
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 lp->max_frm_size, DMA_FROM_DEVICE);
		dev_kfree_skb((struct sk_buff *)
			      (lp->rx_bd_v[i].sw_id_offset));
	}

	if (lp->rx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v,
				  lp->rx_bd_p);
	}
	if (lp->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v,
				  lp->tx_bd_p);
	}
}
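/*
 * Ring layout sketch (illustrative): axienet_dma_bd_init() below builds each
 * ring as a coherent array of descriptors whose "next" fields hold physical
 * addresses, with the last entry wrapping back to the first, e.g. for
 * TX_BD_NUM = 64:
 *
 *	tx_bd_v[0].next  = tx_bd_p + 1 * sizeof(*tx_bd_v)
 *	...
 *	tx_bd_v[63].next = tx_bd_p + 0 * sizeof(*tx_bd_v)	(wrap)
 */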
/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * returns: 0, on success
 *	    -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/*
	 * Allocate the Tx and Rx buffer descriptors.
	 */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p,
					 GFP_KERNEL | __GFP_ZERO);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p,
					 GFP_KERNEL | __GFP_ZERO);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception. */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register will the Tx channel start transmitting. */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
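/*
 * Note (illustrative): the asymmetry in the tail-pointer writes above is
 * deliberate. The Rx tail pointer is pushed to the last descriptor so the
 * hardware immediately owns all RX_BD_NUM buffers, whereas the Tx tail
 * pointer is not advanced here at all; axienet_start_xmit() moves it per
 * frame, and that write is what actually starts transmission.
 */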
/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_random_addr(ndev->dev_addr);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * returns: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}
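/*
 * Worked example (illustrative): for the MAC address 00:0a:35:01:02:03,
 * axienet_set_mac_address() above programs
 *
 *	UAW0 = 0x00 | (0x0a << 8) | (0x35 << 16) | (0x01 << 24) = 0x01350a00
 *	UAW1 = (UAW1 & ~XAE_UAW1_UNICASTADDR_MASK) | (0x02 | (0x03 << 8))
 *
 * i.e. the first four octets land in UAW0 and the last two in the low half
 * of UAW1, least significant byte first.
 */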
/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it. */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}
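/*
 * Usage sketch (mirroring the reset paths later in this file): callers
 * typically sync every option except the transmitter/receiver enables
 * first, then switch them on in a second pass:
 *
 *	axienet_setoptions(ndev, lp->options &
 *			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 *	...
 *	axienet_setoptions(ndev, lp->options);
 */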
static void __axienet_device_reset(struct axienet_local *lp,
				   struct device *dev, off_t offset)
{
	u32 timeout;
	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process. */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(dev, "axienet_device_reset DMA "
				"reset timeout!\n");
			break;
		}
	}
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU) &&
	    (lp->jumbo_support)) {
		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
				   XAE_TRL_SIZE;
		lp->options |= XAE_OPTION_JUMBO;
	}

	if (axienet_dma_bd_init(ndev)) {
		dev_err(&ndev->dev, "axienet_device_reset descriptor "
			"allocation failed\n");
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled. */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	ndev->trans_start = jiffies;
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
	u32 emmc_reg;
	u32 link_state;
	u32 setspeed = 1;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;

	link_state = phy->speed | (phy->duplex << 1) | phy->link;
	if (lp->last_link != link_state) {
		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
			if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
				setspeed = 0;
		} else {
			if ((phy->speed == SPEED_1000) &&
			    (lp->phy_type == XAE_PHY_TYPE_MII))
				setspeed = 0;
		}

		if (setspeed == 1) {
			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

			switch (phy->speed) {
			case SPEED_1000:
				emmc_reg |= XAE_EMMC_LINKSPD_1000;
				break;
			case SPEED_100:
				emmc_reg |= XAE_EMMC_LINKSPD_100;
				break;
			case SPEED_10:
				emmc_reg |= XAE_EMMC_LINKSPD_10;
				break;
			default:
				dev_err(&ndev->dev, "Speed other than 10, 100 "
					"or 1Gbps is not supported\n");
				break;
			}

			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
			lp->last_link = link_state;
			phy_print_status(phy);
		} else {
			dev_err(&ndev->dev, "Error setting Axi Ethernet "
				"mac speed\n");
		}
	}
}
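/*
 * Note (illustrative): link_state above packs speed, duplex and link into a
 * single word purely so a change can be detected with one comparison against
 * lp->last_link; the EMMC register is rewritten only when that value differs.
 */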
/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev: Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		/*cur_p->phys = 0;*/
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;
	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * returns: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;
	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}
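/*
 * Note (illustrative): the check above probes the descriptor num_frag slots
 * ahead of tx_bd_tail; if any status bit is still set there, that BD has not
 * yet been reclaimed by axienet_start_xmit_done(), so a frame needing
 * num_frag + 1 descriptors cannot fit and the caller must back off.
 */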
/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;

	return NETDEV_TX_OK;
}
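/*
 * Checksum-offload sketch (illustrative): for the partial Tx offload case
 * above, the AXI Stream app words carry
 *
 *	app0 bit 0 = 1				partial csum requested
 *	app1 = (csum_start_off << 16) | csum_index_off
 *
 * e.g. a TCP/IPv4 frame with the transport header at byte offset 34 and
 * csum_offset 16 yields app1 = (34 << 16) | 50 = 0x00220032.
 */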
/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 * BD processing.
 * @ndev: Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		skb = (struct sk_buff *) (cur_p->sw_id_offset);
		length = cur_p->app4 & 0x0000FFFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		/*skb_checksum_none_assert(skb);*/
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == __constant_htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			return;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32) new_skb;

		lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
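/*
 * Note (illustrative): tail_p is latched once at entry, pointing at the
 * first descriptor this pass processes; writing it back after the refill
 * loop advances the hardware tail so the recycled BDs are handed back to
 * the Rx channel.
 */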
/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(unsigned long data);
/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * returns: 0, on success.
 *	    -ENODEV, if PHY cannot be connected to
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards. */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	if (lp->phy_node) {
		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     axienet_adjust_link, 0,
					     PHY_INTERFACE_MODE_GMII);
		if (!lp->phy_dev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(lp->phy_dev);
	}

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
		     (unsigned long) lp);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	tasklet_kill(&lp->dma_err_tasklet);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * returns: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_kill(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	axienet_dma_bd_release(ndev);
	return 0;
}
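/*
 * Teardown-order sketch (as implemented above): clear the DMA run/stop bits
 * first so no new transfers start, then disable the MAC TXEN/RXEN options,
 * kill the error tasklet, free both IRQs, disconnect the PHY and finally
 * release the descriptor rings.
 */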
/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * returns: 0 on success, -EBUSY if the device is up, or -EINVAL if the
 *	    requested mtu is out of range.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;
	if (lp->jumbo_support) {
		if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	} else {
		if ((new_mtu > XAE_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
 * @ndev: Pointer to net_device structure
 * @ecmd: Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for getting PHY settings. If PHY could
 * not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to get the PHY settings.
 * Issue "ethtool ethX" under linux prompt to execute this function.
 */
static int axienet_ethtools_get_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;
	if (!phydev)
		return -ENODEV;
	return phy_ethtool_gset(phydev, ecmd);
}

/**
 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
 * @ndev: Pointer to net_device structure
 * @ecmd: Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for setting various PHY settings. If PHY
 * could not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to set the PHY.
 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
 * function.
 */
static int axienet_ethtools_set_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;
	if (!phydev)
		return -ENODEV;
	return phy_ethtool_sset(phydev, ecmd);
}
/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
	ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *) ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}
/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval;
	struct axienet_local *lp = netdev_priv(ndev);
	epauseparm->autoneg = 0;
	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR "%s: Please stop netif before applying "
		       "configuration\n", ndev->name);
		return -EFAULT;
	}

	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	if (epauseparm->tx_pause)
		regval |= XAE_FCC_FCTX_MASK;
	else
		regval &= ~XAE_FCC_FCTX_MASK;
	if (epauseparm->rx_pause)
		regval |= XAE_FCC_FCRX_MASK;
	else
		regval &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, regval);

	return 0;
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}
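/*
 * Note (illustrative): the "frames" counts map directly onto the DMA
 * engine's coalesce bit field, so e.g.
 *
 *	ethtool -C ethX rx-frames 5
 *
 * requests one Rx completion interrupt per five received frames, and a
 * count of 1 restores interrupt-per-frame behaviour.
 */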
/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR "%s: Please stop netif before applying "
		       "configuration\n", ndev->name);
		return -EFAULT;
	}

	if ((ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->rx_coalesce_usecs_irq) ||
	    (ecoalesce->rx_max_coalesced_frames_irq) ||
	    (ecoalesce->tx_coalesce_usecs) ||
	    (ecoalesce->tx_coalesce_usecs_irq) ||
	    (ecoalesce->tx_max_coalesced_frames_irq) ||
	    (ecoalesce->stats_block_coalesce_usecs) ||
	    (ecoalesce->use_adaptive_rx_coalesce) ||
	    (ecoalesce->use_adaptive_tx_coalesce) ||
	    (ecoalesce->pkt_rate_low) ||
	    (ecoalesce->rx_coalesce_usecs_low) ||
	    (ecoalesce->rx_max_coalesced_frames_low) ||
	    (ecoalesce->tx_coalesce_usecs_low) ||
	    (ecoalesce->tx_max_coalesced_frames_low) ||
	    (ecoalesce->pkt_rate_high) ||
	    (ecoalesce->rx_coalesce_usecs_high) ||
	    (ecoalesce->rx_max_coalesced_frames_high) ||
	    (ecoalesce->tx_coalesce_usecs_high) ||
	    (ecoalesce->tx_max_coalesced_frames_high) ||
	    (ecoalesce->rate_sample_interval))
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static struct ethtool_ops axienet_ethtool_ops = {
	.get_settings   = axienet_ethtools_get_settings,
	.set_settings   = axienet_ethtools_set_settings,
	.get_drvinfo    = axienet_ethtools_get_drvinfo,
	.get_regs_len   = axienet_ethtools_get_regs_len,
	.get_regs       = axienet_ethtools_get_regs,
	.get_link       = ethtool_op_get_link,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce   = axienet_ethtools_get_coalesce,
	.set_coalesce   = axienet_ethtools_set_coalesce,
};

/**
 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 * @data: Pointer to the axienet_local structure, passed as an unsigned long
 *	  when the tasklet was initialized
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(unsigned long data)
{
	u32 axienet_status;
	u32 cr, i;
	int mdio_mcreg;
	struct axienet_local *lp = (struct axienet_local *) data;
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	axienet_mdio_wait_until_ready(lp);
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. So if MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards. */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
		    ~XAE_MDIO_MC_MDIOEN_MASK));

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	axienet_mdio_wait_until_ready(lp);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->phys)
			dma_unmap_single(ndev->dev.parent, cur_p->phys,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception. */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register will the Tx channel start transmitting. */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled. */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}

/**
 * axienet_of_probe - Axi Ethernet probe function.
 * @op: Pointer to platform device structure.
 *
 * returns: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the
 * Ethernet device, parses the device tree to populate the fields of
 * axienet_local, and registers the Ethernet device.
 */
static int axienet_of_probe(struct platform_device *op)
{
	__be32 *p;
	int size, ret = 0;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	const void *addr;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	ether_setup(ndev);
	platform_set_drvdata(op, ndev);

	SET_NETDEV_DEV(ndev, &op->dev);
	ndev->flags &= ~IFF_MULTICAST;	/* clear multicast */
	ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &op->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	/* Map device registers */
	lp->regs = of_iomap(op->dev.of_node, 0);
	if (!lp->regs) {
		dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
		ret = -ENOMEM;
		goto nodev;
	}
	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
	if (p) {
		switch (be32_to_cpup(p)) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
	if (p) {
		switch (be32_to_cpup(p)) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be more than or
	 * equal to 16384 bytes, so that we can enable jumbo option and start
	 * supporting jumbo frames. Here we check for memory allocated for
	 * Rx/Tx in the hardware from the device-tree and accordingly set
	 * flags. */
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
	if (p) {
		if ((be32_to_cpup(p)) >= 0x4000)
			lp->jumbo_support = 1;
	}
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
				       NULL);
	if (p)
		lp->temac_type = be32_to_cpup(p);
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
	if (p)
		lp->phy_type = be32_to_cpup(p);

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
	if (!np) {
		dev_err(&op->dev, "could not find DMA node\n");
		ret = -ENODEV;
		goto err_iounmap;
	}
	lp->dma_regs = of_iomap(np, 0);
	if (lp->dma_regs) {
		dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
	} else {
		dev_err(&op->dev, "unable to map DMA registers\n");
		of_node_put(np);
		ret = -ENOMEM;
		goto err_iounmap;
	}
	lp->rx_irq = irq_of_parse_and_map(np, 1);
	lp->tx_irq = irq_of_parse_and_map(np, 0);
	of_node_put(np);
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&op->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto err_iounmap_2;
	}

	/* Retrieve the MAC address */
	addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
	if ((!addr) || (size != 6)) {
		dev_err(&op->dev, "could not find MAC address\n");
		ret = -ENODEV;
		goto err_iounmap_2;
	}
	axienet_set_mac_address(ndev, (void *) addr);

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
	ret = axienet_mdio_setup(lp, op->dev.of_node);
	if (ret)
		dev_warn(&op->dev, "error registering MDIO bus\n");

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto err_iounmap_2;
	}

	return 0;

err_iounmap_2:
	if (lp->dma_regs)
		iounmap(lp->dma_regs);
err_iounmap:
	iounmap(lp->regs);
nodev:
	free_netdev(ndev);
	ndev = NULL;
	return ret;
}
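/*
 * Device-tree sketch (illustrative only; the address and values below are
 * made up, see the Xilinx axi-ethernet binding for the authoritative
 * format). The probe routine above expects a node along these lines:
 *
 *	axi_ethernet: ethernet@40c00000 {
 *		compatible = "xlnx,axi-ethernet-1.00.a";
 *		local-mac-address = [00 0a 35 01 02 03];
 *		phy-handle = <&phy0>;
 *		axistream-connected = <&axi_dma>;
 *		xlnx,txcsum = <0x2>;
 *		xlnx,rxcsum = <0x2>;
 *		xlnx,rxmem = <0x8000>;
 *		xlnx,phy-type = <0x1>;
 *	};
 */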
static int axienet_of_remove(struct platform_device *op)
{
	struct net_device *ndev = platform_get_drvdata(op);
	struct axienet_local *lp = netdev_priv(ndev);

	axienet_mdio_teardown(lp);
	unregister_netdev(ndev);

	if (lp->phy_node)
		of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	iounmap(lp->regs);
	if (lp->dma_regs)
		iounmap(lp->dma_regs);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver axienet_of_driver = {
	.probe = axienet_of_probe,
	.remove = axienet_of_remove,
	.driver = {
		 .owner = THIS_MODULE,
		 .name = "xilinx_axienet",
		 .of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_of_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");