/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptor counts for the Tx and Rx DMA rings; use a power of two for the
 * best performance. */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		32

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};
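/*
 * Each entry above pairs an option flag with the register and mask that
 * implement it; axienet_setoptions() below walks this table, ORing the
 * mask into the named register when the option is requested and clearing
 * it otherwise, so one table drives both enabling and disabling.
 */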
/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * returns: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	out_be32((lp->dma_regs + reg), value);
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi
 * Ethernet driver stop API is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	if (lp->rx_bd_v) {
		for (i = 0; i < RX_BD_NUM; i++) {
			dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
			dev_kfree_skb((struct sk_buff *)
				      (lp->rx_bd_v[i].sw_id_offset));
		}
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v,
				  lp->rx_bd_p);
	}
	if (lp->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v,
				  lp->tx_bd_p);
	}
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * returns: 0, on success
 *	    -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					  &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					  &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;
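	/* Link the BDs into circular rings: each descriptor's next field
	 * holds the bus address of the following descriptor and the last
	 * entry wraps back to the first. The DMA engine follows these
	 * links; software tracks its own position with the tx_bd_ci,
	 * tx_bd_tail and rx_bd_ci indexes. */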
	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception. */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is now ready to run, but only after we write to the
	 * tail pointer register will it actually start transmitting. */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_random_addr(ndev->dev_addr);
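	/* UAW0 takes the first four octets of the address, least
	 * significant byte first; the low 16 bits of UAW1 take the
	 * remaining two octets. */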
	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * returns: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it. */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);
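			/* The low byte of the FMI register selects which of
			 * the four CAM entries the following AF0/AF1 writes
			 * will program. */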
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static void __axienet_device_reset(struct axienet_local *lp,
				   struct device *dev, off_t offset)
{
	u32 timeout;
	/* Reset Axi DMA. This would reset the Axi Ethernet core as well.
	 * The reset process of Axi DMA takes a while to complete as all
	 * pending commands/transfers will be flushed or completed during
	 * this reset process. */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(dev, "axienet_device_reset DMA reset timeout!\n");
			break;
		}
	}
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU) &&
	    (lp->jumbo_support)) {
		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
				   XAE_TRL_SIZE;
		lp->options |= XAE_OPTION_JUMBO;
	}

	if (axienet_dma_bd_init(ndev)) {
		dev_err(&ndev->dev, "axienet_device_reset descriptor allocation failed\n");
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled. */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	ndev->trans_start = jiffies;
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
	u32 emmc_reg;
	u32 link_state;
	u32 setspeed = 1;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;
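	/* Fold speed, duplex and link status into one value that is used
	 * purely to detect whether anything changed since the last call. */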
	link_state = phy->speed | (phy->duplex << 1) | phy->link;
	if (lp->last_link != link_state) {
		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
			if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
				setspeed = 0;
		} else {
			if ((phy->speed == SPEED_1000) &&
			    (lp->phy_type == XAE_PHY_TYPE_MII))
				setspeed = 0;
		}

		if (setspeed == 1) {
			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

			switch (phy->speed) {
			case SPEED_1000:
				emmc_reg |= XAE_EMMC_LINKSPD_1000;
				break;
			case SPEED_100:
				emmc_reg |= XAE_EMMC_LINKSPD_100;
				break;
			case SPEED_10:
				emmc_reg |= XAE_EMMC_LINKSPD_10;
				break;
			default:
				dev_err(&ndev->dev, "Speed other than 10, 100 or 1Gbps is not supported\n");
				break;
			}

			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
			lp->last_link = link_state;
			phy_print_status(phy);
		} else {
			dev_err(&ndev->dev, "Error setting Axi Ethernet mac speed\n");
		}
	}
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 *			     Axi DMA Tx channel.
 * @ndev: Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);
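		/* app4 holds the sk_buff pointer that axienet_start_xmit()
		 * stored on the BD closing each frame; free it now that the
		 * hardware is done with the buffer. */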
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		/*cur_p->phys = 0;*/
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		lp->tx_bd_ci = (lp->tx_bd_ci + 1) % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;
	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * returns: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;
	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
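	/* Remember the skb on the BD that ends the frame so that
	 * axienet_start_xmit_done() can free it once the DMA engine
	 * reports the frame complete. */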
	cur_p->app4 = (unsigned long)skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;

	return NETDEV_TX_OK;
}

/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev: Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		skb = (struct sk_buff *) (cur_p->sw_id_offset);
		length = cur_p->app4 & 0x0000FFFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		/*skb_checksum_none_assert(skb);*/
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;
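		/* Refill: attach a fresh buffer to the BD and hand it back
		 * to the hardware so the ring never runs dry. */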
		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			return;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32) new_skb;

		lp->rx_bd_ci = (lp->rx_bd_ci + 1) % RX_BD_NUM;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
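	/* Writing the status word back acknowledges the serviced
	 * conditions; the DMA status bits are write-one-to-clear per the
	 * AXI DMA programming model. */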
	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(unsigned long data);

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * returns: 0, on success.
 *	    -ENODEV, if PHY cannot be connected to
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards. */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	if (lp->phy_node) {
		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     axienet_adjust_link, 0,
					     PHY_INTERFACE_MODE_GMII);
		if (!lp->phy_dev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(lp->phy_dev);
	}

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
		     (unsigned long) lp);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	tasklet_kill(&lp->dma_err_tasklet);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * returns: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_kill(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	axienet_dma_bd_release(ndev);
	return 0;
}
/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * returns: 0 on success, -EBUSY if the interface is running, or -EINVAL if
 *	    the requested mtu is out of range.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;
	if (lp->jumbo_support) {
		if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	} else {
		if ((new_mtu > XAE_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled
 * prior to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
 * @ndev: Pointer to net_device structure
 * @ecmd: Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for getting PHY settings. If PHY could
 * not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to get the PHY settings.
 * Issue "ethtool ethX" under linux prompt to execute this function.
 */
static int axienet_ethtools_get_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;
	if (!phydev)
		return -ENODEV;
	return phy_ethtool_gset(phydev, ecmd);
}

/**
 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
 * @ndev: Pointer to net_device structure
 * @ecmd: Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for setting various PHY settings. If PHY
 * could not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to set the PHY.
 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
 * function.
 */
static int axienet_ethtools_set_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;
	if (!phydev)
		return -ENODEV;
	return phy_ethtool_sset(phydev, ecmd);
}
/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
	ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *) ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}
/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval;
	struct axienet_local *lp = netdev_priv(ndev);
	epauseparm->autoneg = 0;
	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR "%s: Please stop netif before applying configuration\n",
		       ndev->name);
		return -EFAULT;
	}

	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	if (epauseparm->tx_pause)
		regval |= XAE_FCC_FCTX_MASK;
	else
		regval &= ~XAE_FCC_FCTX_MASK;
	if (epauseparm->rx_pause)
		regval |= XAE_FCC_FCRX_MASK;
	else
		regval &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, regval);

	return 0;
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR "%s: Please stop netif before applying configuration\n",
		       ndev->name);
		return -EFAULT;
	}
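	/* Only the rx-frames and tx-frames counts can be programmed into
	 * the DMA control registers; reject every other coalescing knob. */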
	if ((ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->rx_coalesce_usecs_irq) ||
	    (ecoalesce->rx_max_coalesced_frames_irq) ||
	    (ecoalesce->tx_coalesce_usecs) ||
	    (ecoalesce->tx_coalesce_usecs_irq) ||
	    (ecoalesce->tx_max_coalesced_frames_irq) ||
	    (ecoalesce->stats_block_coalesce_usecs) ||
	    (ecoalesce->use_adaptive_rx_coalesce) ||
	    (ecoalesce->use_adaptive_tx_coalesce) ||
	    (ecoalesce->pkt_rate_low) ||
	    (ecoalesce->rx_coalesce_usecs_low) ||
	    (ecoalesce->rx_max_coalesced_frames_low) ||
	    (ecoalesce->tx_coalesce_usecs_low) ||
	    (ecoalesce->tx_max_coalesced_frames_low) ||
	    (ecoalesce->pkt_rate_high) ||
	    (ecoalesce->rx_coalesce_usecs_high) ||
	    (ecoalesce->rx_max_coalesced_frames_high) ||
	    (ecoalesce->tx_coalesce_usecs_high) ||
	    (ecoalesce->tx_max_coalesced_frames_high) ||
	    (ecoalesce->rate_sample_interval))
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.get_settings   = axienet_ethtools_get_settings,
	.set_settings   = axienet_ethtools_set_settings,
	.get_drvinfo    = axienet_ethtools_get_drvinfo,
	.get_regs_len   = axienet_ethtools_get_regs_len,
	.get_regs       = axienet_ethtools_get_regs,
	.get_link       = ethtool_op_get_link,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce   = axienet_ethtools_get_coalesce,
	.set_coalesce   = axienet_ethtools_set_coalesce,
};

/**
 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 * @data: Data passed
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(unsigned long data)
{
	u32 axienet_status;
	u32 cr, i;
	int mdio_mcreg;
	struct axienet_local *lp = (struct axienet_local *) data;
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	axienet_mdio_wait_until_ready(lp);
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. So if MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards. */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
		    ~XAE_MDIO_MC_MDIOEN_MASK));

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	axienet_mdio_wait_until_ready(lp);
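	/* Reclaim anything the hardware still owned: unmap and free any
	 * in-flight Tx buffers, then scrub both descriptor rings back to
	 * their just-initialized state. */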
	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->phys)
			dma_unmap_single(ndev->dev.parent, cur_p->phys,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception. */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is now ready to run, but only after we write to the
	 * tail pointer register will it actually start transmitting. */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled. */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}

/**
 * axienet_of_probe - Axi Ethernet probe function.
 * @op: Pointer to platform device structure.
 *
 * returns: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_of_probe(struct platform_device *op)
{
	__be32 *p;
	int size, ret = 0;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	const void *addr;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	ether_setup(ndev);
	platform_set_drvdata(op, ndev);

	SET_NETDEV_DEV(ndev, &op->dev);
	ndev->flags &= ~IFF_MULTICAST;	/* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &op->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	/* Map device registers */
	lp->regs = of_iomap(op->dev.of_node, 0);
	if (!lp->regs) {
		dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
		ret = -ENOMEM;
		goto nodev;
	}
	/* Set up checksum offload, but default to off if not specified */
	lp->features = 0;
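	/* The optional "xlnx,txcsum" and "xlnx,rxcsum" device-tree
	 * properties describe the checksum engines built into the core:
	 * 1 requests partial offload (per-packet start/insert offsets),
	 * 2 requests full offload. */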
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
	if (p) {
		switch (be32_to_cpup(p)) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
	if (p) {
		switch (be32_to_cpup(p)) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be greater than or
	 * equal to 16384 bytes, so that we can enable the jumbo option and
	 * start supporting jumbo frames. Here we check for memory allocated
	 * for Rx/Tx in the hardware from the device-tree and set the flags
	 * accordingly. */
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
	if (p) {
		if ((be32_to_cpup(p)) >= 0x4000)
			lp->jumbo_support = 1;
	}
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
				       NULL);
	if (p)
		lp->temac_type = be32_to_cpup(p);
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
	if (p)
		lp->phy_type = be32_to_cpup(p);

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
	if (!np) {
		dev_err(&op->dev, "could not find DMA node\n");
		ret = -ENODEV;
		goto err_iounmap;
	}
	lp->dma_regs = of_iomap(np, 0);
	if (lp->dma_regs) {
		dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
	} else {
		dev_err(&op->dev, "unable to map DMA registers\n");
		of_node_put(np);
		ret = -ENOMEM;
		goto err_iounmap;
	}
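	/* Interrupt 0 of the connected DMA node is taken as the Tx
	 * completion interrupt and interrupt 1 as the Rx completion
	 * interrupt. */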
	lp->rx_irq = irq_of_parse_and_map(np, 1);
	lp->tx_irq = irq_of_parse_and_map(np, 0);
	of_node_put(np);
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&op->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto err_iounmap_2;
	}

	/* Retrieve the MAC address */
	addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
	if ((!addr) || (size != 6)) {
		dev_err(&op->dev, "could not find MAC address\n");
		ret = -ENODEV;
		goto err_iounmap_2;
	}
	axienet_set_mac_address(ndev, (void *) addr);

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
	ret = axienet_mdio_setup(lp, op->dev.of_node);
	if (ret)
		dev_warn(&op->dev, "error registering MDIO bus\n");

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto err_iounmap_2;
	}

	return 0;

err_iounmap_2:
	if (lp->dma_regs)
		iounmap(lp->dma_regs);
err_iounmap:
	iounmap(lp->regs);
nodev:
	free_netdev(ndev);
	ndev = NULL;
	return ret;
}

static int axienet_of_remove(struct platform_device *op)
{
	struct net_device *ndev = platform_get_drvdata(op);
	struct axienet_local *lp = netdev_priv(ndev);

	axienet_mdio_teardown(lp);
	unregister_netdev(ndev);

	if (lp->phy_node)
		of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	iounmap(lp->regs);
	if (lp->dma_regs)
		iounmap(lp->dma_regs);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver axienet_of_driver = {
	.probe = axienet_of_probe,
	.remove = axienet_of_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "xilinx_axienet",
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_of_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");