/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 * - Add Axi Fifo support.
 * - Factor out Axi DMA code into separate driver.
 * - Test and fix basic multicast filtering.
 * - Add support for extended multicast filtering.
 * - Test basic VLAN support.
 * - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptor counts for Tx and Rx DMA - keep them 2^n for best performance */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		32

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	out_be32((lp->dma_regs + reg), value);
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver stop routine is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	for (i = 0; i < RX_BD_NUM; i++) {
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 lp->max_frm_size, DMA_FROM_DEVICE);
		dev_kfree_skb((struct sk_buff *)
			      (lp->rx_bd_v[i].sw_id_offset));
	}

	if (lp->rx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v,
				  lp->rx_bd_p);
	}
	if (lp->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v,
				  lp->tx_bd_p);
	}
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0 on success; -ENOMEM on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when the Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					  &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					  &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run, but only after we write to the
	 * tail pointer register will the Tx channel start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
{
	u32 timeout;

	/* Reset Axi DMA. This would reset the Axi Ethernet core as well.
	 * The reset process of Axi DMA takes a while to complete as all
	 * pending commands/transfers will be flushed or completed during
	 * this reset process.
	 */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
				   __func__);
			break;
		}
	}
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since the Axi DMA reset
 * lines are connected to the Axi Ethernet reset lines, this in turn resets
 * the Axi Ethernet core. No separate hardware reset is done for the Axi
 * Ethernet core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (axienet_dma_bd_init(ndev)) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
	u32 emmc_reg;
	u32 link_state;
	u32 setspeed = 1;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;

	link_state = phy->speed | (phy->duplex << 1) | phy->link;
	if (lp->last_link != link_state) {
		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
			if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX)
				setspeed = 0;
		} else {
			if ((phy->speed == SPEED_1000) &&
			    (lp->phy_mode == PHY_INTERFACE_MODE_MII))
				setspeed = 0;
		}

		if (setspeed == 1) {
			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

			switch (phy->speed) {
			case SPEED_1000:
				emmc_reg |= XAE_EMMC_LINKSPD_1000;
				break;
			case SPEED_100:
				emmc_reg |= XAE_EMMC_LINKSPD_100;
				break;
			case SPEED_10:
				emmc_reg |= XAE_EMMC_LINKSPD_10;
				break;
			default:
				dev_err(&ndev->dev,
					"Speed other than 10, 100 or 1Gbps is not supported\n");
				break;
			}

			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
			lp->last_link = link_state;
			phy_print_status(phy);
		} else {
			netdev_err(ndev,
				   "Error setting Axi Ethernet mac speed\n");
		}
	}
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev: Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that the CPU can regain ownership of
 * the buffer. It finally invokes "netif_wake_queue" to restart transmission
 * if required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		/*cur_p->phys = 0;*/
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++lp->tx_bd_ci;
		lp->tx_bd_ci %= TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;
	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	for (ii = 0; ii < num_frag; ii++) {
		++lp->tx_bd_tail;
		lp->tx_bd_tail %= TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++lp->tx_bd_tail;
	lp->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
}

/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev: Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
		skb = (struct sk_buff *) (cur_p->sw_id_offset);
		length = cur_p->app4 & 0x0000FFFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		/*skb_checksum_none_assert(skb);*/
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			return;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32) new_skb;

		++lp->rx_bd_ci;
		lp->rx_bd_ci %= RX_BD_NUM;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(unsigned long data);

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY
 * device. It also allocates interrupt service routines, enables the
 * interrupt lines and ISR handling. The Axi Ethernet core is reset through
 * the Axi DMA core. Buffer descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till the Axi Ethernet reset is
	 * completed. When we do an Axi Ethernet reset, it resets the complete
	 * core including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards.
	 */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	if (lp->phy_node) {
		phydev = of_phy_connect(lp->ndev, lp->phy_node,
					axienet_adjust_link, 0, lp->phy_mode);

		if (!phydev)
			dev_err(lp->dev, "of_phy_connect() failed\n");
		else
			phy_start(phydev);
	}

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
		     (unsigned long) lp);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (phydev)
		phy_disconnect(phydev);
	tasklet_kill(&lp->dma_err_tasklet);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_kill(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success, -EBUSY if the device is running, or -EINVAL if the
 * new MTU cannot be supported by the configured Rx memory.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled
 * prior to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->tx_irq, ndev);
	axienet_tx_irq(lp->rx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *) ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval;
	struct axienet_local *lp = netdev_priv(ndev);

	epauseparm->autoneg = 0;
	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute
 * this function.
 *
 * Return: 0 on success, -EFAULT if device is running
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	if (epauseparm->tx_pause)
		regval |= XAE_FCC_FCTX_MASK;
	else
		regval &= ~XAE_FCC_FCTX_MASK;
	if (epauseparm->rx_pause)
		regval |= XAE_FCC_FCRX_MASK;
	else
		regval &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, regval);

	return 0;
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0 on success, non-zero error value on failure.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if ((ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->rx_coalesce_usecs_irq) ||
	    (ecoalesce->rx_max_coalesced_frames_irq) ||
	    (ecoalesce->tx_coalesce_usecs) ||
	    (ecoalesce->tx_coalesce_usecs_irq) ||
	    (ecoalesce->tx_max_coalesced_frames_irq) ||
	    (ecoalesce->stats_block_coalesce_usecs) ||
	    (ecoalesce->use_adaptive_rx_coalesce) ||
	    (ecoalesce->use_adaptive_tx_coalesce) ||
	    (ecoalesce->pkt_rate_low) ||
	    (ecoalesce->rx_coalesce_usecs_low) ||
	    (ecoalesce->rx_max_coalesced_frames_low) ||
	    (ecoalesce->tx_coalesce_usecs_low) ||
	    (ecoalesce->tx_max_coalesced_frames_low) ||
	    (ecoalesce->pkt_rate_high) ||
	    (ecoalesce->rx_coalesce_usecs_high) ||
	    (ecoalesce->rx_max_coalesced_frames_high) ||
	    (ecoalesce->tx_coalesce_usecs_high) ||
	    (ecoalesce->tx_max_coalesced_frames_high) ||
	    (ecoalesce->rate_sample_interval))
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

/**
 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 * @data: Data passed
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(unsigned long data)
{
	u32 axienet_status;
	u32 cr, i;
	int mdio_mcreg;
	struct axienet_local *lp = (struct axienet_local *) data;
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	axienet_mdio_wait_until_ready(lp);
	/* Disable the MDIO interface till the Axi Ethernet reset is
	 * completed. When we do an Axi Ethernet reset, it resets the complete
	 * core including the MDIO. So if MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards.
	 */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
		    ~XAE_MDIO_MC_MDIOEN_MASK));

	__axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	axienet_mdio_wait_until_ready(lp);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->phys)
			dma_unmap_single(ndev->dev.parent, cur_p->phys,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run, but only after we write to the
	 * tail pointer register will the Tx channel start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev:	Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	const void *mac_addr;
	struct resource *ethres, dmares;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	/* Map device registers */
	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
		ret = PTR_ERR(lp->regs);
		goto free_netdev;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be large so that
	 * we can enable jumbo option and start supporting jumbo frames.
	 * Here we check for memory allocated for Rx/Tx in the hardware from
	 * the device-tree and accordingly set flags.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto free_netdev;
		}
	} else {
		lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
		if (lp->phy_mode < 0) {
			ret = -EINVAL;
			goto free_netdev;
		}
	}

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (!np) {
		dev_err(&pdev->dev, "could not find DMA node\n");
		ret = -ENODEV;
		goto free_netdev;
	}
	ret = of_address_to_resource(np, 0, &dmares);
	if (ret) {
		dev_err(&pdev->dev, "unable to get DMA resource\n");
		goto free_netdev;
	}
	lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto free_netdev;
	}
	lp->rx_irq = irq_of_parse_and_map(np, 1);
	lp->tx_irq = irq_of_parse_and_map(np, 0);
	of_node_put(np);
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto free_netdev;
	}

	/* Retrieve the MAC address */
	mac_addr = of_get_mac_address(pdev->dev.of_node);
	if (!mac_addr) {
		dev_err(&pdev->dev, "could not find MAC address\n");
		ret = -EINVAL;
		goto free_netdev;
	}
	axienet_set_mac_address(ndev, mac_addr);

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (lp->phy_node) {
		ret = axienet_mdio_setup(lp, pdev->dev.of_node);
		if (ret)
			dev_warn(&pdev->dev, "error registering MDIO bus\n");
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto free_netdev;
	}

	return 0;

free_netdev:
	free_netdev(ndev);

	return ret;
}

static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	axienet_mdio_teardown(lp);
	unregister_netdev(ndev);

	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	free_netdev(ndev);

	return 0;
}

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.driver = {
		.name = "xilinx_axienet",
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");