// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		64
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};
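/*
 * Each entry above is consumed by axienet_setoptions() further down as a
 * read-modify-write recipe: read .reg, clear .m_or, then OR .m_or back in
 * when the corresponding option bit is requested. An illustrative sketch
 * (not a separate code path) of what enabling jumbo frames expands to:
 *
 *	reg = axienet_ior(lp, XAE_TC_OFFSET) & ~XAE_TC_JUM_MASK;
 *	reg |= XAE_TC_JUM_MASK;			// XAE_OPTION_JUMBO is set
 *	axienet_iow(lp, XAE_TC_OFFSET, reg);	// and likewise for RCW1
 */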
/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 * @value:	Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	iowrite32(value, lp->dma_regs + reg);
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Guard against a partially initialized ring: this is also reached
	 * from the axienet_dma_bd_init() error path, where rx_bd_v may not
	 * have been allocated yet.
	 */
	if (lp->rx_bd_v) {
		for (i = 0; i < lp->rx_bd_num; i++) {
			dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
			dev_kfree_skb(lp->rx_bd_v[i].skb);
		}

		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
				  lp->rx_bd_v,
				  lp->rx_bd_p);
	}
	if (lp->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
				  lp->tx_bd_v,
				  lp->tx_bd_p);
	}
}
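/*
 * Note on ring layout, as a mental model rather than new behavior: both
 * rings are circular arrays of struct axidma_bd allocated from coherent
 * DMA memory. Each descriptor's ->next holds the *bus* address of the
 * following descriptor, wrapping at the end, e.g. for the Tx ring:
 *
 *	bd[i].next = tx_bd_p + sizeof(bd) * ((i + 1) % tx_bd_num);
 *
 * so the DMA engine can chase the chain without CPU involvement.
 */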
/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % lp->tx_bd_num);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % lp->rx_bd_num);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}
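/*
 * Worked example of the UAW packing above (illustrative only): for the
 * MAC address 00:0a:35:01:02:03, UAW0 receives
 * 0x00 | 0x0a << 8 | 0x35 << 16 | 0x01 << 24 = 0x01350a00, and the low
 * 16 bits of UAW1 receive 0x02 | 0x03 << 8 = 0x0302; the upper UAW1 bits
 * are preserved by the read-modify-write.
 */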
/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}
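/*
 * A note on the FMI accesses above: the low byte of the Frame Filter/Mask
 * Index register selects which of the four CAM entries the subsequent
 * AF0/AF1 writes land in, which is why the code masks with 0xFFFFFF00 and
 * ORs in the entry index i before each AF0/AF1 pair.
 */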
/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static void __axienet_device_reset(struct axienet_local *lp)
{
	u32 timeout;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
				XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
				   __func__);
			break;
		}
	}
}
/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp);

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
				   XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (axienet_dma_bd_init(ndev)) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 *			     Axi DMA Tx channel.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);
		if (cur_p->skb)
			dev_consume_skb_irq(cur_p->skb);
		/*cur_p->phys = 0;*/
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;
		cur_p->skb = NULL;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		if (++lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci = 0;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	/* Matches barrier in axienet_start_xmit */
	smp_mb();

	netif_wake_queue(ndev);
}
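/*
 * The smp_mb() above pairs with the one in axienet_start_xmit(): the
 * transmit path stops the queue, issues the barrier, then re-checks BD
 * space, while this completion path frees BDs, issues the barrier, then
 * wakes the queue. The pairing closes the window where a wakeup could
 * otherwise be lost between the producer's re-check and its stop (the
 * standard lockless stop/wake pattern, noted here for clarity).
 */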
/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);

		/* Matches barrier in axienet_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (axienet_check_tx_bd_space(lp, num_frag))
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	if (++lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	return NETDEV_TX_OK;
}
/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev:	Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb = cur_p->skb;
		cur_p->skb = NULL;
		length = cur_p->app4 & 0x0000FFFF;

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		/*skb_checksum_none_assert(skb);*/
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			return;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
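/*
 * A brief note on the two Rx checksum modes handled above: with full
 * offload the core validates TCP/UDP-over-IPv4 checksums itself and the
 * driver only reports CHECKSUM_UNNECESSARY on success; with partial
 * offload the core hands back a raw 16-bit sum in app3 and the stack
 * finishes validation from CHECKSUM_COMPLETE. Frames of 64 bytes or less
 * skip the partial path, presumably because the hardware sum would cover
 * link-layer padding on short frames.
 */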
/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}
/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(unsigned long data);

/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting
	 * and re-enabled afterwards.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	mutex_lock(&lp->mii_bus->mdio_lock);
	axienet_mdio_disable(lp);
	axienet_device_reset(ndev);
	ret = axienet_mdio_enable(lp);
	mutex_unlock(&lp->mii_bus->mdio_lock);
	if (ret < 0)
		return ret;

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
		     (unsigned long) lp);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	tasklet_kill(&lp->dma_err_tasklet);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}
/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr, sr;
	int count;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	mutex_lock(&lp->mii_bus->mdio_lock);
	axienet_mdio_disable(lp);
	__axienet_device_reset(lp);
	axienet_mdio_enable(lp);
	mutex_unlock(&lp->mii_bus->mdio_lock);

	tasklet_kill(&lp->dma_err_tasklet);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * Return: 0 on success; -EBUSY if the interface is running, or -EINVAL if
 * the new MTU does not fit the receive memory the hardware was synthesized
 * with.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}
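/*
 * Worked example for the check above (assuming the usual constants:
 * VLAN_ETH_HLEN = 18 and this driver's XAE_TRL_SIZE = 4): an MTU of 9000
 * needs 9000 + 18 + 4 = 9022 bytes of receive frame storage, so it is
 * accepted only when the hardware was synthesized with rxmem >= 9022.
 */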
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev:	Pointer to net_device structure
 * @ed:		Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}
/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 * @regs:	Pointer to ethtool_regs structure
 * @ret:	Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *) ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}

static void axienet_ethtools_get_ringparam(struct net_device *ndev,
					   struct ethtool_ringparam *ering)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int axienet_ethtools_set_ringparam(struct net_device *ndev,
					  struct ethtool_ringparam *ering)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}
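/*
 * Usage note for the two ring callbacks above (behavior unchanged, shown
 * for convenience): ring sizes can only be changed while the interface is
 * down, for example:
 *
 *	ip link set eth0 down
 *	ethtool -G eth0 rx 512 tx 128
 *	ip link set eth0 up
 *
 * "ethtool -G"/"ethtool -g" are the standard front ends for the
 * set/get_ringparam callbacks.
 */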
/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter (flow control)
 *				     settings.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, or a negative error value from phylink.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}
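/*
 * A hedged worked example of the extraction above: if the Rx control
 * register still holds the driver default programmed at probe time
 * (XAXIDMA_DFT_RX_THRESHOLD, 24 in the matching mainline header, assuming
 * it is unchanged), then (regval & XAXIDMA_COALESCE_MASK) >>
 * XAXIDMA_COALESCE_SHIFT recovers 24, so "ethtool -c ethX" reports
 * rx-frames 24 until "ethtool -C ethX rx-frames N" overrides it.
 */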
/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if ((ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->rx_coalesce_usecs_irq) ||
	    (ecoalesce->rx_max_coalesced_frames_irq) ||
	    (ecoalesce->tx_coalesce_usecs) ||
	    (ecoalesce->tx_coalesce_usecs_irq) ||
	    (ecoalesce->tx_max_coalesced_frames_irq) ||
	    (ecoalesce->stats_block_coalesce_usecs) ||
	    (ecoalesce->use_adaptive_rx_coalesce) ||
	    (ecoalesce->use_adaptive_tx_coalesce) ||
	    (ecoalesce->pkt_rate_low) ||
	    (ecoalesce->rx_coalesce_usecs_low) ||
	    (ecoalesce->rx_max_coalesced_frames_low) ||
	    (ecoalesce->tx_coalesce_usecs_low) ||
	    (ecoalesce->tx_max_coalesced_frames_low) ||
	    (ecoalesce->pkt_rate_high) ||
	    (ecoalesce->rx_coalesce_usecs_high) ||
	    (ecoalesce->rx_max_coalesced_frames_high) ||
	    (ecoalesce->tx_coalesce_usecs_high) ||
	    (ecoalesce->tx_max_coalesced_frames_high) ||
	    (ecoalesce->rate_sample_interval))
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.get_drvinfo    = axienet_ethtools_get_drvinfo,
	.get_regs_len   = axienet_ethtools_get_regs_len,
	.get_regs       = axienet_ethtools_get_regs,
	.get_link       = ethtool_op_get_link,
	.get_ringparam	= axienet_ethtools_get_ringparam,
	.set_ringparam	= axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce   = axienet_ethtools_get_coalesce,
	.set_coalesce   = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
};

static void axienet_validate(struct phylink_config *config,
			     unsigned long *supported,
			     struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Only support the mode we are configured for */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != lp->phy_mode) {
		netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
			    phy_modes(state->interface),
			    phy_modes(lp->phy_mode));
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);

	phylink_set(mask, Asym_Pause);
	phylink_set(mask, Pause);
	phylink_set(mask, 1000baseX_Full);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static void axienet_mac_pcs_get_state(struct phylink_config *config,
				      struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	state->interface = lp->phy_mode;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	if (emmc_reg & XAE_EMMC_LINKSPD_1000)
		state->speed = SPEED_1000;
	else if (emmc_reg & XAE_EMMC_LINKSPD_100)
		state->speed = SPEED_100;
	else
		state->speed = SPEED_10;

	state->pause = 0;
	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (fcc_reg & XAE_FCC_FCTX_MASK)
		state->pause |= MLO_PAUSE_TX;
	if (fcc_reg & XAE_FCC_FCRX_MASK)
		state->pause |= MLO_PAUSE_RX;

	state->an_complete = 0;
	state->duplex = 1;
}

static void axienet_mac_an_restart(struct phylink_config *config)
{
	/* Unsupported, do nothing */
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (state->speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1Gbps is not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (state->pause & MLO_PAUSE_TX)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (state->pause & MLO_PAUSE_RX)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_up(struct phylink_config *config,
				unsigned int mode,
				phy_interface_t interface,
				struct phy_device *phy)
{
	/* nothing meaningful to do */
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.validate = axienet_validate,
	.mac_pcs_get_state = axienet_mac_pcs_get_state,
	.mac_an_restart = axienet_mac_an_restart,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};
/**
 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 * @data:	Data passed
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(unsigned long data)
{
	u32 axienet_status;
	u32 cr, i;
	struct axienet_local *lp = (struct axienet_local *) data;
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting
	 * and re-enabled afterwards.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	mutex_lock(&lp->mii_bus->mdio_lock);
	axienet_mdio_disable(lp);
	__axienet_device_reset(lp);
	axienet_mdio_enable(lp);
	mutex_unlock(&lp->mii_bus->mdio_lock);

	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->phys)
			dma_unmap_single(ndev->dev.parent, cur_p->phys,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}
/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev:	Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	const void *mac_addr;
	struct resource *ethres;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
	/* Map device registers */
	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
		ret = PTR_ERR(lp->regs);
		goto free_netdev;
	}
	lp->regs_start = ethres->start;

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be large so that
	 * we can enable jumbo option and start supporting jumbo frames.
	 * Here we check for memory allocated for Rx/Tx in the hardware from
	 * the device-tree and accordingly set flags.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto free_netdev;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto free_netdev;
	}

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (np) {
		struct resource dmares;

		ret = of_address_to_resource(np, 0, &dmares);
		if (ret) {
			dev_err(&pdev->dev,
				"unable to get DMA resource\n");
			of_node_put(np);
			goto free_netdev;
		}
		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
						     &dmares);
		lp->rx_irq = irq_of_parse_and_map(np, 1);
		lp->tx_irq = irq_of_parse_and_map(np, 0);
		of_node_put(np);
		lp->eth_irq = platform_get_irq(pdev, 0);
	} else {
		/* Check for these resources directly on the Ethernet node. */
		struct resource *res = platform_get_resource(pdev,
							     IORESOURCE_MEM, 1);
		lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
		lp->rx_irq = platform_get_irq(pdev, 1);
		lp->tx_irq = platform_get_irq(pdev, 0);
		lp->eth_irq = platform_get_irq(pdev, 2);
	}
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto free_netdev;
	}
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto free_netdev;
	}

	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	/* Retrieve the MAC address */
	mac_addr = of_get_mac_address(pdev->dev.of_node);
	if (IS_ERR(mac_addr)) {
		dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
			 PTR_ERR(mac_addr));
		mac_addr = NULL;
	}
	axienet_set_mac_address(ndev, mac_addr);

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (lp->phy_node) {
		lp->clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(lp->clk)) {
			dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
				 PTR_ERR(lp->clk));
			lp->clk = NULL;
		} else {
			ret = clk_prepare_enable(lp->clk);
			if (ret) {
				dev_err(&pdev->dev, "Unable to enable clock: %d\n",
					ret);
				goto free_netdev;
			}
		}

		ret = axienet_mdio_setup(lp);
		if (ret)
			dev_warn(&pdev->dev,
				 "error registering MDIO bus: %d\n", ret);
	}

	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto free_netdev;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto free_netdev;
	}

	return 0;

free_netdev:
	free_netdev(ndev);

	return ret;
}

static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	axienet_mdio_teardown(lp);

	if (lp->clk)
		clk_disable_unprepare(lp->clk);

	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	free_netdev(ndev);

	return 0;
}

static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		 .name = "xilinx_axienet",
		 .of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");