// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		64
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

/* Number of register words reported for a register dump
 * (NOTE(review): consumer not visible in this chunk - presumably
 * ethtool get_regs; confirm against the rest of the file).
 */
#define AXIENET_REGS_N		40

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options.
 * Each entry maps one XAE_OPTION_* flag to the MAC register and mask bit
 * that implements it; axienet_setoptions() walks this table to apply or
 * clear each option. The table is terminated by an all-zero entry.
 */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the
base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 * @value:	Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	iowrite32(value, lp->dma_regs + reg);
}

/* Write a DMA address into a descriptor-pointer register pair.
 * The low 32 bits always go to @reg; when the core was synthesised for
 * 64-bit addressing (XAE_FEATURE_DMA_64BIT), the high 32 bits go to the
 * MSB register that immediately follows at @reg + 4.
 */
static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
				 dma_addr_t addr)
{
	axienet_dma_out32(lp, reg, lower_32_bits(addr));

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
}

/* Store a buffer address in a hardware descriptor, splitting it into the
 * 32-bit phys / phys_msb fields mandated by the descriptor layout.
 */
static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

/* Reassemble the buffer address stored in a hardware descriptor.
 * The double 16-bit shift (instead of a single << 32) keeps the
 * expression well-defined when dma_addr_t is only 32 bits wide.
 */
static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(ndev->dev.parent,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	/* The Rx ring may be absent or only partially initialised if
	 * axienet_dma_bd_init() failed part-way through.
	 */
	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(ndev->dev.parent, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(ndev->dev.parent,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors.
	 */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	/* Chain every Tx descriptor to the next one, wrapping the last
	 * entry back to the first so the descriptors form a ring.
	 */
	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	/* Build the Rx ring the same way, additionally attaching a freshly
	 * allocated, DMA-mapped receive buffer to every descriptor.
	 */
	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(ndev->dev.parent, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		/* Non-zero cntrl doubles as the "buffer is mapped" marker
		 * that axienet_dma_bd_release() relies on.
		 */
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	/* Partial initialisation is handled by the release helper, which
	 * stops at the first descriptor without an skb.
	 */
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	/* Fall back to a random address if nothing valid was supplied */
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up unicast MAC address filter set its mac address.
	 * UAW0 takes the first four address bytes little-endian; UAW1 keeps
	 * its non-address bits and receives the last two bytes.
	 */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		/* Program each multicast address into a CAM entry, capped at
		 * the table size (the over-capacity case was handled above).
		 */
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			/* NOTE(review): the low byte of FMI appears to select
			 * which CAM entry AF0/AF1 address - confirm against
			 * the Axi Ethernet TRM.
			 */
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		/* No multicast addresses: leave promiscuous mode and wipe
		 * every CAM entry.
		 */
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on
or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through
 * axienet_option structure .
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	/* Walk the option table: clear each option's mask bit, then set it
	 * back only if the corresponding option flag is requested.
	 */
	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 timeout;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	/* Busy-wait (1 us per iteration) until the core clears the reset
	 * bit, bounded by DELAY_OF_ONE_MILLISEC iterations.
	 */
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
	       XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
				   __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core.
No separate hardware reset is done for the Axi Ethernet
 * core.
 *
 * Return: 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	ret = __axienet_device_reset(lp);
	if (ret)
		return ret;

	/* Default to standard (VLAN-sized) frames; enable jumbo only when
	 * the MTU requires it and the receive memory can hold such a frame.
	 */
	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	ret = axienet_dma_bd_init(ndev);
	if (ret) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
		return ret;
	}

	/* Disable the receiver while the remaining setup runs */
	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	/* Ack any stale frame-reject event, then enable the error interrupt
	 * only when a core IRQ line was actually wired up.
	 */
	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @ndev:	Pointer to the net_device structure
 * @first_bd:	Index of first descriptor to clean up
 * @nr_bds:	Number of descriptors to clean up, can be -1 if unknown.
 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 *		in all cleaned-up descriptors. Ignored if NULL.
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 *
 * Return: the number of descriptors handled.
 */
static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
				 int nr_bds, u32 *sizep)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	int max_bds = nr_bds;
	unsigned int status;
	dma_addr_t phys;
	int i;

	if (max_bds == -1)
		max_bds = lp->tx_bd_num;

	for (i = 0; i < max_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If no number is given, clean up *all* descriptors that have
		 * been completed by the MAC.
		 */
		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* In the error-unwind case (nr_bds >= 0) the descriptor was
		 * mapped but never handed to hardware, so it is unmapped
		 * regardless of the completion bit.
		 */
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(ndev->dev.parent, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		/* Only the last descriptor of a frame carries the skb */
		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
			dev_consume_skb_irq(cur_p->skb);

		cur_p->cntrl = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;
		cur_p->skb = NULL;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	return i;
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	u32 packets = 0;
	u32 size = 0;

	/* Reclaim every completed descriptor starting at the consumer index
	 * and advance it, wrapping around the ring.
	 */
	packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);

	lp->tx_bd_ci += packets;
	if (lp->tx_bd_ci >= lp->tx_bd_num)
		lp->tx_bd_ci -= lp->tx_bd_num;

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	/* Matches barrier in axienet_start_xmit */
	smp_mb();

	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Checking the last descriptor the frame would occupy suffices:
	 * descriptors are consumed in ring order.
	 */
	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission.
Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	u32 orig_tail_ptr = lp->tx_bd_tail;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);

		/* Matches barrier in axienet_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (axienet_check_tx_bd_space(lp, num_frag))
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);
	}

	/* Request hardware checksum insertion via the app0/app1 control
	 * words, depending on which offload the core was built with.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(ndev->dev.parent, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	/* One additional descriptor per page fragment */
	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(ndev->dev.parent,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			/* Unwind the descriptors mapped so far (head + the
			 * fragments before this one) and restore the tail.
			 */
			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
					      NULL);
			lp->tx_bd_tail = orig_tail_ptr;

			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	/* The skb is attached to the last descriptor of the frame so the
	 * completion path frees it exactly once.
	 */
	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	if (++lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	return NETDEV_TX_OK;
}

/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 * BD processing.
 * @ndev:	Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Remember the last processed descriptor so the tail pointer
		 * can be advanced once, after the loop.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb = cur_p->skb;
		cur_p->skb = NULL;
		/* app4 carries the received frame length in its low 16 bits */
		length = cur_p->app4 & 0x0000FFFF;

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == htons(ETH_P_IP) &&
			   skb->len > 64) {
			/* NOTE(review): the 0xFFFF mask is applied before the
			 * byte-swap - verify against the endianness of the
			 * app3 checksum field.
			 */
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		/* Refill the descriptor with a fresh buffer; on allocation
		 * or mapping failure, bail out and leave the ring as-is.
		 */
		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			return;

		phys = dma_map_single(ndev->dev.parent, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			return;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		/* Recovery (DMA reset and ring re-init) runs in process
		 * context via the error work item.
		 */
		schedule_work(&lp->dma_err_task);
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 *
axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		/* Recovery (DMA reset and ring re-init) runs in process
		 * context via the error work item.
		 */
		schedule_work(&lp->dma_err_task);
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1009 */ 1010 static irqreturn_t axienet_eth_irq(int irq, void *_ndev) 1011 { 1012 struct net_device *ndev = _ndev; 1013 struct axienet_local *lp = netdev_priv(ndev); 1014 unsigned int pending; 1015 1016 pending = axienet_ior(lp, XAE_IP_OFFSET); 1017 if (!pending) 1018 return IRQ_NONE; 1019 1020 if (pending & XAE_INT_RXFIFOOVR_MASK) 1021 ndev->stats.rx_missed_errors++; 1022 1023 if (pending & XAE_INT_RXRJECT_MASK) 1024 ndev->stats.rx_frame_errors++; 1025 1026 axienet_iow(lp, XAE_IS_OFFSET, pending); 1027 return IRQ_HANDLED; 1028 } 1029 1030 static void axienet_dma_err_handler(struct work_struct *work); 1031 1032 /** 1033 * axienet_open - Driver open routine. 1034 * @ndev: Pointer to net_device structure 1035 * 1036 * Return: 0, on success. 1037 * non-zero error value on failure 1038 * 1039 * This is the driver open routine. It calls phylink_start to start the 1040 * PHY device. 1041 * It also allocates interrupt service routines, enables the interrupt lines 1042 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer 1043 * descriptors are initialized. 1044 */ 1045 static int axienet_open(struct net_device *ndev) 1046 { 1047 int ret; 1048 struct axienet_local *lp = netdev_priv(ndev); 1049 1050 dev_dbg(&ndev->dev, "axienet_open()\n"); 1051 1052 /* When we do an Axi Ethernet reset, it resets the complete core 1053 * including the MDIO. MDIO must be disabled before resetting. 1054 * Hold MDIO bus lock to avoid MDIO accesses during the reset. 
1055 */ 1056 mutex_lock(&lp->mii_bus->mdio_lock); 1057 ret = axienet_device_reset(ndev); 1058 mutex_unlock(&lp->mii_bus->mdio_lock); 1059 1060 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); 1061 if (ret) { 1062 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); 1063 return ret; 1064 } 1065 1066 phylink_start(lp->phylink); 1067 1068 /* Enable worker thread for Axi DMA error handling */ 1069 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); 1070 1071 /* Enable interrupts for Axi DMA Tx */ 1072 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, 1073 ndev->name, ndev); 1074 if (ret) 1075 goto err_tx_irq; 1076 /* Enable interrupts for Axi DMA Rx */ 1077 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED, 1078 ndev->name, ndev); 1079 if (ret) 1080 goto err_rx_irq; 1081 /* Enable interrupts for Axi Ethernet core (if defined) */ 1082 if (lp->eth_irq > 0) { 1083 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, 1084 ndev->name, ndev); 1085 if (ret) 1086 goto err_eth_irq; 1087 } 1088 1089 return 0; 1090 1091 err_eth_irq: 1092 free_irq(lp->rx_irq, ndev); 1093 err_rx_irq: 1094 free_irq(lp->tx_irq, ndev); 1095 err_tx_irq: 1096 phylink_stop(lp->phylink); 1097 phylink_disconnect_phy(lp->phylink); 1098 cancel_work_sync(&lp->dma_err_task); 1099 dev_err(lp->dev, "request_irq() failed\n"); 1100 return ret; 1101 } 1102 1103 /** 1104 * axienet_stop - Driver stop routine. 1105 * @ndev: Pointer to net_device structure 1106 * 1107 * Return: 0, on success. 1108 * 1109 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY 1110 * device. It also removes the interrupt handlers and disables the interrupts. 1111 * The Axi DMA Tx/Rx BDs are released. 
1112 */ 1113 static int axienet_stop(struct net_device *ndev) 1114 { 1115 u32 cr, sr; 1116 int count; 1117 struct axienet_local *lp = netdev_priv(ndev); 1118 1119 dev_dbg(&ndev->dev, "axienet_close()\n"); 1120 1121 phylink_stop(lp->phylink); 1122 phylink_disconnect_phy(lp->phylink); 1123 1124 axienet_setoptions(ndev, lp->options & 1125 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1126 1127 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1128 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); 1129 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 1130 1131 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1132 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); 1133 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 1134 1135 axienet_iow(lp, XAE_IE_OFFSET, 0); 1136 1137 /* Give DMAs a chance to halt gracefully */ 1138 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1139 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { 1140 msleep(20); 1141 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1142 } 1143 1144 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1145 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { 1146 msleep(20); 1147 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1148 } 1149 1150 /* Do a reset to ensure DMA is really stopped */ 1151 mutex_lock(&lp->mii_bus->mdio_lock); 1152 __axienet_device_reset(lp); 1153 mutex_unlock(&lp->mii_bus->mdio_lock); 1154 1155 cancel_work_sync(&lp->dma_err_task); 1156 1157 if (lp->eth_irq > 0) 1158 free_irq(lp->eth_irq, ndev); 1159 free_irq(lp->tx_irq, ndev); 1160 free_irq(lp->rx_irq, ndev); 1161 1162 axienet_dma_bd_release(ndev); 1163 return 0; 1164 } 1165 1166 /** 1167 * axienet_change_mtu - Driver change mtu routine. 1168 * @ndev: Pointer to net_device structure 1169 * @new_mtu: New mtu value to be applied 1170 * 1171 * Return: Always returns 0 (success). 1172 * 1173 * This is the change mtu driver routine. 
It checks if the Axi Ethernet 1174 * hardware supports jumbo frames before changing the mtu. This can be 1175 * called only when the device is not up. 1176 */ 1177 static int axienet_change_mtu(struct net_device *ndev, int new_mtu) 1178 { 1179 struct axienet_local *lp = netdev_priv(ndev); 1180 1181 if (netif_running(ndev)) 1182 return -EBUSY; 1183 1184 if ((new_mtu + VLAN_ETH_HLEN + 1185 XAE_TRL_SIZE) > lp->rxmem) 1186 return -EINVAL; 1187 1188 ndev->mtu = new_mtu; 1189 1190 return 0; 1191 } 1192 1193 #ifdef CONFIG_NET_POLL_CONTROLLER 1194 /** 1195 * axienet_poll_controller - Axi Ethernet poll mechanism. 1196 * @ndev: Pointer to net_device structure 1197 * 1198 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior 1199 * to polling the ISRs and are enabled back after the polling is done. 1200 */ 1201 static void axienet_poll_controller(struct net_device *ndev) 1202 { 1203 struct axienet_local *lp = netdev_priv(ndev); 1204 disable_irq(lp->tx_irq); 1205 disable_irq(lp->rx_irq); 1206 axienet_rx_irq(lp->tx_irq, ndev); 1207 axienet_tx_irq(lp->rx_irq, ndev); 1208 enable_irq(lp->tx_irq); 1209 enable_irq(lp->rx_irq); 1210 } 1211 #endif 1212 1213 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1214 { 1215 struct axienet_local *lp = netdev_priv(dev); 1216 1217 if (!netif_running(dev)) 1218 return -EINVAL; 1219 1220 return phylink_mii_ioctl(lp->phylink, rq, cmd); 1221 } 1222 1223 static const struct net_device_ops axienet_netdev_ops = { 1224 .ndo_open = axienet_open, 1225 .ndo_stop = axienet_stop, 1226 .ndo_start_xmit = axienet_start_xmit, 1227 .ndo_change_mtu = axienet_change_mtu, 1228 .ndo_set_mac_address = netdev_set_mac_address, 1229 .ndo_validate_addr = eth_validate_addr, 1230 .ndo_do_ioctl = axienet_ioctl, 1231 .ndo_set_rx_mode = axienet_set_multicast_list, 1232 #ifdef CONFIG_NET_POLL_CONTROLLER 1233 .ndo_poll_controller = axienet_poll_controller, 1234 #endif 1235 }; 1236 1237 /** 1238 * axienet_ethtools_get_drvinfo - 
Get various Axi Ethernet driver information. 1239 * @ndev: Pointer to net_device structure 1240 * @ed: Pointer to ethtool_drvinfo structure 1241 * 1242 * This implements ethtool command for getting the driver information. 1243 * Issue "ethtool -i ethX" under linux prompt to execute this function. 1244 */ 1245 static void axienet_ethtools_get_drvinfo(struct net_device *ndev, 1246 struct ethtool_drvinfo *ed) 1247 { 1248 strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); 1249 strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); 1250 } 1251 1252 /** 1253 * axienet_ethtools_get_regs_len - Get the total regs length present in the 1254 * AxiEthernet core. 1255 * @ndev: Pointer to net_device structure 1256 * 1257 * This implements ethtool command for getting the total register length 1258 * information. 1259 * 1260 * Return: the total regs length 1261 */ 1262 static int axienet_ethtools_get_regs_len(struct net_device *ndev) 1263 { 1264 return sizeof(u32) * AXIENET_REGS_N; 1265 } 1266 1267 /** 1268 * axienet_ethtools_get_regs - Dump the contents of all registers present 1269 * in AxiEthernet core. 1270 * @ndev: Pointer to net_device structure 1271 * @regs: Pointer to ethtool_regs structure 1272 * @ret: Void pointer used to return the contents of the registers. 1273 * 1274 * This implements ethtool command for getting the Axi Ethernet register dump. 1275 * Issue "ethtool -d ethX" to execute this function. 
1276 */ 1277 static void axienet_ethtools_get_regs(struct net_device *ndev, 1278 struct ethtool_regs *regs, void *ret) 1279 { 1280 u32 *data = (u32 *) ret; 1281 size_t len = sizeof(u32) * AXIENET_REGS_N; 1282 struct axienet_local *lp = netdev_priv(ndev); 1283 1284 regs->version = 0; 1285 regs->len = len; 1286 1287 memset(data, 0, len); 1288 data[0] = axienet_ior(lp, XAE_RAF_OFFSET); 1289 data[1] = axienet_ior(lp, XAE_TPF_OFFSET); 1290 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); 1291 data[3] = axienet_ior(lp, XAE_IS_OFFSET); 1292 data[4] = axienet_ior(lp, XAE_IP_OFFSET); 1293 data[5] = axienet_ior(lp, XAE_IE_OFFSET); 1294 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); 1295 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); 1296 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); 1297 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); 1298 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); 1299 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); 1300 data[12] = axienet_ior(lp, XAE_PPST_OFFSET); 1301 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); 1302 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); 1303 data[15] = axienet_ior(lp, XAE_TC_OFFSET); 1304 data[16] = axienet_ior(lp, XAE_FCC_OFFSET); 1305 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); 1306 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); 1307 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); 1308 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); 1309 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); 1310 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); 1311 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); 1312 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); 1313 data[29] = axienet_ior(lp, XAE_FMI_OFFSET); 1314 data[30] = axienet_ior(lp, XAE_AF0_OFFSET); 1315 data[31] = axienet_ior(lp, XAE_AF1_OFFSET); 1316 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1317 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1318 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); 1319 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); 1320 data[36] = 
axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1321 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1322 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); 1323 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); 1324 } 1325 1326 static void axienet_ethtools_get_ringparam(struct net_device *ndev, 1327 struct ethtool_ringparam *ering) 1328 { 1329 struct axienet_local *lp = netdev_priv(ndev); 1330 1331 ering->rx_max_pending = RX_BD_NUM_MAX; 1332 ering->rx_mini_max_pending = 0; 1333 ering->rx_jumbo_max_pending = 0; 1334 ering->tx_max_pending = TX_BD_NUM_MAX; 1335 ering->rx_pending = lp->rx_bd_num; 1336 ering->rx_mini_pending = 0; 1337 ering->rx_jumbo_pending = 0; 1338 ering->tx_pending = lp->tx_bd_num; 1339 } 1340 1341 static int axienet_ethtools_set_ringparam(struct net_device *ndev, 1342 struct ethtool_ringparam *ering) 1343 { 1344 struct axienet_local *lp = netdev_priv(ndev); 1345 1346 if (ering->rx_pending > RX_BD_NUM_MAX || 1347 ering->rx_mini_pending || 1348 ering->rx_jumbo_pending || 1349 ering->rx_pending > TX_BD_NUM_MAX) 1350 return -EINVAL; 1351 1352 if (netif_running(ndev)) 1353 return -EBUSY; 1354 1355 lp->rx_bd_num = ering->rx_pending; 1356 lp->tx_bd_num = ering->tx_pending; 1357 return 0; 1358 } 1359 1360 /** 1361 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for 1362 * Tx and Rx paths. 1363 * @ndev: Pointer to net_device structure 1364 * @epauseparm: Pointer to ethtool_pauseparam structure. 1365 * 1366 * This implements ethtool command for getting axi ethernet pause frame 1367 * setting. Issue "ethtool -a ethX" to execute this function. 1368 */ 1369 static void 1370 axienet_ethtools_get_pauseparam(struct net_device *ndev, 1371 struct ethtool_pauseparam *epauseparm) 1372 { 1373 struct axienet_local *lp = netdev_priv(ndev); 1374 1375 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); 1376 } 1377 1378 /** 1379 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) 1380 * settings. 
1381 * @ndev: Pointer to net_device structure 1382 * @epauseparm:Pointer to ethtool_pauseparam structure 1383 * 1384 * This implements ethtool command for enabling flow control on Rx and Tx 1385 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this 1386 * function. 1387 * 1388 * Return: 0 on success, -EFAULT if device is running 1389 */ 1390 static int 1391 axienet_ethtools_set_pauseparam(struct net_device *ndev, 1392 struct ethtool_pauseparam *epauseparm) 1393 { 1394 struct axienet_local *lp = netdev_priv(ndev); 1395 1396 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); 1397 } 1398 1399 /** 1400 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. 1401 * @ndev: Pointer to net_device structure 1402 * @ecoalesce: Pointer to ethtool_coalesce structure 1403 * 1404 * This implements ethtool command for getting the DMA interrupt coalescing 1405 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to 1406 * execute this function. 1407 * 1408 * Return: 0 always 1409 */ 1410 static int axienet_ethtools_get_coalesce(struct net_device *ndev, 1411 struct ethtool_coalesce *ecoalesce) 1412 { 1413 u32 regval = 0; 1414 struct axienet_local *lp = netdev_priv(ndev); 1415 regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1416 ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) 1417 >> XAXIDMA_COALESCE_SHIFT; 1418 regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1419 ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) 1420 >> XAXIDMA_COALESCE_SHIFT; 1421 return 0; 1422 } 1423 1424 /** 1425 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. 1426 * @ndev: Pointer to net_device structure 1427 * @ecoalesce: Pointer to ethtool_coalesce structure 1428 * 1429 * This implements ethtool command for setting the DMA interrupt coalescing 1430 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux 1431 * prompt to execute this function. 
1432 * 1433 * Return: 0, on success, Non-zero error value on failure. 1434 */ 1435 static int axienet_ethtools_set_coalesce(struct net_device *ndev, 1436 struct ethtool_coalesce *ecoalesce) 1437 { 1438 struct axienet_local *lp = netdev_priv(ndev); 1439 1440 if (netif_running(ndev)) { 1441 netdev_err(ndev, 1442 "Please stop netif before applying configuration\n"); 1443 return -EFAULT; 1444 } 1445 1446 if (ecoalesce->rx_max_coalesced_frames) 1447 lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; 1448 if (ecoalesce->tx_max_coalesced_frames) 1449 lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; 1450 1451 return 0; 1452 } 1453 1454 static int 1455 axienet_ethtools_get_link_ksettings(struct net_device *ndev, 1456 struct ethtool_link_ksettings *cmd) 1457 { 1458 struct axienet_local *lp = netdev_priv(ndev); 1459 1460 return phylink_ethtool_ksettings_get(lp->phylink, cmd); 1461 } 1462 1463 static int 1464 axienet_ethtools_set_link_ksettings(struct net_device *ndev, 1465 const struct ethtool_link_ksettings *cmd) 1466 { 1467 struct axienet_local *lp = netdev_priv(ndev); 1468 1469 return phylink_ethtool_ksettings_set(lp->phylink, cmd); 1470 } 1471 1472 static const struct ethtool_ops axienet_ethtool_ops = { 1473 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES, 1474 .get_drvinfo = axienet_ethtools_get_drvinfo, 1475 .get_regs_len = axienet_ethtools_get_regs_len, 1476 .get_regs = axienet_ethtools_get_regs, 1477 .get_link = ethtool_op_get_link, 1478 .get_ringparam = axienet_ethtools_get_ringparam, 1479 .set_ringparam = axienet_ethtools_set_ringparam, 1480 .get_pauseparam = axienet_ethtools_get_pauseparam, 1481 .set_pauseparam = axienet_ethtools_set_pauseparam, 1482 .get_coalesce = axienet_ethtools_get_coalesce, 1483 .set_coalesce = axienet_ethtools_set_coalesce, 1484 .get_link_ksettings = axienet_ethtools_get_link_ksettings, 1485 .set_link_ksettings = axienet_ethtools_set_link_ksettings, 1486 }; 1487 1488 static void axienet_validate(struct 
phylink_config *config, 1489 unsigned long *supported, 1490 struct phylink_link_state *state) 1491 { 1492 struct net_device *ndev = to_net_dev(config->dev); 1493 struct axienet_local *lp = netdev_priv(ndev); 1494 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 1495 1496 /* Only support the mode we are configured for */ 1497 if (state->interface != PHY_INTERFACE_MODE_NA && 1498 state->interface != lp->phy_mode) { 1499 netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n", 1500 phy_modes(state->interface), 1501 phy_modes(lp->phy_mode)); 1502 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 1503 return; 1504 } 1505 1506 phylink_set(mask, Autoneg); 1507 phylink_set_port_modes(mask); 1508 1509 phylink_set(mask, Asym_Pause); 1510 phylink_set(mask, Pause); 1511 1512 switch (state->interface) { 1513 case PHY_INTERFACE_MODE_NA: 1514 case PHY_INTERFACE_MODE_1000BASEX: 1515 case PHY_INTERFACE_MODE_SGMII: 1516 case PHY_INTERFACE_MODE_GMII: 1517 case PHY_INTERFACE_MODE_RGMII: 1518 case PHY_INTERFACE_MODE_RGMII_ID: 1519 case PHY_INTERFACE_MODE_RGMII_RXID: 1520 case PHY_INTERFACE_MODE_RGMII_TXID: 1521 phylink_set(mask, 1000baseX_Full); 1522 phylink_set(mask, 1000baseT_Full); 1523 if (state->interface == PHY_INTERFACE_MODE_1000BASEX) 1524 break; 1525 fallthrough; 1526 case PHY_INTERFACE_MODE_MII: 1527 phylink_set(mask, 100baseT_Full); 1528 phylink_set(mask, 10baseT_Full); 1529 default: 1530 break; 1531 } 1532 1533 bitmap_and(supported, supported, mask, 1534 __ETHTOOL_LINK_MODE_MASK_NBITS); 1535 bitmap_and(state->advertising, state->advertising, mask, 1536 __ETHTOOL_LINK_MODE_MASK_NBITS); 1537 } 1538 1539 static void axienet_mac_pcs_get_state(struct phylink_config *config, 1540 struct phylink_link_state *state) 1541 { 1542 struct net_device *ndev = to_net_dev(config->dev); 1543 struct axienet_local *lp = netdev_priv(ndev); 1544 1545 switch (state->interface) { 1546 case PHY_INTERFACE_MODE_SGMII: 1547 case PHY_INTERFACE_MODE_1000BASEX: 1548 
phylink_mii_c22_pcs_get_state(lp->pcs_phy, state); 1549 break; 1550 default: 1551 break; 1552 } 1553 } 1554 1555 static void axienet_mac_an_restart(struct phylink_config *config) 1556 { 1557 struct net_device *ndev = to_net_dev(config->dev); 1558 struct axienet_local *lp = netdev_priv(ndev); 1559 1560 phylink_mii_c22_pcs_an_restart(lp->pcs_phy); 1561 } 1562 1563 static void axienet_mac_config(struct phylink_config *config, unsigned int mode, 1564 const struct phylink_link_state *state) 1565 { 1566 struct net_device *ndev = to_net_dev(config->dev); 1567 struct axienet_local *lp = netdev_priv(ndev); 1568 int ret; 1569 1570 switch (state->interface) { 1571 case PHY_INTERFACE_MODE_SGMII: 1572 case PHY_INTERFACE_MODE_1000BASEX: 1573 ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode, 1574 state->interface, 1575 state->advertising); 1576 if (ret < 0) 1577 netdev_warn(ndev, "Failed to configure PCS: %d\n", 1578 ret); 1579 break; 1580 1581 default: 1582 break; 1583 } 1584 } 1585 1586 static void axienet_mac_link_down(struct phylink_config *config, 1587 unsigned int mode, 1588 phy_interface_t interface) 1589 { 1590 /* nothing meaningful to do */ 1591 } 1592 1593 static void axienet_mac_link_up(struct phylink_config *config, 1594 struct phy_device *phy, 1595 unsigned int mode, phy_interface_t interface, 1596 int speed, int duplex, 1597 bool tx_pause, bool rx_pause) 1598 { 1599 struct net_device *ndev = to_net_dev(config->dev); 1600 struct axienet_local *lp = netdev_priv(ndev); 1601 u32 emmc_reg, fcc_reg; 1602 1603 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); 1604 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; 1605 1606 switch (speed) { 1607 case SPEED_1000: 1608 emmc_reg |= XAE_EMMC_LINKSPD_1000; 1609 break; 1610 case SPEED_100: 1611 emmc_reg |= XAE_EMMC_LINKSPD_100; 1612 break; 1613 case SPEED_10: 1614 emmc_reg |= XAE_EMMC_LINKSPD_10; 1615 break; 1616 default: 1617 dev_err(&ndev->dev, 1618 "Speed other than 10, 100 or 1Gbps is not supported\n"); 1619 break; 1620 } 1621 1622 
axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); 1623 1624 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); 1625 if (tx_pause) 1626 fcc_reg |= XAE_FCC_FCTX_MASK; 1627 else 1628 fcc_reg &= ~XAE_FCC_FCTX_MASK; 1629 if (rx_pause) 1630 fcc_reg |= XAE_FCC_FCRX_MASK; 1631 else 1632 fcc_reg &= ~XAE_FCC_FCRX_MASK; 1633 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); 1634 } 1635 1636 static const struct phylink_mac_ops axienet_phylink_ops = { 1637 .validate = axienet_validate, 1638 .mac_pcs_get_state = axienet_mac_pcs_get_state, 1639 .mac_an_restart = axienet_mac_an_restart, 1640 .mac_config = axienet_mac_config, 1641 .mac_link_down = axienet_mac_link_down, 1642 .mac_link_up = axienet_mac_link_up, 1643 }; 1644 1645 /** 1646 * axienet_dma_err_handler - Work queue task for Axi DMA Error 1647 * @work: pointer to work_struct 1648 * 1649 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the 1650 * Tx/Rx BDs. 1651 */ 1652 static void axienet_dma_err_handler(struct work_struct *work) 1653 { 1654 u32 axienet_status; 1655 u32 cr, i; 1656 struct axienet_local *lp = container_of(work, struct axienet_local, 1657 dma_err_task); 1658 struct net_device *ndev = lp->ndev; 1659 struct axidma_bd *cur_p; 1660 1661 axienet_setoptions(ndev, lp->options & 1662 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1663 /* When we do an Axi Ethernet reset, it resets the complete core 1664 * including the MDIO. MDIO must be disabled before resetting. 1665 * Hold MDIO bus lock to avoid MDIO accesses during the reset. 
1666 */ 1667 mutex_lock(&lp->mii_bus->mdio_lock); 1668 __axienet_device_reset(lp); 1669 mutex_unlock(&lp->mii_bus->mdio_lock); 1670 1671 for (i = 0; i < lp->tx_bd_num; i++) { 1672 cur_p = &lp->tx_bd_v[i]; 1673 if (cur_p->cntrl) { 1674 dma_addr_t addr = desc_get_phys_addr(lp, cur_p); 1675 1676 dma_unmap_single(ndev->dev.parent, addr, 1677 (cur_p->cntrl & 1678 XAXIDMA_BD_CTRL_LENGTH_MASK), 1679 DMA_TO_DEVICE); 1680 } 1681 if (cur_p->skb) 1682 dev_kfree_skb_irq(cur_p->skb); 1683 cur_p->phys = 0; 1684 cur_p->phys_msb = 0; 1685 cur_p->cntrl = 0; 1686 cur_p->status = 0; 1687 cur_p->app0 = 0; 1688 cur_p->app1 = 0; 1689 cur_p->app2 = 0; 1690 cur_p->app3 = 0; 1691 cur_p->app4 = 0; 1692 cur_p->skb = NULL; 1693 } 1694 1695 for (i = 0; i < lp->rx_bd_num; i++) { 1696 cur_p = &lp->rx_bd_v[i]; 1697 cur_p->status = 0; 1698 cur_p->app0 = 0; 1699 cur_p->app1 = 0; 1700 cur_p->app2 = 0; 1701 cur_p->app3 = 0; 1702 cur_p->app4 = 0; 1703 } 1704 1705 lp->tx_bd_ci = 0; 1706 lp->tx_bd_tail = 0; 1707 lp->rx_bd_ci = 0; 1708 1709 /* Start updating the Rx channel control register */ 1710 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1711 /* Update the interrupt coalesce count */ 1712 cr = ((cr & ~XAXIDMA_COALESCE_MASK) | 1713 (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); 1714 /* Update the delay timer count */ 1715 cr = ((cr & ~XAXIDMA_DELAY_MASK) | 1716 (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 1717 /* Enable coalesce, delay timer and error interrupts */ 1718 cr |= XAXIDMA_IRQ_ALL_MASK; 1719 /* Finally write to the Rx channel control register */ 1720 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 1721 1722 /* Start updating the Tx channel control register */ 1723 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1724 /* Update the interrupt coalesce count */ 1725 cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | 1726 (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); 1727 /* Update the delay timer count */ 1728 cr = (((cr & ~XAXIDMA_DELAY_MASK)) | 1729 
(XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 1730 /* Enable coalesce, delay timer and error interrupts */ 1731 cr |= XAXIDMA_IRQ_ALL_MASK; 1732 /* Finally write to the Tx channel control register */ 1733 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 1734 1735 /* Populate the tail pointer and bring the Rx Axi DMA engine out of 1736 * halted state. This will make the Rx side ready for reception. 1737 */ 1738 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); 1739 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1740 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, 1741 cr | XAXIDMA_CR_RUNSTOP_MASK); 1742 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + 1743 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); 1744 1745 /* Write to the RS (Run-stop) bit in the Tx channel control register. 1746 * Tx channel is now ready to run. But only after we write to the 1747 * tail pointer register that the Tx channel will start transmitting 1748 */ 1749 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); 1750 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1751 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, 1752 cr | XAXIDMA_CR_RUNSTOP_MASK); 1753 1754 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 1755 axienet_status &= ~XAE_RCW1_RX_MASK; 1756 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 1757 1758 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 1759 if (axienet_status & XAE_INT_RXRJECT_MASK) 1760 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 1761 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 1762 XAE_INT_RECV_ERROR_MASK : 0); 1763 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 1764 1765 /* Sync default options with HW but leave receiver and 1766 * transmitter disabled. 
1767 */ 1768 axienet_setoptions(ndev, lp->options & 1769 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1770 axienet_set_mac_address(ndev, NULL); 1771 axienet_set_multicast_list(ndev); 1772 axienet_setoptions(ndev, lp->options); 1773 } 1774 1775 /** 1776 * axienet_probe - Axi Ethernet probe function. 1777 * @pdev: Pointer to platform device structure. 1778 * 1779 * Return: 0, on success 1780 * Non-zero error value on failure. 1781 * 1782 * This is the probe routine for Axi Ethernet driver. This is called before 1783 * any other driver routines are invoked. It allocates and sets up the Ethernet 1784 * device. Parses through device tree and populates fields of 1785 * axienet_local. It registers the Ethernet device. 1786 */ 1787 static int axienet_probe(struct platform_device *pdev) 1788 { 1789 int ret; 1790 struct device_node *np; 1791 struct axienet_local *lp; 1792 struct net_device *ndev; 1793 const void *mac_addr; 1794 struct resource *ethres; 1795 int addr_width = 32; 1796 u32 value; 1797 1798 ndev = alloc_etherdev(sizeof(*lp)); 1799 if (!ndev) 1800 return -ENOMEM; 1801 1802 platform_set_drvdata(pdev, ndev); 1803 1804 SET_NETDEV_DEV(ndev, &pdev->dev); 1805 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1806 ndev->features = NETIF_F_SG; 1807 ndev->netdev_ops = &axienet_netdev_ops; 1808 ndev->ethtool_ops = &axienet_ethtool_ops; 1809 1810 /* MTU range: 64 - 9000 */ 1811 ndev->min_mtu = 64; 1812 ndev->max_mtu = XAE_JUMBO_MTU; 1813 1814 lp = netdev_priv(ndev); 1815 lp->ndev = ndev; 1816 lp->dev = &pdev->dev; 1817 lp->options = XAE_OPTION_DEFAULTS; 1818 lp->rx_bd_num = RX_BD_NUM_DEFAULT; 1819 lp->tx_bd_num = TX_BD_NUM_DEFAULT; 1820 /* Map device registers */ 1821 ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1822 lp->regs = devm_ioremap_resource(&pdev->dev, ethres); 1823 if (IS_ERR(lp->regs)) { 1824 dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); 1825 ret = PTR_ERR(lp->regs); 1826 goto free_netdev; 1827 } 1828 lp->regs_start = ethres->start; 1829 
1830 /* Setup checksum offload, but default to off if not specified */ 1831 lp->features = 0; 1832 1833 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); 1834 if (!ret) { 1835 switch (value) { 1836 case 1: 1837 lp->csum_offload_on_tx_path = 1838 XAE_FEATURE_PARTIAL_TX_CSUM; 1839 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; 1840 /* Can checksum TCP/UDP over IPv4. */ 1841 ndev->features |= NETIF_F_IP_CSUM; 1842 break; 1843 case 2: 1844 lp->csum_offload_on_tx_path = 1845 XAE_FEATURE_FULL_TX_CSUM; 1846 lp->features |= XAE_FEATURE_FULL_TX_CSUM; 1847 /* Can checksum TCP/UDP over IPv4. */ 1848 ndev->features |= NETIF_F_IP_CSUM; 1849 break; 1850 default: 1851 lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; 1852 } 1853 } 1854 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); 1855 if (!ret) { 1856 switch (value) { 1857 case 1: 1858 lp->csum_offload_on_rx_path = 1859 XAE_FEATURE_PARTIAL_RX_CSUM; 1860 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; 1861 break; 1862 case 2: 1863 lp->csum_offload_on_rx_path = 1864 XAE_FEATURE_FULL_RX_CSUM; 1865 lp->features |= XAE_FEATURE_FULL_RX_CSUM; 1866 break; 1867 default: 1868 lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; 1869 } 1870 } 1871 /* For supporting jumbo frames, the Axi Ethernet hardware must have 1872 * a larger Rx/Tx Memory. Typically, the size must be large so that 1873 * we can enable jumbo option and start supporting jumbo frames. 1874 * Here we check for memory allocated for Rx/Tx in the hardware from 1875 * the device-tree and accordingly set flags. 
 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		/* Legacy "xlnx,phy-type" property found: translate the
		 * Xilinx-specific numeric codes to standard phy_interface_t
		 * values, but warn so the DT gets migrated to "phy-mode".
		 */
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			/* Unknown legacy phy-type code: refuse to probe. */
			ret = -EINVAL;
			goto free_netdev;
		}
	} else {
		/* No legacy property: use the standard "phy-mode" binding. */
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto free_netdev;
	}

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (np) {
		/* DMA engine described by a separate node referenced through
		 * the "axistream-connected" phandle: take registers and the
		 * Tx/Rx IRQs from that node; the optional Ethernet core IRQ
		 * stays on the Ethernet node itself (index 0).
		 */
		struct resource dmares;

		ret = of_address_to_resource(np, 0, &dmares);
		if (ret) {
			dev_err(&pdev->dev,
				"unable to get DMA resource\n");
			of_node_put(np);
			goto free_netdev;
		}
		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
						     &dmares);
		lp->rx_irq = irq_of_parse_and_map(np, 1);
		lp->tx_irq = irq_of_parse_and_map(np, 0);
		of_node_put(np);
		lp->eth_irq = platform_get_irq_optional(pdev, 0);
	} else {
		/* Check for these resources directly on the Ethernet node.
		 * Layout: MEM 1 = DMA regs, IRQ 0 = Tx, IRQ 1 = Rx,
		 * IRQ 2 = optional Ethernet core IRQ.
		 */
		struct resource *res = platform_get_resource(pdev,
							     IORESOURCE_MEM, 1);
		lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
		lp->rx_irq = platform_get_irq(pdev, 1);
		lp->tx_irq = platform_get_irq(pdev, 0);
		lp->eth_irq = platform_get_irq_optional(pdev, 2);
	}
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto free_netdev;
	}
	/* Tx and Rx DMA IRQs are mandatory (eth_irq is checked later). */
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto free_netdev;
	}

	/* Autodetect the need for 64-bit DMA pointers.
	 * When the IP is configured for a bus width bigger than 32 bits,
	 * writing the MSB registers is mandatory, even if they are all 0.
	 * We can detect this case by writing all 1's to one such register
	 * and see if that sticks: when the IP is configured for 32 bits
	 * only, those registers are RES0.
	 * Those MSB registers were introduced in IP v7.1, which we check first.
	 */
	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
		/* Probe the MSB half of the Tx current-descriptor pointer.
		 * NOTE(review): the 0x9 threshold presumably encodes the
		 * IP major version in the ID register's top byte — confirm
		 * against the AXI Ethernet product guide.
		 */
		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

		iowrite32(0x0, desc);
		if (ioread32(desc) == 0) {	/* sanity check */
			iowrite32(0xffffffff, desc);
			if (ioread32(desc) > 0) {
				/* Write stuck: MSB register is implemented,
				 * so the DMA uses 64-bit addressing.
				 */
				lp->features |= XAE_FEATURE_DMA_64BIT;
				addr_width = 64;
				dev_info(&pdev->dev,
					 "autodetected 64-bit DMA range\n");
			}
			/* Restore the register to 0 either way. */
			iowrite32(0x0, desc);
		}
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
	if (ret) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto free_netdev;
	}

	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	/* Retrieve the MAC address */
	mac_addr = of_get_mac_address(pdev->dev.of_node);
	if (IS_ERR(mac_addr)) {
		/* Missing MAC in DT is non-fatal: axienet_set_mac_address()
		 * is handed NULL and picks an address itself.
		 */
		dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
			 PTR_ERR(mac_addr));
		mac_addr = NULL;
	}
	axienet_set_mac_address(ndev, mac_addr);

	/* Default interrupt-coalescing thresholds; tunable via ethtool. */
	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (lp->phy_node) {
		/* A missing/invalid clock is tolerated (warn and continue),
		 * but a clock that exists yet fails to enable is fatal.
		 */
		lp->clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(lp->clk)) {
			dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
				 PTR_ERR(lp->clk));
			lp->clk = NULL;
		} else {
			ret = clk_prepare_enable(lp->clk);
			if (ret) {
				dev_err(&pdev->dev, "Unable to enable clock: %d\n",
					ret);
				goto free_netdev;
			}
		}

		/* MDIO registration failure is deliberately non-fatal. */
		ret = axienet_mdio_setup(lp);
		if (ret)
			dev_warn(&pdev->dev,
				 "error registering MDIO bus: %d\n", ret);
	}
	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		/* SGMII/1000BaseX need the internal PCS/PMA PHY, which must
		 * be reachable through "phy-handle".
		 */
		if (!lp->phy_node) {
			dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto free_netdev;
		}
		/* The PCS/PMA PHY is an mdio device; if it has not been
		 * registered yet, defer probing until it appears.
		 */
		lp->pcs_phy = of_mdio_find_device(lp->phy_node);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			goto free_netdev;
		}
		/* Poll the PCS for link changes instead of relying on an IRQ. */
		lp->phylink_config.pcs_poll = true;
	}

	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto free_netdev;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto free_netdev;
	}

	return 0;

free_netdev:
	free_netdev(ndev);

	return ret;
}

/**
 * axienet_remove - Driver removal function.
 * @pdev: Pointer to the platform_device structure.
 *
 * Undoes axienet_probe(): unregisters the net_device, destroys the phylink
 * instance, drops the reference taken on the PCS mdio device, tears down the
 * MDIO bus, disables the clock, releases the PHY DT node and finally frees
 * the net_device. Teardown runs in reverse order of setup so no user of a
 * resource outlives it.
 *
 * Return: Always 0.
 */
static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	/* Stop netdev users first; later steps assume the device is gone. */
	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	/* Balance the reference obtained via of_mdio_find_device() in probe. */
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	/* clk may be NULL (no clock in DT); clk API handles that case. */
	clk_disable_unprepare(lp->clk);

	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	free_netdev(ndev);

	return 0;
}

/**
 * axienet_shutdown - Driver shutdown hook.
 * @pdev: Pointer to the platform_device structure.
 *
 * Detaches the interface and closes it if it is running, so the hardware is
 * quiesced before the system powers off or kexecs. dev_close() requires the
 * RTNL lock, hence the rtnl_lock()/rtnl_unlock() pair.
 */
static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

/* Platform driver glue: matched against the OF table defined above. */
static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		 .name = "xilinx_axienet",
		 .of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");