1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Xilinx Axi Ethernet device driver 4 * 5 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi 6 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> 7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> 9 * Copyright (c) 2010 - 2011 PetaLogix 10 * Copyright (c) 2019 SED Systems, a division of Calian Ltd. 11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. 12 * 13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 14 * and Spartan6. 15 * 16 * TODO: 17 * - Add Axi Fifo support. 18 * - Factor out Axi DMA code into separate driver. 19 * - Test and fix basic multicast filtering. 20 * - Add support for extended multicast filtering. 21 * - Test basic VLAN support. 22 * - Add support for extended VLAN support. 23 */ 24 25 #include <linux/clk.h> 26 #include <linux/delay.h> 27 #include <linux/etherdevice.h> 28 #include <linux/module.h> 29 #include <linux/netdevice.h> 30 #include <linux/of_mdio.h> 31 #include <linux/of_net.h> 32 #include <linux/of_platform.h> 33 #include <linux/of_irq.h> 34 #include <linux/of_address.h> 35 #include <linux/skbuff.h> 36 #include <linux/spinlock.h> 37 #include <linux/phy.h> 38 #include <linux/mii.h> 39 #include <linux/ethtool.h> 40 41 #include "xilinx_axienet.h" 42 43 /* Descriptors defines for Tx and Rx DMA */ 44 #define TX_BD_NUM_DEFAULT 128 45 #define RX_BD_NUM_DEFAULT 1024 46 #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1) 47 #define TX_BD_NUM_MAX 4096 48 #define RX_BD_NUM_MAX 4096 49 50 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */ 51 #define DRIVER_NAME "xaxienet" 52 #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver" 53 #define DRIVER_VERSION "1.00a" 54 55 #define AXIENET_REGS_N 40 56 57 /* Match table for of_platform binding */ 58 static const struct of_device_id axienet_of_match[] = { 59 { .compatible = "xlnx,axi-ethernet-1.00.a", }, 60 { .compatible = "xlnx,axi-ethernet-1.01.a", }, 61 { .compatible = "xlnx,axi-ethernet-2.01.a", }, 62 {}, 63 }; 64 65 MODULE_DEVICE_TABLE(of, axienet_of_match); 66 67 /* Option table for setting up Axi Ethernet hardware options */ 68 static struct axienet_option axienet_options[] = { 69 /* Turn on jumbo packet support for both Rx and Tx */ 70 { 71 .opt = XAE_OPTION_JUMBO, 72 .reg = XAE_TC_OFFSET, 73 .m_or = XAE_TC_JUM_MASK, 74 }, { 75 .opt = XAE_OPTION_JUMBO, 76 .reg = XAE_RCW1_OFFSET, 77 .m_or = XAE_RCW1_JUM_MASK, 78 }, { /* Turn on VLAN packet support for both Rx and Tx */ 79 .opt = XAE_OPTION_VLAN, 80 .reg = XAE_TC_OFFSET, 81 .m_or = XAE_TC_VLAN_MASK, 82 }, { 83 .opt = XAE_OPTION_VLAN, 84 .reg = XAE_RCW1_OFFSET, 85 .m_or = XAE_RCW1_VLAN_MASK, 86 }, { /* Turn on FCS stripping on receive packets */ 87 .opt = XAE_OPTION_FCS_STRIP, 88 .reg = XAE_RCW1_OFFSET, 89 .m_or = XAE_RCW1_FCS_MASK, 90 }, { /* Turn on FCS insertion on transmit packets */ 91 .opt = XAE_OPTION_FCS_INSERT, 92 .reg = XAE_TC_OFFSET, 93 .m_or = XAE_TC_FCS_MASK, 94 }, { /* Turn off length/type field checking on receive packets */ 95 .opt = XAE_OPTION_LENTYPE_ERR, 96 .reg = XAE_RCW1_OFFSET, 97 .m_or = XAE_RCW1_LT_DIS_MASK, 98 }, { /* Turn on Rx flow control */ 99 .opt = XAE_OPTION_FLOW_CONTROL, 100 .reg = XAE_FCC_OFFSET, 101 .m_or = XAE_FCC_FCRX_MASK, 102 }, { /* Turn on Tx flow control */ 103 .opt = XAE_OPTION_FLOW_CONTROL, 104 .reg = XAE_FCC_OFFSET, 105 .m_or = XAE_FCC_FCTX_MASK, 106 }, { /* Turn on promiscuous frame filtering */ 107 .opt = XAE_OPTION_PROMISC, 108 .reg = XAE_FMI_OFFSET, 109 .m_or = XAE_FMI_PM_MASK, 110 }, { /* Enable transmitter */ 111 .opt = XAE_OPTION_TXEN, 112 .reg = XAE_TC_OFFSET, 113 .m_or = XAE_TC_TX_MASK, 114 }, { /* Enable receiver */ 115 .opt = XAE_OPTION_RXEN, 116 .reg = XAE_RCW1_OFFSET, 117 .m_or = XAE_RCW1_RX_MASK, 118 }, 119 {} 120 }; 121 122 /** 123 * axienet_dma_in32 - Memory mapped Axi DMA register read 124 * @lp: Pointer to axienet local structure 125 * @reg: Address offset from the base address of the Axi DMA core 126 * 127 * Return: The contents of the Axi DMA register 128 * 129 * This function returns the contents of the corresponding Axi DMA register. 130 */ 131 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) 132 { 133 return ioread32(lp->dma_regs + reg); 134 } 135 136 /** 137 * axienet_dma_out32 - Memory mapped Axi DMA register write. 138 * @lp: Pointer to axienet local structure 139 * @reg: Address offset from the base address of the Axi DMA core 140 * @value: Value to be written into the Axi DMA register 141 * 142 * This function writes the desired value into the corresponding Axi DMA 143 * register. 144 */ 145 static inline void axienet_dma_out32(struct axienet_local *lp, 146 off_t reg, u32 value) 147 { 148 iowrite32(value, lp->dma_regs + reg); 149 } 150 151 static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg, 152 dma_addr_t addr) 153 { 154 axienet_dma_out32(lp, reg, lower_32_bits(addr)); 155 156 if (lp->features & XAE_FEATURE_DMA_64BIT) 157 axienet_dma_out32(lp, reg + 4, upper_32_bits(addr)); 158 } 159 160 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr, 161 struct axidma_bd *desc) 162 { 163 desc->phys = lower_32_bits(addr); 164 if (lp->features & XAE_FEATURE_DMA_64BIT) 165 desc->phys_msb = upper_32_bits(addr); 166 } 167 168 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp, 169 struct axidma_bd *desc) 170 { 171 dma_addr_t ret = desc->phys; 172 173 if (lp->features & XAE_FEATURE_DMA_64BIT) 174 ret |= ((dma_addr_t)desc->phys_msb << 16) << 16; 175 176 return ret; 177 } 178 179 /** 180 * axienet_dma_bd_release - Release buffer descriptor rings 181 * @ndev: Pointer to the net_device structure 182 * 183 * This function is used to release the descriptors allocated in 184 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet 185 * driver stop api is called. 186 */ 187 static void axienet_dma_bd_release(struct net_device *ndev) 188 { 189 int i; 190 struct axienet_local *lp = netdev_priv(ndev); 191 192 /* If we end up here, tx_bd_v must have been DMA allocated. */ 193 dma_free_coherent(ndev->dev.parent, 194 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, 195 lp->tx_bd_v, 196 lp->tx_bd_p); 197 198 if (!lp->rx_bd_v) 199 return; 200 201 for (i = 0; i < lp->rx_bd_num; i++) { 202 dma_addr_t phys; 203 204 /* A NULL skb means this descriptor has not been initialised 205 * at all. 206 */ 207 if (!lp->rx_bd_v[i].skb) 208 break; 209 210 dev_kfree_skb(lp->rx_bd_v[i].skb); 211 212 /* For each descriptor, we programmed cntrl with the (non-zero) 213 * descriptor size, after it had been successfully allocated. 214 * So a non-zero value in there means we need to unmap it. 215 */ 216 if (lp->rx_bd_v[i].cntrl) { 217 phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]); 218 dma_unmap_single(ndev->dev.parent, phys, 219 lp->max_frm_size, DMA_FROM_DEVICE); 220 } 221 } 222 223 dma_free_coherent(ndev->dev.parent, 224 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, 225 lp->rx_bd_v, 226 lp->rx_bd_p); 227 } 228 229 /** 230 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA 231 * @ndev: Pointer to the net_device structure 232 * 233 * Return: 0, on success -ENOMEM, on failure 234 * 235 * This function is called to initialize the Rx and Tx DMA descriptor 236 * rings. This initializes the descriptors with required default values 237 * and is called when Axi Ethernet driver reset is called. 238 */ 239 static int axienet_dma_bd_init(struct net_device *ndev) 240 { 241 u32 cr; 242 int i; 243 struct sk_buff *skb; 244 struct axienet_local *lp = netdev_priv(ndev); 245 246 /* Reset the indexes which are used for accessing the BDs */ 247 lp->tx_bd_ci = 0; 248 lp->tx_bd_tail = 0; 249 lp->rx_bd_ci = 0; 250 251 /* Allocate the Tx and Rx buffer descriptors. */ 252 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 253 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, 254 &lp->tx_bd_p, GFP_KERNEL); 255 if (!lp->tx_bd_v) 256 return -ENOMEM; 257 258 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 259 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, 260 &lp->rx_bd_p, GFP_KERNEL); 261 if (!lp->rx_bd_v) 262 goto out; 263 264 for (i = 0; i < lp->tx_bd_num; i++) { 265 dma_addr_t addr = lp->tx_bd_p + 266 sizeof(*lp->tx_bd_v) * 267 ((i + 1) % lp->tx_bd_num); 268 269 lp->tx_bd_v[i].next = lower_32_bits(addr); 270 if (lp->features & XAE_FEATURE_DMA_64BIT) 271 lp->tx_bd_v[i].next_msb = upper_32_bits(addr); 272 } 273 274 for (i = 0; i < lp->rx_bd_num; i++) { 275 dma_addr_t addr; 276 277 addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * 278 ((i + 1) % lp->rx_bd_num); 279 lp->rx_bd_v[i].next = lower_32_bits(addr); 280 if (lp->features & XAE_FEATURE_DMA_64BIT) 281 lp->rx_bd_v[i].next_msb = upper_32_bits(addr); 282 283 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); 284 if (!skb) 285 goto out; 286 287 lp->rx_bd_v[i].skb = skb; 288 addr = dma_map_single(ndev->dev.parent, skb->data, 289 lp->max_frm_size, DMA_FROM_DEVICE); 290 if (dma_mapping_error(ndev->dev.parent, addr)) { 291 netdev_err(ndev, "DMA mapping error\n"); 292 goto out; 293 } 294 desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]); 295 296 lp->rx_bd_v[i].cntrl = lp->max_frm_size; 297 } 298 299 /* Start updating the Rx channel control register */ 300 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 301 /* Update the interrupt coalesce count */ 302 cr = ((cr & ~XAXIDMA_COALESCE_MASK) | 303 ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT)); 304 /* Update the delay timer count */ 305 cr = ((cr & ~XAXIDMA_DELAY_MASK) | 306 (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 307 /* Enable coalesce, delay timer and error interrupts */ 308 cr |= XAXIDMA_IRQ_ALL_MASK; 309 /* Write to the Rx channel control register */ 310 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 311 312 /* Start updating the Tx channel control register */ 313 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 314 /* Update the interrupt coalesce count */ 315 cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | 316 ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT)); 317 /* Update the delay timer count */ 318 cr = (((cr & ~XAXIDMA_DELAY_MASK)) | 319 (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 320 /* Enable coalesce, delay timer and error interrupts */ 321 cr |= XAXIDMA_IRQ_ALL_MASK; 322 /* Write to the Tx channel control register */ 323 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 324 325 /* Populate the tail pointer and bring the Rx Axi DMA engine out of 326 * halted state. This will make the Rx side ready for reception. 327 */ 328 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); 329 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 330 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, 331 cr | XAXIDMA_CR_RUNSTOP_MASK); 332 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + 333 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); 334 335 /* Write to the RS (Run-stop) bit in the Tx channel control register. 336 * Tx channel is now ready to run. But only after we write to the 337 * tail pointer register that the Tx channel will start transmitting. 338 */ 339 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); 340 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 341 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, 342 cr | XAXIDMA_CR_RUNSTOP_MASK); 343 344 return 0; 345 out: 346 axienet_dma_bd_release(ndev); 347 return -ENOMEM; 348 } 349 350 /** 351 * axienet_set_mac_address - Write the MAC address 352 * @ndev: Pointer to the net_device structure 353 * @address: 6 byte Address to be written as MAC address 354 * 355 * This function is called to initialize the MAC address of the Axi Ethernet 356 * core. It writes to the UAW0 and UAW1 registers of the core. 357 */ 358 static void axienet_set_mac_address(struct net_device *ndev, 359 const void *address) 360 { 361 struct axienet_local *lp = netdev_priv(ndev); 362 363 if (address) 364 eth_hw_addr_set(ndev, address); 365 if (!is_valid_ether_addr(ndev->dev_addr)) 366 eth_hw_addr_random(ndev); 367 368 /* Set up unicast MAC address filter set its mac address */ 369 axienet_iow(lp, XAE_UAW0_OFFSET, 370 (ndev->dev_addr[0]) | 371 (ndev->dev_addr[1] << 8) | 372 (ndev->dev_addr[2] << 16) | 373 (ndev->dev_addr[3] << 24)); 374 axienet_iow(lp, XAE_UAW1_OFFSET, 375 (((axienet_ior(lp, XAE_UAW1_OFFSET)) & 376 ~XAE_UAW1_UNICASTADDR_MASK) | 377 (ndev->dev_addr[4] | 378 (ndev->dev_addr[5] << 8)))); 379 } 380 381 /** 382 * netdev_set_mac_address - Write the MAC address (from outside the driver) 383 * @ndev: Pointer to the net_device structure 384 * @p: 6 byte Address to be written as MAC address 385 * 386 * Return: 0 for all conditions. Presently, there is no failure case. 387 * 388 * This function is called to initialize the MAC address of the Axi Ethernet 389 * core. It calls the core specific axienet_set_mac_address. This is the 390 * function that goes into net_device_ops structure entry ndo_set_mac_address. 391 */ 392 static int netdev_set_mac_address(struct net_device *ndev, void *p) 393 { 394 struct sockaddr *addr = p; 395 axienet_set_mac_address(ndev, addr->sa_data); 396 return 0; 397 } 398 399 /** 400 * axienet_set_multicast_list - Prepare the multicast table 401 * @ndev: Pointer to the net_device structure 402 * 403 * This function is called to initialize the multicast table during 404 * initialization. The Axi Ethernet basic multicast support has a four-entry 405 * multicast table which is initialized here. Additionally this function 406 * goes into the net_device_ops structure entry ndo_set_multicast_list. This 407 * means whenever the multicast table entries need to be updated this 408 * function gets called. 409 */ 410 static void axienet_set_multicast_list(struct net_device *ndev) 411 { 412 int i; 413 u32 reg, af0reg, af1reg; 414 struct axienet_local *lp = netdev_priv(ndev); 415 416 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) || 417 netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) { 418 /* We must make the kernel realize we had to move into 419 * promiscuous mode. If it was a promiscuous mode request 420 * the flag is already set. If not we set it. 421 */ 422 ndev->flags |= IFF_PROMISC; 423 reg = axienet_ior(lp, XAE_FMI_OFFSET); 424 reg |= XAE_FMI_PM_MASK; 425 axienet_iow(lp, XAE_FMI_OFFSET, reg); 426 dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); 427 } else if (!netdev_mc_empty(ndev)) { 428 struct netdev_hw_addr *ha; 429 430 i = 0; 431 netdev_for_each_mc_addr(ha, ndev) { 432 if (i >= XAE_MULTICAST_CAM_TABLE_NUM) 433 break; 434 435 af0reg = (ha->addr[0]); 436 af0reg |= (ha->addr[1] << 8); 437 af0reg |= (ha->addr[2] << 16); 438 af0reg |= (ha->addr[3] << 24); 439 440 af1reg = (ha->addr[4]); 441 af1reg |= (ha->addr[5] << 8); 442 443 reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; 444 reg |= i; 445 446 axienet_iow(lp, XAE_FMI_OFFSET, reg); 447 axienet_iow(lp, XAE_AF0_OFFSET, af0reg); 448 axienet_iow(lp, XAE_AF1_OFFSET, af1reg); 449 i++; 450 } 451 } else { 452 reg = axienet_ior(lp, XAE_FMI_OFFSET); 453 reg &= ~XAE_FMI_PM_MASK; 454 455 axienet_iow(lp, XAE_FMI_OFFSET, reg); 456 457 for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) { 458 reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; 459 reg |= i; 460 461 axienet_iow(lp, XAE_FMI_OFFSET, reg); 462 axienet_iow(lp, XAE_AF0_OFFSET, 0); 463 axienet_iow(lp, XAE_AF1_OFFSET, 0); 464 } 465 466 dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); 467 } 468 } 469 470 /** 471 * axienet_setoptions - Set an Axi Ethernet option 472 * @ndev: Pointer to the net_device structure 473 * @options: Option to be enabled/disabled 474 * 475 * The Axi Ethernet core has multiple features which can be selectively turned 476 * on or off. The typical options could be jumbo frame option, basic VLAN 477 * option, promiscuous mode option etc. This function is used to set or clear 478 * these options in the Axi Ethernet hardware. This is done through 479 * axienet_option structure . 480 */ 481 static void axienet_setoptions(struct net_device *ndev, u32 options) 482 { 483 int reg; 484 struct axienet_local *lp = netdev_priv(ndev); 485 struct axienet_option *tp = &axienet_options[0]; 486 487 while (tp->opt) { 488 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or)); 489 if (options & tp->opt) 490 reg |= tp->m_or; 491 axienet_iow(lp, tp->reg, reg); 492 tp++; 493 } 494 495 lp->options |= options; 496 } 497 498 static int __axienet_device_reset(struct axienet_local *lp) 499 { 500 u32 value; 501 int ret; 502 503 /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset 504 * process of Axi DMA takes a while to complete as all pending 505 * commands/transfers will be flushed or completed during this 506 * reset process. 507 * Note that even though both TX and RX have their own reset register, 508 * they both reset the entire DMA core, so only one needs to be used. 509 */ 510 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); 511 ret = read_poll_timeout(axienet_dma_in32, value, 512 !(value & XAXIDMA_CR_RESET_MASK), 513 DELAY_OF_ONE_MILLISEC, 50000, false, lp, 514 XAXIDMA_TX_CR_OFFSET); 515 if (ret) { 516 dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); 517 return ret; 518 } 519 520 /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ 521 ret = read_poll_timeout(axienet_ior, value, 522 value & XAE_INT_PHYRSTCMPLT_MASK, 523 DELAY_OF_ONE_MILLISEC, 50000, false, lp, 524 XAE_IS_OFFSET); 525 if (ret) { 526 dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); 527 return ret; 528 } 529 530 return 0; 531 } 532 533 /** 534 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware. 535 * @ndev: Pointer to the net_device structure 536 * 537 * This function is called to reset and initialize the Axi Ethernet core. This 538 * is typically called during initialization. It does a reset of the Axi DMA 539 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines 540 * areconnected to Axi Ethernet reset lines, this in turn resets the Axi 541 * Ethernet core. No separate hardware reset is done for the Axi Ethernet 542 * core. 543 * Returns 0 on success or a negative error number otherwise. 544 */ 545 static int axienet_device_reset(struct net_device *ndev) 546 { 547 u32 axienet_status; 548 struct axienet_local *lp = netdev_priv(ndev); 549 int ret; 550 551 ret = __axienet_device_reset(lp); 552 if (ret) 553 return ret; 554 555 lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; 556 lp->options |= XAE_OPTION_VLAN; 557 lp->options &= (~XAE_OPTION_JUMBO); 558 559 if ((ndev->mtu > XAE_MTU) && 560 (ndev->mtu <= XAE_JUMBO_MTU)) { 561 lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN + 562 XAE_TRL_SIZE; 563 564 if (lp->max_frm_size <= lp->rxmem) 565 lp->options |= XAE_OPTION_JUMBO; 566 } 567 568 ret = axienet_dma_bd_init(ndev); 569 if (ret) { 570 netdev_err(ndev, "%s: descriptor allocation failed\n", 571 __func__); 572 return ret; 573 } 574 575 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 576 axienet_status &= ~XAE_RCW1_RX_MASK; 577 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 578 579 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 580 if (axienet_status & XAE_INT_RXRJECT_MASK) 581 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 582 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 583 XAE_INT_RECV_ERROR_MASK : 0); 584 585 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 586 587 /* Sync default options with HW but leave receiver and 588 * transmitter disabled. 589 */ 590 axienet_setoptions(ndev, lp->options & 591 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 592 axienet_set_mac_address(ndev, NULL); 593 axienet_set_multicast_list(ndev); 594 axienet_setoptions(ndev, lp->options); 595 596 netif_trans_update(ndev); 597 598 return 0; 599 } 600 601 /** 602 * axienet_free_tx_chain - Clean up a series of linked TX descriptors. 603 * @ndev: Pointer to the net_device structure 604 * @first_bd: Index of first descriptor to clean up 605 * @nr_bds: Number of descriptors to clean up, can be -1 if unknown. 606 * @sizep: Pointer to a u32 filled with the total sum of all bytes 607 * in all cleaned-up descriptors. Ignored if NULL. 608 * 609 * Would either be called after a successful transmit operation, or after 610 * there was an error when setting up the chain. 611 * Returns the number of descriptors handled. 612 */ 613 static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, 614 int nr_bds, u32 *sizep) 615 { 616 struct axienet_local *lp = netdev_priv(ndev); 617 struct axidma_bd *cur_p; 618 int max_bds = nr_bds; 619 unsigned int status; 620 dma_addr_t phys; 621 int i; 622 623 if (max_bds == -1) 624 max_bds = lp->tx_bd_num; 625 626 for (i = 0; i < max_bds; i++) { 627 cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; 628 status = cur_p->status; 629 630 /* If no number is given, clean up *all* descriptors that have 631 * been completed by the MAC. 632 */ 633 if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) 634 break; 635 636 /* Ensure we see complete descriptor update */ 637 dma_rmb(); 638 phys = desc_get_phys_addr(lp, cur_p); 639 dma_unmap_single(ndev->dev.parent, phys, 640 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), 641 DMA_TO_DEVICE); 642 643 if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) 644 dev_consume_skb_irq(cur_p->skb); 645 646 cur_p->app0 = 0; 647 cur_p->app1 = 0; 648 cur_p->app2 = 0; 649 cur_p->app4 = 0; 650 cur_p->skb = NULL; 651 /* ensure our transmit path and device don't prematurely see status cleared */ 652 wmb(); 653 cur_p->cntrl = 0; 654 cur_p->status = 0; 655 656 if (sizep) 657 *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 658 } 659 660 return i; 661 } 662 663 /** 664 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy 665 * @lp: Pointer to the axienet_local structure 666 * @num_frag: The number of BDs to check for 667 * 668 * Return: 0, on success 669 * NETDEV_TX_BUSY, if any of the descriptors are not free 670 * 671 * This function is invoked before BDs are allocated and transmission starts. 672 * This function returns 0 if a BD or group of BDs can be allocated for 673 * transmission. If the BD or any of the BDs are not free the function 674 * returns a busy status. This is invoked from axienet_start_xmit. 675 */ 676 static inline int axienet_check_tx_bd_space(struct axienet_local *lp, 677 int num_frag) 678 { 679 struct axidma_bd *cur_p; 680 681 /* Ensure we see all descriptor updates from device or TX IRQ path */ 682 rmb(); 683 cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num]; 684 if (cur_p->cntrl) 685 return NETDEV_TX_BUSY; 686 return 0; 687 } 688 689 /** 690 * axienet_start_xmit_done - Invoked once a transmit is completed by the 691 * Axi DMA Tx channel. 692 * @ndev: Pointer to the net_device structure 693 * 694 * This function is invoked from the Axi DMA Tx isr to notify the completion 695 * of transmit operation. It clears fields in the corresponding Tx BDs and 696 * unmaps the corresponding buffer so that CPU can regain ownership of the 697 * buffer. It finally invokes "netif_wake_queue" to restart transmission if 698 * required. 699 */ 700 static void axienet_start_xmit_done(struct net_device *ndev) 701 { 702 struct axienet_local *lp = netdev_priv(ndev); 703 u32 packets = 0; 704 u32 size = 0; 705 706 packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size); 707 708 lp->tx_bd_ci += packets; 709 if (lp->tx_bd_ci >= lp->tx_bd_num) 710 lp->tx_bd_ci -= lp->tx_bd_num; 711 712 ndev->stats.tx_packets += packets; 713 ndev->stats.tx_bytes += size; 714 715 /* Matches barrier in axienet_start_xmit */ 716 smp_mb(); 717 718 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) 719 netif_wake_queue(ndev); 720 } 721 722 /** 723 * axienet_start_xmit - Starts the transmission. 724 * @skb: sk_buff pointer that contains data to be Txed. 725 * @ndev: Pointer to net_device structure. 726 * 727 * Return: NETDEV_TX_OK, on success 728 * NETDEV_TX_BUSY, if any of the descriptors are not free 729 * 730 * This function is invoked from upper layers to initiate transmission. The 731 * function uses the next available free BDs and populates their fields to 732 * start the transmission. Additionally if checksum offloading is supported, 733 * it populates AXI Stream Control fields with appropriate values. 734 */ 735 static netdev_tx_t 736 axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 737 { 738 u32 ii; 739 u32 num_frag; 740 u32 csum_start_off; 741 u32 csum_index_off; 742 skb_frag_t *frag; 743 dma_addr_t tail_p, phys; 744 struct axienet_local *lp = netdev_priv(ndev); 745 struct axidma_bd *cur_p; 746 u32 orig_tail_ptr = lp->tx_bd_tail; 747 748 num_frag = skb_shinfo(skb)->nr_frags; 749 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 750 751 if (axienet_check_tx_bd_space(lp, num_frag + 1)) { 752 /* Should not happen as last start_xmit call should have 753 * checked for sufficient space and queue should only be 754 * woken when sufficient space is available. 755 */ 756 netif_stop_queue(ndev); 757 if (net_ratelimit()) 758 netdev_warn(ndev, "TX ring unexpectedly full\n"); 759 return NETDEV_TX_BUSY; 760 } 761 762 if (skb->ip_summed == CHECKSUM_PARTIAL) { 763 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { 764 /* Tx Full Checksum Offload Enabled */ 765 cur_p->app0 |= 2; 766 } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { 767 csum_start_off = skb_transport_offset(skb); 768 csum_index_off = csum_start_off + skb->csum_offset; 769 /* Tx Partial Checksum Offload Enabled */ 770 cur_p->app0 |= 1; 771 cur_p->app1 = (csum_start_off << 16) | csum_index_off; 772 } 773 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 774 cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ 775 } 776 777 phys = dma_map_single(ndev->dev.parent, skb->data, 778 skb_headlen(skb), DMA_TO_DEVICE); 779 if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) { 780 if (net_ratelimit()) 781 netdev_err(ndev, "TX DMA mapping error\n"); 782 ndev->stats.tx_dropped++; 783 return NETDEV_TX_OK; 784 } 785 desc_set_phys_addr(lp, phys, cur_p); 786 cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; 787 788 for (ii = 0; ii < num_frag; ii++) { 789 if (++lp->tx_bd_tail >= lp->tx_bd_num) 790 lp->tx_bd_tail = 0; 791 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 792 frag = &skb_shinfo(skb)->frags[ii]; 793 phys = dma_map_single(ndev->dev.parent, 794 skb_frag_address(frag), 795 skb_frag_size(frag), 796 DMA_TO_DEVICE); 797 if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) { 798 if (net_ratelimit()) 799 netdev_err(ndev, "TX DMA mapping error\n"); 800 ndev->stats.tx_dropped++; 801 axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1, 802 NULL); 803 lp->tx_bd_tail = orig_tail_ptr; 804 805 return NETDEV_TX_OK; 806 } 807 desc_set_phys_addr(lp, phys, cur_p); 808 cur_p->cntrl = skb_frag_size(frag); 809 } 810 811 cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; 812 cur_p->skb = skb; 813 814 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; 815 /* Start the transfer */ 816 axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); 817 if (++lp->tx_bd_tail >= lp->tx_bd_num) 818 lp->tx_bd_tail = 0; 819 820 /* Stop queue if next transmit may not have space */ 821 if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { 822 netif_stop_queue(ndev); 823 824 /* Matches barrier in axienet_start_xmit_done */ 825 smp_mb(); 826 827 /* Space might have just been freed - check again */ 828 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) 829 netif_wake_queue(ndev); 830 } 831 832 return NETDEV_TX_OK; 833 } 834 835 /** 836 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received 837 * BD processing. 838 * @ndev: Pointer to net_device structure. 839 * 840 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It 841 * does minimal processing and invokes "netif_rx" to complete further 842 * processing. 843 */ 844 static void axienet_recv(struct net_device *ndev) 845 { 846 u32 length; 847 u32 csumstatus; 848 u32 size = 0; 849 u32 packets = 0; 850 dma_addr_t tail_p = 0; 851 struct axienet_local *lp = netdev_priv(ndev); 852 struct sk_buff *skb, *new_skb; 853 struct axidma_bd *cur_p; 854 855 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 856 857 while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { 858 dma_addr_t phys; 859 860 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; 861 862 /* Ensure we see complete descriptor update */ 863 dma_rmb(); 864 phys = desc_get_phys_addr(lp, cur_p); 865 dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size, 866 DMA_FROM_DEVICE); 867 868 skb = cur_p->skb; 869 cur_p->skb = NULL; 870 length = cur_p->app4 & 0x0000FFFF; 871 872 skb_put(skb, length); 873 skb->protocol = eth_type_trans(skb, ndev); 874 /*skb_checksum_none_assert(skb);*/ 875 skb->ip_summed = CHECKSUM_NONE; 876 877 /* if we're doing Rx csum offload, set it up */ 878 if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { 879 csumstatus = (cur_p->app2 & 880 XAE_FULL_CSUM_STATUS_MASK) >> 3; 881 if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) || 882 (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) { 883 skb->ip_summed = CHECKSUM_UNNECESSARY; 884 } 885 } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && 886 skb->protocol == htons(ETH_P_IP) && 887 skb->len > 64) { 888 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); 889 skb->ip_summed = CHECKSUM_COMPLETE; 890 } 891 892 netif_rx(skb); 893 894 size += length; 895 packets++; 896 897 new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); 898 if (!new_skb) 899 return; 900 901 phys = dma_map_single(ndev->dev.parent, new_skb->data, 902 lp->max_frm_size, 903 DMA_FROM_DEVICE); 904 if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) { 905 if (net_ratelimit()) 906 netdev_err(ndev, "RX DMA mapping error\n"); 907 dev_kfree_skb(new_skb); 908 return; 909 } 910 desc_set_phys_addr(lp, phys, cur_p); 911 912 cur_p->cntrl = lp->max_frm_size; 913 cur_p->status = 0; 914 cur_p->skb = new_skb; 915 916 if (++lp->rx_bd_ci >= lp->rx_bd_num) 917 lp->rx_bd_ci = 0; 918 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 919 } 920 921 ndev->stats.rx_packets += packets; 922 ndev->stats.rx_bytes += size; 923 924 if (tail_p) 925 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); 926 } 927 928 /** 929 * axienet_tx_irq - Tx Done Isr. 930 * @irq: irq number 931 * @_ndev: net_device pointer 932 * 933 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise. 934 * 935 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done" 936 * to complete the BD processing. 937 */ 938 static irqreturn_t axienet_tx_irq(int irq, void *_ndev) 939 { 940 u32 cr; 941 unsigned int status; 942 struct net_device *ndev = _ndev; 943 struct axienet_local *lp = netdev_priv(ndev); 944 945 status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 946 if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { 947 axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); 948 axienet_start_xmit_done(lp->ndev); 949 goto out; 950 } 951 if (!(status & XAXIDMA_IRQ_ALL_MASK)) 952 return IRQ_NONE; 953 if (status & XAXIDMA_IRQ_ERROR_MASK) { 954 dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status); 955 dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n", 956 (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, 957 (lp->tx_bd_v[lp->tx_bd_ci]).phys); 958 959 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 960 /* Disable coalesce, delay timer and error interrupts */ 961 cr &= (~XAXIDMA_IRQ_ALL_MASK); 962 /* Write to the Tx channel control register */ 963 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 964 965 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 966 /* Disable coalesce, delay timer and error interrupts */ 967 cr &= (~XAXIDMA_IRQ_ALL_MASK); 968 /* Write to the Rx channel control register */ 969 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 970 971 schedule_work(&lp->dma_err_task); 972 axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); 973 } 974 out: 975 return IRQ_HANDLED; 976 } 977 978 /** 979 * axienet_rx_irq - Rx Isr. 980 * @irq: irq number 981 * @_ndev: net_device pointer 982 * 983 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise. 984 * 985 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD 986 * processing. 987 */ 988 static irqreturn_t axienet_rx_irq(int irq, void *_ndev) 989 { 990 u32 cr; 991 unsigned int status; 992 struct net_device *ndev = _ndev; 993 struct axienet_local *lp = netdev_priv(ndev); 994 995 status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 996 if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { 997 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); 998 axienet_recv(lp->ndev); 999 goto out; 1000 } 1001 if (!(status & XAXIDMA_IRQ_ALL_MASK)) 1002 return IRQ_NONE; 1003 if (status & XAXIDMA_IRQ_ERROR_MASK) { 1004 dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status); 1005 dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n", 1006 (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, 1007 (lp->rx_bd_v[lp->rx_bd_ci]).phys); 1008 1009 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1010 /* Disable coalesce, delay timer and error interrupts */ 1011 cr &= (~XAXIDMA_IRQ_ALL_MASK); 1012 /* Finally write to the Tx channel control register */ 1013 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 1014 1015 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1016 /* Disable coalesce, delay timer and error interrupts */ 1017 cr &= (~XAXIDMA_IRQ_ALL_MASK); 1018 /* write to the Rx channel control register */ 1019 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 1020 1021 schedule_work(&lp->dma_err_task); 1022 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); 1023 } 1024 out: 1025 return IRQ_HANDLED; 1026 } 1027 1028 /** 1029 * axienet_eth_irq - Ethernet core Isr. 1030 * @irq: irq number 1031 * @_ndev: net_device pointer 1032 * 1033 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise. 1034 * 1035 * Handle miscellaneous conditions indicated by Ethernet core IRQ. 1036 */ 1037 static irqreturn_t axienet_eth_irq(int irq, void *_ndev) 1038 { 1039 struct net_device *ndev = _ndev; 1040 struct axienet_local *lp = netdev_priv(ndev); 1041 unsigned int pending; 1042 1043 pending = axienet_ior(lp, XAE_IP_OFFSET); 1044 if (!pending) 1045 return IRQ_NONE; 1046 1047 if (pending & XAE_INT_RXFIFOOVR_MASK) 1048 ndev->stats.rx_missed_errors++; 1049 1050 if (pending & XAE_INT_RXRJECT_MASK) 1051 ndev->stats.rx_frame_errors++; 1052 1053 axienet_iow(lp, XAE_IS_OFFSET, pending); 1054 return IRQ_HANDLED; 1055 } 1056 1057 static void axienet_dma_err_handler(struct work_struct *work); 1058 1059 /** 1060 * axienet_open - Driver open routine. 1061 * @ndev: Pointer to net_device structure 1062 * 1063 * Return: 0, on success. 1064 * non-zero error value on failure 1065 * 1066 * This is the driver open routine. It calls phylink_start to start the 1067 * PHY device. 1068 * It also allocates interrupt service routines, enables the interrupt lines 1069 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer 1070 * descriptors are initialized. 1071 */ 1072 static int axienet_open(struct net_device *ndev) 1073 { 1074 int ret; 1075 struct axienet_local *lp = netdev_priv(ndev); 1076 1077 dev_dbg(&ndev->dev, "axienet_open()\n"); 1078 1079 /* When we do an Axi Ethernet reset, it resets the complete core 1080 * including the MDIO. MDIO must be disabled before resetting. 1081 * Hold MDIO bus lock to avoid MDIO accesses during the reset. 1082 */ 1083 axienet_lock_mii(lp); 1084 ret = axienet_device_reset(ndev); 1085 axienet_unlock_mii(lp); 1086 1087 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); 1088 if (ret) { 1089 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); 1090 return ret; 1091 } 1092 1093 phylink_start(lp->phylink); 1094 1095 /* Enable worker thread for Axi DMA error handling */ 1096 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); 1097 1098 /* Enable interrupts for Axi DMA Tx */ 1099 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, 1100 ndev->name, ndev); 1101 if (ret) 1102 goto err_tx_irq; 1103 /* Enable interrupts for Axi DMA Rx */ 1104 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED, 1105 ndev->name, ndev); 1106 if (ret) 1107 goto err_rx_irq; 1108 /* Enable interrupts for Axi Ethernet core (if defined) */ 1109 if (lp->eth_irq > 0) { 1110 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, 1111 ndev->name, ndev); 1112 if (ret) 1113 goto err_eth_irq; 1114 } 1115 1116 return 0; 1117 1118 err_eth_irq: 1119 free_irq(lp->rx_irq, ndev); 1120 err_rx_irq: 1121 free_irq(lp->tx_irq, ndev); 1122 err_tx_irq: 1123 phylink_stop(lp->phylink); 1124 phylink_disconnect_phy(lp->phylink); 1125 cancel_work_sync(&lp->dma_err_task); 1126 dev_err(lp->dev, "request_irq() failed\n"); 1127 return ret; 1128 } 1129 1130 /** 1131 * axienet_stop - Driver stop routine. 1132 * @ndev: Pointer to net_device structure 1133 * 1134 * Return: 0, on success. 1135 * 1136 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY 1137 * device. It also removes the interrupt handlers and disables the interrupts. 1138 * The Axi DMA Tx/Rx BDs are released. 1139 */ 1140 static int axienet_stop(struct net_device *ndev) 1141 { 1142 u32 cr, sr; 1143 int count; 1144 struct axienet_local *lp = netdev_priv(ndev); 1145 1146 dev_dbg(&ndev->dev, "axienet_close()\n"); 1147 1148 phylink_stop(lp->phylink); 1149 phylink_disconnect_phy(lp->phylink); 1150 1151 axienet_setoptions(ndev, lp->options & 1152 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1153 1154 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1155 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); 1156 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 1157 1158 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1159 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); 1160 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 1161 1162 axienet_iow(lp, XAE_IE_OFFSET, 0); 1163 1164 /* Give DMAs a chance to halt gracefully */ 1165 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1166 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { 1167 msleep(20); 1168 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1169 } 1170 1171 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1172 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { 1173 msleep(20); 1174 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1175 } 1176 1177 /* Do a reset to ensure DMA is really stopped */ 1178 axienet_lock_mii(lp); 1179 __axienet_device_reset(lp); 1180 axienet_unlock_mii(lp); 1181 1182 cancel_work_sync(&lp->dma_err_task); 1183 1184 if (lp->eth_irq > 0) 1185 free_irq(lp->eth_irq, ndev); 1186 free_irq(lp->tx_irq, ndev); 1187 free_irq(lp->rx_irq, ndev); 1188 1189 axienet_dma_bd_release(ndev); 1190 return 0; 1191 } 1192 1193 /** 1194 * axienet_change_mtu - Driver change mtu routine. 1195 * @ndev: Pointer to net_device structure 1196 * @new_mtu: New mtu value to be applied 1197 * 1198 * Return: Always returns 0 (success). 1199 * 1200 * This is the change mtu driver routine. It checks if the Axi Ethernet 1201 * hardware supports jumbo frames before changing the mtu. This can be 1202 * called only when the device is not up. 1203 */ 1204 static int axienet_change_mtu(struct net_device *ndev, int new_mtu) 1205 { 1206 struct axienet_local *lp = netdev_priv(ndev); 1207 1208 if (netif_running(ndev)) 1209 return -EBUSY; 1210 1211 if ((new_mtu + VLAN_ETH_HLEN + 1212 XAE_TRL_SIZE) > lp->rxmem) 1213 return -EINVAL; 1214 1215 ndev->mtu = new_mtu; 1216 1217 return 0; 1218 } 1219 1220 #ifdef CONFIG_NET_POLL_CONTROLLER 1221 /** 1222 * axienet_poll_controller - Axi Ethernet poll mechanism. 1223 * @ndev: Pointer to net_device structure 1224 * 1225 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior 1226 * to polling the ISRs and are enabled back after the polling is done. 1227 */ 1228 static void axienet_poll_controller(struct net_device *ndev) 1229 { 1230 struct axienet_local *lp = netdev_priv(ndev); 1231 disable_irq(lp->tx_irq); 1232 disable_irq(lp->rx_irq); 1233 axienet_rx_irq(lp->tx_irq, ndev); 1234 axienet_tx_irq(lp->rx_irq, ndev); 1235 enable_irq(lp->tx_irq); 1236 enable_irq(lp->rx_irq); 1237 } 1238 #endif 1239 1240 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1241 { 1242 struct axienet_local *lp = netdev_priv(dev); 1243 1244 if (!netif_running(dev)) 1245 return -EINVAL; 1246 1247 return phylink_mii_ioctl(lp->phylink, rq, cmd); 1248 } 1249 1250 static const struct net_device_ops axienet_netdev_ops = { 1251 .ndo_open = axienet_open, 1252 .ndo_stop = axienet_stop, 1253 .ndo_start_xmit = axienet_start_xmit, 1254 .ndo_change_mtu = axienet_change_mtu, 1255 .ndo_set_mac_address = netdev_set_mac_address, 1256 .ndo_validate_addr = eth_validate_addr, 1257 .ndo_eth_ioctl = axienet_ioctl, 1258 .ndo_set_rx_mode = axienet_set_multicast_list, 1259 #ifdef CONFIG_NET_POLL_CONTROLLER 1260 .ndo_poll_controller = axienet_poll_controller, 1261 #endif 1262 }; 1263 1264 /** 1265 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information. 1266 * @ndev: Pointer to net_device structure 1267 * @ed: Pointer to ethtool_drvinfo structure 1268 * 1269 * This implements ethtool command for getting the driver information. 1270 * Issue "ethtool -i ethX" under linux prompt to execute this function. 1271 */ 1272 static void axienet_ethtools_get_drvinfo(struct net_device *ndev, 1273 struct ethtool_drvinfo *ed) 1274 { 1275 strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); 1276 strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); 1277 } 1278 1279 /** 1280 * axienet_ethtools_get_regs_len - Get the total regs length present in the 1281 * AxiEthernet core. 1282 * @ndev: Pointer to net_device structure 1283 * 1284 * This implements ethtool command for getting the total register length 1285 * information. 1286 * 1287 * Return: the total regs length 1288 */ 1289 static int axienet_ethtools_get_regs_len(struct net_device *ndev) 1290 { 1291 return sizeof(u32) * AXIENET_REGS_N; 1292 } 1293 1294 /** 1295 * axienet_ethtools_get_regs - Dump the contents of all registers present 1296 * in AxiEthernet core. 1297 * @ndev: Pointer to net_device structure 1298 * @regs: Pointer to ethtool_regs structure 1299 * @ret: Void pointer used to return the contents of the registers. 1300 * 1301 * This implements ethtool command for getting the Axi Ethernet register dump. 1302 * Issue "ethtool -d ethX" to execute this function. 1303 */ 1304 static void axienet_ethtools_get_regs(struct net_device *ndev, 1305 struct ethtool_regs *regs, void *ret) 1306 { 1307 u32 *data = (u32 *) ret; 1308 size_t len = sizeof(u32) * AXIENET_REGS_N; 1309 struct axienet_local *lp = netdev_priv(ndev); 1310 1311 regs->version = 0; 1312 regs->len = len; 1313 1314 memset(data, 0, len); 1315 data[0] = axienet_ior(lp, XAE_RAF_OFFSET); 1316 data[1] = axienet_ior(lp, XAE_TPF_OFFSET); 1317 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); 1318 data[3] = axienet_ior(lp, XAE_IS_OFFSET); 1319 data[4] = axienet_ior(lp, XAE_IP_OFFSET); 1320 data[5] = axienet_ior(lp, XAE_IE_OFFSET); 1321 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); 1322 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); 1323 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); 1324 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); 1325 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); 1326 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); 1327 data[12] = axienet_ior(lp, XAE_PPST_OFFSET); 1328 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); 1329 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); 1330 data[15] = axienet_ior(lp, XAE_TC_OFFSET); 1331 data[16] = axienet_ior(lp, XAE_FCC_OFFSET); 1332 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); 1333 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); 1334 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); 1335 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); 1336 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); 1337 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); 1338 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); 1339 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); 1340 data[29] = axienet_ior(lp, XAE_FMI_OFFSET); 1341 data[30] = axienet_ior(lp, XAE_AF0_OFFSET); 1342 data[31] = axienet_ior(lp, XAE_AF1_OFFSET); 1343 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1344 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1345 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); 1346 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); 1347 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1348 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1349 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); 1350 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); 1351 } 1352 1353 static void 1354 axienet_ethtools_get_ringparam(struct net_device *ndev, 1355 struct ethtool_ringparam *ering, 1356 struct kernel_ethtool_ringparam *kernel_ering, 1357 struct netlink_ext_ack *extack) 1358 { 1359 struct axienet_local *lp = netdev_priv(ndev); 1360 1361 ering->rx_max_pending = RX_BD_NUM_MAX; 1362 ering->rx_mini_max_pending = 0; 1363 ering->rx_jumbo_max_pending = 0; 1364 ering->tx_max_pending = TX_BD_NUM_MAX; 1365 ering->rx_pending = lp->rx_bd_num; 1366 ering->rx_mini_pending = 0; 1367 ering->rx_jumbo_pending = 0; 1368 ering->tx_pending = lp->tx_bd_num; 1369 } 1370 1371 static int 1372 axienet_ethtools_set_ringparam(struct net_device *ndev, 1373 struct ethtool_ringparam *ering, 1374 struct kernel_ethtool_ringparam *kernel_ering, 1375 struct netlink_ext_ack *extack) 1376 { 1377 struct axienet_local *lp = netdev_priv(ndev); 1378 1379 if (ering->rx_pending > RX_BD_NUM_MAX || 1380 ering->rx_mini_pending || 1381 ering->rx_jumbo_pending || 1382 ering->tx_pending < TX_BD_NUM_MIN || 1383 ering->tx_pending > TX_BD_NUM_MAX) 1384 return -EINVAL; 1385 1386 if (netif_running(ndev)) 1387 return -EBUSY; 1388 1389 lp->rx_bd_num = ering->rx_pending; 1390 lp->tx_bd_num = ering->tx_pending; 1391 return 0; 1392 } 1393 1394 /** 1395 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for 1396 * Tx and Rx paths. 1397 * @ndev: Pointer to net_device structure 1398 * @epauseparm: Pointer to ethtool_pauseparam structure. 1399 * 1400 * This implements ethtool command for getting axi ethernet pause frame 1401 * setting. Issue "ethtool -a ethX" to execute this function. 1402 */ 1403 static void 1404 axienet_ethtools_get_pauseparam(struct net_device *ndev, 1405 struct ethtool_pauseparam *epauseparm) 1406 { 1407 struct axienet_local *lp = netdev_priv(ndev); 1408 1409 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); 1410 } 1411 1412 /** 1413 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) 1414 * settings. 1415 * @ndev: Pointer to net_device structure 1416 * @epauseparm:Pointer to ethtool_pauseparam structure 1417 * 1418 * This implements ethtool command for enabling flow control on Rx and Tx 1419 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this 1420 * function. 1421 * 1422 * Return: 0 on success, -EFAULT if device is running 1423 */ 1424 static int 1425 axienet_ethtools_set_pauseparam(struct net_device *ndev, 1426 struct ethtool_pauseparam *epauseparm) 1427 { 1428 struct axienet_local *lp = netdev_priv(ndev); 1429 1430 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); 1431 } 1432 1433 /** 1434 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. 1435 * @ndev: Pointer to net_device structure 1436 * @ecoalesce: Pointer to ethtool_coalesce structure 1437 * @kernel_coal: ethtool CQE mode setting structure 1438 * @extack: extack for reporting error messages 1439 * 1440 * This implements ethtool command for getting the DMA interrupt coalescing 1441 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to 1442 * execute this function. 1443 * 1444 * Return: 0 always 1445 */ 1446 static int 1447 axienet_ethtools_get_coalesce(struct net_device *ndev, 1448 struct ethtool_coalesce *ecoalesce, 1449 struct kernel_ethtool_coalesce *kernel_coal, 1450 struct netlink_ext_ack *extack) 1451 { 1452 u32 regval = 0; 1453 struct axienet_local *lp = netdev_priv(ndev); 1454 regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1455 ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) 1456 >> XAXIDMA_COALESCE_SHIFT; 1457 regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1458 ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) 1459 >> XAXIDMA_COALESCE_SHIFT; 1460 return 0; 1461 } 1462 1463 /** 1464 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. 1465 * @ndev: Pointer to net_device structure 1466 * @ecoalesce: Pointer to ethtool_coalesce structure 1467 * @kernel_coal: ethtool CQE mode setting structure 1468 * @extack: extack for reporting error messages 1469 * 1470 * This implements ethtool command for setting the DMA interrupt coalescing 1471 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux 1472 * prompt to execute this function. 1473 * 1474 * Return: 0, on success, Non-zero error value on failure. 1475 */ 1476 static int 1477 axienet_ethtools_set_coalesce(struct net_device *ndev, 1478 struct ethtool_coalesce *ecoalesce, 1479 struct kernel_ethtool_coalesce *kernel_coal, 1480 struct netlink_ext_ack *extack) 1481 { 1482 struct axienet_local *lp = netdev_priv(ndev); 1483 1484 if (netif_running(ndev)) { 1485 netdev_err(ndev, 1486 "Please stop netif before applying configuration\n"); 1487 return -EFAULT; 1488 } 1489 1490 if (ecoalesce->rx_max_coalesced_frames) 1491 lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; 1492 if (ecoalesce->tx_max_coalesced_frames) 1493 lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; 1494 1495 return 0; 1496 } 1497 1498 static int 1499 axienet_ethtools_get_link_ksettings(struct net_device *ndev, 1500 struct ethtool_link_ksettings *cmd) 1501 { 1502 struct axienet_local *lp = netdev_priv(ndev); 1503 1504 return phylink_ethtool_ksettings_get(lp->phylink, cmd); 1505 } 1506 1507 static int 1508 axienet_ethtools_set_link_ksettings(struct net_device *ndev, 1509 const struct ethtool_link_ksettings *cmd) 1510 { 1511 struct axienet_local *lp = netdev_priv(ndev); 1512 1513 return phylink_ethtool_ksettings_set(lp->phylink, cmd); 1514 } 1515 1516 static int axienet_ethtools_nway_reset(struct net_device *dev) 1517 { 1518 struct axienet_local *lp = netdev_priv(dev); 1519 1520 return phylink_ethtool_nway_reset(lp->phylink); 1521 } 1522 1523 static const struct ethtool_ops axienet_ethtool_ops = { 1524 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES, 1525 .get_drvinfo = axienet_ethtools_get_drvinfo, 1526 .get_regs_len = axienet_ethtools_get_regs_len, 1527 .get_regs = axienet_ethtools_get_regs, 1528 .get_link = ethtool_op_get_link, 1529 .get_ringparam = axienet_ethtools_get_ringparam, 1530 .set_ringparam = axienet_ethtools_set_ringparam, 1531 .get_pauseparam = axienet_ethtools_get_pauseparam, 1532 .set_pauseparam = axienet_ethtools_set_pauseparam, 1533 .get_coalesce = axienet_ethtools_get_coalesce, 1534 .set_coalesce = axienet_ethtools_set_coalesce, 1535 .get_link_ksettings = axienet_ethtools_get_link_ksettings, 1536 .set_link_ksettings = axienet_ethtools_set_link_ksettings, 1537 .nway_reset = axienet_ethtools_nway_reset, 1538 }; 1539 1540 static void axienet_mac_pcs_get_state(struct phylink_config *config, 1541 struct phylink_link_state *state) 1542 { 1543 struct net_device *ndev = to_net_dev(config->dev); 1544 struct axienet_local *lp = netdev_priv(ndev); 1545 1546 switch (state->interface) { 1547 case PHY_INTERFACE_MODE_SGMII: 1548 case PHY_INTERFACE_MODE_1000BASEX: 1549 phylink_mii_c22_pcs_get_state(lp->pcs_phy, state); 1550 break; 1551 default: 1552 break; 1553 } 1554 } 1555 1556 static void axienet_mac_an_restart(struct phylink_config *config) 1557 { 1558 struct net_device *ndev = to_net_dev(config->dev); 1559 struct axienet_local *lp = netdev_priv(ndev); 1560 1561 phylink_mii_c22_pcs_an_restart(lp->pcs_phy); 1562 } 1563 1564 static int axienet_mac_prepare(struct phylink_config *config, unsigned int mode, 1565 phy_interface_t iface) 1566 { 1567 struct net_device *ndev = to_net_dev(config->dev); 1568 struct axienet_local *lp = netdev_priv(ndev); 1569 int ret; 1570 1571 switch (iface) { 1572 case PHY_INTERFACE_MODE_SGMII: 1573 case PHY_INTERFACE_MODE_1000BASEX: 1574 if (!lp->switch_x_sgmii) 1575 return 0; 1576 1577 ret = mdiobus_write(lp->pcs_phy->bus, 1578 lp->pcs_phy->addr, 1579 XLNX_MII_STD_SELECT_REG, 1580 iface == PHY_INTERFACE_MODE_SGMII ? 1581 XLNX_MII_STD_SELECT_SGMII : 0); 1582 if (ret < 0) 1583 netdev_warn(ndev, "Failed to switch PHY interface: %d\n", 1584 ret); 1585 return ret; 1586 default: 1587 return 0; 1588 } 1589 } 1590 1591 static void axienet_mac_config(struct phylink_config *config, unsigned int mode, 1592 const struct phylink_link_state *state) 1593 { 1594 struct net_device *ndev = to_net_dev(config->dev); 1595 struct axienet_local *lp = netdev_priv(ndev); 1596 int ret; 1597 1598 switch (state->interface) { 1599 case PHY_INTERFACE_MODE_SGMII: 1600 case PHY_INTERFACE_MODE_1000BASEX: 1601 ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode, 1602 state->interface, 1603 state->advertising); 1604 if (ret < 0) 1605 netdev_warn(ndev, "Failed to configure PCS: %d\n", 1606 ret); 1607 break; 1608 1609 default: 1610 break; 1611 } 1612 } 1613 1614 static void axienet_mac_link_down(struct phylink_config *config, 1615 unsigned int mode, 1616 phy_interface_t interface) 1617 { 1618 /* nothing meaningful to do */ 1619 } 1620 1621 static void axienet_mac_link_up(struct phylink_config *config, 1622 struct phy_device *phy, 1623 unsigned int mode, phy_interface_t interface, 1624 int speed, int duplex, 1625 bool tx_pause, bool rx_pause) 1626 { 1627 struct net_device *ndev = to_net_dev(config->dev); 1628 struct axienet_local *lp = netdev_priv(ndev); 1629 u32 emmc_reg, fcc_reg; 1630 1631 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); 1632 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; 1633 1634 switch (speed) { 1635 case SPEED_1000: 1636 emmc_reg |= XAE_EMMC_LINKSPD_1000; 1637 break; 1638 case SPEED_100: 1639 emmc_reg |= XAE_EMMC_LINKSPD_100; 1640 break; 1641 case SPEED_10: 1642 emmc_reg |= XAE_EMMC_LINKSPD_10; 1643 break; 1644 default: 1645 dev_err(&ndev->dev, 1646 "Speed other than 10, 100 or 1Gbps is not supported\n"); 1647 break; 1648 } 1649 1650 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); 1651 1652 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); 1653 if (tx_pause) 1654 fcc_reg |= XAE_FCC_FCTX_MASK; 1655 else 1656 fcc_reg &= ~XAE_FCC_FCTX_MASK; 1657 if (rx_pause) 1658 fcc_reg |= XAE_FCC_FCRX_MASK; 1659 else 1660 fcc_reg &= ~XAE_FCC_FCRX_MASK; 1661 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); 1662 } 1663 1664 static const struct phylink_mac_ops axienet_phylink_ops = { 1665 .validate = phylink_generic_validate, 1666 .mac_pcs_get_state = axienet_mac_pcs_get_state, 1667 .mac_an_restart = axienet_mac_an_restart, 1668 .mac_prepare = axienet_mac_prepare, 1669 .mac_config = axienet_mac_config, 1670 .mac_link_down = axienet_mac_link_down, 1671 .mac_link_up = axienet_mac_link_up, 1672 }; 1673 1674 /** 1675 * axienet_dma_err_handler - Work queue task for Axi DMA Error 1676 * @work: pointer to work_struct 1677 * 1678 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the 1679 * Tx/Rx BDs. 1680 */ 1681 static void axienet_dma_err_handler(struct work_struct *work) 1682 { 1683 u32 axienet_status; 1684 u32 cr, i; 1685 struct axienet_local *lp = container_of(work, struct axienet_local, 1686 dma_err_task); 1687 struct net_device *ndev = lp->ndev; 1688 struct axidma_bd *cur_p; 1689 1690 axienet_setoptions(ndev, lp->options & 1691 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1692 /* When we do an Axi Ethernet reset, it resets the complete core 1693 * including the MDIO. MDIO must be disabled before resetting. 1694 * Hold MDIO bus lock to avoid MDIO accesses during the reset. 1695 */ 1696 axienet_lock_mii(lp); 1697 __axienet_device_reset(lp); 1698 axienet_unlock_mii(lp); 1699 1700 for (i = 0; i < lp->tx_bd_num; i++) { 1701 cur_p = &lp->tx_bd_v[i]; 1702 if (cur_p->cntrl) { 1703 dma_addr_t addr = desc_get_phys_addr(lp, cur_p); 1704 1705 dma_unmap_single(ndev->dev.parent, addr, 1706 (cur_p->cntrl & 1707 XAXIDMA_BD_CTRL_LENGTH_MASK), 1708 DMA_TO_DEVICE); 1709 } 1710 if (cur_p->skb) 1711 dev_kfree_skb_irq(cur_p->skb); 1712 cur_p->phys = 0; 1713 cur_p->phys_msb = 0; 1714 cur_p->cntrl = 0; 1715 cur_p->status = 0; 1716 cur_p->app0 = 0; 1717 cur_p->app1 = 0; 1718 cur_p->app2 = 0; 1719 cur_p->app3 = 0; 1720 cur_p->app4 = 0; 1721 cur_p->skb = NULL; 1722 } 1723 1724 for (i = 0; i < lp->rx_bd_num; i++) { 1725 cur_p = &lp->rx_bd_v[i]; 1726 cur_p->status = 0; 1727 cur_p->app0 = 0; 1728 cur_p->app1 = 0; 1729 cur_p->app2 = 0; 1730 cur_p->app3 = 0; 1731 cur_p->app4 = 0; 1732 } 1733 1734 lp->tx_bd_ci = 0; 1735 lp->tx_bd_tail = 0; 1736 lp->rx_bd_ci = 0; 1737 1738 /* Start updating the Rx channel control register */ 1739 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1740 /* Update the interrupt coalesce count */ 1741 cr = ((cr & ~XAXIDMA_COALESCE_MASK) | 1742 (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); 1743 /* Update the delay timer count */ 1744 cr = ((cr & ~XAXIDMA_DELAY_MASK) | 1745 (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 1746 /* Enable coalesce, delay timer and error interrupts */ 1747 cr |= XAXIDMA_IRQ_ALL_MASK; 1748 /* Finally write to the Rx channel control register */ 1749 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 1750 1751 /* Start updating the Tx channel control register */ 1752 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1753 /* Update the interrupt coalesce count */ 1754 cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | 1755 (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); 1756 /* Update the delay timer count */ 1757 cr = (((cr & ~XAXIDMA_DELAY_MASK)) | 1758 (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 1759 /* Enable coalesce, delay timer and error interrupts */ 1760 cr |= XAXIDMA_IRQ_ALL_MASK; 1761 /* Finally write to the Tx channel control register */ 1762 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 1763 1764 /* Populate the tail pointer and bring the Rx Axi DMA engine out of 1765 * halted state. This will make the Rx side ready for reception. 1766 */ 1767 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); 1768 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1769 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, 1770 cr | XAXIDMA_CR_RUNSTOP_MASK); 1771 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + 1772 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); 1773 1774 /* Write to the RS (Run-stop) bit in the Tx channel control register. 1775 * Tx channel is now ready to run. But only after we write to the 1776 * tail pointer register that the Tx channel will start transmitting 1777 */ 1778 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); 1779 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1780 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, 1781 cr | XAXIDMA_CR_RUNSTOP_MASK); 1782 1783 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 1784 axienet_status &= ~XAE_RCW1_RX_MASK; 1785 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 1786 1787 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 1788 if (axienet_status & XAE_INT_RXRJECT_MASK) 1789 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 1790 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 1791 XAE_INT_RECV_ERROR_MASK : 0); 1792 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 1793 1794 /* Sync default options with HW but leave receiver and 1795 * transmitter disabled. 1796 */ 1797 axienet_setoptions(ndev, lp->options & 1798 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1799 axienet_set_mac_address(ndev, NULL); 1800 axienet_set_multicast_list(ndev); 1801 axienet_setoptions(ndev, lp->options); 1802 } 1803 1804 /** 1805 * axienet_probe - Axi Ethernet probe function. 1806 * @pdev: Pointer to platform device structure. 1807 * 1808 * Return: 0, on success 1809 * Non-zero error value on failure. 1810 * 1811 * This is the probe routine for Axi Ethernet driver. This is called before 1812 * any other driver routines are invoked. It allocates and sets up the Ethernet 1813 * device. Parses through device tree and populates fields of 1814 * axienet_local. It registers the Ethernet device. 1815 */ 1816 static int axienet_probe(struct platform_device *pdev) 1817 { 1818 int ret; 1819 struct device_node *np; 1820 struct axienet_local *lp; 1821 struct net_device *ndev; 1822 struct resource *ethres; 1823 u8 mac_addr[ETH_ALEN]; 1824 int addr_width = 32; 1825 u32 value; 1826 1827 ndev = alloc_etherdev(sizeof(*lp)); 1828 if (!ndev) 1829 return -ENOMEM; 1830 1831 platform_set_drvdata(pdev, ndev); 1832 1833 SET_NETDEV_DEV(ndev, &pdev->dev); 1834 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1835 ndev->features = NETIF_F_SG; 1836 ndev->netdev_ops = &axienet_netdev_ops; 1837 ndev->ethtool_ops = &axienet_ethtool_ops; 1838 1839 /* MTU range: 64 - 9000 */ 1840 ndev->min_mtu = 64; 1841 ndev->max_mtu = XAE_JUMBO_MTU; 1842 1843 lp = netdev_priv(ndev); 1844 lp->ndev = ndev; 1845 lp->dev = &pdev->dev; 1846 lp->options = XAE_OPTION_DEFAULTS; 1847 lp->rx_bd_num = RX_BD_NUM_DEFAULT; 1848 lp->tx_bd_num = TX_BD_NUM_DEFAULT; 1849 1850 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); 1851 if (!lp->axi_clk) { 1852 /* For backward compatibility, if named AXI clock is not present, 1853 * treat the first clock specified as the AXI clock. 1854 */ 1855 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); 1856 } 1857 if (IS_ERR(lp->axi_clk)) { 1858 ret = PTR_ERR(lp->axi_clk); 1859 goto free_netdev; 1860 } 1861 ret = clk_prepare_enable(lp->axi_clk); 1862 if (ret) { 1863 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); 1864 goto free_netdev; 1865 } 1866 1867 lp->misc_clks[0].id = "axis_clk"; 1868 lp->misc_clks[1].id = "ref_clk"; 1869 lp->misc_clks[2].id = "mgt_clk"; 1870 1871 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); 1872 if (ret) 1873 goto cleanup_clk; 1874 1875 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 1876 if (ret) 1877 goto cleanup_clk; 1878 1879 /* Map device registers */ 1880 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, ðres); 1881 if (IS_ERR(lp->regs)) { 1882 ret = PTR_ERR(lp->regs); 1883 goto cleanup_clk; 1884 } 1885 lp->regs_start = ethres->start; 1886 1887 /* Setup checksum offload, but default to off if not specified */ 1888 lp->features = 0; 1889 1890 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); 1891 if (!ret) { 1892 switch (value) { 1893 case 1: 1894 lp->csum_offload_on_tx_path = 1895 XAE_FEATURE_PARTIAL_TX_CSUM; 1896 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; 1897 /* Can checksum TCP/UDP over IPv4. */ 1898 ndev->features |= NETIF_F_IP_CSUM; 1899 break; 1900 case 2: 1901 lp->csum_offload_on_tx_path = 1902 XAE_FEATURE_FULL_TX_CSUM; 1903 lp->features |= XAE_FEATURE_FULL_TX_CSUM; 1904 /* Can checksum TCP/UDP over IPv4. */ 1905 ndev->features |= NETIF_F_IP_CSUM; 1906 break; 1907 default: 1908 lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; 1909 } 1910 } 1911 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); 1912 if (!ret) { 1913 switch (value) { 1914 case 1: 1915 lp->csum_offload_on_rx_path = 1916 XAE_FEATURE_PARTIAL_RX_CSUM; 1917 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; 1918 break; 1919 case 2: 1920 lp->csum_offload_on_rx_path = 1921 XAE_FEATURE_FULL_RX_CSUM; 1922 lp->features |= XAE_FEATURE_FULL_RX_CSUM; 1923 break; 1924 default: 1925 lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; 1926 } 1927 } 1928 /* For supporting jumbo frames, the Axi Ethernet hardware must have 1929 * a larger Rx/Tx Memory. Typically, the size must be large so that 1930 * we can enable jumbo option and start supporting jumbo frames. 1931 * Here we check for memory allocated for Rx/Tx in the hardware from 1932 * the device-tree and accordingly set flags. 1933 */ 1934 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); 1935 1936 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, 1937 "xlnx,switch-x-sgmii"); 1938 1939 /* Start with the proprietary, and broken phy_type */ 1940 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); 1941 if (!ret) { 1942 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); 1943 switch (value) { 1944 case XAE_PHY_TYPE_MII: 1945 lp->phy_mode = PHY_INTERFACE_MODE_MII; 1946 break; 1947 case XAE_PHY_TYPE_GMII: 1948 lp->phy_mode = PHY_INTERFACE_MODE_GMII; 1949 break; 1950 case XAE_PHY_TYPE_RGMII_2_0: 1951 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; 1952 break; 1953 case XAE_PHY_TYPE_SGMII: 1954 lp->phy_mode = PHY_INTERFACE_MODE_SGMII; 1955 break; 1956 case XAE_PHY_TYPE_1000BASE_X: 1957 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; 1958 break; 1959 default: 1960 ret = -EINVAL; 1961 goto cleanup_clk; 1962 } 1963 } else { 1964 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); 1965 if (ret) 1966 goto cleanup_clk; 1967 } 1968 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && 1969 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { 1970 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); 1971 ret = -EINVAL; 1972 goto cleanup_clk; 1973 } 1974 1975 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 1976 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); 1977 if (np) { 1978 struct resource dmares; 1979 1980 ret = of_address_to_resource(np, 0, &dmares); 1981 if (ret) { 1982 dev_err(&pdev->dev, 1983 "unable to get DMA resource\n"); 1984 of_node_put(np); 1985 goto cleanup_clk; 1986 } 1987 lp->dma_regs = devm_ioremap_resource(&pdev->dev, 1988 &dmares); 1989 lp->rx_irq = irq_of_parse_and_map(np, 1); 1990 lp->tx_irq = irq_of_parse_and_map(np, 0); 1991 of_node_put(np); 1992 lp->eth_irq = platform_get_irq_optional(pdev, 0); 1993 } else { 1994 /* Check for these resources directly on the Ethernet node. */ 1995 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); 1996 lp->rx_irq = platform_get_irq(pdev, 1); 1997 lp->tx_irq = platform_get_irq(pdev, 0); 1998 lp->eth_irq = platform_get_irq_optional(pdev, 2); 1999 } 2000 if (IS_ERR(lp->dma_regs)) { 2001 dev_err(&pdev->dev, "could not map DMA regs\n"); 2002 ret = PTR_ERR(lp->dma_regs); 2003 goto cleanup_clk; 2004 } 2005 if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { 2006 dev_err(&pdev->dev, "could not determine irqs\n"); 2007 ret = -ENOMEM; 2008 goto cleanup_clk; 2009 } 2010 2011 /* Autodetect the need for 64-bit DMA pointers. 2012 * When the IP is configured for a bus width bigger than 32 bits, 2013 * writing the MSB registers is mandatory, even if they are all 0. 2014 * We can detect this case by writing all 1's to one such register 2015 * and see if that sticks: when the IP is configured for 32 bits 2016 * only, those registers are RES0. 2017 * Those MSB registers were introduced in IP v7.1, which we check first. 2018 */ 2019 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { 2020 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; 2021 2022 iowrite32(0x0, desc); 2023 if (ioread32(desc) == 0) { /* sanity check */ 2024 iowrite32(0xffffffff, desc); 2025 if (ioread32(desc) > 0) { 2026 lp->features |= XAE_FEATURE_DMA_64BIT; 2027 addr_width = 64; 2028 dev_info(&pdev->dev, 2029 "autodetected 64-bit DMA range\n"); 2030 } 2031 iowrite32(0x0, desc); 2032 } 2033 } 2034 2035 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); 2036 if (ret) { 2037 dev_err(&pdev->dev, "No suitable DMA available\n"); 2038 goto cleanup_clk; 2039 } 2040 2041 /* Check for Ethernet core IRQ (optional) */ 2042 if (lp->eth_irq <= 0) 2043 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); 2044 2045 /* Retrieve the MAC address */ 2046 ret = of_get_mac_address(pdev->dev.of_node, mac_addr); 2047 if (!ret) { 2048 axienet_set_mac_address(ndev, mac_addr); 2049 } else { 2050 dev_warn(&pdev->dev, "could not find MAC address property: %d\n", 2051 ret); 2052 axienet_set_mac_address(ndev, NULL); 2053 } 2054 2055 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 2056 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 2057 2058 /* Reset core now that clocks are enabled, prior to accessing MDIO */ 2059 ret = __axienet_device_reset(lp); 2060 if (ret) 2061 goto cleanup_clk; 2062 2063 lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 2064 if (lp->phy_node) { 2065 ret = axienet_mdio_setup(lp); 2066 if (ret) 2067 dev_warn(&pdev->dev, 2068 "error registering MDIO bus: %d\n", ret); 2069 } 2070 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || 2071 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { 2072 if (!lp->phy_node) { 2073 dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); 2074 ret = -EINVAL; 2075 goto cleanup_mdio; 2076 } 2077 lp->pcs_phy = of_mdio_find_device(lp->phy_node); 2078 if (!lp->pcs_phy) { 2079 ret = -EPROBE_DEFER; 2080 goto cleanup_mdio; 2081 } 2082 lp->phylink_config.pcs_poll = true; 2083 } 2084 2085 lp->phylink_config.dev = &ndev->dev; 2086 lp->phylink_config.type = PHYLINK_NETDEV; 2087 lp->phylink_config.legacy_pre_march2020 = true; 2088 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | 2089 MAC_10FD | MAC_100FD | MAC_1000FD; 2090 2091 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); 2092 if (lp->switch_x_sgmii) { 2093 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2094 lp->phylink_config.supported_interfaces); 2095 __set_bit(PHY_INTERFACE_MODE_SGMII, 2096 lp->phylink_config.supported_interfaces); 2097 } 2098 2099 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, 2100 lp->phy_mode, 2101 &axienet_phylink_ops); 2102 if (IS_ERR(lp->phylink)) { 2103 ret = PTR_ERR(lp->phylink); 2104 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); 2105 goto cleanup_mdio; 2106 } 2107 2108 ret = register_netdev(lp->ndev); 2109 if (ret) { 2110 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); 2111 goto cleanup_phylink; 2112 } 2113 2114 return 0; 2115 2116 cleanup_phylink: 2117 phylink_destroy(lp->phylink); 2118 2119 cleanup_mdio: 2120 if (lp->pcs_phy) 2121 put_device(&lp->pcs_phy->dev); 2122 if (lp->mii_bus) 2123 axienet_mdio_teardown(lp); 2124 of_node_put(lp->phy_node); 2125 2126 cleanup_clk: 2127 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2128 clk_disable_unprepare(lp->axi_clk); 2129 2130 free_netdev: 2131 free_netdev(ndev); 2132 2133 return ret; 2134 } 2135 2136 static int axienet_remove(struct platform_device *pdev) 2137 { 2138 struct net_device *ndev = platform_get_drvdata(pdev); 2139 struct axienet_local *lp = netdev_priv(ndev); 2140 2141 unregister_netdev(ndev); 2142 2143 if (lp->phylink) 2144 phylink_destroy(lp->phylink); 2145 2146 if (lp->pcs_phy) 2147 put_device(&lp->pcs_phy->dev); 2148 2149 axienet_mdio_teardown(lp); 2150 2151 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2152 clk_disable_unprepare(lp->axi_clk); 2153 2154 of_node_put(lp->phy_node); 2155 lp->phy_node = NULL; 2156 2157 free_netdev(ndev); 2158 2159 return 0; 2160 } 2161 2162 static void axienet_shutdown(struct platform_device *pdev) 2163 { 2164 struct net_device *ndev = platform_get_drvdata(pdev); 2165 2166 rtnl_lock(); 2167 netif_device_detach(ndev); 2168 2169 if (netif_running(ndev)) 2170 dev_close(ndev); 2171 2172 rtnl_unlock(); 2173 } 2174 2175 static struct platform_driver axienet_driver = { 2176 .probe = axienet_probe, 2177 .remove = axienet_remove, 2178 .shutdown = axienet_shutdown, 2179 .driver = { 2180 .name = "xilinx_axienet", 2181 .of_match_table = axienet_of_match, 2182 }, 2183 }; 2184 2185 module_platform_driver(axienet_driver); 2186 2187 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); 2188 MODULE_AUTHOR("Xilinx"); 2189 MODULE_LICENSE("GPL"); 2190