// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		64
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	iowrite32(value, lp->dma_regs + reg);
}

static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
				 dma_addr_t addr)
{
	axienet_dma_out32(lp, reg, lower_32_bits(addr));

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(ndev->dev.parent,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(ndev->dev.parent, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(ndev->dev.parent,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(ndev->dev.parent, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up unicast MAC address filter set its mac address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 timeout;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
				XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
				   __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	ret = __axienet_device_reset(lp);
	if (ret)
		return ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	ret = axienet_dma_bd_init(ndev);
	if (ret) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
		return ret;
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @ndev: Pointer to the net_device structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Number of descriptors to clean up, can be -1 if unknown.
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of descriptors handled.
 */
static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
				 int nr_bds, u32 *sizep)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	int max_bds = nr_bds;
	unsigned int status;
	dma_addr_t phys;
	int i;

	if (max_bds == -1)
		max_bds = lp->tx_bd_num;

	for (i = 0; i < max_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If no number is given, clean up *all* descriptors that have
		 * been completed by the MAC.
		 */
		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(ndev->dev.parent, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
			dev_consume_skb_irq(cur_p->skb);

		cur_p->cntrl = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;
		cur_p->skb = NULL;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	return i;
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev: Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	u32 packets = 0;
	u32 size = 0;

	packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);

	lp->tx_bd_ci += packets;
	if (lp->tx_bd_ci >= lp->tx_bd_num)
		lp->tx_bd_ci -= lp->tx_bd_num;

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	/* Matches barrier in axienet_start_xmit */
	smp_mb();

	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	u32 orig_tail_ptr = lp->tx_bd_tail;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);

		/* Matches barrier in axienet_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (axienet_check_tx_bd_space(lp, num_frag))
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(ndev->dev.parent, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(ndev->dev.parent,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
					      NULL);
			lp->tx_bd_tail = orig_tail_ptr;

			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	if (++lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	return NETDEV_TX_OK;
}

/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev: Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb = cur_p->skb;
		cur_p->skb = NULL;
		length = cur_p->app4 & 0x0000FFFF;

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		/*skb_checksum_none_assert(skb);*/
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			return;

		phys = dma_map_single(ndev->dev.parent, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			return;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		schedule_work(&lp->dma_err_task);
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		schedule_work(&lp->dma_err_task);
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);
	if (ret)
		return ret;

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Enable worker thread for Axi DMA error handling */
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr, sr;
	int count;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);

	cancel_work_sync(&lp->dma_err_task);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success, or a negative error value on failure.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->tx_irq, ndev);
	axienet_tx_irq(lp->rx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}

static void axienet_ethtools_get_ringparam(struct net_device *ndev,
					   struct ethtool_ringparam *ering)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int axienet_ethtools_set_ringparam(struct net_device *ndev,
					  struct ethtool_ringparam *ering)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, or a negative error value otherwise
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
	struct axienet_local *lp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(lp->phylink);
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = axienet_ethtools_get_ringparam,
	.set_ringparam = axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset = axienet_ethtools_nway_reset,
};

static void axienet_validate(struct phylink_config *config,
			     unsigned long *supported,
			     struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Only support the mode we are configured for */
	switch (state->interface) {
	case PHY_INTERFACE_MODE_NA:
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_SGMII:
		if (lp->switch_x_sgmii)
			break;
		fallthrough;
	default:
		if (state->interface != lp->phy_mode) {
			netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
				    phy_modes(state->interface),
				    phy_modes(lp->phy_mode));
			linkmode_zero(supported);
			return;
		}
	}

	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);

	phylink_set(mask, Asym_Pause);
	phylink_set(mask, Pause);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_NA:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 1000baseT_Full);
		if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
			break;
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 10baseT_Full);
		fallthrough;
	default:
		break;
	}

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);
}

static void axienet_mac_pcs_get_state(struct phylink_config *config,
				      struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_mii_c22_pcs_get_state(lp->pcs_phy, state);
		break;
	default:
		break;
	}
}

static void axienet_mac_an_restart(struct phylink_config *config)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
}

static int axienet_mac_prepare(struct phylink_config *config, unsigned int mode,
			       phy_interface_t iface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	switch (iface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		if (!lp->switch_x_sgmii)
			return 0;

		ret = mdiobus_write(lp->pcs_phy->bus,
				    lp->pcs_phy->addr,
				    XLNX_MII_STD_SELECT_REG,
				    iface == PHY_INTERFACE_MODE_SGMII ?
					XLNX_MII_STD_SELECT_SGMII : 0);
		if (ret < 0)
			netdev_warn(ndev, "Failed to switch PHY interface: %d\n",
				    ret);
		return ret;
	default:
		return 0;
	}
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode,
						 state->interface,
						 state->advertising);
		if (ret < 0)
			netdev_warn(ndev, "Failed to configure PCS: %d\n",
				    ret);
		break;

	default:
		break;
	}
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1Gbps is not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.validate = axienet_validate,
	.mac_pcs_get_state = axienet_mac_pcs_get_state,
	.mac_an_restart = axienet_mac_an_restart,
	.mac_prepare = axienet_mac_prepare,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};

/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work: pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	u32 axienet_status;
	u32 cr, i;
	struct axienet_local *lp = container_of(work, struct axienet_local,
						dma_err_task);
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);

	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(ndev->dev.parent, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev: Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	struct resource *ethres;
	u8 mac_addr[ETH_ALEN];
	int addr_width = 32;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
	if (!lp->axi_clk) {
		/* For backward compatibility, if named AXI clock is not present,
		 * treat the first clock specified as the AXI clock.
		 */
		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
	}
	if (IS_ERR(lp->axi_clk)) {
		ret = PTR_ERR(lp->axi_clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
		goto free_netdev;
	}

	lp->misc_clks[0].id = "axis_clk";
	lp->misc_clks[1].id = "ref_clk";
	lp->misc_clks[2].id = "mgt_clk";

	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	/* Map device registers */
	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
	if (IS_ERR(lp->regs)) {
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be large so that
	 * we can enable jumbo option and start supporting jumbo frames.
	 * Here we check for memory allocated for Rx/Tx in the hardware from
	 * the device-tree and accordingly set flags.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
						   "xlnx,switch-x-sgmii");

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto cleanup_clk;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto cleanup_clk;
	}
	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (np) {
		struct resource dmares;

		ret = of_address_to_resource(np, 0, &dmares);
		if (ret) {
			dev_err(&pdev->dev,
				"unable to get DMA resource\n");
			of_node_put(np);
			goto cleanup_clk;
		}
		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
						     &dmares);
		lp->rx_irq = irq_of_parse_and_map(np, 1);
		lp->tx_irq = irq_of_parse_and_map(np, 0);
		of_node_put(np);
		lp->eth_irq = platform_get_irq_optional(pdev, 0);
	} else {
		/* Check for these resources directly on the Ethernet node. */
		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
		lp->rx_irq = platform_get_irq(pdev, 1);
		lp->tx_irq = platform_get_irq(pdev, 0);
		lp->eth_irq = platform_get_irq_optional(pdev, 2);
	}
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto cleanup_clk;
	}
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto cleanup_clk;
	}

	/* Autodetect the need for 64-bit DMA pointers.
	 * When the IP is configured for a bus width bigger than 32 bits,
	 * writing the MSB registers is mandatory, even if they are all 0.
	 * We can detect this case by writing all 1's to one such register
	 * and see if that sticks: when the IP is configured for 32 bits
	 * only, those registers are RES0.
	 * Those MSB registers were introduced in IP v7.1, which we check first.
	 */
	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

		iowrite32(0x0, desc);
		if (ioread32(desc) == 0) {	/* sanity check */
			iowrite32(0xffffffff, desc);
			if (ioread32(desc) > 0) {
				lp->features |= XAE_FEATURE_DMA_64BIT;
				addr_width = 64;
				dev_info(&pdev->dev,
					 "autodetected 64-bit DMA range\n");
			}
			iowrite32(0x0, desc);
		}
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
	if (ret) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto cleanup_clk;
	}

	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	/* Retrieve the MAC address */
	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
	if (!ret) {
		axienet_set_mac_address(ndev, mac_addr);
	} else {
		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
			 ret);
		axienet_set_mac_address(ndev, NULL);
	}

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (lp->phy_node) {
		ret = axienet_mdio_setup(lp);
		if (ret)
			dev_warn(&pdev->dev,
				 "error registering MDIO bus: %d\n", ret);
	}
	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		if (!lp->phy_node) {
			dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(lp->phy_node);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			goto cleanup_mdio;
		}
		lp->phylink_config.pcs_poll = true;
	}

	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;

cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		axienet_mdio_teardown(lp);
	of_node_put(lp->phy_node);

cleanup_clk:
	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}

static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	free_netdev(ndev);

	return 0;
}

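/**
 * axienet_shutdown - Platform shutdown hook.
 * @pdev: Pointer to platform device structure.
 *
 * Detaches the interface from the stack and, if it was running, closes it so
 * that the hardware is quiesced before the system shuts down.
 */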
static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		.name = "xilinx_axienet",
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");