// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * @lp: Pointer to the axienet_local structure
 * @coalesce_usec: Microseconds to convert into timer value
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	u32 result;
	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */

	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);

	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
					 (u64)125000000);
	if (result > 255)
		result = 255;

	return result;
}

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	/* Start updating the Rx channel control register */
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_tx > 1)
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			axienet_iow(lp, XAE_FFE_OFFSET, 1);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}

	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
		reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
		reg |= i;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_FFE_OFFSET, 0);
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		return ret;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	ret = __axienet_device_reset(lp);
	if (ret)
		return ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	ret = axienet_dma_bd_init(ndev);
	if (ret) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
		return ret;
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp: Pointer to the axienet_local structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Max number of descriptors to clean up
 * @force: Whether to clean descriptors even if not complete
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 * @budget: NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of packets handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	int i, packets = 0;
	dma_addr_t phys;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
			napi_consume_skb(cur_p->skb, budget);
			packets++;
		}

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	if (!force) {
		lp->tx_bd_ci += i;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;
	}

	return packets;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
					&size, budget);

	if (packets) {
		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
				   skb->protocol == htons(ETH_P_IP) &&
				   skb->len > 64) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		u32 cr = lp->tx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		if (napi_schedule_prep(&lp->napi_tx)) {
			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
			__napi_schedule(&lp->napi_tx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		u32 cr = lp->rx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		if (napi_schedule_prep(&lp->napi_rx)) {
			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
			__napi_schedule(&lp->napi_rx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);
	if (ret)
		return ret;

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Enable worker thread for Axi DMA error handling */
	lp->stopping = false;
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	WRITE_ONCE(lp->stopping, true);
	flush_work(&lp->dma_err_task);

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	cancel_work_sync(&lp->dma_err_task);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success, -EBUSY if the device is running, or -EINVAL if the
 * new MTU does not fit in the configured receive buffer memory.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->tx_irq, ndev);
	axienet_tx_irq(lp->rx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}

static void
axienet_ethtools_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int
axienet_ethtools_set_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, -EFAULT if device is running
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->rx_coalesce_usecs)
		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
	if (ecoalesce->tx_coalesce_usecs)
		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;

	return 0;
}

static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
	struct axienet_local *lp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(lp->phylink);
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = axienet_ethtools_get_ringparam,
	.set_ringparam = axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset = axienet_ethtools_nway_reset,
};

static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct axienet_local, pcs);
}

static void axienet_pcs_get_state(struct phylink_pcs *pcs,
				  struct phylink_link_state *state)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_get_state(pcs_phy, state);
}

static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_an_restart(pcs_phy);
}

static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	if (lp->switch_x_sgmii) {
		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
				    interface == PHY_INTERFACE_MODE_SGMII ?
				    XLNX_MII_STD_SELECT_SGMII : 0);
		if (ret < 0) {
			netdev_warn(ndev,
				    "Failed to switch PHY interface: %d\n",
				    ret);
			return ret;
		}
	}

	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
					 neg_mode);
	if (ret < 0)
		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);

	return ret;
}

static const struct phylink_pcs_ops axienet_pcs_ops = {
	.pcs_get_state = axienet_pcs_get_state,
	.pcs_config = axienet_pcs_config,
	.pcs_an_restart = axienet_pcs_an_restart,
};

static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
						  phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
	    interface == PHY_INTERFACE_MODE_SGMII)
		return &lp->pcs;

	return NULL;
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1Gbps is not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.mac_select_pcs = axienet_mac_select_pcs,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};

/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work: pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
1767 */ 1768 static void axienet_dma_err_handler(struct work_struct *work) 1769 { 1770 u32 i; 1771 u32 axienet_status; 1772 struct axidma_bd *cur_p; 1773 struct axienet_local *lp = container_of(work, struct axienet_local, 1774 dma_err_task); 1775 struct net_device *ndev = lp->ndev; 1776 1777 /* Don't bother if we are going to stop anyway */ 1778 if (READ_ONCE(lp->stopping)) 1779 return; 1780 1781 napi_disable(&lp->napi_tx); 1782 napi_disable(&lp->napi_rx); 1783 1784 axienet_setoptions(ndev, lp->options & 1785 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1786 1787 axienet_dma_stop(lp); 1788 1789 for (i = 0; i < lp->tx_bd_num; i++) { 1790 cur_p = &lp->tx_bd_v[i]; 1791 if (cur_p->cntrl) { 1792 dma_addr_t addr = desc_get_phys_addr(lp, cur_p); 1793 1794 dma_unmap_single(lp->dev, addr, 1795 (cur_p->cntrl & 1796 XAXIDMA_BD_CTRL_LENGTH_MASK), 1797 DMA_TO_DEVICE); 1798 } 1799 if (cur_p->skb) 1800 dev_kfree_skb_irq(cur_p->skb); 1801 cur_p->phys = 0; 1802 cur_p->phys_msb = 0; 1803 cur_p->cntrl = 0; 1804 cur_p->status = 0; 1805 cur_p->app0 = 0; 1806 cur_p->app1 = 0; 1807 cur_p->app2 = 0; 1808 cur_p->app3 = 0; 1809 cur_p->app4 = 0; 1810 cur_p->skb = NULL; 1811 } 1812 1813 for (i = 0; i < lp->rx_bd_num; i++) { 1814 cur_p = &lp->rx_bd_v[i]; 1815 cur_p->status = 0; 1816 cur_p->app0 = 0; 1817 cur_p->app1 = 0; 1818 cur_p->app2 = 0; 1819 cur_p->app3 = 0; 1820 cur_p->app4 = 0; 1821 } 1822 1823 lp->tx_bd_ci = 0; 1824 lp->tx_bd_tail = 0; 1825 lp->rx_bd_ci = 0; 1826 1827 axienet_dma_start(lp); 1828 1829 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 1830 axienet_status &= ~XAE_RCW1_RX_MASK; 1831 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 1832 1833 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 1834 if (axienet_status & XAE_INT_RXRJECT_MASK) 1835 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 1836 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 1837 XAE_INT_RECV_ERROR_MASK : 0); 1838 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 1839 1840 /* Sync default options with HW but leave receiver and 1841 * transmitter disabled. 1842 */ 1843 axienet_setoptions(ndev, lp->options & 1844 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1845 axienet_set_mac_address(ndev, NULL); 1846 axienet_set_multicast_list(ndev); 1847 napi_enable(&lp->napi_rx); 1848 napi_enable(&lp->napi_tx); 1849 axienet_setoptions(ndev, lp->options); 1850 } 1851 1852 /** 1853 * axienet_probe - Axi Ethernet probe function. 1854 * @pdev: Pointer to platform device structure. 1855 * 1856 * Return: 0, on success 1857 * Non-zero error value on failure. 1858 * 1859 * This is the probe routine for Axi Ethernet driver. This is called before 1860 * any other driver routines are invoked. It allocates and sets up the Ethernet 1861 * device. Parses through device tree and populates fields of 1862 * axienet_local. It registers the Ethernet device. 
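 *
 * On failure, setup is unwound in reverse order through the goto labels at
 * the end of the function (cleanup_phylink, cleanup_mdio, cleanup_clk,
 * free_netdev). A condensed sketch of that pattern (illustrative only, not
 * the full error handling):
 *
 *	ret = register_netdev(lp->ndev);
 *	if (ret)
 *		goto cleanup_phylink;
 *	return 0;
 *
 * cleanup_phylink:
 *	phylink_destroy(lp->phylink);
 * cleanup_mdio:
 *	...	(put the PCS device, tear down the MDIO bus)
 * cleanup_clk:
 *	...	(disable and unprepare the clocks)
 * free_netdev:
 *	free_netdev(ndev);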
1863 */ 1864 static int axienet_probe(struct platform_device *pdev) 1865 { 1866 int ret; 1867 struct device_node *np; 1868 struct axienet_local *lp; 1869 struct net_device *ndev; 1870 struct resource *ethres; 1871 u8 mac_addr[ETH_ALEN]; 1872 int addr_width = 32; 1873 u32 value; 1874 1875 ndev = alloc_etherdev(sizeof(*lp)); 1876 if (!ndev) 1877 return -ENOMEM; 1878 1879 platform_set_drvdata(pdev, ndev); 1880 1881 SET_NETDEV_DEV(ndev, &pdev->dev); 1882 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1883 ndev->features = NETIF_F_SG; 1884 ndev->netdev_ops = &axienet_netdev_ops; 1885 ndev->ethtool_ops = &axienet_ethtool_ops; 1886 1887 /* MTU range: 64 - 9000 */ 1888 ndev->min_mtu = 64; 1889 ndev->max_mtu = XAE_JUMBO_MTU; 1890 1891 lp = netdev_priv(ndev); 1892 lp->ndev = ndev; 1893 lp->dev = &pdev->dev; 1894 lp->options = XAE_OPTION_DEFAULTS; 1895 lp->rx_bd_num = RX_BD_NUM_DEFAULT; 1896 lp->tx_bd_num = TX_BD_NUM_DEFAULT; 1897 1898 u64_stats_init(&lp->rx_stat_sync); 1899 u64_stats_init(&lp->tx_stat_sync); 1900 1901 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); 1902 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); 1903 1904 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); 1905 if (!lp->axi_clk) { 1906 /* For backward compatibility, if named AXI clock is not present, 1907 * treat the first clock specified as the AXI clock. 1908 */ 1909 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); 1910 } 1911 if (IS_ERR(lp->axi_clk)) { 1912 ret = PTR_ERR(lp->axi_clk); 1913 goto free_netdev; 1914 } 1915 ret = clk_prepare_enable(lp->axi_clk); 1916 if (ret) { 1917 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); 1918 goto free_netdev; 1919 } 1920 1921 lp->misc_clks[0].id = "axis_clk"; 1922 lp->misc_clks[1].id = "ref_clk"; 1923 lp->misc_clks[2].id = "mgt_clk"; 1924 1925 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); 1926 if (ret) 1927 goto cleanup_clk; 1928 1929 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 1930 if (ret) 1931 goto cleanup_clk; 1932 1933 /* Map device registers */ 1934 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres); 1935 if (IS_ERR(lp->regs)) { 1936 ret = PTR_ERR(lp->regs); 1937 goto cleanup_clk; 1938 } 1939 lp->regs_start = ethres->start; 1940 1941 /* Setup checksum offload, but default to off if not specified */ 1942 lp->features = 0; 1943 1944 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); 1945 if (!ret) { 1946 switch (value) { 1947 case 1: 1948 lp->csum_offload_on_tx_path = 1949 XAE_FEATURE_PARTIAL_TX_CSUM; 1950 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; 1951 /* Can checksum TCP/UDP over IPv4. */ 1952 ndev->features |= NETIF_F_IP_CSUM; 1953 break; 1954 case 2: 1955 lp->csum_offload_on_tx_path = 1956 XAE_FEATURE_FULL_TX_CSUM; 1957 lp->features |= XAE_FEATURE_FULL_TX_CSUM; 1958 /* Can checksum TCP/UDP over IPv4.
*/ 1959 ndev->features |= NETIF_F_IP_CSUM; 1960 break; 1961 default: 1962 lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; 1963 } 1964 } 1965 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); 1966 if (!ret) { 1967 switch (value) { 1968 case 1: 1969 lp->csum_offload_on_rx_path = 1970 XAE_FEATURE_PARTIAL_RX_CSUM; 1971 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; 1972 break; 1973 case 2: 1974 lp->csum_offload_on_rx_path = 1975 XAE_FEATURE_FULL_RX_CSUM; 1976 lp->features |= XAE_FEATURE_FULL_RX_CSUM; 1977 break; 1978 default: 1979 lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; 1980 } 1981 } 1982 /* For supporting jumbo frames, the Axi Ethernet hardware must have 1983 * a larger Rx/Tx Memory. Typically, the size must be large so that 1984 * we can enable jumbo option and start supporting jumbo frames. 1985 * Here we check for memory allocated for Rx/Tx in the hardware from 1986 * the device-tree and accordingly set flags. 1987 */ 1988 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); 1989 1990 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, 1991 "xlnx,switch-x-sgmii"); 1992 1993 /* Start with the proprietary, and broken phy_type */ 1994 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); 1995 if (!ret) { 1996 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); 1997 switch (value) { 1998 case XAE_PHY_TYPE_MII: 1999 lp->phy_mode = PHY_INTERFACE_MODE_MII; 2000 break; 2001 case XAE_PHY_TYPE_GMII: 2002 lp->phy_mode = PHY_INTERFACE_MODE_GMII; 2003 break; 2004 case XAE_PHY_TYPE_RGMII_2_0: 2005 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; 2006 break; 2007 case XAE_PHY_TYPE_SGMII: 2008 lp->phy_mode = PHY_INTERFACE_MODE_SGMII; 2009 break; 2010 case XAE_PHY_TYPE_1000BASE_X: 2011 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; 2012 break; 2013 default: 2014 ret = -EINVAL; 2015 goto cleanup_clk; 2016 } 2017 } else { 2018 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); 2019 if (ret) 2020 goto cleanup_clk; 2021 } 2022 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && 2023 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { 2024 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); 2025 ret = -EINVAL; 2026 goto cleanup_clk; 2027 } 2028 2029 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 2030 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); 2031 if (np) { 2032 struct resource dmares; 2033 2034 ret = of_address_to_resource(np, 0, &dmares); 2035 if (ret) { 2036 dev_err(&pdev->dev, 2037 "unable to get DMA resource\n"); 2038 of_node_put(np); 2039 goto cleanup_clk; 2040 } 2041 lp->dma_regs = devm_ioremap_resource(&pdev->dev, 2042 &dmares); 2043 lp->rx_irq = irq_of_parse_and_map(np, 1); 2044 lp->tx_irq = irq_of_parse_and_map(np, 0); 2045 of_node_put(np); 2046 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2047 } else { 2048 /* Check for these resources directly on the Ethernet node. 
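 * In this legacy layout the DMA registers are the device's second MEM
 * resource (index 1), the TX and RX completion interrupts sit at IRQ
 * indices 0 and 1, and an optional Ethernet core interrupt may follow at
 * index 2, mirroring the lookups below.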
*/ 2049 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); 2050 lp->rx_irq = platform_get_irq(pdev, 1); 2051 lp->tx_irq = platform_get_irq(pdev, 0); 2052 lp->eth_irq = platform_get_irq_optional(pdev, 2); 2053 } 2054 if (IS_ERR(lp->dma_regs)) { 2055 dev_err(&pdev->dev, "could not map DMA regs\n"); 2056 ret = PTR_ERR(lp->dma_regs); 2057 goto cleanup_clk; 2058 } 2059 if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { 2060 dev_err(&pdev->dev, "could not determine irqs\n"); 2061 ret = -ENOMEM; 2062 goto cleanup_clk; 2063 } 2064 2065 /* Reset core now that clocks are enabled, prior to accessing MDIO */ 2066 ret = __axienet_device_reset(lp); 2067 if (ret) 2068 goto cleanup_clk; 2069 2070 /* Autodetect the need for 64-bit DMA pointers. 2071 * When the IP is configured for a bus width bigger than 32 bits, 2072 * writing the MSB registers is mandatory, even if they are all 0. 2073 * We can detect this case by writing all 1's to one such register 2074 * and see if that sticks: when the IP is configured for 32 bits 2075 * only, those registers are RES0. 2076 * Those MSB registers were introduced in IP v7.1, which we check first. 2077 */ 2078 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { 2079 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; 2080 2081 iowrite32(0x0, desc); 2082 if (ioread32(desc) == 0) { /* sanity check */ 2083 iowrite32(0xffffffff, desc); 2084 if (ioread32(desc) > 0) { 2085 lp->features |= XAE_FEATURE_DMA_64BIT; 2086 addr_width = 64; 2087 dev_info(&pdev->dev, 2088 "autodetected 64-bit DMA range\n"); 2089 } 2090 iowrite32(0x0, desc); 2091 } 2092 } 2093 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { 2094 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n"); 2095 ret = -EINVAL; 2096 goto cleanup_clk; 2097 } 2098 2099 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); 2100 if (ret) { 2101 dev_err(&pdev->dev, "No suitable DMA available\n"); 2102 goto cleanup_clk; 2103 } 2104 2105 /* Check for Ethernet core IRQ (optional) */ 2106 if (lp->eth_irq <= 0) 2107 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); 2108 2109 /* Retrieve the MAC address */ 2110 ret = of_get_mac_address(pdev->dev.of_node, mac_addr); 2111 if (!ret) { 2112 axienet_set_mac_address(ndev, mac_addr); 2113 } else { 2114 dev_warn(&pdev->dev, "could not find MAC address property: %d\n", 2115 ret); 2116 axienet_set_mac_address(ndev, NULL); 2117 } 2118 2119 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 2120 lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC; 2121 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 2122 lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC; 2123 2124 ret = axienet_mdio_setup(lp); 2125 if (ret) 2126 dev_warn(&pdev->dev, 2127 "error registering MDIO bus: %d\n", ret); 2128 2129 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || 2130 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { 2131 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); 2132 if (!np) { 2133 /* Deprecated: Always use "pcs-handle" for pcs_phy. 2134 * Falling back to "phy-handle" here is only for 2135 * backward compatibility with old device trees.
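 * New device trees are expected to provide a "pcs-handle" phandle pointing
 * at the PCS/PMA MDIO device. Either way, the resolved node is handed to
 * of_mdio_find_device() below and becomes the mdio_device driven by the
 * phylink PCS callbacks (axienet_pcs_get_state/config/an_restart above).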
2136 */ 2137 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 2138 } 2139 if (!np) { 2140 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); 2141 ret = -EINVAL; 2142 goto cleanup_mdio; 2143 } 2144 lp->pcs_phy = of_mdio_find_device(np); 2145 if (!lp->pcs_phy) { 2146 ret = -EPROBE_DEFER; 2147 of_node_put(np); 2148 goto cleanup_mdio; 2149 } 2150 of_node_put(np); 2151 lp->pcs.ops = &axienet_pcs_ops; 2152 lp->pcs.neg_mode = true; 2153 lp->pcs.poll = true; 2154 } 2155 2156 lp->phylink_config.dev = &ndev->dev; 2157 lp->phylink_config.type = PHYLINK_NETDEV; 2158 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | 2159 MAC_10FD | MAC_100FD | MAC_1000FD; 2160 2161 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); 2162 if (lp->switch_x_sgmii) { 2163 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2164 lp->phylink_config.supported_interfaces); 2165 __set_bit(PHY_INTERFACE_MODE_SGMII, 2166 lp->phylink_config.supported_interfaces); 2167 } 2168 2169 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, 2170 lp->phy_mode, 2171 &axienet_phylink_ops); 2172 if (IS_ERR(lp->phylink)) { 2173 ret = PTR_ERR(lp->phylink); 2174 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); 2175 goto cleanup_mdio; 2176 } 2177 2178 ret = register_netdev(lp->ndev); 2179 if (ret) { 2180 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); 2181 goto cleanup_phylink; 2182 } 2183 2184 return 0; 2185 2186 cleanup_phylink: 2187 phylink_destroy(lp->phylink); 2188 2189 cleanup_mdio: 2190 if (lp->pcs_phy) 2191 put_device(&lp->pcs_phy->dev); 2192 if (lp->mii_bus) 2193 axienet_mdio_teardown(lp); 2194 cleanup_clk: 2195 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2196 clk_disable_unprepare(lp->axi_clk); 2197 2198 free_netdev: 2199 free_netdev(ndev); 2200 2201 return ret; 2202 } 2203 2204 static int axienet_remove(struct platform_device *pdev) 2205 { 2206 struct net_device *ndev = platform_get_drvdata(pdev); 2207 struct axienet_local *lp = netdev_priv(ndev); 2208 2209 unregister_netdev(ndev); 2210 2211 if (lp->phylink) 2212 phylink_destroy(lp->phylink); 2213 2214 if (lp->pcs_phy) 2215 put_device(&lp->pcs_phy->dev); 2216 2217 axienet_mdio_teardown(lp); 2218 2219 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2220 clk_disable_unprepare(lp->axi_clk); 2221 2222 free_netdev(ndev); 2223 2224 return 0; 2225 } 2226 2227 static void axienet_shutdown(struct platform_device *pdev) 2228 { 2229 struct net_device *ndev = platform_get_drvdata(pdev); 2230 2231 rtnl_lock(); 2232 netif_device_detach(ndev); 2233 2234 if (netif_running(ndev)) 2235 dev_close(ndev); 2236 2237 rtnl_unlock(); 2238 } 2239 2240 static int axienet_suspend(struct device *dev) 2241 { 2242 struct net_device *ndev = dev_get_drvdata(dev); 2243 2244 if (!netif_running(ndev)) 2245 return 0; 2246 2247 netif_device_detach(ndev); 2248 2249 rtnl_lock(); 2250 axienet_stop(ndev); 2251 rtnl_unlock(); 2252 2253 return 0; 2254 } 2255 2256 static int axienet_resume(struct device *dev) 2257 { 2258 struct net_device *ndev = dev_get_drvdata(dev); 2259 2260 if (!netif_running(ndev)) 2261 return 0; 2262 2263 rtnl_lock(); 2264 axienet_open(ndev); 2265 rtnl_unlock(); 2266 2267 netif_device_attach(ndev); 2268 2269 return 0; 2270 } 2271 2272 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, 2273 axienet_suspend, axienet_resume); 2274 2275 static struct platform_driver axienet_driver = { 2276 .probe = axienet_probe, 2277 .remove = axienet_remove, 2278 .shutdown = 
axienet_shutdown, 2279 .driver = { 2280 .name = "xilinx_axienet", 2281 .pm = &axienet_pm_ops, 2282 .of_match_table = axienet_of_match, 2283 }, 2284 }; 2285 2286 module_platform_driver(axienet_driver); 2287 2288 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); 2289 MODULE_AUTHOR("Xilinx"); 2290 MODULE_LICENSE("GPL"); 2291