1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Xilinx Axi Ethernet device driver 4 * 5 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi 6 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> 7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> 9 * Copyright (c) 2010 - 2011 PetaLogix 10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies 11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. 12 * 13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 14 * and Spartan6. 15 * 16 * TODO: 17 * - Add Axi Fifo support. 18 * - Factor out Axi DMA code into separate driver. 19 * - Test and fix basic multicast filtering. 20 * - Add support for extended multicast filtering. 21 * - Test basic VLAN support. 22 * - Add support for extended VLAN support. 23 */ 24 25 #include <linux/clk.h> 26 #include <linux/delay.h> 27 #include <linux/etherdevice.h> 28 #include <linux/module.h> 29 #include <linux/netdevice.h> 30 #include <linux/of_mdio.h> 31 #include <linux/of_net.h> 32 #include <linux/of_platform.h> 33 #include <linux/of_irq.h> 34 #include <linux/of_address.h> 35 #include <linux/skbuff.h> 36 #include <linux/math64.h> 37 #include <linux/phy.h> 38 #include <linux/mii.h> 39 #include <linux/ethtool.h> 40 41 #include "xilinx_axienet.h" 42 43 /* Descriptors defines for Tx and Rx DMA */ 44 #define TX_BD_NUM_DEFAULT 128 45 #define RX_BD_NUM_DEFAULT 1024 46 #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1) 47 #define TX_BD_NUM_MAX 4096 48 #define RX_BD_NUM_MAX 4096 49 50 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */ 51 #define DRIVER_NAME "xaxienet" 52 #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver" 53 #define DRIVER_VERSION "1.00a" 54 55 #define AXIENET_REGS_N 40 56 57 /* Match table for of_platform binding */ 58 static const struct of_device_id axienet_of_match[] = { 59 { .compatible = "xlnx,axi-ethernet-1.00.a", }, 60 { .compatible = "xlnx,axi-ethernet-1.01.a", }, 61 { .compatible = "xlnx,axi-ethernet-2.01.a", }, 62 {}, 63 }; 64 65 MODULE_DEVICE_TABLE(of, axienet_of_match); 66 67 /* Option table for setting up Axi Ethernet hardware options */ 68 static struct axienet_option axienet_options[] = { 69 /* Turn on jumbo packet support for both Rx and Tx */ 70 { 71 .opt = XAE_OPTION_JUMBO, 72 .reg = XAE_TC_OFFSET, 73 .m_or = XAE_TC_JUM_MASK, 74 }, { 75 .opt = XAE_OPTION_JUMBO, 76 .reg = XAE_RCW1_OFFSET, 77 .m_or = XAE_RCW1_JUM_MASK, 78 }, { /* Turn on VLAN packet support for both Rx and Tx */ 79 .opt = XAE_OPTION_VLAN, 80 .reg = XAE_TC_OFFSET, 81 .m_or = XAE_TC_VLAN_MASK, 82 }, { 83 .opt = XAE_OPTION_VLAN, 84 .reg = XAE_RCW1_OFFSET, 85 .m_or = XAE_RCW1_VLAN_MASK, 86 }, { /* Turn on FCS stripping on receive packets */ 87 .opt = XAE_OPTION_FCS_STRIP, 88 .reg = XAE_RCW1_OFFSET, 89 .m_or = XAE_RCW1_FCS_MASK, 90 }, { /* Turn on FCS insertion on transmit packets */ 91 .opt = XAE_OPTION_FCS_INSERT, 92 .reg = XAE_TC_OFFSET, 93 .m_or = XAE_TC_FCS_MASK, 94 }, { /* Turn off length/type field checking on receive packets */ 95 .opt = XAE_OPTION_LENTYPE_ERR, 96 .reg = XAE_RCW1_OFFSET, 97 .m_or = XAE_RCW1_LT_DIS_MASK, 98 }, { /* Turn on Rx flow control */ 99 .opt = XAE_OPTION_FLOW_CONTROL, 100 .reg = XAE_FCC_OFFSET, 101 .m_or = XAE_FCC_FCRX_MASK, 102 }, { /* Turn on Tx flow control */ 103 .opt = XAE_OPTION_FLOW_CONTROL, 104 .reg = XAE_FCC_OFFSET, 105 .m_or = XAE_FCC_FCTX_MASK, 106 }, { /* Turn on promiscuous frame filtering */ 107 .opt = XAE_OPTION_PROMISC, 108 .reg = XAE_FMI_OFFSET, 109 .m_or = XAE_FMI_PM_MASK, 110 }, { /* Enable transmitter */ 111 .opt = XAE_OPTION_TXEN, 112 .reg = XAE_TC_OFFSET, 113 .m_or = XAE_TC_TX_MASK, 114 }, { /* Enable receiver */ 115 .opt = XAE_OPTION_RXEN, 116 .reg = XAE_RCW1_OFFSET, 117 .m_or = XAE_RCW1_RX_MASK, 118 }, 119 {} 120 }; 121 122 /** 123 * axienet_dma_in32 - Memory mapped Axi DMA register read 124 * @lp: Pointer to axienet local structure 125 * @reg: Address offset from the base address of the Axi DMA core 126 * 127 * Return: The contents of the Axi DMA register 128 * 129 * This function returns the contents of the corresponding Axi DMA register. 130 */ 131 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) 132 { 133 return ioread32(lp->dma_regs + reg); 134 } 135 136 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr, 137 struct axidma_bd *desc) 138 { 139 desc->phys = lower_32_bits(addr); 140 if (lp->features & XAE_FEATURE_DMA_64BIT) 141 desc->phys_msb = upper_32_bits(addr); 142 } 143 144 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp, 145 struct axidma_bd *desc) 146 { 147 dma_addr_t ret = desc->phys; 148 149 if (lp->features & XAE_FEATURE_DMA_64BIT) 150 ret |= ((dma_addr_t)desc->phys_msb << 16) << 16; 151 152 return ret; 153 } 154 155 /** 156 * axienet_dma_bd_release - Release buffer descriptor rings 157 * @ndev: Pointer to the net_device structure 158 * 159 * This function is used to release the descriptors allocated in 160 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet 161 * driver stop api is called. 162 */ 163 static void axienet_dma_bd_release(struct net_device *ndev) 164 { 165 int i; 166 struct axienet_local *lp = netdev_priv(ndev); 167 168 /* If we end up here, tx_bd_v must have been DMA allocated. */ 169 dma_free_coherent(lp->dev, 170 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, 171 lp->tx_bd_v, 172 lp->tx_bd_p); 173 174 if (!lp->rx_bd_v) 175 return; 176 177 for (i = 0; i < lp->rx_bd_num; i++) { 178 dma_addr_t phys; 179 180 /* A NULL skb means this descriptor has not been initialised 181 * at all. 182 */ 183 if (!lp->rx_bd_v[i].skb) 184 break; 185 186 dev_kfree_skb(lp->rx_bd_v[i].skb); 187 188 /* For each descriptor, we programmed cntrl with the (non-zero) 189 * descriptor size, after it had been successfully allocated. 190 * So a non-zero value in there means we need to unmap it. 191 */ 192 if (lp->rx_bd_v[i].cntrl) { 193 phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]); 194 dma_unmap_single(lp->dev, phys, 195 lp->max_frm_size, DMA_FROM_DEVICE); 196 } 197 } 198 199 dma_free_coherent(lp->dev, 200 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, 201 lp->rx_bd_v, 202 lp->rx_bd_p); 203 } 204 205 /** 206 * axienet_usec_to_timer - Calculate IRQ delay timer value 207 * @lp: Pointer to the axienet_local structure 208 * @coalesce_usec: Microseconds to convert into timer value 209 */ 210 static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec) 211 { 212 u32 result; 213 u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */ 214 215 if (lp->axi_clk) 216 clk_rate = clk_get_rate(lp->axi_clk); 217 218 /* 1 Timeout Interval = 125 * (clock period of SG clock) */ 219 result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate, 220 (u64)125000000); 221 if (result > 255) 222 result = 255; 223 224 return result; 225 } 226 227 /** 228 * axienet_dma_start - Set up DMA registers and start DMA operation 229 * @lp: Pointer to the axienet_local structure 230 */ 231 static void axienet_dma_start(struct axienet_local *lp) 232 { 233 /* Start updating the Rx channel control register */ 234 lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) | 235 XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; 236 /* Only set interrupt delay timer if not generating an interrupt on 237 * the first RX packet. Otherwise leave at 0 to disable delay interrupt. 238 */ 239 if (lp->coalesce_count_rx > 1) 240 lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx) 241 << XAXIDMA_DELAY_SHIFT) | 242 XAXIDMA_IRQ_DELAY_MASK; 243 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); 244 245 /* Start updating the Tx channel control register */ 246 lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) | 247 XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; 248 /* Only set interrupt delay timer if not generating an interrupt on 249 * the first TX packet. Otherwise leave at 0 to disable delay interrupt. 250 */ 251 if (lp->coalesce_count_tx > 1) 252 lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx) 253 << XAXIDMA_DELAY_SHIFT) | 254 XAXIDMA_IRQ_DELAY_MASK; 255 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); 256 257 /* Populate the tail pointer and bring the Rx Axi DMA engine out of 258 * halted state. This will make the Rx side ready for reception. 259 */ 260 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); 261 lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; 262 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); 263 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + 264 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); 265 266 /* Write to the RS (Run-stop) bit in the Tx channel control register. 267 * Tx channel is now ready to run. But only after we write to the 268 * tail pointer register that the Tx channel will start transmitting. 269 */ 270 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); 271 lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; 272 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); 273 } 274 275 /** 276 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA 277 * @ndev: Pointer to the net_device structure 278 * 279 * Return: 0, on success -ENOMEM, on failure 280 * 281 * This function is called to initialize the Rx and Tx DMA descriptor 282 * rings. This initializes the descriptors with required default values 283 * and is called when Axi Ethernet driver reset is called. 284 */ 285 static int axienet_dma_bd_init(struct net_device *ndev) 286 { 287 int i; 288 struct sk_buff *skb; 289 struct axienet_local *lp = netdev_priv(ndev); 290 291 /* Reset the indexes which are used for accessing the BDs */ 292 lp->tx_bd_ci = 0; 293 lp->tx_bd_tail = 0; 294 lp->rx_bd_ci = 0; 295 296 /* Allocate the Tx and Rx buffer descriptors. */ 297 lp->tx_bd_v = dma_alloc_coherent(lp->dev, 298 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, 299 &lp->tx_bd_p, GFP_KERNEL); 300 if (!lp->tx_bd_v) 301 return -ENOMEM; 302 303 lp->rx_bd_v = dma_alloc_coherent(lp->dev, 304 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, 305 &lp->rx_bd_p, GFP_KERNEL); 306 if (!lp->rx_bd_v) 307 goto out; 308 309 for (i = 0; i < lp->tx_bd_num; i++) { 310 dma_addr_t addr = lp->tx_bd_p + 311 sizeof(*lp->tx_bd_v) * 312 ((i + 1) % lp->tx_bd_num); 313 314 lp->tx_bd_v[i].next = lower_32_bits(addr); 315 if (lp->features & XAE_FEATURE_DMA_64BIT) 316 lp->tx_bd_v[i].next_msb = upper_32_bits(addr); 317 } 318 319 for (i = 0; i < lp->rx_bd_num; i++) { 320 dma_addr_t addr; 321 322 addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * 323 ((i + 1) % lp->rx_bd_num); 324 lp->rx_bd_v[i].next = lower_32_bits(addr); 325 if (lp->features & XAE_FEATURE_DMA_64BIT) 326 lp->rx_bd_v[i].next_msb = upper_32_bits(addr); 327 328 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); 329 if (!skb) 330 goto out; 331 332 lp->rx_bd_v[i].skb = skb; 333 addr = dma_map_single(lp->dev, skb->data, 334 lp->max_frm_size, DMA_FROM_DEVICE); 335 if (dma_mapping_error(lp->dev, addr)) { 336 netdev_err(ndev, "DMA mapping error\n"); 337 goto out; 338 } 339 desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]); 340 341 lp->rx_bd_v[i].cntrl = lp->max_frm_size; 342 } 343 344 axienet_dma_start(lp); 345 346 return 0; 347 out: 348 axienet_dma_bd_release(ndev); 349 return -ENOMEM; 350 } 351 352 /** 353 * axienet_set_mac_address - Write the MAC address 354 * @ndev: Pointer to the net_device structure 355 * @address: 6 byte Address to be written as MAC address 356 * 357 * This function is called to initialize the MAC address of the Axi Ethernet 358 * core. It writes to the UAW0 and UAW1 registers of the core. 359 */ 360 static void axienet_set_mac_address(struct net_device *ndev, 361 const void *address) 362 { 363 struct axienet_local *lp = netdev_priv(ndev); 364 365 if (address) 366 eth_hw_addr_set(ndev, address); 367 if (!is_valid_ether_addr(ndev->dev_addr)) 368 eth_hw_addr_random(ndev); 369 370 /* Set up unicast MAC address filter set its mac address */ 371 axienet_iow(lp, XAE_UAW0_OFFSET, 372 (ndev->dev_addr[0]) | 373 (ndev->dev_addr[1] << 8) | 374 (ndev->dev_addr[2] << 16) | 375 (ndev->dev_addr[3] << 24)); 376 axienet_iow(lp, XAE_UAW1_OFFSET, 377 (((axienet_ior(lp, XAE_UAW1_OFFSET)) & 378 ~XAE_UAW1_UNICASTADDR_MASK) | 379 (ndev->dev_addr[4] | 380 (ndev->dev_addr[5] << 8)))); 381 } 382 383 /** 384 * netdev_set_mac_address - Write the MAC address (from outside the driver) 385 * @ndev: Pointer to the net_device structure 386 * @p: 6 byte Address to be written as MAC address 387 * 388 * Return: 0 for all conditions. Presently, there is no failure case. 389 * 390 * This function is called to initialize the MAC address of the Axi Ethernet 391 * core. It calls the core specific axienet_set_mac_address. This is the 392 * function that goes into net_device_ops structure entry ndo_set_mac_address. 393 */ 394 static int netdev_set_mac_address(struct net_device *ndev, void *p) 395 { 396 struct sockaddr *addr = p; 397 axienet_set_mac_address(ndev, addr->sa_data); 398 return 0; 399 } 400 401 /** 402 * axienet_set_multicast_list - Prepare the multicast table 403 * @ndev: Pointer to the net_device structure 404 * 405 * This function is called to initialize the multicast table during 406 * initialization. The Axi Ethernet basic multicast support has a four-entry 407 * multicast table which is initialized here. Additionally this function 408 * goes into the net_device_ops structure entry ndo_set_multicast_list. This 409 * means whenever the multicast table entries need to be updated this 410 * function gets called. 411 */ 412 static void axienet_set_multicast_list(struct net_device *ndev) 413 { 414 int i; 415 u32 reg, af0reg, af1reg; 416 struct axienet_local *lp = netdev_priv(ndev); 417 418 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) || 419 netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) { 420 /* We must make the kernel realize we had to move into 421 * promiscuous mode. If it was a promiscuous mode request 422 * the flag is already set. If not we set it. 423 */ 424 ndev->flags |= IFF_PROMISC; 425 reg = axienet_ior(lp, XAE_FMI_OFFSET); 426 reg |= XAE_FMI_PM_MASK; 427 axienet_iow(lp, XAE_FMI_OFFSET, reg); 428 dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); 429 } else if (!netdev_mc_empty(ndev)) { 430 struct netdev_hw_addr *ha; 431 432 i = 0; 433 netdev_for_each_mc_addr(ha, ndev) { 434 if (i >= XAE_MULTICAST_CAM_TABLE_NUM) 435 break; 436 437 af0reg = (ha->addr[0]); 438 af0reg |= (ha->addr[1] << 8); 439 af0reg |= (ha->addr[2] << 16); 440 af0reg |= (ha->addr[3] << 24); 441 442 af1reg = (ha->addr[4]); 443 af1reg |= (ha->addr[5] << 8); 444 445 reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; 446 reg |= i; 447 448 axienet_iow(lp, XAE_FMI_OFFSET, reg); 449 axienet_iow(lp, XAE_AF0_OFFSET, af0reg); 450 axienet_iow(lp, XAE_AF1_OFFSET, af1reg); 451 i++; 452 } 453 } else { 454 reg = axienet_ior(lp, XAE_FMI_OFFSET); 455 reg &= ~XAE_FMI_PM_MASK; 456 457 axienet_iow(lp, XAE_FMI_OFFSET, reg); 458 459 for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) { 460 reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; 461 reg |= i; 462 463 axienet_iow(lp, XAE_FMI_OFFSET, reg); 464 axienet_iow(lp, XAE_AF0_OFFSET, 0); 465 axienet_iow(lp, XAE_AF1_OFFSET, 0); 466 } 467 468 dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); 469 } 470 } 471 472 /** 473 * axienet_setoptions - Set an Axi Ethernet option 474 * @ndev: Pointer to the net_device structure 475 * @options: Option to be enabled/disabled 476 * 477 * The Axi Ethernet core has multiple features which can be selectively turned 478 * on or off. The typical options could be jumbo frame option, basic VLAN 479 * option, promiscuous mode option etc. This function is used to set or clear 480 * these options in the Axi Ethernet hardware. This is done through 481 * axienet_option structure . 482 */ 483 static void axienet_setoptions(struct net_device *ndev, u32 options) 484 { 485 int reg; 486 struct axienet_local *lp = netdev_priv(ndev); 487 struct axienet_option *tp = &axienet_options[0]; 488 489 while (tp->opt) { 490 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or)); 491 if (options & tp->opt) 492 reg |= tp->m_or; 493 axienet_iow(lp, tp->reg, reg); 494 tp++; 495 } 496 497 lp->options |= options; 498 } 499 500 static int __axienet_device_reset(struct axienet_local *lp) 501 { 502 u32 value; 503 int ret; 504 505 /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset 506 * process of Axi DMA takes a while to complete as all pending 507 * commands/transfers will be flushed or completed during this 508 * reset process. 509 * Note that even though both TX and RX have their own reset register, 510 * they both reset the entire DMA core, so only one needs to be used. 511 */ 512 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); 513 ret = read_poll_timeout(axienet_dma_in32, value, 514 !(value & XAXIDMA_CR_RESET_MASK), 515 DELAY_OF_ONE_MILLISEC, 50000, false, lp, 516 XAXIDMA_TX_CR_OFFSET); 517 if (ret) { 518 dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); 519 return ret; 520 } 521 522 /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ 523 ret = read_poll_timeout(axienet_ior, value, 524 value & XAE_INT_PHYRSTCMPLT_MASK, 525 DELAY_OF_ONE_MILLISEC, 50000, false, lp, 526 XAE_IS_OFFSET); 527 if (ret) { 528 dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); 529 return ret; 530 } 531 532 return 0; 533 } 534 535 /** 536 * axienet_dma_stop - Stop DMA operation 537 * @lp: Pointer to the axienet_local structure 538 */ 539 static void axienet_dma_stop(struct axienet_local *lp) 540 { 541 int count; 542 u32 cr, sr; 543 544 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 545 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); 546 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 547 synchronize_irq(lp->rx_irq); 548 549 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 550 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); 551 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 552 synchronize_irq(lp->tx_irq); 553 554 /* Give DMAs a chance to halt gracefully */ 555 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 556 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { 557 msleep(20); 558 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 559 } 560 561 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 562 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { 563 msleep(20); 564 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 565 } 566 567 /* Do a reset to ensure DMA is really stopped */ 568 axienet_lock_mii(lp); 569 __axienet_device_reset(lp); 570 axienet_unlock_mii(lp); 571 } 572 573 /** 574 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware. 575 * @ndev: Pointer to the net_device structure 576 * 577 * This function is called to reset and initialize the Axi Ethernet core. This 578 * is typically called during initialization. It does a reset of the Axi DMA 579 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines 580 * are connected to Axi Ethernet reset lines, this in turn resets the Axi 581 * Ethernet core. No separate hardware reset is done for the Axi Ethernet 582 * core. 583 * Returns 0 on success or a negative error number otherwise. 584 */ 585 static int axienet_device_reset(struct net_device *ndev) 586 { 587 u32 axienet_status; 588 struct axienet_local *lp = netdev_priv(ndev); 589 int ret; 590 591 ret = __axienet_device_reset(lp); 592 if (ret) 593 return ret; 594 595 lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; 596 lp->options |= XAE_OPTION_VLAN; 597 lp->options &= (~XAE_OPTION_JUMBO); 598 599 if ((ndev->mtu > XAE_MTU) && 600 (ndev->mtu <= XAE_JUMBO_MTU)) { 601 lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN + 602 XAE_TRL_SIZE; 603 604 if (lp->max_frm_size <= lp->rxmem) 605 lp->options |= XAE_OPTION_JUMBO; 606 } 607 608 ret = axienet_dma_bd_init(ndev); 609 if (ret) { 610 netdev_err(ndev, "%s: descriptor allocation failed\n", 611 __func__); 612 return ret; 613 } 614 615 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 616 axienet_status &= ~XAE_RCW1_RX_MASK; 617 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 618 619 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 620 if (axienet_status & XAE_INT_RXRJECT_MASK) 621 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 622 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 623 XAE_INT_RECV_ERROR_MASK : 0); 624 625 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 626 627 /* Sync default options with HW but leave receiver and 628 * transmitter disabled. 629 */ 630 axienet_setoptions(ndev, lp->options & 631 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 632 axienet_set_mac_address(ndev, NULL); 633 axienet_set_multicast_list(ndev); 634 axienet_setoptions(ndev, lp->options); 635 636 netif_trans_update(ndev); 637 638 return 0; 639 } 640 641 /** 642 * axienet_free_tx_chain - Clean up a series of linked TX descriptors. 643 * @lp: Pointer to the axienet_local structure 644 * @first_bd: Index of first descriptor to clean up 645 * @nr_bds: Max number of descriptors to clean up 646 * @force: Whether to clean descriptors even if not complete 647 * @sizep: Pointer to a u32 filled with the total sum of all bytes 648 * in all cleaned-up descriptors. Ignored if NULL. 649 * @budget: NAPI budget (use 0 when not called from NAPI poll) 650 * 651 * Would either be called after a successful transmit operation, or after 652 * there was an error when setting up the chain. 653 * Returns the number of descriptors handled. 654 */ 655 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, 656 int nr_bds, bool force, u32 *sizep, int budget) 657 { 658 struct axidma_bd *cur_p; 659 unsigned int status; 660 dma_addr_t phys; 661 int i; 662 663 for (i = 0; i < nr_bds; i++) { 664 cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; 665 status = cur_p->status; 666 667 /* If force is not specified, clean up only descriptors 668 * that have been completed by the MAC. 669 */ 670 if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) 671 break; 672 673 /* Ensure we see complete descriptor update */ 674 dma_rmb(); 675 phys = desc_get_phys_addr(lp, cur_p); 676 dma_unmap_single(lp->dev, phys, 677 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), 678 DMA_TO_DEVICE); 679 680 if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) 681 napi_consume_skb(cur_p->skb, budget); 682 683 cur_p->app0 = 0; 684 cur_p->app1 = 0; 685 cur_p->app2 = 0; 686 cur_p->app4 = 0; 687 cur_p->skb = NULL; 688 /* ensure our transmit path and device don't prematurely see status cleared */ 689 wmb(); 690 cur_p->cntrl = 0; 691 cur_p->status = 0; 692 693 if (sizep) 694 *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 695 } 696 697 return i; 698 } 699 700 /** 701 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy 702 * @lp: Pointer to the axienet_local structure 703 * @num_frag: The number of BDs to check for 704 * 705 * Return: 0, on success 706 * NETDEV_TX_BUSY, if any of the descriptors are not free 707 * 708 * This function is invoked before BDs are allocated and transmission starts. 709 * This function returns 0 if a BD or group of BDs can be allocated for 710 * transmission. If the BD or any of the BDs are not free the function 711 * returns a busy status. 712 */ 713 static inline int axienet_check_tx_bd_space(struct axienet_local *lp, 714 int num_frag) 715 { 716 struct axidma_bd *cur_p; 717 718 /* Ensure we see all descriptor updates from device or TX polling */ 719 rmb(); 720 cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) % 721 lp->tx_bd_num]; 722 if (cur_p->cntrl) 723 return NETDEV_TX_BUSY; 724 return 0; 725 } 726 727 /** 728 * axienet_tx_poll - Invoked once a transmit is completed by the 729 * Axi DMA Tx channel. 730 * @napi: Pointer to NAPI structure. 731 * @budget: Max number of TX packets to process. 732 * 733 * Return: Number of TX packets processed. 734 * 735 * This function is invoked from the NAPI processing to notify the completion 736 * of transmit operation. It clears fields in the corresponding Tx BDs and 737 * unmaps the corresponding buffer so that CPU can regain ownership of the 738 * buffer. It finally invokes "netif_wake_queue" to restart transmission if 739 * required. 740 */ 741 static int axienet_tx_poll(struct napi_struct *napi, int budget) 742 { 743 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx); 744 struct net_device *ndev = lp->ndev; 745 u32 size = 0; 746 int packets; 747 748 packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget); 749 750 if (packets) { 751 lp->tx_bd_ci += packets; 752 if (lp->tx_bd_ci >= lp->tx_bd_num) 753 lp->tx_bd_ci %= lp->tx_bd_num; 754 755 u64_stats_update_begin(&lp->tx_stat_sync); 756 u64_stats_add(&lp->tx_packets, packets); 757 u64_stats_add(&lp->tx_bytes, size); 758 u64_stats_update_end(&lp->tx_stat_sync); 759 760 /* Matches barrier in axienet_start_xmit */ 761 smp_mb(); 762 763 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) 764 netif_wake_queue(ndev); 765 } 766 767 if (packets < budget && napi_complete_done(napi, packets)) { 768 /* Re-enable TX completion interrupts. This should 769 * cause an immediate interrupt if any TX packets are 770 * already pending. 771 */ 772 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); 773 } 774 return packets; 775 } 776 777 /** 778 * axienet_start_xmit - Starts the transmission. 779 * @skb: sk_buff pointer that contains data to be Txed. 780 * @ndev: Pointer to net_device structure. 781 * 782 * Return: NETDEV_TX_OK, on success 783 * NETDEV_TX_BUSY, if any of the descriptors are not free 784 * 785 * This function is invoked from upper layers to initiate transmission. The 786 * function uses the next available free BDs and populates their fields to 787 * start the transmission. Additionally if checksum offloading is supported, 788 * it populates AXI Stream Control fields with appropriate values. 789 */ 790 static netdev_tx_t 791 axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 792 { 793 u32 ii; 794 u32 num_frag; 795 u32 csum_start_off; 796 u32 csum_index_off; 797 skb_frag_t *frag; 798 dma_addr_t tail_p, phys; 799 u32 orig_tail_ptr, new_tail_ptr; 800 struct axienet_local *lp = netdev_priv(ndev); 801 struct axidma_bd *cur_p; 802 803 orig_tail_ptr = lp->tx_bd_tail; 804 new_tail_ptr = orig_tail_ptr; 805 806 num_frag = skb_shinfo(skb)->nr_frags; 807 cur_p = &lp->tx_bd_v[orig_tail_ptr]; 808 809 if (axienet_check_tx_bd_space(lp, num_frag + 1)) { 810 /* Should not happen as last start_xmit call should have 811 * checked for sufficient space and queue should only be 812 * woken when sufficient space is available. 813 */ 814 netif_stop_queue(ndev); 815 if (net_ratelimit()) 816 netdev_warn(ndev, "TX ring unexpectedly full\n"); 817 return NETDEV_TX_BUSY; 818 } 819 820 if (skb->ip_summed == CHECKSUM_PARTIAL) { 821 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { 822 /* Tx Full Checksum Offload Enabled */ 823 cur_p->app0 |= 2; 824 } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { 825 csum_start_off = skb_transport_offset(skb); 826 csum_index_off = csum_start_off + skb->csum_offset; 827 /* Tx Partial Checksum Offload Enabled */ 828 cur_p->app0 |= 1; 829 cur_p->app1 = (csum_start_off << 16) | csum_index_off; 830 } 831 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 832 cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ 833 } 834 835 phys = dma_map_single(lp->dev, skb->data, 836 skb_headlen(skb), DMA_TO_DEVICE); 837 if (unlikely(dma_mapping_error(lp->dev, phys))) { 838 if (net_ratelimit()) 839 netdev_err(ndev, "TX DMA mapping error\n"); 840 ndev->stats.tx_dropped++; 841 return NETDEV_TX_OK; 842 } 843 desc_set_phys_addr(lp, phys, cur_p); 844 cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; 845 846 for (ii = 0; ii < num_frag; ii++) { 847 if (++new_tail_ptr >= lp->tx_bd_num) 848 new_tail_ptr = 0; 849 cur_p = &lp->tx_bd_v[new_tail_ptr]; 850 frag = &skb_shinfo(skb)->frags[ii]; 851 phys = dma_map_single(lp->dev, 852 skb_frag_address(frag), 853 skb_frag_size(frag), 854 DMA_TO_DEVICE); 855 if (unlikely(dma_mapping_error(lp->dev, phys))) { 856 if (net_ratelimit()) 857 netdev_err(ndev, "TX DMA mapping error\n"); 858 ndev->stats.tx_dropped++; 859 axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1, 860 true, NULL, 0); 861 return NETDEV_TX_OK; 862 } 863 desc_set_phys_addr(lp, phys, cur_p); 864 cur_p->cntrl = skb_frag_size(frag); 865 } 866 867 cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; 868 cur_p->skb = skb; 869 870 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr; 871 if (++new_tail_ptr >= lp->tx_bd_num) 872 new_tail_ptr = 0; 873 WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr); 874 875 /* Start the transfer */ 876 axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); 877 878 /* Stop queue if next transmit may not have space */ 879 if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { 880 netif_stop_queue(ndev); 881 882 /* Matches barrier in axienet_tx_poll */ 883 smp_mb(); 884 885 /* Space might have just been freed - check again */ 886 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) 887 netif_wake_queue(ndev); 888 } 889 890 return NETDEV_TX_OK; 891 } 892 893 /** 894 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing. 895 * @napi: Pointer to NAPI structure. 896 * @budget: Max number of RX packets to process. 897 * 898 * Return: Number of RX packets processed. 899 */ 900 static int axienet_rx_poll(struct napi_struct *napi, int budget) 901 { 902 u32 length; 903 u32 csumstatus; 904 u32 size = 0; 905 int packets = 0; 906 dma_addr_t tail_p = 0; 907 struct axidma_bd *cur_p; 908 struct sk_buff *skb, *new_skb; 909 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx); 910 911 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 912 913 while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { 914 dma_addr_t phys; 915 916 /* Ensure we see complete descriptor update */ 917 dma_rmb(); 918 919 skb = cur_p->skb; 920 cur_p->skb = NULL; 921 922 /* skb could be NULL if a previous pass already received the 923 * packet for this slot in the ring, but failed to refill it 924 * with a newly allocated buffer. In this case, don't try to 925 * receive it again. 926 */ 927 if (likely(skb)) { 928 length = cur_p->app4 & 0x0000FFFF; 929 930 phys = desc_get_phys_addr(lp, cur_p); 931 dma_unmap_single(lp->dev, phys, lp->max_frm_size, 932 DMA_FROM_DEVICE); 933 934 skb_put(skb, length); 935 skb->protocol = eth_type_trans(skb, lp->ndev); 936 /*skb_checksum_none_assert(skb);*/ 937 skb->ip_summed = CHECKSUM_NONE; 938 939 /* if we're doing Rx csum offload, set it up */ 940 if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { 941 csumstatus = (cur_p->app2 & 942 XAE_FULL_CSUM_STATUS_MASK) >> 3; 943 if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED || 944 csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { 945 skb->ip_summed = CHECKSUM_UNNECESSARY; 946 } 947 } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && 948 skb->protocol == htons(ETH_P_IP) && 949 skb->len > 64) { 950 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); 951 skb->ip_summed = CHECKSUM_COMPLETE; 952 } 953 954 napi_gro_receive(napi, skb); 955 956 size += length; 957 packets++; 958 } 959 960 new_skb = napi_alloc_skb(napi, lp->max_frm_size); 961 if (!new_skb) 962 break; 963 964 phys = dma_map_single(lp->dev, new_skb->data, 965 lp->max_frm_size, 966 DMA_FROM_DEVICE); 967 if (unlikely(dma_mapping_error(lp->dev, phys))) { 968 if (net_ratelimit()) 969 netdev_err(lp->ndev, "RX DMA mapping error\n"); 970 dev_kfree_skb(new_skb); 971 break; 972 } 973 desc_set_phys_addr(lp, phys, cur_p); 974 975 cur_p->cntrl = lp->max_frm_size; 976 cur_p->status = 0; 977 cur_p->skb = new_skb; 978 979 /* Only update tail_p to mark this slot as usable after it has 980 * been successfully refilled. 981 */ 982 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; 983 984 if (++lp->rx_bd_ci >= lp->rx_bd_num) 985 lp->rx_bd_ci = 0; 986 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 987 } 988 989 u64_stats_update_begin(&lp->rx_stat_sync); 990 u64_stats_add(&lp->rx_packets, packets); 991 u64_stats_add(&lp->rx_bytes, size); 992 u64_stats_update_end(&lp->rx_stat_sync); 993 994 if (tail_p) 995 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); 996 997 if (packets < budget && napi_complete_done(napi, packets)) { 998 /* Re-enable RX completion interrupts. This should 999 * cause an immediate interrupt if any RX packets are 1000 * already pending. 1001 */ 1002 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); 1003 } 1004 return packets; 1005 } 1006 1007 /** 1008 * axienet_tx_irq - Tx Done Isr. 1009 * @irq: irq number 1010 * @_ndev: net_device pointer 1011 * 1012 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise. 1013 * 1014 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the 1015 * TX BD processing. 1016 */ 1017 static irqreturn_t axienet_tx_irq(int irq, void *_ndev) 1018 { 1019 unsigned int status; 1020 struct net_device *ndev = _ndev; 1021 struct axienet_local *lp = netdev_priv(ndev); 1022 1023 status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1024 1025 if (!(status & XAXIDMA_IRQ_ALL_MASK)) 1026 return IRQ_NONE; 1027 1028 axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); 1029 1030 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { 1031 netdev_err(ndev, "DMA Tx error 0x%x\n", status); 1032 netdev_err(ndev, "Current BD is at: 0x%x%08x\n", 1033 (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, 1034 (lp->tx_bd_v[lp->tx_bd_ci]).phys); 1035 schedule_work(&lp->dma_err_task); 1036 } else { 1037 /* Disable further TX completion interrupts and schedule 1038 * NAPI to handle the completions. 1039 */ 1040 u32 cr = lp->tx_dma_cr; 1041 1042 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); 1043 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 1044 1045 napi_schedule(&lp->napi_tx); 1046 } 1047 1048 return IRQ_HANDLED; 1049 } 1050 1051 /** 1052 * axienet_rx_irq - Rx Isr. 1053 * @irq: irq number 1054 * @_ndev: net_device pointer 1055 * 1056 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise. 1057 * 1058 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD 1059 * processing. 1060 */ 1061 static irqreturn_t axienet_rx_irq(int irq, void *_ndev) 1062 { 1063 unsigned int status; 1064 struct net_device *ndev = _ndev; 1065 struct axienet_local *lp = netdev_priv(ndev); 1066 1067 status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1068 1069 if (!(status & XAXIDMA_IRQ_ALL_MASK)) 1070 return IRQ_NONE; 1071 1072 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); 1073 1074 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { 1075 netdev_err(ndev, "DMA Rx error 0x%x\n", status); 1076 netdev_err(ndev, "Current BD is at: 0x%x%08x\n", 1077 (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, 1078 (lp->rx_bd_v[lp->rx_bd_ci]).phys); 1079 schedule_work(&lp->dma_err_task); 1080 } else { 1081 /* Disable further RX completion interrupts and schedule 1082 * NAPI receive. 1083 */ 1084 u32 cr = lp->rx_dma_cr; 1085 1086 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); 1087 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 1088 1089 napi_schedule(&lp->napi_rx); 1090 } 1091 1092 return IRQ_HANDLED; 1093 } 1094 1095 /** 1096 * axienet_eth_irq - Ethernet core Isr. 1097 * @irq: irq number 1098 * @_ndev: net_device pointer 1099 * 1100 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise. 1101 * 1102 * Handle miscellaneous conditions indicated by Ethernet core IRQ. 1103 */ 1104 static irqreturn_t axienet_eth_irq(int irq, void *_ndev) 1105 { 1106 struct net_device *ndev = _ndev; 1107 struct axienet_local *lp = netdev_priv(ndev); 1108 unsigned int pending; 1109 1110 pending = axienet_ior(lp, XAE_IP_OFFSET); 1111 if (!pending) 1112 return IRQ_NONE; 1113 1114 if (pending & XAE_INT_RXFIFOOVR_MASK) 1115 ndev->stats.rx_missed_errors++; 1116 1117 if (pending & XAE_INT_RXRJECT_MASK) 1118 ndev->stats.rx_frame_errors++; 1119 1120 axienet_iow(lp, XAE_IS_OFFSET, pending); 1121 return IRQ_HANDLED; 1122 } 1123 1124 static void axienet_dma_err_handler(struct work_struct *work); 1125 1126 /** 1127 * axienet_open - Driver open routine. 1128 * @ndev: Pointer to net_device structure 1129 * 1130 * Return: 0, on success. 1131 * non-zero error value on failure 1132 * 1133 * This is the driver open routine. It calls phylink_start to start the 1134 * PHY device. 1135 * It also allocates interrupt service routines, enables the interrupt lines 1136 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer 1137 * descriptors are initialized. 1138 */ 1139 static int axienet_open(struct net_device *ndev) 1140 { 1141 int ret; 1142 struct axienet_local *lp = netdev_priv(ndev); 1143 1144 dev_dbg(&ndev->dev, "axienet_open()\n"); 1145 1146 /* When we do an Axi Ethernet reset, it resets the complete core 1147 * including the MDIO. MDIO must be disabled before resetting. 1148 * Hold MDIO bus lock to avoid MDIO accesses during the reset. 1149 */ 1150 axienet_lock_mii(lp); 1151 ret = axienet_device_reset(ndev); 1152 axienet_unlock_mii(lp); 1153 1154 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); 1155 if (ret) { 1156 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); 1157 return ret; 1158 } 1159 1160 phylink_start(lp->phylink); 1161 1162 /* Enable worker thread for Axi DMA error handling */ 1163 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); 1164 1165 napi_enable(&lp->napi_rx); 1166 napi_enable(&lp->napi_tx); 1167 1168 /* Enable interrupts for Axi DMA Tx */ 1169 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, 1170 ndev->name, ndev); 1171 if (ret) 1172 goto err_tx_irq; 1173 /* Enable interrupts for Axi DMA Rx */ 1174 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED, 1175 ndev->name, ndev); 1176 if (ret) 1177 goto err_rx_irq; 1178 /* Enable interrupts for Axi Ethernet core (if defined) */ 1179 if (lp->eth_irq > 0) { 1180 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, 1181 ndev->name, ndev); 1182 if (ret) 1183 goto err_eth_irq; 1184 } 1185 1186 return 0; 1187 1188 err_eth_irq: 1189 free_irq(lp->rx_irq, ndev); 1190 err_rx_irq: 1191 free_irq(lp->tx_irq, ndev); 1192 err_tx_irq: 1193 napi_disable(&lp->napi_tx); 1194 napi_disable(&lp->napi_rx); 1195 phylink_stop(lp->phylink); 1196 phylink_disconnect_phy(lp->phylink); 1197 cancel_work_sync(&lp->dma_err_task); 1198 dev_err(lp->dev, "request_irq() failed\n"); 1199 return ret; 1200 } 1201 1202 /** 1203 * axienet_stop - Driver stop routine. 1204 * @ndev: Pointer to net_device structure 1205 * 1206 * Return: 0, on success. 1207 * 1208 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY 1209 * device. It also removes the interrupt handlers and disables the interrupts. 1210 * The Axi DMA Tx/Rx BDs are released. 1211 */ 1212 static int axienet_stop(struct net_device *ndev) 1213 { 1214 struct axienet_local *lp = netdev_priv(ndev); 1215 1216 dev_dbg(&ndev->dev, "axienet_close()\n"); 1217 1218 napi_disable(&lp->napi_tx); 1219 napi_disable(&lp->napi_rx); 1220 1221 phylink_stop(lp->phylink); 1222 phylink_disconnect_phy(lp->phylink); 1223 1224 axienet_setoptions(ndev, lp->options & 1225 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1226 1227 axienet_dma_stop(lp); 1228 1229 axienet_iow(lp, XAE_IE_OFFSET, 0); 1230 1231 cancel_work_sync(&lp->dma_err_task); 1232 1233 if (lp->eth_irq > 0) 1234 free_irq(lp->eth_irq, ndev); 1235 free_irq(lp->tx_irq, ndev); 1236 free_irq(lp->rx_irq, ndev); 1237 1238 axienet_dma_bd_release(ndev); 1239 return 0; 1240 } 1241 1242 /** 1243 * axienet_change_mtu - Driver change mtu routine. 1244 * @ndev: Pointer to net_device structure 1245 * @new_mtu: New mtu value to be applied 1246 * 1247 * Return: Always returns 0 (success). 1248 * 1249 * This is the change mtu driver routine. It checks if the Axi Ethernet 1250 * hardware supports jumbo frames before changing the mtu. This can be 1251 * called only when the device is not up. 1252 */ 1253 static int axienet_change_mtu(struct net_device *ndev, int new_mtu) 1254 { 1255 struct axienet_local *lp = netdev_priv(ndev); 1256 1257 if (netif_running(ndev)) 1258 return -EBUSY; 1259 1260 if ((new_mtu + VLAN_ETH_HLEN + 1261 XAE_TRL_SIZE) > lp->rxmem) 1262 return -EINVAL; 1263 1264 ndev->mtu = new_mtu; 1265 1266 return 0; 1267 } 1268 1269 #ifdef CONFIG_NET_POLL_CONTROLLER 1270 /** 1271 * axienet_poll_controller - Axi Ethernet poll mechanism. 1272 * @ndev: Pointer to net_device structure 1273 * 1274 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior 1275 * to polling the ISRs and are enabled back after the polling is done. 1276 */ 1277 static void axienet_poll_controller(struct net_device *ndev) 1278 { 1279 struct axienet_local *lp = netdev_priv(ndev); 1280 disable_irq(lp->tx_irq); 1281 disable_irq(lp->rx_irq); 1282 axienet_rx_irq(lp->tx_irq, ndev); 1283 axienet_tx_irq(lp->rx_irq, ndev); 1284 enable_irq(lp->tx_irq); 1285 enable_irq(lp->rx_irq); 1286 } 1287 #endif 1288 1289 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1290 { 1291 struct axienet_local *lp = netdev_priv(dev); 1292 1293 if (!netif_running(dev)) 1294 return -EINVAL; 1295 1296 return phylink_mii_ioctl(lp->phylink, rq, cmd); 1297 } 1298 1299 static void 1300 axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1301 { 1302 struct axienet_local *lp = netdev_priv(dev); 1303 unsigned int start; 1304 1305 netdev_stats_to_stats64(stats, &dev->stats); 1306 1307 do { 1308 start = u64_stats_fetch_begin(&lp->rx_stat_sync); 1309 stats->rx_packets = u64_stats_read(&lp->rx_packets); 1310 stats->rx_bytes = u64_stats_read(&lp->rx_bytes); 1311 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start)); 1312 1313 do { 1314 start = u64_stats_fetch_begin(&lp->tx_stat_sync); 1315 stats->tx_packets = u64_stats_read(&lp->tx_packets); 1316 stats->tx_bytes = u64_stats_read(&lp->tx_bytes); 1317 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start)); 1318 } 1319 1320 static const struct net_device_ops axienet_netdev_ops = { 1321 .ndo_open = axienet_open, 1322 .ndo_stop = axienet_stop, 1323 .ndo_start_xmit = axienet_start_xmit, 1324 .ndo_get_stats64 = axienet_get_stats64, 1325 .ndo_change_mtu = axienet_change_mtu, 1326 .ndo_set_mac_address = netdev_set_mac_address, 1327 .ndo_validate_addr = eth_validate_addr, 1328 .ndo_eth_ioctl = axienet_ioctl, 1329 .ndo_set_rx_mode = axienet_set_multicast_list, 1330 #ifdef CONFIG_NET_POLL_CONTROLLER 1331 .ndo_poll_controller = axienet_poll_controller, 1332 #endif 1333 }; 1334 1335 /** 1336 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information. 1337 * @ndev: Pointer to net_device structure 1338 * @ed: Pointer to ethtool_drvinfo structure 1339 * 1340 * This implements ethtool command for getting the driver information. 1341 * Issue "ethtool -i ethX" under linux prompt to execute this function. 1342 */ 1343 static void axienet_ethtools_get_drvinfo(struct net_device *ndev, 1344 struct ethtool_drvinfo *ed) 1345 { 1346 strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); 1347 strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); 1348 } 1349 1350 /** 1351 * axienet_ethtools_get_regs_len - Get the total regs length present in the 1352 * AxiEthernet core. 1353 * @ndev: Pointer to net_device structure 1354 * 1355 * This implements ethtool command for getting the total register length 1356 * information. 1357 * 1358 * Return: the total regs length 1359 */ 1360 static int axienet_ethtools_get_regs_len(struct net_device *ndev) 1361 { 1362 return sizeof(u32) * AXIENET_REGS_N; 1363 } 1364 1365 /** 1366 * axienet_ethtools_get_regs - Dump the contents of all registers present 1367 * in AxiEthernet core. 1368 * @ndev: Pointer to net_device structure 1369 * @regs: Pointer to ethtool_regs structure 1370 * @ret: Void pointer used to return the contents of the registers. 1371 * 1372 * This implements ethtool command for getting the Axi Ethernet register dump. 1373 * Issue "ethtool -d ethX" to execute this function. 1374 */ 1375 static void axienet_ethtools_get_regs(struct net_device *ndev, 1376 struct ethtool_regs *regs, void *ret) 1377 { 1378 u32 *data = (u32 *)ret; 1379 size_t len = sizeof(u32) * AXIENET_REGS_N; 1380 struct axienet_local *lp = netdev_priv(ndev); 1381 1382 regs->version = 0; 1383 regs->len = len; 1384 1385 memset(data, 0, len); 1386 data[0] = axienet_ior(lp, XAE_RAF_OFFSET); 1387 data[1] = axienet_ior(lp, XAE_TPF_OFFSET); 1388 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); 1389 data[3] = axienet_ior(lp, XAE_IS_OFFSET); 1390 data[4] = axienet_ior(lp, XAE_IP_OFFSET); 1391 data[5] = axienet_ior(lp, XAE_IE_OFFSET); 1392 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); 1393 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); 1394 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); 1395 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); 1396 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); 1397 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); 1398 data[12] = axienet_ior(lp, XAE_PPST_OFFSET); 1399 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); 1400 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); 1401 data[15] = axienet_ior(lp, XAE_TC_OFFSET); 1402 data[16] = axienet_ior(lp, XAE_FCC_OFFSET); 1403 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); 1404 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); 1405 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); 1406 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); 1407 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); 1408 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); 1409 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); 1410 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); 1411 data[29] = axienet_ior(lp, XAE_FMI_OFFSET); 1412 data[30] = axienet_ior(lp, XAE_AF0_OFFSET); 1413 data[31] = axienet_ior(lp, XAE_AF1_OFFSET); 1414 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1415 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1416 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); 1417 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); 1418 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1419 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1420 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); 1421 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); 1422 } 1423 1424 static void 1425 axienet_ethtools_get_ringparam(struct net_device *ndev, 1426 struct ethtool_ringparam *ering, 1427 struct kernel_ethtool_ringparam *kernel_ering, 1428 struct netlink_ext_ack *extack) 1429 { 1430 struct axienet_local *lp = netdev_priv(ndev); 1431 1432 ering->rx_max_pending = RX_BD_NUM_MAX; 1433 ering->rx_mini_max_pending = 0; 1434 ering->rx_jumbo_max_pending = 0; 1435 ering->tx_max_pending = TX_BD_NUM_MAX; 1436 ering->rx_pending = lp->rx_bd_num; 1437 ering->rx_mini_pending = 0; 1438 ering->rx_jumbo_pending = 0; 1439 ering->tx_pending = lp->tx_bd_num; 1440 } 1441 1442 static int 1443 axienet_ethtools_set_ringparam(struct net_device *ndev, 1444 struct ethtool_ringparam *ering, 1445 struct kernel_ethtool_ringparam *kernel_ering, 1446 struct netlink_ext_ack *extack) 1447 { 1448 struct axienet_local *lp = netdev_priv(ndev); 1449 1450 if (ering->rx_pending > RX_BD_NUM_MAX || 1451 ering->rx_mini_pending || 1452 ering->rx_jumbo_pending || 1453 ering->tx_pending < TX_BD_NUM_MIN || 1454 ering->tx_pending > TX_BD_NUM_MAX) 1455 return -EINVAL; 1456 1457 if (netif_running(ndev)) 1458 return -EBUSY; 1459 1460 lp->rx_bd_num = ering->rx_pending; 1461 lp->tx_bd_num = ering->tx_pending; 1462 return 0; 1463 } 1464 1465 /** 1466 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for 1467 * Tx and Rx paths. 1468 * @ndev: Pointer to net_device structure 1469 * @epauseparm: Pointer to ethtool_pauseparam structure. 1470 * 1471 * This implements ethtool command for getting axi ethernet pause frame 1472 * setting. Issue "ethtool -a ethX" to execute this function. 1473 */ 1474 static void 1475 axienet_ethtools_get_pauseparam(struct net_device *ndev, 1476 struct ethtool_pauseparam *epauseparm) 1477 { 1478 struct axienet_local *lp = netdev_priv(ndev); 1479 1480 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); 1481 } 1482 1483 /** 1484 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) 1485 * settings. 1486 * @ndev: Pointer to net_device structure 1487 * @epauseparm:Pointer to ethtool_pauseparam structure 1488 * 1489 * This implements ethtool command for enabling flow control on Rx and Tx 1490 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this 1491 * function. 1492 * 1493 * Return: 0 on success, -EFAULT if device is running 1494 */ 1495 static int 1496 axienet_ethtools_set_pauseparam(struct net_device *ndev, 1497 struct ethtool_pauseparam *epauseparm) 1498 { 1499 struct axienet_local *lp = netdev_priv(ndev); 1500 1501 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); 1502 } 1503 1504 /** 1505 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. 1506 * @ndev: Pointer to net_device structure 1507 * @ecoalesce: Pointer to ethtool_coalesce structure 1508 * @kernel_coal: ethtool CQE mode setting structure 1509 * @extack: extack for reporting error messages 1510 * 1511 * This implements ethtool command for getting the DMA interrupt coalescing 1512 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to 1513 * execute this function. 1514 * 1515 * Return: 0 always 1516 */ 1517 static int 1518 axienet_ethtools_get_coalesce(struct net_device *ndev, 1519 struct ethtool_coalesce *ecoalesce, 1520 struct kernel_ethtool_coalesce *kernel_coal, 1521 struct netlink_ext_ack *extack) 1522 { 1523 struct axienet_local *lp = netdev_priv(ndev); 1524 1525 ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx; 1526 ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx; 1527 ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx; 1528 ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx; 1529 return 0; 1530 } 1531 1532 /** 1533 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. 1534 * @ndev: Pointer to net_device structure 1535 * @ecoalesce: Pointer to ethtool_coalesce structure 1536 * @kernel_coal: ethtool CQE mode setting structure 1537 * @extack: extack for reporting error messages 1538 * 1539 * This implements ethtool command for setting the DMA interrupt coalescing 1540 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux 1541 * prompt to execute this function. 1542 * 1543 * Return: 0, on success, Non-zero error value on failure. 1544 */ 1545 static int 1546 axienet_ethtools_set_coalesce(struct net_device *ndev, 1547 struct ethtool_coalesce *ecoalesce, 1548 struct kernel_ethtool_coalesce *kernel_coal, 1549 struct netlink_ext_ack *extack) 1550 { 1551 struct axienet_local *lp = netdev_priv(ndev); 1552 1553 if (netif_running(ndev)) { 1554 netdev_err(ndev, 1555 "Please stop netif before applying configuration\n"); 1556 return -EFAULT; 1557 } 1558 1559 if (ecoalesce->rx_max_coalesced_frames) 1560 lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; 1561 if (ecoalesce->rx_coalesce_usecs) 1562 lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs; 1563 if (ecoalesce->tx_max_coalesced_frames) 1564 lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; 1565 if (ecoalesce->tx_coalesce_usecs) 1566 lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs; 1567 1568 return 0; 1569 } 1570 1571 static int 1572 axienet_ethtools_get_link_ksettings(struct net_device *ndev, 1573 struct ethtool_link_ksettings *cmd) 1574 { 1575 struct axienet_local *lp = netdev_priv(ndev); 1576 1577 return phylink_ethtool_ksettings_get(lp->phylink, cmd); 1578 } 1579 1580 static int 1581 axienet_ethtools_set_link_ksettings(struct net_device *ndev, 1582 const struct ethtool_link_ksettings *cmd) 1583 { 1584 struct axienet_local *lp = netdev_priv(ndev); 1585 1586 return phylink_ethtool_ksettings_set(lp->phylink, cmd); 1587 } 1588 1589 static int axienet_ethtools_nway_reset(struct net_device *dev) 1590 { 1591 struct axienet_local *lp = netdev_priv(dev); 1592 1593 return phylink_ethtool_nway_reset(lp->phylink); 1594 } 1595 1596 static const struct ethtool_ops axienet_ethtool_ops = { 1597 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 1598 ETHTOOL_COALESCE_USECS, 1599 .get_drvinfo = axienet_ethtools_get_drvinfo, 1600 .get_regs_len = axienet_ethtools_get_regs_len, 1601 .get_regs = axienet_ethtools_get_regs, 1602 .get_link = ethtool_op_get_link, 1603 .get_ringparam = axienet_ethtools_get_ringparam, 1604 .set_ringparam = axienet_ethtools_set_ringparam, 1605 .get_pauseparam = axienet_ethtools_get_pauseparam, 1606 .set_pauseparam = axienet_ethtools_set_pauseparam, 1607 .get_coalesce = axienet_ethtools_get_coalesce, 1608 .set_coalesce = axienet_ethtools_set_coalesce, 1609 .get_link_ksettings = axienet_ethtools_get_link_ksettings, 1610 .set_link_ksettings = axienet_ethtools_set_link_ksettings, 1611 .nway_reset = axienet_ethtools_nway_reset, 1612 }; 1613 1614 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs) 1615 { 1616 return container_of(pcs, struct axienet_local, pcs); 1617 } 1618 1619 static void axienet_pcs_get_state(struct phylink_pcs *pcs, 1620 struct phylink_link_state *state) 1621 { 1622 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 1623 1624 phylink_mii_c22_pcs_get_state(pcs_phy, state); 1625 } 1626 1627 static void axienet_pcs_an_restart(struct phylink_pcs *pcs) 1628 { 1629 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 1630 1631 phylink_mii_c22_pcs_an_restart(pcs_phy); 1632 } 1633 1634 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 1635 phy_interface_t interface, 1636 const unsigned long *advertising, 1637 bool permit_pause_to_mac) 1638 { 1639 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 1640 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev; 1641 struct axienet_local *lp = netdev_priv(ndev); 1642 int ret; 1643 1644 if (lp->switch_x_sgmii) { 1645 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG, 1646 interface == PHY_INTERFACE_MODE_SGMII ? 1647 XLNX_MII_STD_SELECT_SGMII : 0); 1648 if (ret < 0) { 1649 netdev_warn(ndev, 1650 "Failed to switch PHY interface: %d\n", 1651 ret); 1652 return ret; 1653 } 1654 } 1655 1656 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising, 1657 neg_mode); 1658 if (ret < 0) 1659 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret); 1660 1661 return ret; 1662 } 1663 1664 static const struct phylink_pcs_ops axienet_pcs_ops = { 1665 .pcs_get_state = axienet_pcs_get_state, 1666 .pcs_config = axienet_pcs_config, 1667 .pcs_an_restart = axienet_pcs_an_restart, 1668 }; 1669 1670 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config, 1671 phy_interface_t interface) 1672 { 1673 struct net_device *ndev = to_net_dev(config->dev); 1674 struct axienet_local *lp = netdev_priv(ndev); 1675 1676 if (interface == PHY_INTERFACE_MODE_1000BASEX || 1677 interface == PHY_INTERFACE_MODE_SGMII) 1678 return &lp->pcs; 1679 1680 return NULL; 1681 } 1682 1683 static void axienet_mac_config(struct phylink_config *config, unsigned int mode, 1684 const struct phylink_link_state *state) 1685 { 1686 /* nothing meaningful to do */ 1687 } 1688 1689 static void axienet_mac_link_down(struct phylink_config *config, 1690 unsigned int mode, 1691 phy_interface_t interface) 1692 { 1693 /* nothing meaningful to do */ 1694 } 1695 1696 static void axienet_mac_link_up(struct phylink_config *config, 1697 struct phy_device *phy, 1698 unsigned int mode, phy_interface_t interface, 1699 int speed, int duplex, 1700 bool tx_pause, bool rx_pause) 1701 { 1702 struct net_device *ndev = to_net_dev(config->dev); 1703 struct axienet_local *lp = netdev_priv(ndev); 1704 u32 emmc_reg, fcc_reg; 1705 1706 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); 1707 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; 1708 1709 switch (speed) { 1710 case SPEED_1000: 1711 emmc_reg |= XAE_EMMC_LINKSPD_1000; 1712 break; 1713 case SPEED_100: 1714 emmc_reg |= XAE_EMMC_LINKSPD_100; 1715 break; 1716 case SPEED_10: 1717 emmc_reg |= XAE_EMMC_LINKSPD_10; 1718 break; 1719 default: 1720 dev_err(&ndev->dev, 1721 "Speed other than 10, 100 or 1Gbps is not supported\n"); 1722 break; 1723 } 1724 1725 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); 1726 1727 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); 1728 if (tx_pause) 1729 fcc_reg |= XAE_FCC_FCTX_MASK; 1730 else 1731 fcc_reg &= ~XAE_FCC_FCTX_MASK; 1732 if (rx_pause) 1733 fcc_reg |= XAE_FCC_FCRX_MASK; 1734 else 1735 fcc_reg &= ~XAE_FCC_FCRX_MASK; 1736 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); 1737 } 1738 1739 static const struct phylink_mac_ops axienet_phylink_ops = { 1740 .mac_select_pcs = axienet_mac_select_pcs, 1741 .mac_config = axienet_mac_config, 1742 .mac_link_down = axienet_mac_link_down, 1743 .mac_link_up = axienet_mac_link_up, 1744 }; 1745 1746 /** 1747 * axienet_dma_err_handler - Work queue task for Axi DMA Error 1748 * @work: pointer to work_struct 1749 * 1750 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the 1751 * Tx/Rx BDs. 1752 */ 1753 static void axienet_dma_err_handler(struct work_struct *work) 1754 { 1755 u32 i; 1756 u32 axienet_status; 1757 struct axidma_bd *cur_p; 1758 struct axienet_local *lp = container_of(work, struct axienet_local, 1759 dma_err_task); 1760 struct net_device *ndev = lp->ndev; 1761 1762 napi_disable(&lp->napi_tx); 1763 napi_disable(&lp->napi_rx); 1764 1765 axienet_setoptions(ndev, lp->options & 1766 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1767 1768 axienet_dma_stop(lp); 1769 1770 for (i = 0; i < lp->tx_bd_num; i++) { 1771 cur_p = &lp->tx_bd_v[i]; 1772 if (cur_p->cntrl) { 1773 dma_addr_t addr = desc_get_phys_addr(lp, cur_p); 1774 1775 dma_unmap_single(lp->dev, addr, 1776 (cur_p->cntrl & 1777 XAXIDMA_BD_CTRL_LENGTH_MASK), 1778 DMA_TO_DEVICE); 1779 } 1780 if (cur_p->skb) 1781 dev_kfree_skb_irq(cur_p->skb); 1782 cur_p->phys = 0; 1783 cur_p->phys_msb = 0; 1784 cur_p->cntrl = 0; 1785 cur_p->status = 0; 1786 cur_p->app0 = 0; 1787 cur_p->app1 = 0; 1788 cur_p->app2 = 0; 1789 cur_p->app3 = 0; 1790 cur_p->app4 = 0; 1791 cur_p->skb = NULL; 1792 } 1793 1794 for (i = 0; i < lp->rx_bd_num; i++) { 1795 cur_p = &lp->rx_bd_v[i]; 1796 cur_p->status = 0; 1797 cur_p->app0 = 0; 1798 cur_p->app1 = 0; 1799 cur_p->app2 = 0; 1800 cur_p->app3 = 0; 1801 cur_p->app4 = 0; 1802 } 1803 1804 lp->tx_bd_ci = 0; 1805 lp->tx_bd_tail = 0; 1806 lp->rx_bd_ci = 0; 1807 1808 axienet_dma_start(lp); 1809 1810 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 1811 axienet_status &= ~XAE_RCW1_RX_MASK; 1812 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 1813 1814 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 1815 if (axienet_status & XAE_INT_RXRJECT_MASK) 1816 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 1817 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 1818 XAE_INT_RECV_ERROR_MASK : 0); 1819 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 1820 1821 /* Sync default options with HW but leave receiver and 1822 * transmitter disabled. 1823 */ 1824 axienet_setoptions(ndev, lp->options & 1825 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1826 axienet_set_mac_address(ndev, NULL); 1827 axienet_set_multicast_list(ndev); 1828 axienet_setoptions(ndev, lp->options); 1829 napi_enable(&lp->napi_rx); 1830 napi_enable(&lp->napi_tx); 1831 } 1832 1833 /** 1834 * axienet_probe - Axi Ethernet probe function. 1835 * @pdev: Pointer to platform device structure. 1836 * 1837 * Return: 0, on success 1838 * Non-zero error value on failure. 1839 * 1840 * This is the probe routine for Axi Ethernet driver. This is called before 1841 * any other driver routines are invoked. It allocates and sets up the Ethernet 1842 * device. Parses through device tree and populates fields of 1843 * axienet_local. It registers the Ethernet device. 1844 */ 1845 static int axienet_probe(struct platform_device *pdev) 1846 { 1847 int ret; 1848 struct device_node *np; 1849 struct axienet_local *lp; 1850 struct net_device *ndev; 1851 struct resource *ethres; 1852 u8 mac_addr[ETH_ALEN]; 1853 int addr_width = 32; 1854 u32 value; 1855 1856 ndev = alloc_etherdev(sizeof(*lp)); 1857 if (!ndev) 1858 return -ENOMEM; 1859 1860 platform_set_drvdata(pdev, ndev); 1861 1862 SET_NETDEV_DEV(ndev, &pdev->dev); 1863 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1864 ndev->features = NETIF_F_SG; 1865 ndev->netdev_ops = &axienet_netdev_ops; 1866 ndev->ethtool_ops = &axienet_ethtool_ops; 1867 1868 /* MTU range: 64 - 9000 */ 1869 ndev->min_mtu = 64; 1870 ndev->max_mtu = XAE_JUMBO_MTU; 1871 1872 lp = netdev_priv(ndev); 1873 lp->ndev = ndev; 1874 lp->dev = &pdev->dev; 1875 lp->options = XAE_OPTION_DEFAULTS; 1876 lp->rx_bd_num = RX_BD_NUM_DEFAULT; 1877 lp->tx_bd_num = TX_BD_NUM_DEFAULT; 1878 1879 u64_stats_init(&lp->rx_stat_sync); 1880 u64_stats_init(&lp->tx_stat_sync); 1881 1882 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); 1883 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); 1884 1885 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); 1886 if (!lp->axi_clk) { 1887 /* For backward compatibility, if named AXI clock is not present, 1888 * treat the first clock specified as the AXI clock. 1889 */ 1890 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); 1891 } 1892 if (IS_ERR(lp->axi_clk)) { 1893 ret = PTR_ERR(lp->axi_clk); 1894 goto free_netdev; 1895 } 1896 ret = clk_prepare_enable(lp->axi_clk); 1897 if (ret) { 1898 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); 1899 goto free_netdev; 1900 } 1901 1902 lp->misc_clks[0].id = "axis_clk"; 1903 lp->misc_clks[1].id = "ref_clk"; 1904 lp->misc_clks[2].id = "mgt_clk"; 1905 1906 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); 1907 if (ret) 1908 goto cleanup_clk; 1909 1910 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 1911 if (ret) 1912 goto cleanup_clk; 1913 1914 /* Map device registers */ 1915 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, ðres); 1916 if (IS_ERR(lp->regs)) { 1917 ret = PTR_ERR(lp->regs); 1918 goto cleanup_clk; 1919 } 1920 lp->regs_start = ethres->start; 1921 1922 /* Setup checksum offload, but default to off if not specified */ 1923 lp->features = 0; 1924 1925 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); 1926 if (!ret) { 1927 switch (value) { 1928 case 1: 1929 lp->csum_offload_on_tx_path = 1930 XAE_FEATURE_PARTIAL_TX_CSUM; 1931 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; 1932 /* Can checksum TCP/UDP over IPv4. */ 1933 ndev->features |= NETIF_F_IP_CSUM; 1934 break; 1935 case 2: 1936 lp->csum_offload_on_tx_path = 1937 XAE_FEATURE_FULL_TX_CSUM; 1938 lp->features |= XAE_FEATURE_FULL_TX_CSUM; 1939 /* Can checksum TCP/UDP over IPv4. */ 1940 ndev->features |= NETIF_F_IP_CSUM; 1941 break; 1942 default: 1943 lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; 1944 } 1945 } 1946 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); 1947 if (!ret) { 1948 switch (value) { 1949 case 1: 1950 lp->csum_offload_on_rx_path = 1951 XAE_FEATURE_PARTIAL_RX_CSUM; 1952 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; 1953 break; 1954 case 2: 1955 lp->csum_offload_on_rx_path = 1956 XAE_FEATURE_FULL_RX_CSUM; 1957 lp->features |= XAE_FEATURE_FULL_RX_CSUM; 1958 break; 1959 default: 1960 lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; 1961 } 1962 } 1963 /* For supporting jumbo frames, the Axi Ethernet hardware must have 1964 * a larger Rx/Tx Memory. Typically, the size must be large so that 1965 * we can enable jumbo option and start supporting jumbo frames. 1966 * Here we check for memory allocated for Rx/Tx in the hardware from 1967 * the device-tree and accordingly set flags. 1968 */ 1969 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); 1970 1971 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, 1972 "xlnx,switch-x-sgmii"); 1973 1974 /* Start with the proprietary, and broken phy_type */ 1975 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); 1976 if (!ret) { 1977 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); 1978 switch (value) { 1979 case XAE_PHY_TYPE_MII: 1980 lp->phy_mode = PHY_INTERFACE_MODE_MII; 1981 break; 1982 case XAE_PHY_TYPE_GMII: 1983 lp->phy_mode = PHY_INTERFACE_MODE_GMII; 1984 break; 1985 case XAE_PHY_TYPE_RGMII_2_0: 1986 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; 1987 break; 1988 case XAE_PHY_TYPE_SGMII: 1989 lp->phy_mode = PHY_INTERFACE_MODE_SGMII; 1990 break; 1991 case XAE_PHY_TYPE_1000BASE_X: 1992 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; 1993 break; 1994 default: 1995 ret = -EINVAL; 1996 goto cleanup_clk; 1997 } 1998 } else { 1999 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); 2000 if (ret) 2001 goto cleanup_clk; 2002 } 2003 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && 2004 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { 2005 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); 2006 ret = -EINVAL; 2007 goto cleanup_clk; 2008 } 2009 2010 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 2011 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); 2012 if (np) { 2013 struct resource dmares; 2014 2015 ret = of_address_to_resource(np, 0, &dmares); 2016 if (ret) { 2017 dev_err(&pdev->dev, 2018 "unable to get DMA resource\n"); 2019 of_node_put(np); 2020 goto cleanup_clk; 2021 } 2022 lp->dma_regs = devm_ioremap_resource(&pdev->dev, 2023 &dmares); 2024 lp->rx_irq = irq_of_parse_and_map(np, 1); 2025 lp->tx_irq = irq_of_parse_and_map(np, 0); 2026 of_node_put(np); 2027 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2028 } else { 2029 /* Check for these resources directly on the Ethernet node. */ 2030 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); 2031 lp->rx_irq = platform_get_irq(pdev, 1); 2032 lp->tx_irq = platform_get_irq(pdev, 0); 2033 lp->eth_irq = platform_get_irq_optional(pdev, 2); 2034 } 2035 if (IS_ERR(lp->dma_regs)) { 2036 dev_err(&pdev->dev, "could not map DMA regs\n"); 2037 ret = PTR_ERR(lp->dma_regs); 2038 goto cleanup_clk; 2039 } 2040 if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { 2041 dev_err(&pdev->dev, "could not determine irqs\n"); 2042 ret = -ENOMEM; 2043 goto cleanup_clk; 2044 } 2045 2046 /* Reset core now that clocks are enabled, prior to accessing MDIO */ 2047 ret = __axienet_device_reset(lp); 2048 if (ret) 2049 goto cleanup_clk; 2050 2051 /* Autodetect the need for 64-bit DMA pointers. 2052 * When the IP is configured for a bus width bigger than 32 bits, 2053 * writing the MSB registers is mandatory, even if they are all 0. 2054 * We can detect this case by writing all 1's to one such register 2055 * and see if that sticks: when the IP is configured for 32 bits 2056 * only, those registers are RES0. 2057 * Those MSB registers were introduced in IP v7.1, which we check first. 2058 */ 2059 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { 2060 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; 2061 2062 iowrite32(0x0, desc); 2063 if (ioread32(desc) == 0) { /* sanity check */ 2064 iowrite32(0xffffffff, desc); 2065 if (ioread32(desc) > 0) { 2066 lp->features |= XAE_FEATURE_DMA_64BIT; 2067 addr_width = 64; 2068 dev_info(&pdev->dev, 2069 "autodetected 64-bit DMA range\n"); 2070 } 2071 iowrite32(0x0, desc); 2072 } 2073 } 2074 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { 2075 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n"); 2076 ret = -EINVAL; 2077 goto cleanup_clk; 2078 } 2079 2080 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); 2081 if (ret) { 2082 dev_err(&pdev->dev, "No suitable DMA available\n"); 2083 goto cleanup_clk; 2084 } 2085 2086 /* Check for Ethernet core IRQ (optional) */ 2087 if (lp->eth_irq <= 0) 2088 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); 2089 2090 /* Retrieve the MAC address */ 2091 ret = of_get_mac_address(pdev->dev.of_node, mac_addr); 2092 if (!ret) { 2093 axienet_set_mac_address(ndev, mac_addr); 2094 } else { 2095 dev_warn(&pdev->dev, "could not find MAC address property: %d\n", 2096 ret); 2097 axienet_set_mac_address(ndev, NULL); 2098 } 2099 2100 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 2101 lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC; 2102 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 2103 lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC; 2104 2105 ret = axienet_mdio_setup(lp); 2106 if (ret) 2107 dev_warn(&pdev->dev, 2108 "error registering MDIO bus: %d\n", ret); 2109 2110 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || 2111 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { 2112 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); 2113 if (!np) { 2114 /* Deprecated: Always use "pcs-handle" for pcs_phy. 2115 * Falling back to "phy-handle" here is only for 2116 * backward compatibility with old device trees. 2117 */ 2118 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 2119 } 2120 if (!np) { 2121 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); 2122 ret = -EINVAL; 2123 goto cleanup_mdio; 2124 } 2125 lp->pcs_phy = of_mdio_find_device(np); 2126 if (!lp->pcs_phy) { 2127 ret = -EPROBE_DEFER; 2128 of_node_put(np); 2129 goto cleanup_mdio; 2130 } 2131 of_node_put(np); 2132 lp->pcs.ops = &axienet_pcs_ops; 2133 lp->pcs.neg_mode = true; 2134 lp->pcs.poll = true; 2135 } 2136 2137 lp->phylink_config.dev = &ndev->dev; 2138 lp->phylink_config.type = PHYLINK_NETDEV; 2139 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | 2140 MAC_10FD | MAC_100FD | MAC_1000FD; 2141 2142 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); 2143 if (lp->switch_x_sgmii) { 2144 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2145 lp->phylink_config.supported_interfaces); 2146 __set_bit(PHY_INTERFACE_MODE_SGMII, 2147 lp->phylink_config.supported_interfaces); 2148 } 2149 2150 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, 2151 lp->phy_mode, 2152 &axienet_phylink_ops); 2153 if (IS_ERR(lp->phylink)) { 2154 ret = PTR_ERR(lp->phylink); 2155 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); 2156 goto cleanup_mdio; 2157 } 2158 2159 ret = register_netdev(lp->ndev); 2160 if (ret) { 2161 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); 2162 goto cleanup_phylink; 2163 } 2164 2165 return 0; 2166 2167 cleanup_phylink: 2168 phylink_destroy(lp->phylink); 2169 2170 cleanup_mdio: 2171 if (lp->pcs_phy) 2172 put_device(&lp->pcs_phy->dev); 2173 if (lp->mii_bus) 2174 axienet_mdio_teardown(lp); 2175 cleanup_clk: 2176 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2177 clk_disable_unprepare(lp->axi_clk); 2178 2179 free_netdev: 2180 free_netdev(ndev); 2181 2182 return ret; 2183 } 2184 2185 static int axienet_remove(struct platform_device *pdev) 2186 { 2187 struct net_device *ndev = platform_get_drvdata(pdev); 2188 struct axienet_local *lp = netdev_priv(ndev); 2189 2190 unregister_netdev(ndev); 2191 2192 if (lp->phylink) 2193 phylink_destroy(lp->phylink); 2194 2195 if (lp->pcs_phy) 2196 put_device(&lp->pcs_phy->dev); 2197 2198 axienet_mdio_teardown(lp); 2199 2200 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2201 clk_disable_unprepare(lp->axi_clk); 2202 2203 free_netdev(ndev); 2204 2205 return 0; 2206 } 2207 2208 static void axienet_shutdown(struct platform_device *pdev) 2209 { 2210 struct net_device *ndev = platform_get_drvdata(pdev); 2211 2212 rtnl_lock(); 2213 netif_device_detach(ndev); 2214 2215 if (netif_running(ndev)) 2216 dev_close(ndev); 2217 2218 rtnl_unlock(); 2219 } 2220 2221 static int axienet_suspend(struct device *dev) 2222 { 2223 struct net_device *ndev = dev_get_drvdata(dev); 2224 2225 if (!netif_running(ndev)) 2226 return 0; 2227 2228 netif_device_detach(ndev); 2229 2230 rtnl_lock(); 2231 axienet_stop(ndev); 2232 rtnl_unlock(); 2233 2234 return 0; 2235 } 2236 2237 static int axienet_resume(struct device *dev) 2238 { 2239 struct net_device *ndev = dev_get_drvdata(dev); 2240 2241 if (!netif_running(ndev)) 2242 return 0; 2243 2244 rtnl_lock(); 2245 axienet_open(ndev); 2246 rtnl_unlock(); 2247 2248 netif_device_attach(ndev); 2249 2250 return 0; 2251 } 2252 2253 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, 2254 axienet_suspend, axienet_resume); 2255 2256 static struct platform_driver axienet_driver = { 2257 .probe = axienet_probe, 2258 .remove = axienet_remove, 2259 .shutdown = axienet_shutdown, 2260 .driver = { 2261 .name = "xilinx_axienet", 2262 .pm = &axienet_pm_ops, 2263 .of_match_table = axienet_of_match, 2264 }, 2265 }; 2266 2267 module_platform_driver(axienet_driver); 2268 2269 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); 2270 MODULE_AUTHOR("Xilinx"); 2271 MODULE_LICENSE("GPL"); 2272