// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 * @value:	Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	iowrite32(value, lp->dma_regs + reg);
}

static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
				 dma_addr_t addr)
{
	axienet_dma_out32(lp, reg, lower_32_bits(addr));

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * @lp:		Pointer to the axienet_local structure
 * @coalesce_usec: Microseconds to convert into timer value
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	u32 result;
	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */

	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);

	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
					 (u64)125000000);
	if (result > 255)
		result = 255;

	return result;
}

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp:		Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	/* Start updating the Rx channel control register */
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_tx > 1)
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}
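
/* Worked example of the interrupt-delay conversion used above (illustrative
 * only, assuming a 125 MHz AXI clock): a coalesce_usec_rx of 50 converts in
 * axienet_usec_to_timer() to 50 * 125000000 / 125000000 = 50, clamped to 255
 * for larger requests, and axienet_dma_start() programs that value into the
 * control register as (50 << XAXIDMA_DELAY_SHIFT) alongside the frame count
 * in the XAXIDMA_COALESCE_SHIFT field.
 */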
/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up unicast MAC address filter set its mac address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		     (ndev->dev_addr[5] << 8))));
}
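
/* A worked example of the register packing above (address value assumed for
 * illustration): for the MAC address 00:0a:35:01:02:03, UAW0 is written with
 * 0x01350a00 (bytes 0-3, least significant byte first) and the low 16 bits
 * of UAW1 with 0x0302 (bytes 4-5), leaving the rest of UAW1 untouched.
 */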
/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Reset Axi DMA.
	 * This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		return ret;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp:		Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}
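
/* For reference, a worked sizing example (assuming the header sizes used by
 * this driver, VLAN_ETH_HLEN = 18 and XAE_TRL_SIZE = 4): axienet_device_reset()
 * below sizes receive buffers as mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE, so an MTU
 * of 9000 needs a 9022 byte frame buffer, and the jumbo option is only enabled
 * when that fits in the hardware Rx memory (lp->rxmem) read from the device
 * tree.
 */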
/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	ret = __axienet_device_reset(lp);
	if (ret)
		return ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	ret = axienet_dma_bd_init(ndev);
	if (ret) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
		return ret;
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp:		Pointer to the axienet_local structure
 * @first_bd:	Index of first descriptor to clean up
 * @nr_bds:	Max number of descriptors to clean up
 * @force:	Whether to clean descriptors even if not complete
 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 *		in all cleaned-up descriptors. Ignored if NULL.
 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of descriptors handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	dma_addr_t phys;
	int i;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
			napi_consume_skb(cur_p->skb, budget);

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	return i;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi:	Pointer to NAPI structure.
 * @budget:	Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);

	if (packets) {
		lp->tx_bd_ci += packets;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;

		ndev->stats.tx_packets += packets;
		ndev->stats.tx_bytes += size;

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}
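
/* A worked example of the partial Tx checksum fields set up in
 * axienet_start_xmit() above (packet layout assumed for illustration): for a
 * TCP segment in an untagged IPv4 frame without IP options,
 * skb_transport_offset() is 34 and skb->csum_offset is 16, so app1 is written
 * with (34 << 16) | 50 = 0x00220032, telling the core where checksumming
 * starts and where the result is inserted.
 */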
/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi:	Pointer to NAPI structure.
 * @budget:	Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
				   skb->protocol == htons(ETH_P_IP) &&
				   skb->len > 64) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	lp->ndev->stats.rx_packets += packets;
	lp->ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		u32 cr = lp->tx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_tx);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		u32 cr = lp->rx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_rx);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Enable worker thread for Axi DMA error handling */
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	cancel_work_sync(&lp->dma_err_task);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * Return: 0 on success, or a negative error value (-EBUSY if the device is
 *	    running, -EINVAL if the new MTU does not fit the hardware Rx memory).
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->tx_irq, ndev);
	axienet_tx_irq(lp->rx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev:	Pointer to net_device structure
 * @ed:		Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 * @regs:	Pointer to ethtool_regs structure
 * @ret:	Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}

static void
axienet_ethtools_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int
axienet_ethtools_set_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, -EFAULT if device is running
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}
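
/* Usage example for the coalesce handlers below (values are illustrative
 * only): "ethtool -C ethX rx-frames 16 rx-usecs 50" stores 16 in
 * lp->coalesce_count_rx and 50 in lp->coalesce_usec_rx. Because the setter
 * refuses changes while the interface is running, the values take effect the
 * next time the interface is brought up and axienet_dma_start() programs the
 * DMA control registers.
 */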
/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->rx_coalesce_usecs)
		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
	if (ecoalesce->tx_coalesce_usecs)
		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;

	return 0;
}

static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
	struct axienet_local *lp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(lp->phylink);
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = axienet_ethtools_get_ringparam,
	.set_ringparam = axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset = axienet_ethtools_nway_reset,
};

static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct axienet_local, pcs);
}

static void axienet_pcs_get_state(struct phylink_pcs *pcs,
				  struct phylink_link_state *state)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_get_state(pcs_phy, state);
}

static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_an_restart(pcs_phy);
}

static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	if (lp->switch_x_sgmii) {
		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
				    interface == PHY_INTERFACE_MODE_SGMII ?
					XLNX_MII_STD_SELECT_SGMII : 0);
		if (ret < 0) {
			netdev_warn(ndev,
				    "Failed to switch PHY interface: %d\n",
				    ret);
			return ret;
		}
	}

	ret = phylink_mii_c22_pcs_config(pcs_phy, mode, interface, advertising);
	if (ret < 0)
		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);

	return ret;
}

static const struct phylink_pcs_ops axienet_pcs_ops = {
	.pcs_get_state = axienet_pcs_get_state,
	.pcs_config = axienet_pcs_config,
	.pcs_an_restart = axienet_pcs_an_restart,
};

static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
						  phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
	    interface == PHY_INTERFACE_MODE_SGMII)
		return &lp->pcs;

	return NULL;
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1Gbps is not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.validate = phylink_generic_validate,
	.mac_select_pcs = axienet_mac_select_pcs,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};

/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work:	pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	u32 i;
	u32 axienet_status;
	struct axidma_bd *cur_p;
	struct axienet_local *lp = container_of(work, struct axienet_local,
						dma_err_task);
	struct net_device *ndev = lp->ndev;

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);

	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(lp->dev, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	axienet_dma_start(lp);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);
}
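
/* An illustrative (not authoritative) device tree fragment showing the
 * properties parsed by axienet_probe() below; the unit address, the values
 * and the name of the referenced DMA node are examples only:
 *
 *	ethernet@40c00000 {
 *		compatible = "xlnx,axi-ethernet-1.00.a";
 *		reg = <0x40c00000 0x40000>;
 *		xlnx,rxmem = <0x10000>;
 *		xlnx,txcsum = <2>;
 *		xlnx,rxcsum = <2>;
 *		phy-mode = "rgmii-id";
 *		axistream-connected = <&axi_dma>;
 *	};
 */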
/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev:	Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for the Axi Ethernet driver. It is called before
 * any other driver routines are invoked. It allocates and sets up the
 * Ethernet device, parses the device tree to populate the fields of
 * axienet_local, and registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	struct resource *ethres;
	u8 mac_addr[ETH_ALEN];
	int addr_width = 32;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll, NAPI_POLL_WEIGHT);
	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll, NAPI_POLL_WEIGHT);

	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
	if (!lp->axi_clk) {
		/* For backward compatibility, if the named AXI clock is not
		 * present, treat the first clock specified as the AXI clock.
		 */
		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
	}
	if (IS_ERR(lp->axi_clk)) {
		ret = PTR_ERR(lp->axi_clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
		goto free_netdev;
	}

	lp->misc_clks[0].id = "axis_clk";
	lp->misc_clks[1].id = "ref_clk";
	lp->misc_clks[2].id = "mgt_clk";

	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;
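
	/*
	 * For reference, the clock lookups above map onto the device tree
	 * roughly as in the fragment below. This is only an illustrative
	 * sketch (the provider label and indices are invented); older
	 * device trees that supply a single unnamed clock keep working
	 * through the devm_clk_get_optional(dev, NULL) fallback.
	 *
	 *	ethernet@40c00000 {
	 *		compatible = "xlnx,axi-ethernet-1.00.a";
	 *		...
	 *		clocks = <&clkc 25>, <&clkc 26>, <&clkc 27>, <&clkc 28>;
	 *		clock-names = "s_axi_lite_clk", "axis_clk",
	 *			      "ref_clk", "mgt_clk";
	 *	};
	 */
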
	/* Map device registers */
	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
	if (IS_ERR(lp->regs)) {
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For jumbo frame support the Axi Ethernet hardware must be
	 * configured with sufficiently large Rx/Tx buffer memory. Read the
	 * size provisioned in hardware from the device tree so the jumbo
	 * option can be enabled accordingly.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
						   "xlnx,switch-x-sgmii");

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto cleanup_clk;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto cleanup_clk;
	}
	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}
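
	/*
	 * An illustrative device tree fragment tying together the properties
	 * parsed above (checksum offload, Rx memory, PHY mode) and below
	 * (the DMA and PCS phandles). Addresses, labels and values here are
	 * invented examples; "pcs-handle" is preferred over the legacy
	 * "phy-handle" fallback for the SGMII/1000BASE-X PCS.
	 *
	 *	ethernet@40c00000 {
	 *		compatible = "xlnx,axi-ethernet-1.00.a";
	 *		...
	 *		phy-mode = "sgmii";
	 *		xlnx,rxmem = <0x8000>;
	 *		xlnx,txcsum = <2>;
	 *		xlnx,rxcsum = <2>;
	 *		axistream-connected = <&axi_dma>;
	 *		pcs-handle = <&pcs_phy>;
	 *	};
	 */
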
	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (np) {
		struct resource dmares;

		ret = of_address_to_resource(np, 0, &dmares);
		if (ret) {
			dev_err(&pdev->dev,
				"unable to get DMA resource\n");
			of_node_put(np);
			goto cleanup_clk;
		}
		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
						     &dmares);
		lp->rx_irq = irq_of_parse_and_map(np, 1);
		lp->tx_irq = irq_of_parse_and_map(np, 0);
		of_node_put(np);
		lp->eth_irq = platform_get_irq_optional(pdev, 0);
	} else {
		/* Check for these resources directly on the Ethernet node. */
		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
		lp->rx_irq = platform_get_irq(pdev, 1);
		lp->tx_irq = platform_get_irq(pdev, 0);
		lp->eth_irq = platform_get_irq_optional(pdev, 2);
	}
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto cleanup_clk;
	}
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto cleanup_clk;
	}

	/* Autodetect the need for 64-bit DMA pointers.
	 * When the IP is configured for a bus width bigger than 32 bits,
	 * writing the MSB registers is mandatory, even if they are all 0.
	 * We can detect this case by writing all 1's to one such register
	 * and seeing if that sticks: when the IP is configured for 32 bits
	 * only, those registers are RES0.
	 * Those MSB registers were introduced in IP v7.1, which we check first.
	 */
	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

		iowrite32(0x0, desc);
		if (ioread32(desc) == 0) {	/* sanity check */
			iowrite32(0xffffffff, desc);
			if (ioread32(desc) > 0) {
				lp->features |= XAE_FEATURE_DMA_64BIT;
				addr_width = 64;
				dev_info(&pdev->dev,
					 "autodetected 64-bit DMA range\n");
			}
			iowrite32(0x0, desc);
		}
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
	if (ret) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto cleanup_clk;
	}

	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	/* Retrieve the MAC address */
	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
	if (!ret) {
		axienet_set_mac_address(ndev, mac_addr);
	} else {
		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
			 ret);
		axienet_set_mac_address(ndev, NULL);
	}

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
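
	/*
	 * These are only boot-time defaults. Assuming the coalesce callbacks
	 * are wired into axienet_ethtool_ops earlier in this file, they can
	 * be tuned at runtime with ethtool, for example (interface name is
	 * illustrative):
	 *
	 *	ethtool -C eth0 rx-frames 16 rx-usecs 50 tx-frames 16 tx-usecs 50
	 *
	 * Lower frame counts reduce Rx/Tx completion latency at the cost of
	 * a higher interrupt rate.
	 */
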
	/* Reset core now that clocks are enabled, prior to accessing MDIO */
	ret = __axienet_device_reset(lp);
	if (ret)
		goto cleanup_clk;

	ret = axienet_mdio_setup(lp);
	if (ret)
		dev_warn(&pdev->dev,
			 "error registering MDIO bus: %d\n", ret);

	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
		if (!np) {
			/* Deprecated: Always use "pcs-handle" for pcs_phy.
			 * Falling back to "phy-handle" here is only for
			 * backward compatibility with old device trees.
			 */
			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		}
		if (!np) {
			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(np);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			of_node_put(np);
			goto cleanup_mdio;
		}
		of_node_put(np);
		lp->pcs.ops = &axienet_pcs_ops;
		lp->pcs.poll = true;
	}

	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;
	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
		MAC_10FD | MAC_100FD | MAC_1000FD;

	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
	if (lp->switch_x_sgmii) {
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  lp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  lp->phylink_config.supported_interfaces);
	}

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;

cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		axienet_mdio_teardown(lp);
cleanup_clk:
	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}

static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

	free_netdev(ndev);

	return 0;
}

static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		.name = "xilinx_axienet",
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");