1 /* 2 * Broadcom GENET (Gigabit Ethernet) controller driver 3 * 4 * Copyright (c) 2014 Broadcom Corporation 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 11 #define pr_fmt(fmt) "bcmgenet: " fmt 12 13 #include <linux/kernel.h> 14 #include <linux/module.h> 15 #include <linux/sched.h> 16 #include <linux/types.h> 17 #include <linux/fcntl.h> 18 #include <linux/interrupt.h> 19 #include <linux/string.h> 20 #include <linux/if_ether.h> 21 #include <linux/init.h> 22 #include <linux/errno.h> 23 #include <linux/delay.h> 24 #include <linux/platform_device.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/pm.h> 27 #include <linux/clk.h> 28 #include <linux/of.h> 29 #include <linux/of_address.h> 30 #include <linux/of_irq.h> 31 #include <linux/of_net.h> 32 #include <linux/of_platform.h> 33 #include <net/arp.h> 34 35 #include <linux/mii.h> 36 #include <linux/ethtool.h> 37 #include <linux/netdevice.h> 38 #include <linux/inetdevice.h> 39 #include <linux/etherdevice.h> 40 #include <linux/skbuff.h> 41 #include <linux/in.h> 42 #include <linux/ip.h> 43 #include <linux/ipv6.h> 44 #include <linux/phy.h> 45 46 #include <asm/unaligned.h> 47 48 #include "bcmgenet.h" 49 50 /* Maximum number of hardware queues, downsized if needed */ 51 #define GENET_MAX_MQ_CNT 4 52 53 /* Default highest priority queue for multi queue support */ 54 #define GENET_Q0_PRIORITY 0 55 56 #define GENET_DEFAULT_BD_CNT \ 57 (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt) 58 59 #define RX_BUF_LENGTH 2048 60 #define SKB_ALIGNMENT 32 61 62 /* Tx/Rx DMA register offset, skip 256 descriptors */ 63 #define WORDS_PER_BD(p) (p->hw_params->words_per_bd) 64 #define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) 65 66 #define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ 67 TOTAL_DESC * DMA_DESC_SIZE) 68 69 #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ 70 TOTAL_DESC * DMA_DESC_SIZE) 71 72 static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, 73 void __iomem *d, u32 value) 74 { 75 __raw_writel(value, d + DMA_DESC_LENGTH_STATUS); 76 } 77 78 static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, 79 void __iomem *d) 80 { 81 return __raw_readl(d + DMA_DESC_LENGTH_STATUS); 82 } 83 84 static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, 85 void __iomem *d, 86 dma_addr_t addr) 87 { 88 __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); 89 90 /* Register writes to GISB bus can take couple hundred nanoseconds 91 * and are done for each packet, save these expensive writes unless 92 * the platform is explicitly configured for 64-bits/LPAE. 
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet; skip reading the high
	 * word unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These helpers deal with the register map changes between GENET1.1
 * and GENET2. Only those currently used by the driver are defined.
150 */ 151 static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) 152 { 153 if (GENET_IS_V1(priv)) 154 return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); 155 else 156 return __raw_readl(priv->base + 157 priv->hw_params->tbuf_offset + TBUF_CTRL); 158 } 159 160 static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) 161 { 162 if (GENET_IS_V1(priv)) 163 bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); 164 else 165 __raw_writel(val, priv->base + 166 priv->hw_params->tbuf_offset + TBUF_CTRL); 167 } 168 169 static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) 170 { 171 if (GENET_IS_V1(priv)) 172 return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); 173 else 174 return __raw_readl(priv->base + 175 priv->hw_params->tbuf_offset + TBUF_BP_MC); 176 } 177 178 static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) 179 { 180 if (GENET_IS_V1(priv)) 181 bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); 182 else 183 __raw_writel(val, priv->base + 184 priv->hw_params->tbuf_offset + TBUF_BP_MC); 185 } 186 187 /* RX/TX DMA register accessors */ 188 enum dma_reg { 189 DMA_RING_CFG = 0, 190 DMA_CTRL, 191 DMA_STATUS, 192 DMA_SCB_BURST_SIZE, 193 DMA_ARB_CTRL, 194 DMA_PRIORITY_0, 195 DMA_PRIORITY_1, 196 DMA_PRIORITY_2, 197 }; 198 199 static const u8 bcmgenet_dma_regs_v3plus[] = { 200 [DMA_RING_CFG] = 0x00, 201 [DMA_CTRL] = 0x04, 202 [DMA_STATUS] = 0x08, 203 [DMA_SCB_BURST_SIZE] = 0x0C, 204 [DMA_ARB_CTRL] = 0x2C, 205 [DMA_PRIORITY_0] = 0x30, 206 [DMA_PRIORITY_1] = 0x34, 207 [DMA_PRIORITY_2] = 0x38, 208 }; 209 210 static const u8 bcmgenet_dma_regs_v2[] = { 211 [DMA_RING_CFG] = 0x00, 212 [DMA_CTRL] = 0x04, 213 [DMA_STATUS] = 0x08, 214 [DMA_SCB_BURST_SIZE] = 0x0C, 215 [DMA_ARB_CTRL] = 0x30, 216 [DMA_PRIORITY_0] = 0x34, 217 [DMA_PRIORITY_1] = 0x38, 218 [DMA_PRIORITY_2] = 0x3C, 219 }; 220 221 static const u8 bcmgenet_dma_regs_v1[] = { 222 [DMA_CTRL] = 0x00, 223 [DMA_STATUS] = 0x04, 224 [DMA_SCB_BURST_SIZE] = 0x0C, 225 [DMA_ARB_CTRL] = 0x30, 226 [DMA_PRIORITY_0] = 0x34, 227 [DMA_PRIORITY_1] = 0x38, 228 [DMA_PRIORITY_2] = 0x3C, 229 }; 230 231 /* Set at runtime once bcmgenet version is known */ 232 static const u8 *bcmgenet_dma_regs; 233 234 static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) 235 { 236 return netdev_priv(dev_get_drvdata(dev)); 237 } 238 239 static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, 240 enum dma_reg r) 241 { 242 return __raw_readl(priv->base + GENET_TDMA_REG_OFF + 243 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); 244 } 245 246 static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, 247 u32 val, enum dma_reg r) 248 { 249 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + 250 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); 251 } 252 253 static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, 254 enum dma_reg r) 255 { 256 return __raw_readl(priv->base + GENET_RDMA_REG_OFF + 257 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); 258 } 259 260 static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, 261 u32 val, enum dma_reg r) 262 { 263 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + 264 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); 265 } 266 267 /* RDMA/TDMA ring registers and accessors 268 * we merge the common fields and just prefix with T/D the registers 269 * having different meaning depending on the direction 270 */ 271 enum dma_ring_reg { 272 TDMA_READ_PTR = 0, 273 RDMA_WRITE_PTR = TDMA_READ_PTR, 274 TDMA_READ_PTR_HI, 275 RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, 276 TDMA_CONS_INDEX, 277 RDMA_PROD_INDEX = 
TDMA_CONS_INDEX, 278 TDMA_PROD_INDEX, 279 RDMA_CONS_INDEX = TDMA_PROD_INDEX, 280 DMA_RING_BUF_SIZE, 281 DMA_START_ADDR, 282 DMA_START_ADDR_HI, 283 DMA_END_ADDR, 284 DMA_END_ADDR_HI, 285 DMA_MBUF_DONE_THRESH, 286 TDMA_FLOW_PERIOD, 287 RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, 288 TDMA_WRITE_PTR, 289 RDMA_READ_PTR = TDMA_WRITE_PTR, 290 TDMA_WRITE_PTR_HI, 291 RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI 292 }; 293 294 /* GENET v4 supports 40-bits pointer addressing 295 * for obvious reasons the LO and HI word parts 296 * are contiguous, but this offsets the other 297 * registers. 298 */ 299 static const u8 genet_dma_ring_regs_v4[] = { 300 [TDMA_READ_PTR] = 0x00, 301 [TDMA_READ_PTR_HI] = 0x04, 302 [TDMA_CONS_INDEX] = 0x08, 303 [TDMA_PROD_INDEX] = 0x0C, 304 [DMA_RING_BUF_SIZE] = 0x10, 305 [DMA_START_ADDR] = 0x14, 306 [DMA_START_ADDR_HI] = 0x18, 307 [DMA_END_ADDR] = 0x1C, 308 [DMA_END_ADDR_HI] = 0x20, 309 [DMA_MBUF_DONE_THRESH] = 0x24, 310 [TDMA_FLOW_PERIOD] = 0x28, 311 [TDMA_WRITE_PTR] = 0x2C, 312 [TDMA_WRITE_PTR_HI] = 0x30, 313 }; 314 315 static const u8 genet_dma_ring_regs_v123[] = { 316 [TDMA_READ_PTR] = 0x00, 317 [TDMA_CONS_INDEX] = 0x04, 318 [TDMA_PROD_INDEX] = 0x08, 319 [DMA_RING_BUF_SIZE] = 0x0C, 320 [DMA_START_ADDR] = 0x10, 321 [DMA_END_ADDR] = 0x14, 322 [DMA_MBUF_DONE_THRESH] = 0x18, 323 [TDMA_FLOW_PERIOD] = 0x1C, 324 [TDMA_WRITE_PTR] = 0x20, 325 }; 326 327 /* Set at runtime once GENET version is known */ 328 static const u8 *genet_dma_ring_regs; 329 330 static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, 331 unsigned int ring, 332 enum dma_ring_reg r) 333 { 334 return __raw_readl(priv->base + GENET_TDMA_REG_OFF + 335 (DMA_RING_SIZE * ring) + 336 genet_dma_ring_regs[r]); 337 } 338 339 static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, 340 unsigned int ring, u32 val, 341 enum dma_ring_reg r) 342 { 343 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + 344 (DMA_RING_SIZE * ring) + 345 genet_dma_ring_regs[r]); 346 } 347 348 static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, 349 unsigned int ring, 350 enum dma_ring_reg r) 351 { 352 return __raw_readl(priv->base + GENET_RDMA_REG_OFF + 353 (DMA_RING_SIZE * ring) + 354 genet_dma_ring_regs[r]); 355 } 356 357 static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, 358 unsigned int ring, u32 val, 359 enum dma_ring_reg r) 360 { 361 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + 362 (DMA_RING_SIZE * ring) + 363 genet_dma_ring_regs[r]); 364 } 365 366 static int bcmgenet_get_settings(struct net_device *dev, 367 struct ethtool_cmd *cmd) 368 { 369 struct bcmgenet_priv *priv = netdev_priv(dev); 370 371 if (!netif_running(dev)) 372 return -EINVAL; 373 374 if (!priv->phydev) 375 return -ENODEV; 376 377 return phy_ethtool_gset(priv->phydev, cmd); 378 } 379 380 static int bcmgenet_set_settings(struct net_device *dev, 381 struct ethtool_cmd *cmd) 382 { 383 struct bcmgenet_priv *priv = netdev_priv(dev); 384 385 if (!netif_running(dev)) 386 return -EINVAL; 387 388 if (!priv->phydev) 389 return -ENODEV; 390 391 return phy_ethtool_sset(priv->phydev, cmd); 392 } 393 394 static int bcmgenet_set_rx_csum(struct net_device *dev, 395 netdev_features_t wanted) 396 { 397 struct bcmgenet_priv *priv = netdev_priv(dev); 398 u32 rbuf_chk_ctrl; 399 bool rx_csum_en; 400 401 rx_csum_en = !!(wanted & NETIF_F_RXCSUM); 402 403 rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); 404 405 /* enable rx checksumming */ 406 if (rx_csum_en) 407 rbuf_chk_ctrl |= RBUF_RXCHK_EN; 408 else 409 rbuf_chk_ctrl &= 
~RBUF_RXCHK_EN; 410 priv->desc_rxchk_en = rx_csum_en; 411 412 /* If UniMAC forwards CRC, we need to skip over it to get 413 * a valid CHK bit to be set in the per-packet status word 414 */ 415 if (rx_csum_en && priv->crc_fwd_en) 416 rbuf_chk_ctrl |= RBUF_SKIP_FCS; 417 else 418 rbuf_chk_ctrl &= ~RBUF_SKIP_FCS; 419 420 bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); 421 422 return 0; 423 } 424 425 static int bcmgenet_set_tx_csum(struct net_device *dev, 426 netdev_features_t wanted) 427 { 428 struct bcmgenet_priv *priv = netdev_priv(dev); 429 bool desc_64b_en; 430 u32 tbuf_ctrl, rbuf_ctrl; 431 432 tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); 433 rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); 434 435 desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 436 437 /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */ 438 if (desc_64b_en) { 439 tbuf_ctrl |= RBUF_64B_EN; 440 rbuf_ctrl |= RBUF_64B_EN; 441 } else { 442 tbuf_ctrl &= ~RBUF_64B_EN; 443 rbuf_ctrl &= ~RBUF_64B_EN; 444 } 445 priv->desc_64b_en = desc_64b_en; 446 447 bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); 448 bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); 449 450 return 0; 451 } 452 453 static int bcmgenet_set_features(struct net_device *dev, 454 netdev_features_t features) 455 { 456 netdev_features_t changed = features ^ dev->features; 457 netdev_features_t wanted = dev->wanted_features; 458 int ret = 0; 459 460 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) 461 ret = bcmgenet_set_tx_csum(dev, wanted); 462 if (changed & (NETIF_F_RXCSUM)) 463 ret = bcmgenet_set_rx_csum(dev, wanted); 464 465 return ret; 466 } 467 468 static u32 bcmgenet_get_msglevel(struct net_device *dev) 469 { 470 struct bcmgenet_priv *priv = netdev_priv(dev); 471 472 return priv->msg_enable; 473 } 474 475 static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) 476 { 477 struct bcmgenet_priv *priv = netdev_priv(dev); 478 479 priv->msg_enable = level; 480 } 481 482 /* standard ethtool support functions. 
*/ 483 enum bcmgenet_stat_type { 484 BCMGENET_STAT_NETDEV = -1, 485 BCMGENET_STAT_MIB_RX, 486 BCMGENET_STAT_MIB_TX, 487 BCMGENET_STAT_RUNT, 488 BCMGENET_STAT_MISC, 489 }; 490 491 struct bcmgenet_stats { 492 char stat_string[ETH_GSTRING_LEN]; 493 int stat_sizeof; 494 int stat_offset; 495 enum bcmgenet_stat_type type; 496 /* reg offset from UMAC base for misc counters */ 497 u16 reg_offset; 498 }; 499 500 #define STAT_NETDEV(m) { \ 501 .stat_string = __stringify(m), \ 502 .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ 503 .stat_offset = offsetof(struct net_device_stats, m), \ 504 .type = BCMGENET_STAT_NETDEV, \ 505 } 506 507 #define STAT_GENET_MIB(str, m, _type) { \ 508 .stat_string = str, \ 509 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ 510 .stat_offset = offsetof(struct bcmgenet_priv, m), \ 511 .type = _type, \ 512 } 513 514 #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) 515 #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) 516 #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) 517 518 #define STAT_GENET_MISC(str, m, offset) { \ 519 .stat_string = str, \ 520 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ 521 .stat_offset = offsetof(struct bcmgenet_priv, m), \ 522 .type = BCMGENET_STAT_MISC, \ 523 .reg_offset = offset, \ 524 } 525 526 527 /* There is a 0xC gap between the end of RX and beginning of TX stats and then 528 * between the end of TX stats and the beginning of the RX RUNT 529 */ 530 #define BCMGENET_STAT_OFFSET 0xc 531 532 /* Hardware counters must be kept in sync because the order/offset 533 * is important here (order in structure declaration = order in hardware) 534 */ 535 static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { 536 /* general stats */ 537 STAT_NETDEV(rx_packets), 538 STAT_NETDEV(tx_packets), 539 STAT_NETDEV(rx_bytes), 540 STAT_NETDEV(tx_bytes), 541 STAT_NETDEV(rx_errors), 542 STAT_NETDEV(tx_errors), 543 STAT_NETDEV(rx_dropped), 544 STAT_NETDEV(tx_dropped), 545 STAT_NETDEV(multicast), 546 /* UniMAC RSV counters */ 547 STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), 548 STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), 549 STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), 550 STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), 551 STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), 552 STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), 553 STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), 554 STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), 555 STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), 556 STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), 557 STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), 558 STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), 559 STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), 560 STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), 561 STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), 562 STAT_GENET_MIB_RX("rx_control", mib.rx.cf), 563 STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), 564 STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), 565 STAT_GENET_MIB_RX("rx_align", mib.rx.aln), 566 STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), 567 STAT_GENET_MIB_RX("rx_code", mib.rx.cde), 568 STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), 569 STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), 570 STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), 571 STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), 572 STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), 573 
STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), 574 STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), 575 STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), 576 /* UniMAC TSV counters */ 577 STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), 578 STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), 579 STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), 580 STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), 581 STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), 582 STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), 583 STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), 584 STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), 585 STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), 586 STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), 587 STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), 588 STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), 589 STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), 590 STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), 591 STAT_GENET_MIB_TX("tx_control", mib.tx.cf), 592 STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), 593 STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), 594 STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), 595 STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), 596 STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), 597 STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), 598 STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), 599 STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), 600 STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), 601 STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), 602 STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), 603 STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), 604 STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), 605 STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), 606 /* UniMAC RUNT counters */ 607 STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), 608 STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), 609 STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), 610 STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), 611 /* Misc UniMAC counters */ 612 STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, 613 UMAC_RBUF_OVFL_CNT), 614 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), 615 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), 616 }; 617 618 #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) 619 620 static void bcmgenet_get_drvinfo(struct net_device *dev, 621 struct ethtool_drvinfo *info) 622 { 623 strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); 624 strlcpy(info->version, "v2.0", sizeof(info->version)); 625 info->n_stats = BCMGENET_STATS_LEN; 626 } 627 628 static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) 629 { 630 switch (string_set) { 631 case ETH_SS_STATS: 632 return BCMGENET_STATS_LEN; 633 default: 634 return -EOPNOTSUPP; 635 } 636 } 637 638 static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, 639 u8 *data) 640 { 641 int i; 642 643 switch (stringset) { 644 case ETH_SS_STATS: 645 for (i = 0; i < BCMGENET_STATS_LEN; i++) { 646 memcpy(data + i * ETH_GSTRING_LEN, 647 bcmgenet_gstrings_stats[i].stat_string, 648 ETH_GSTRING_LEN); 649 } 650 break; 651 } 652 } 653 654 static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) 655 { 656 int i, j = 0; 657 658 for (i = 0; i < BCMGENET_STATS_LEN; i++) { 659 const struct bcmgenet_stats *s; 660 u8 offset = 0; 661 u32 val = 0; 662 char *p; 663 664 s = &bcmgenet_gstrings_stats[i]; 665 switch (s->type) { 666 case BCMGENET_STAT_NETDEV: 667 
continue; 668 case BCMGENET_STAT_MIB_RX: 669 case BCMGENET_STAT_MIB_TX: 670 case BCMGENET_STAT_RUNT: 671 if (s->type != BCMGENET_STAT_MIB_RX) 672 offset = BCMGENET_STAT_OFFSET; 673 val = bcmgenet_umac_readl(priv, 674 UMAC_MIB_START + j + offset); 675 break; 676 case BCMGENET_STAT_MISC: 677 val = bcmgenet_umac_readl(priv, s->reg_offset); 678 /* clear if overflowed */ 679 if (val == ~0) 680 bcmgenet_umac_writel(priv, 0, s->reg_offset); 681 break; 682 } 683 684 j += s->stat_sizeof; 685 p = (char *)priv + s->stat_offset; 686 *(u32 *)p = val; 687 } 688 } 689 690 static void bcmgenet_get_ethtool_stats(struct net_device *dev, 691 struct ethtool_stats *stats, 692 u64 *data) 693 { 694 struct bcmgenet_priv *priv = netdev_priv(dev); 695 int i; 696 697 if (netif_running(dev)) 698 bcmgenet_update_mib_counters(priv); 699 700 for (i = 0; i < BCMGENET_STATS_LEN; i++) { 701 const struct bcmgenet_stats *s; 702 char *p; 703 704 s = &bcmgenet_gstrings_stats[i]; 705 if (s->type == BCMGENET_STAT_NETDEV) 706 p = (char *)&dev->stats; 707 else 708 p = (char *)priv; 709 p += s->stat_offset; 710 data[i] = *(u32 *)p; 711 } 712 } 713 714 /* standard ethtool support functions. */ 715 static struct ethtool_ops bcmgenet_ethtool_ops = { 716 .get_strings = bcmgenet_get_strings, 717 .get_sset_count = bcmgenet_get_sset_count, 718 .get_ethtool_stats = bcmgenet_get_ethtool_stats, 719 .get_settings = bcmgenet_get_settings, 720 .set_settings = bcmgenet_set_settings, 721 .get_drvinfo = bcmgenet_get_drvinfo, 722 .get_link = ethtool_op_get_link, 723 .get_msglevel = bcmgenet_get_msglevel, 724 .set_msglevel = bcmgenet_set_msglevel, 725 .get_wol = bcmgenet_get_wol, 726 .set_wol = bcmgenet_set_wol, 727 }; 728 729 /* Power down the unimac, based on mode. */ 730 static void bcmgenet_power_down(struct bcmgenet_priv *priv, 731 enum bcmgenet_power_mode mode) 732 { 733 u32 reg; 734 735 switch (mode) { 736 case GENET_POWER_CABLE_SENSE: 737 phy_detach(priv->phydev); 738 break; 739 740 case GENET_POWER_WOL_MAGIC: 741 bcmgenet_wol_power_down_cfg(priv, mode); 742 break; 743 744 case GENET_POWER_PASSIVE: 745 /* Power down LED */ 746 if (priv->hw_params->flags & GENET_HAS_EXT) { 747 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); 748 reg |= (EXT_PWR_DOWN_PHY | 749 EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); 750 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); 751 } 752 break; 753 default: 754 break; 755 } 756 } 757 758 static void bcmgenet_power_up(struct bcmgenet_priv *priv, 759 enum bcmgenet_power_mode mode) 760 { 761 u32 reg; 762 763 if (!(priv->hw_params->flags & GENET_HAS_EXT)) 764 return; 765 766 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); 767 768 switch (mode) { 769 case GENET_POWER_PASSIVE: 770 reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY | 771 EXT_PWR_DOWN_BIAS); 772 /* fallthrough */ 773 case GENET_POWER_CABLE_SENSE: 774 /* enable APD */ 775 reg |= EXT_PWR_DN_EN_LD; 776 break; 777 case GENET_POWER_WOL_MAGIC: 778 bcmgenet_wol_power_up_cfg(priv, mode); 779 return; 780 default: 781 break; 782 } 783 784 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); 785 786 if (mode == GENET_POWER_PASSIVE) 787 bcmgenet_mii_reset(priv->dev); 788 } 789 790 /* ioctl handle special commands that are not present in ethtool. 
*/ 791 static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 792 { 793 struct bcmgenet_priv *priv = netdev_priv(dev); 794 int val = 0; 795 796 if (!netif_running(dev)) 797 return -EINVAL; 798 799 switch (cmd) { 800 case SIOCGMIIPHY: 801 case SIOCGMIIREG: 802 case SIOCSMIIREG: 803 if (!priv->phydev) 804 val = -ENODEV; 805 else 806 val = phy_mii_ioctl(priv->phydev, rq, cmd); 807 break; 808 809 default: 810 val = -EINVAL; 811 break; 812 } 813 814 return val; 815 } 816 817 static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, 818 struct bcmgenet_tx_ring *ring) 819 { 820 struct enet_cb *tx_cb_ptr; 821 822 tx_cb_ptr = ring->cbs; 823 tx_cb_ptr += ring->write_ptr - ring->cb_ptr; 824 tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE; 825 /* Advancing local write pointer */ 826 if (ring->write_ptr == ring->end_ptr) 827 ring->write_ptr = ring->cb_ptr; 828 else 829 ring->write_ptr++; 830 831 return tx_cb_ptr; 832 } 833 834 /* Simple helper to free a control block's resources */ 835 static void bcmgenet_free_cb(struct enet_cb *cb) 836 { 837 dev_kfree_skb_any(cb->skb); 838 cb->skb = NULL; 839 dma_unmap_addr_set(cb, dma_addr, 0); 840 } 841 842 static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv, 843 struct bcmgenet_tx_ring *ring) 844 { 845 bcmgenet_intrl2_0_writel(priv, 846 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, 847 INTRL2_CPU_MASK_SET); 848 } 849 850 static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv, 851 struct bcmgenet_tx_ring *ring) 852 { 853 bcmgenet_intrl2_0_writel(priv, 854 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, 855 INTRL2_CPU_MASK_CLEAR); 856 } 857 858 static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv, 859 struct bcmgenet_tx_ring *ring) 860 { 861 bcmgenet_intrl2_1_writel(priv, (1 << ring->index), 862 INTRL2_CPU_MASK_CLEAR); 863 priv->int1_mask &= ~(1 << ring->index); 864 } 865 866 static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, 867 struct bcmgenet_tx_ring *ring) 868 { 869 bcmgenet_intrl2_1_writel(priv, (1 << ring->index), 870 INTRL2_CPU_MASK_SET); 871 priv->int1_mask |= (1 << ring->index); 872 } 873 874 /* Unlocked version of the reclaim routine */ 875 static void __bcmgenet_tx_reclaim(struct net_device *dev, 876 struct bcmgenet_tx_ring *ring) 877 { 878 struct bcmgenet_priv *priv = netdev_priv(dev); 879 int last_tx_cn, last_c_index, num_tx_bds; 880 struct enet_cb *tx_cb_ptr; 881 struct netdev_queue *txq; 882 unsigned int bds_compl; 883 unsigned int c_index; 884 885 /* Compute how many buffers are transmitted since last xmit call */ 886 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); 887 txq = netdev_get_tx_queue(dev, ring->queue); 888 889 last_c_index = ring->c_index; 890 num_tx_bds = ring->size; 891 892 c_index &= (num_tx_bds - 1); 893 894 if (c_index >= last_c_index) 895 last_tx_cn = c_index - last_c_index; 896 else 897 last_tx_cn = num_tx_bds - last_c_index + c_index; 898 899 netif_dbg(priv, tx_done, dev, 900 "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n", 901 __func__, ring->index, 902 c_index, last_tx_cn, last_c_index); 903 904 /* Reclaim transmitted buffers */ 905 while (last_tx_cn-- > 0) { 906 tx_cb_ptr = ring->cbs + last_c_index; 907 bds_compl = 0; 908 if (tx_cb_ptr->skb) { 909 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; 910 dev->stats.tx_bytes += tx_cb_ptr->skb->len; 911 dma_unmap_single(&dev->dev, 912 dma_unmap_addr(tx_cb_ptr, dma_addr), 913 tx_cb_ptr->skb->len, 914 DMA_TO_DEVICE); 
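			/* Head descriptors carry the SKB itself; the
			 * fragment descriptors handled by the
			 * dma_unmap_page() branch below have no SKB
			 * attached and only track their DMA mapping.
			 */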
915 bcmgenet_free_cb(tx_cb_ptr); 916 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { 917 dev->stats.tx_bytes += 918 dma_unmap_len(tx_cb_ptr, dma_len); 919 dma_unmap_page(&dev->dev, 920 dma_unmap_addr(tx_cb_ptr, dma_addr), 921 dma_unmap_len(tx_cb_ptr, dma_len), 922 DMA_TO_DEVICE); 923 dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); 924 } 925 dev->stats.tx_packets++; 926 ring->free_bds += bds_compl; 927 928 last_c_index++; 929 last_c_index &= (num_tx_bds - 1); 930 } 931 932 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) 933 ring->int_disable(priv, ring); 934 935 if (netif_tx_queue_stopped(txq)) 936 netif_tx_wake_queue(txq); 937 938 ring->c_index = c_index; 939 } 940 941 static void bcmgenet_tx_reclaim(struct net_device *dev, 942 struct bcmgenet_tx_ring *ring) 943 { 944 unsigned long flags; 945 946 spin_lock_irqsave(&ring->lock, flags); 947 __bcmgenet_tx_reclaim(dev, ring); 948 spin_unlock_irqrestore(&ring->lock, flags); 949 } 950 951 static void bcmgenet_tx_reclaim_all(struct net_device *dev) 952 { 953 struct bcmgenet_priv *priv = netdev_priv(dev); 954 int i; 955 956 if (netif_is_multiqueue(dev)) { 957 for (i = 0; i < priv->hw_params->tx_queues; i++) 958 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); 959 } 960 961 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); 962 } 963 964 /* Transmits a single SKB (either head of a fragment or a single SKB) 965 * caller must hold priv->lock 966 */ 967 static int bcmgenet_xmit_single(struct net_device *dev, 968 struct sk_buff *skb, 969 u16 dma_desc_flags, 970 struct bcmgenet_tx_ring *ring) 971 { 972 struct bcmgenet_priv *priv = netdev_priv(dev); 973 struct device *kdev = &priv->pdev->dev; 974 struct enet_cb *tx_cb_ptr; 975 unsigned int skb_len; 976 dma_addr_t mapping; 977 u32 length_status; 978 int ret; 979 980 tx_cb_ptr = bcmgenet_get_txcb(priv, ring); 981 982 if (unlikely(!tx_cb_ptr)) 983 BUG(); 984 985 tx_cb_ptr->skb = skb; 986 987 skb_len = skb_headlen(skb) < ETH_ZLEN ? 
ETH_ZLEN : skb_headlen(skb); 988 989 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); 990 ret = dma_mapping_error(kdev, mapping); 991 if (ret) { 992 netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); 993 dev_kfree_skb(skb); 994 return ret; 995 } 996 997 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); 998 dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len); 999 length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | 1000 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | 1001 DMA_TX_APPEND_CRC; 1002 1003 if (skb->ip_summed == CHECKSUM_PARTIAL) 1004 length_status |= DMA_TX_DO_CSUM; 1005 1006 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status); 1007 1008 /* Decrement total BD count and advance our write pointer */ 1009 ring->free_bds -= 1; 1010 ring->prod_index += 1; 1011 ring->prod_index &= DMA_P_INDEX_MASK; 1012 1013 return 0; 1014 } 1015 1016 /* Transmit a SKB fragment */ 1017 static int bcmgenet_xmit_frag(struct net_device *dev, 1018 skb_frag_t *frag, 1019 u16 dma_desc_flags, 1020 struct bcmgenet_tx_ring *ring) 1021 { 1022 struct bcmgenet_priv *priv = netdev_priv(dev); 1023 struct device *kdev = &priv->pdev->dev; 1024 struct enet_cb *tx_cb_ptr; 1025 dma_addr_t mapping; 1026 int ret; 1027 1028 tx_cb_ptr = bcmgenet_get_txcb(priv, ring); 1029 1030 if (unlikely(!tx_cb_ptr)) 1031 BUG(); 1032 tx_cb_ptr->skb = NULL; 1033 1034 mapping = skb_frag_dma_map(kdev, frag, 0, 1035 skb_frag_size(frag), DMA_TO_DEVICE); 1036 ret = dma_mapping_error(kdev, mapping); 1037 if (ret) { 1038 netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", 1039 __func__); 1040 return ret; 1041 } 1042 1043 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); 1044 dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); 1045 1046 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, 1047 (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | 1048 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); 1049 1050 1051 ring->free_bds -= 1; 1052 ring->prod_index += 1; 1053 ring->prod_index &= DMA_P_INDEX_MASK; 1054 1055 return 0; 1056 } 1057 1058 /* Reallocate the SKB to put enough headroom in front of it and insert 1059 * the transmit checksum offsets in the descriptors 1060 */ 1061 static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, 1062 struct sk_buff *skb) 1063 { 1064 struct status_64 *status = NULL; 1065 struct sk_buff *new_skb; 1066 u16 offset; 1067 u8 ip_proto; 1068 u16 ip_ver; 1069 u32 tx_csum_info; 1070 1071 if (unlikely(skb_headroom(skb) < sizeof(*status))) { 1072 /* If 64 byte status block enabled, must make sure skb has 1073 * enough headroom for us to insert 64B status block. 1074 */ 1075 new_skb = skb_realloc_headroom(skb, sizeof(*status)); 1076 dev_kfree_skb(skb); 1077 if (!new_skb) { 1078 dev->stats.tx_errors++; 1079 dev->stats.tx_dropped++; 1080 return NULL; 1081 } 1082 skb = new_skb; 1083 } 1084 1085 skb_push(skb, sizeof(*status)); 1086 status = (struct status_64 *)skb->data; 1087 1088 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1089 ip_ver = htons(skb->protocol); 1090 switch (ip_ver) { 1091 case ETH_P_IP: 1092 ip_proto = ip_hdr(skb)->protocol; 1093 break; 1094 case ETH_P_IPV6: 1095 ip_proto = ipv6_hdr(skb)->nexthdr; 1096 break; 1097 default: 1098 return skb; 1099 } 1100 1101 offset = skb_checksum_start_offset(skb) - sizeof(*status); 1102 tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | 1103 (offset + skb->csum_offset); 1104 1105 /* Set the length valid bit for TCP and UDP and just set 1106 * the special UDP flag for IPv4, else just set to 0. 
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}

static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packets are transmitted
	 *                    through ring 16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragments */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ?
DMA_EOP : 0, 1189 ring); 1190 if (ret) { 1191 ret = NETDEV_TX_OK; 1192 goto out; 1193 } 1194 } 1195 1196 skb_tx_timestamp(skb); 1197 1198 /* we kept a software copy of how much we should advance the TDMA 1199 * producer index, now write it down to the hardware 1200 */ 1201 bcmgenet_tdma_ring_writel(priv, ring->index, 1202 ring->prod_index, TDMA_PROD_INDEX); 1203 1204 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { 1205 netif_tx_stop_queue(txq); 1206 ring->int_enable(priv, ring); 1207 } 1208 1209 out: 1210 spin_unlock_irqrestore(&ring->lock, flags); 1211 1212 return ret; 1213 } 1214 1215 1216 static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb) 1217 { 1218 struct device *kdev = &priv->pdev->dev; 1219 struct sk_buff *skb; 1220 dma_addr_t mapping; 1221 int ret; 1222 1223 skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); 1224 if (!skb) 1225 return -ENOMEM; 1226 1227 /* a caller did not release this control block */ 1228 WARN_ON(cb->skb != NULL); 1229 cb->skb = skb; 1230 mapping = dma_map_single(kdev, skb->data, 1231 priv->rx_buf_len, DMA_FROM_DEVICE); 1232 ret = dma_mapping_error(kdev, mapping); 1233 if (ret) { 1234 bcmgenet_free_cb(cb); 1235 netif_err(priv, rx_err, priv->dev, 1236 "%s DMA map failed\n", __func__); 1237 return ret; 1238 } 1239 1240 dma_unmap_addr_set(cb, dma_addr, mapping); 1241 /* assign packet, prepare descriptor, and advance pointer */ 1242 1243 dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); 1244 1245 /* turn on the newly assigned BD for DMA to use */ 1246 priv->rx_bd_assign_index++; 1247 priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); 1248 1249 priv->rx_bd_assign_ptr = priv->rx_bds + 1250 (priv->rx_bd_assign_index * DMA_DESC_SIZE); 1251 1252 return 0; 1253 } 1254 1255 /* bcmgenet_desc_rx - descriptor based rx process. 1256 * this could be called from bottom half, or from NAPI polling method. 1257 */ 1258 static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, 1259 unsigned int budget) 1260 { 1261 struct net_device *dev = priv->dev; 1262 struct enet_cb *cb; 1263 struct sk_buff *skb; 1264 u32 dma_length_status; 1265 unsigned long dma_flag; 1266 int len, err; 1267 unsigned int rxpktprocessed = 0, rxpkttoprocess; 1268 unsigned int p_index; 1269 unsigned int chksum_ok = 0; 1270 1271 p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX); 1272 p_index &= DMA_P_INDEX_MASK; 1273 1274 if (p_index < priv->rx_c_index) 1275 rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - 1276 priv->rx_c_index + p_index; 1277 else 1278 rxpkttoprocess = p_index - priv->rx_c_index; 1279 1280 netif_dbg(priv, rx_status, dev, 1281 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); 1282 1283 while ((rxpktprocessed < rxpkttoprocess) && 1284 (rxpktprocessed < budget)) { 1285 cb = &priv->rx_cbs[priv->rx_read_ptr]; 1286 skb = cb->skb; 1287 1288 /* We do not have a backing SKB, so we do not have a 1289 * corresponding DMA mapping for this incoming packet since 1290 * bcmgenet_rx_refill always either has both skb and mapping or 1291 * none. 
1292 */ 1293 if (unlikely(!skb)) { 1294 dev->stats.rx_dropped++; 1295 dev->stats.rx_errors++; 1296 goto refill; 1297 } 1298 1299 /* Unmap the packet contents such that we can use the 1300 * RSV from the 64 bytes descriptor when enabled and save 1301 * a 32-bits register read 1302 */ 1303 dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), 1304 priv->rx_buf_len, DMA_FROM_DEVICE); 1305 1306 if (!priv->desc_64b_en) { 1307 dma_length_status = 1308 dmadesc_get_length_status(priv, 1309 priv->rx_bds + 1310 (priv->rx_read_ptr * 1311 DMA_DESC_SIZE)); 1312 } else { 1313 struct status_64 *status; 1314 1315 status = (struct status_64 *)skb->data; 1316 dma_length_status = status->length_status; 1317 } 1318 1319 /* DMA flags and length are still valid no matter how 1320 * we got the Receive Status Vector (64B RSB or register) 1321 */ 1322 dma_flag = dma_length_status & 0xffff; 1323 len = dma_length_status >> DMA_BUFLENGTH_SHIFT; 1324 1325 netif_dbg(priv, rx_status, dev, 1326 "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", 1327 __func__, p_index, priv->rx_c_index, 1328 priv->rx_read_ptr, dma_length_status); 1329 1330 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { 1331 netif_err(priv, rx_status, dev, 1332 "dropping fragmented packet!\n"); 1333 dev->stats.rx_dropped++; 1334 dev->stats.rx_errors++; 1335 dev_kfree_skb_any(cb->skb); 1336 cb->skb = NULL; 1337 goto refill; 1338 } 1339 /* report errors */ 1340 if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | 1341 DMA_RX_OV | 1342 DMA_RX_NO | 1343 DMA_RX_LG | 1344 DMA_RX_RXER))) { 1345 netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", 1346 (unsigned int)dma_flag); 1347 if (dma_flag & DMA_RX_CRC_ERROR) 1348 dev->stats.rx_crc_errors++; 1349 if (dma_flag & DMA_RX_OV) 1350 dev->stats.rx_over_errors++; 1351 if (dma_flag & DMA_RX_NO) 1352 dev->stats.rx_frame_errors++; 1353 if (dma_flag & DMA_RX_LG) 1354 dev->stats.rx_length_errors++; 1355 dev->stats.rx_dropped++; 1356 dev->stats.rx_errors++; 1357 1358 /* discard the packet and advance consumer index.*/ 1359 dev_kfree_skb_any(cb->skb); 1360 cb->skb = NULL; 1361 goto refill; 1362 } /* error packet */ 1363 1364 chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && 1365 priv->desc_rxchk_en; 1366 1367 skb_put(skb, len); 1368 if (priv->desc_64b_en) { 1369 skb_pull(skb, 64); 1370 len -= 64; 1371 } 1372 1373 if (likely(chksum_ok)) 1374 skb->ip_summed = CHECKSUM_UNNECESSARY; 1375 1376 /* remove hardware 2bytes added for IP alignment */ 1377 skb_pull(skb, 2); 1378 len -= 2; 1379 1380 if (priv->crc_fwd_en) { 1381 skb_trim(skb, len - ETH_FCS_LEN); 1382 len -= ETH_FCS_LEN; 1383 } 1384 1385 /*Finish setting up the received SKB and send it to the kernel*/ 1386 skb->protocol = eth_type_trans(skb, priv->dev); 1387 dev->stats.rx_packets++; 1388 dev->stats.rx_bytes += len; 1389 if (dma_flag & DMA_RX_MULT) 1390 dev->stats.multicast++; 1391 1392 /* Notify kernel */ 1393 napi_gro_receive(&priv->napi, skb); 1394 cb->skb = NULL; 1395 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); 1396 1397 /* refill RX path on the current control block */ 1398 refill: 1399 err = bcmgenet_rx_refill(priv, cb); 1400 if (err) 1401 netif_err(priv, rx_err, dev, "Rx refill failed\n"); 1402 1403 rxpktprocessed++; 1404 priv->rx_read_ptr++; 1405 priv->rx_read_ptr &= (priv->num_rx_bds - 1); 1406 } 1407 1408 return rxpktprocessed; 1409 } 1410 1411 /* Assign skb to RX DMA descriptor. 
 */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int ret = 0;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);

	/* loop here for each buffer needing assignment */
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcmgenet_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}

static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}

static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	/* The loop above returns on success, so reaching this point means
	 * the software reset did not complete in time.
	 */
	dev_err(kdev, "timeout waiting for MAC to come out of reset\n");
	return -ETIMEDOUT;
}

static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts */
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}

static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg, cpu_mask_clear;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counters */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if
(!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) 1542 bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); 1543 1544 bcmgenet_intr_disable(priv); 1545 1546 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; 1547 1548 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); 1549 1550 /* Monitor cable plug/unplugged event for internal PHY */ 1551 if (phy_is_internal(priv->phydev)) { 1552 cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); 1553 } else if (priv->ext_phy) { 1554 cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); 1555 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { 1556 reg = bcmgenet_bp_mc_get(priv); 1557 reg |= BIT(priv->hw_params->bp_in_en_shift); 1558 1559 /* bp_mask: back pressure mask */ 1560 if (netif_is_multiqueue(priv->dev)) 1561 reg |= priv->hw_params->bp_in_mask; 1562 else 1563 reg &= ~priv->hw_params->bp_in_mask; 1564 bcmgenet_bp_mc_set(priv, reg); 1565 } 1566 1567 /* Enable MDIO interrupts on GENET v3+ */ 1568 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) 1569 cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR; 1570 1571 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); 1572 1573 /* Enable rx/tx engine.*/ 1574 dev_dbg(kdev, "done init umac\n"); 1575 1576 return 0; 1577 } 1578 1579 /* Initialize all house-keeping variables for a TX ring, along 1580 * with corresponding hardware registers 1581 */ 1582 static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, 1583 unsigned int index, unsigned int size, 1584 unsigned int write_ptr, unsigned int end_ptr) 1585 { 1586 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; 1587 u32 words_per_bd = WORDS_PER_BD(priv); 1588 u32 flow_period_val = 0; 1589 unsigned int first_bd; 1590 1591 spin_lock_init(&ring->lock); 1592 ring->index = index; 1593 if (index == DESC_INDEX) { 1594 ring->queue = 0; 1595 ring->int_enable = bcmgenet_tx_ring16_int_enable; 1596 ring->int_disable = bcmgenet_tx_ring16_int_disable; 1597 } else { 1598 ring->queue = index + 1; 1599 ring->int_enable = bcmgenet_tx_ring_int_enable; 1600 ring->int_disable = bcmgenet_tx_ring_int_disable; 1601 } 1602 ring->cbs = priv->tx_cbs + write_ptr; 1603 ring->size = size; 1604 ring->c_index = 0; 1605 ring->free_bds = size; 1606 ring->write_ptr = write_ptr; 1607 ring->cb_ptr = write_ptr; 1608 ring->end_ptr = end_ptr - 1; 1609 ring->prod_index = 0; 1610 1611 /* Set flow period for ring != 16 */ 1612 if (index != DESC_INDEX) 1613 flow_period_val = ENET_MAX_MTU_SIZE << 16; 1614 1615 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); 1616 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); 1617 bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); 1618 /* Disable rate control for now */ 1619 bcmgenet_tdma_ring_writel(priv, index, flow_period_val, 1620 TDMA_FLOW_PERIOD); 1621 /* Unclassified traffic goes to ring 16 */ 1622 bcmgenet_tdma_ring_writel(priv, index, 1623 ((size << DMA_RING_SIZE_SHIFT) | 1624 RX_BUF_LENGTH), DMA_RING_BUF_SIZE); 1625 1626 first_bd = write_ptr; 1627 1628 /* Set start and end address, read and write pointers */ 1629 bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, 1630 DMA_START_ADDR); 1631 bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, 1632 TDMA_READ_PTR); 1633 bcmgenet_tdma_ring_writel(priv, index, first_bd, 1634 TDMA_WRITE_PTR); 1635 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, 1636 DMA_END_ADDR); 1637 } 1638 1639 /* Initialize a RDMA ring */ 1640 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, 1641 
				 unsigned int index, unsigned int size)
{
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	ret = bcmgenet_alloc_rx_buffers(priv);
	if (ret) {
		kfree(priv->rx_cbs);
		return ret;
	}

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
				  words_per_bd * size - 1, DMA_END_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				  DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);

	return ret;
}

/* init multi xmit queues, only available for GENET2+
 * the queue is partitioned as follows:
 *
 * queues 0 - 3 are priority based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
 * descriptors: 256 - (number of tx queues * bds per queue) = 128
 * descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - tx_ring_cbs[0] points to tx_cbs[0..31]
 * - tx_ring_cbs[1] points to tx_cbs[32..63]
 * - tx_ring_cbs[2] points to tx_cbs[64..95]
 * - tx_ring_cbs[3] points to tx_cbs[96..127]
 * - tx_cbs[128...255] are for the default queue 16
 */
static void bcmgenet_init_multiq(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i, dma_enable;
	u32 reg, dma_ctrl, ring_cfg = 0;
	u32 dma_priority[3] = {0, 0, 0};

	if (!netif_is_multiqueue(dev)) {
		netdev_warn(dev, "called with non multi queue aware HW\n");
		return;
	}

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		/* ring i uses bds_cnt control blocks starting at
		 * tx_cbs[i * bds_cnt]; the remainder of the pool is left
		 * to the default tx queue (ring 16)
		 */
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
				      i * priv->hw_params->bds_cnt,
				      (i + 1) * priv->hw_params->bds_cnt);

		/* Configure ring as descriptor ring and setup priority */
		ring_cfg |= 1 << i;
		dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);

		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Set ring 16 priority and program the hardware registers */
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv,
dma_priority[1], DMA_PRIORITY_1); 1740 bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); 1741 1742 /* Enable rings */ 1743 reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); 1744 reg |= ring_cfg; 1745 bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG); 1746 1747 /* Configure ring as descriptor ring and re-enable DMA if enabled */ 1748 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); 1749 reg |= dma_ctrl; 1750 if (dma_enable) 1751 reg |= DMA_EN; 1752 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 1753 } 1754 1755 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) 1756 { 1757 int ret = 0; 1758 int timeout = 0; 1759 u32 reg; 1760 1761 /* Disable TDMA to stop add more frames in TX DMA */ 1762 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); 1763 reg &= ~DMA_EN; 1764 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 1765 1766 /* Check TDMA status register to confirm TDMA is disabled */ 1767 while (timeout++ < DMA_TIMEOUT_VAL) { 1768 reg = bcmgenet_tdma_readl(priv, DMA_STATUS); 1769 if (reg & DMA_DISABLED) 1770 break; 1771 1772 udelay(1); 1773 } 1774 1775 if (timeout == DMA_TIMEOUT_VAL) { 1776 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); 1777 ret = -ETIMEDOUT; 1778 } 1779 1780 /* Wait 10ms for packet drain in both tx and rx dma */ 1781 usleep_range(10000, 20000); 1782 1783 /* Disable RDMA */ 1784 reg = bcmgenet_rdma_readl(priv, DMA_CTRL); 1785 reg &= ~DMA_EN; 1786 bcmgenet_rdma_writel(priv, reg, DMA_CTRL); 1787 1788 timeout = 0; 1789 /* Check RDMA status register to confirm RDMA is disabled */ 1790 while (timeout++ < DMA_TIMEOUT_VAL) { 1791 reg = bcmgenet_rdma_readl(priv, DMA_STATUS); 1792 if (reg & DMA_DISABLED) 1793 break; 1794 1795 udelay(1); 1796 } 1797 1798 if (timeout == DMA_TIMEOUT_VAL) { 1799 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); 1800 ret = -ETIMEDOUT; 1801 } 1802 1803 return ret; 1804 } 1805 1806 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 1807 { 1808 int i; 1809 1810 /* disable DMA */ 1811 bcmgenet_dma_teardown(priv); 1812 1813 for (i = 0; i < priv->num_tx_bds; i++) { 1814 if (priv->tx_cbs[i].skb != NULL) { 1815 dev_kfree_skb(priv->tx_cbs[i].skb); 1816 priv->tx_cbs[i].skb = NULL; 1817 } 1818 } 1819 1820 bcmgenet_free_rx_buffers(priv); 1821 kfree(priv->rx_cbs); 1822 kfree(priv->tx_cbs); 1823 } 1824 1825 /* init_edma: Initialize DMA control register */ 1826 static int bcmgenet_init_dma(struct bcmgenet_priv *priv) 1827 { 1828 int ret; 1829 1830 netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n"); 1831 1832 /* by default, enable ring 16 (descriptor based) */ 1833 ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC); 1834 if (ret) { 1835 netdev_err(priv->dev, "failed to initialize RX ring\n"); 1836 return ret; 1837 } 1838 1839 /* init rDma */ 1840 bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); 1841 1842 /* Init tDma */ 1843 bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); 1844 1845 /* Initialize common TX ring structures */ 1846 priv->tx_bds = priv->base + priv->hw_params->tdma_offset; 1847 priv->num_tx_bds = TOTAL_DESC; 1848 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), 1849 GFP_KERNEL); 1850 if (!priv->tx_cbs) { 1851 bcmgenet_fini_dma(priv); 1852 return -ENOMEM; 1853 } 1854 1855 /* initialize multi xmit queue */ 1856 bcmgenet_init_multiq(priv->dev); 1857 1858 /* initialize special ring 16 */ 1859 bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT, 1860 priv->hw_params->tx_queues * 1861 priv->hw_params->bds_cnt, 1862 TOTAL_DESC); 1863 1864 return 0; 1865 } 1866 1867 
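/* Worked example of the TX descriptor split configured above, assuming the
 * GENETv2+ defaults quoted in the comment before bcmgenet_init_multiq()
 * (TOTAL_DESC = 256, tx_queues = 4, bds_cnt = 32); other hw_params values
 * scale the same way:
 *
 *   priority rings 0..3: 4 * 32 = 128 BDs, ring i starting at
 *                        tx_cbs[i * 32]
 *   default ring 16:     GENET_DEFAULT_BD_CNT = 256 - 4 * 32 = 128 BDs,
 *                        starting at tx_cbs[128]
 *
 * bcmgenet_get_txcb() keeps each ring inside its own window by wrapping
 * write_ptr back to cb_ptr once end_ptr is reached, so the windows never
 * overlap.
 */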
/* NAPI polling method */
static int bcmgenet_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_priv *priv = container_of(napi,
			struct bcmgenet_priv, napi);
	unsigned int work_done;

	/* tx reclaim */
	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);

	work_done = bcmgenet_desc_rx(priv, budget);

	/* Advance our consumer index */
	priv->rx_c_index += work_done;
	priv->rx_c_index &= DMA_C_INDEX_MASK;
	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  priv->rx_c_index, RDMA_CONS_INDEX);
	if (work_done < budget) {
		napi_complete(napi);
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
					 INTRL2_CPU_MASK_CLEAR);
	}

	return work_done;
}

/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
		netif_dbg(priv, wol, priv->dev,
			  "magic packet detected, waking up\n");
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
	}

	/* Link UP/DOWN event */
	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
		phy_mac_interrupt(priv->phydev,
				  priv->irq0_stat & UMAC_IRQ_LINK_UP);
		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
	}
}

/* bcmgenet_isr1: interrupt handler for ring buffer. */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	unsigned int index;

	/* Save irq status for bottom-half processing. */
	priv->irq1_stat =
		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~priv->int1_mask;
	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
	/* Check the MBDONE interrupts.
	 * packet is done, reclaim descriptors
	 */
	if (priv->irq1_stat & 0x0000ffff) {
		index = 0;
		for (index = 0; index < 16; index++) {
			if (priv->irq1_stat & (1 << index))
				bcmgenet_tx_reclaim(priv->dev,
						    &priv->tx_rings[index]);
		}
	}
	return IRQ_HANDLED;
}

/* bcmgenet_isr0: Handle various interrupts. */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	/* Save irq status for bottom-half processing. */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
		/* We use NAPI (software interrupt throttling) since
		 * Rx Descriptor throttling is not used.
		 * Disable the interrupt here; it is re-enabled in the
		 * poll method.
		 */
		if (likely(napi_schedule_prep(&priv->napi))) {
			bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
						 INTRL2_CPU_MASK_SET);
			__napi_schedule(&priv->napi);
		}
	}
	if (priv->irq0_stat &
	    (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
		/* Tx reclaim */
		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
	}
	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
			       UMAC_IRQ_PHY_DET_F |
			       UMAC_IRQ_LINK_UP |
			       UMAC_IRQ_LINK_DOWN |
			       UMAC_IRQ_HFB_SM |
			       UMAC_IRQ_HFB_MM |
			       UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}

static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			     (addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}

static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}

static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	napi_enable(&priv->napi);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	netif_tx_start_all_queues(dev);

	phy_start(priv->phydev);
}

static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto err_clk_disable;

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_fini_dma;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	/* Re-configure the port multiplexer towards the PHY device */
	bcmgenet_mii_config(priv->dev, false);

	phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
			   priv->phy_interface);

	bcmgenet_netif_start(dev);

	return 0;

err_irq0:
	/* Pass the same dev_id cookie that was used with request_irq() */
	free_irq(priv->irq0, priv);
err_fini_dma:
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
	return ret;
}

static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	bcmgenet_intr_disable(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
}

static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(priv->phydev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit.
	 * TX DMA must be disabled before this.
	 */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}

static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	dev->trans_start = jiffies;

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

#define MAX_MC_COUNT	16

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i,
					 int *mc)
{
	u32 reg;

	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
	reg |= (1 << (MAX_MC_COUNT - *mc));
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
	*i += 2;
	(*mc)++;
}

static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMAC doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* My own address */
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}

/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
};

/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.rx_queues = 0,
		.bds_cnt = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
};

/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;

	if (GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V3;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V2;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V1;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT "\n",
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 */
	priv->gphy_rev = (reg & 0xffff) << 8;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bit PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, RXq: %1d, BDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAG msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->rx_queues, params->bds_cnt,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}

static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ },
};

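/* bcmgenet_probe() below expects the platform/DT description to supply a
 * single register window, the two intrl2 interrupts plus an optional
 * wake-up interrupt, an "enet" clock (optionally an "enet-wol" clock) and
 * a MAC address.  A purely illustrative sketch of such a node follows;
 * the unit address, interrupt specifiers, clock phandles and MAC address
 * are placeholders, not values taken from a binding document:
 *
 *	ethernet@f0b60000 {
 *		compatible = "brcm,genet-v4";
 *		reg = <0xf0b60000 0x11800>;
 *		interrupts = <0x0 0x16 0x0>, <0x0 0x17 0x0>, <0x0 0x18 0x0>;
 *		clocks = <&genet_clk>, <&genet_wol_clk>;
 *		clock-names = "enet", "enet-wol";
 *		local-mac-address = [ 00 10 18 00 00 00 ];
 *	};
 */
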
static int bcmgenet_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	int err = -EIO;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	of_id = of_match_node(bcmgenet_match, dn);
	if (!of_id) {
		/* Free the net device instead of leaking it on a bad match */
		err = -EINVAL;
		goto err;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (!priv->irq0 || !priv->irq1) {
		dev_err(&pdev->dev, "can't find IRQs\n");
		err = -EINVAL;
		goto err;
	}

	macaddr = of_get_mac_address(dn);
	if (!macaddr) {
		dev_err(&pdev->dev, "can't find MAC address\n");
		err = -EINVAL;
		goto err;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	ether_addr_copy(dev->dev_addr, macaddr);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set hardware features */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
			       dev->name, priv);
	if (!err)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;
	priv->version = (enum bcmgenet_version)of_id->data;

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk))
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");

	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

	/* MII wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol))
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");

	err = reset_umac(priv);
	if (err)
		goto err_clk_disable;

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
	 * queues, just the ring 16 descriptor-based TX queue).
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err)
		goto err;

	return err;

err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}

static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}

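/* Power management, in outline: bcmgenet_suspend() stops the interface
 * and tears down the DMA state; when Wake-on-LAN has been requested
 * through ethtool (priv->wolopts) and the device may wake the system, it
 * enters GENET_POWER_WOL_MAGIC and keeps the slow "enet-wol" clock
 * running while the main "enet" clock is gated.  bcmgenet_wol_isr() only
 * reports the wake-up event; bcmgenet_resume() switches back to the main
 * clock, re-initializes the UniMAC, PHY and DMA, and restarts the
 * interface.
 */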
#ifdef CONFIG_PM_SLEEP
static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	if (!netif_running(dev))
		return 0;

	bcmgenet_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this. */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return 0;
}

static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto out_clk_disable;

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
	bcmgenet_mii_config(priv->dev, false);

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	netif_device_attach(dev);

	phy_resume(priv->phydev);

	bcmgenet_netif_start(dev);

	return 0;

out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.owner	= THIS_MODULE,
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");
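
/* Illustrative usage once the driver has bound (the interface name is an
 * assumption, it depends on the system):
 *
 *	ip link set dev eth0 up
 *	ethtool -s eth0 wol g	# arm magic-packet Wake-on-LAN
 *
 * The ethtool WOL setting is what populates the wolopts value consulted
 * by the suspend/resume path above.
 */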