1490cb412SJustin Chen // SPDX-License-Identifier: GPL-2.0 2490cb412SJustin Chen #define pr_fmt(fmt) "bcmasp_intf: " fmt 3490cb412SJustin Chen 4490cb412SJustin Chen #include <asm/byteorder.h> 5490cb412SJustin Chen #include <linux/brcmphy.h> 6490cb412SJustin Chen #include <linux/clk.h> 7490cb412SJustin Chen #include <linux/delay.h> 8490cb412SJustin Chen #include <linux/etherdevice.h> 9490cb412SJustin Chen #include <linux/netdevice.h> 10490cb412SJustin Chen #include <linux/of_net.h> 11490cb412SJustin Chen #include <linux/of_mdio.h> 12490cb412SJustin Chen #include <linux/phy.h> 13490cb412SJustin Chen #include <linux/phy_fixed.h> 14490cb412SJustin Chen #include <linux/ptp_classify.h> 15490cb412SJustin Chen #include <linux/platform_device.h> 16490cb412SJustin Chen #include <net/ip.h> 17490cb412SJustin Chen #include <net/ipv6.h> 18490cb412SJustin Chen 19490cb412SJustin Chen #include "bcmasp.h" 20490cb412SJustin Chen #include "bcmasp_intf_defs.h" 21490cb412SJustin Chen 22490cb412SJustin Chen static int incr_ring(int index, int ring_count) 23490cb412SJustin Chen { 24490cb412SJustin Chen index++; 25490cb412SJustin Chen if (index == ring_count) 26490cb412SJustin Chen return 0; 27490cb412SJustin Chen 28490cb412SJustin Chen return index; 29490cb412SJustin Chen } 30490cb412SJustin Chen 31490cb412SJustin Chen /* Points to last byte of descriptor */ 32490cb412SJustin Chen static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg, 33490cb412SJustin Chen int ring_count) 34490cb412SJustin Chen { 35490cb412SJustin Chen dma_addr_t end = beg + (ring_count * DESC_SIZE); 36490cb412SJustin Chen 37490cb412SJustin Chen addr += DESC_SIZE; 38490cb412SJustin Chen if (addr > end) 39490cb412SJustin Chen return beg + DESC_SIZE - 1; 40490cb412SJustin Chen 41490cb412SJustin Chen return addr; 42490cb412SJustin Chen } 43490cb412SJustin Chen 44490cb412SJustin Chen /* Points to first byte of descriptor */ 45490cb412SJustin Chen static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg, 
46490cb412SJustin Chen int ring_count) 47490cb412SJustin Chen { 48490cb412SJustin Chen dma_addr_t end = beg + (ring_count * DESC_SIZE); 49490cb412SJustin Chen 50490cb412SJustin Chen addr += DESC_SIZE; 51490cb412SJustin Chen if (addr >= end) 52490cb412SJustin Chen return beg; 53490cb412SJustin Chen 54490cb412SJustin Chen return addr; 55490cb412SJustin Chen } 56490cb412SJustin Chen 57490cb412SJustin Chen static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en) 58490cb412SJustin Chen { 59490cb412SJustin Chen if (en) { 60490cb412SJustin Chen tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE); 61490cb412SJustin Chen tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN | 62490cb412SJustin Chen TX_EPKT_C_CFG_MISC_PT | 63490cb412SJustin Chen (intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)), 64490cb412SJustin Chen TX_EPKT_C_CFG_MISC); 65490cb412SJustin Chen } else { 66490cb412SJustin Chen tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE); 67490cb412SJustin Chen tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC); 68490cb412SJustin Chen } 69490cb412SJustin Chen } 70490cb412SJustin Chen 71490cb412SJustin Chen static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en) 72490cb412SJustin Chen { 73490cb412SJustin Chen if (en) 74490cb412SJustin Chen rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN, 75490cb412SJustin Chen RX_EDPKT_CFG_ENABLE); 76490cb412SJustin Chen else 77490cb412SJustin Chen rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE); 78490cb412SJustin Chen } 79490cb412SJustin Chen 80490cb412SJustin Chen static void bcmasp_set_rx_mode(struct net_device *dev) 81490cb412SJustin Chen { 82490cb412SJustin Chen unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 83490cb412SJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 84490cb412SJustin Chen struct netdev_hw_addr *ha; 85490cb412SJustin Chen int ret; 86490cb412SJustin Chen 87490cb412SJustin Chen spin_lock_bh(&intf->parent->mda_lock); 88490cb412SJustin Chen 89490cb412SJustin Chen bcmasp_disable_all_filters(intf); 
90490cb412SJustin Chen 91490cb412SJustin Chen if (dev->flags & IFF_PROMISC) 92490cb412SJustin Chen goto set_promisc; 93490cb412SJustin Chen 94490cb412SJustin Chen bcmasp_set_promisc(intf, 0); 95490cb412SJustin Chen 96490cb412SJustin Chen bcmasp_set_broad(intf, 1); 97490cb412SJustin Chen 98490cb412SJustin Chen bcmasp_set_oaddr(intf, dev->dev_addr, 1); 99490cb412SJustin Chen 100490cb412SJustin Chen if (dev->flags & IFF_ALLMULTI) { 101490cb412SJustin Chen bcmasp_set_allmulti(intf, 1); 102490cb412SJustin Chen } else { 103490cb412SJustin Chen bcmasp_set_allmulti(intf, 0); 104490cb412SJustin Chen 105490cb412SJustin Chen netdev_for_each_mc_addr(ha, dev) { 106490cb412SJustin Chen ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask); 1077c10691eSJustin Chen if (ret) { 1087c10691eSJustin Chen intf->mib.mc_filters_full_cnt++; 109490cb412SJustin Chen goto set_promisc; 110490cb412SJustin Chen } 111490cb412SJustin Chen } 1127c10691eSJustin Chen } 113490cb412SJustin Chen 114490cb412SJustin Chen netdev_for_each_uc_addr(ha, dev) { 115490cb412SJustin Chen ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask); 1167c10691eSJustin Chen if (ret) { 1177c10691eSJustin Chen intf->mib.uc_filters_full_cnt++; 118490cb412SJustin Chen goto set_promisc; 119490cb412SJustin Chen } 1207c10691eSJustin Chen } 121490cb412SJustin Chen 122490cb412SJustin Chen spin_unlock_bh(&intf->parent->mda_lock); 123490cb412SJustin Chen return; 124490cb412SJustin Chen 125490cb412SJustin Chen set_promisc: 126490cb412SJustin Chen bcmasp_set_promisc(intf, 1); 1277c10691eSJustin Chen intf->mib.promisc_filters_cnt++; 128490cb412SJustin Chen 129490cb412SJustin Chen /* disable all filters used by this port */ 130490cb412SJustin Chen bcmasp_disable_all_filters(intf); 131490cb412SJustin Chen 132490cb412SJustin Chen spin_unlock_bh(&intf->parent->mda_lock); 133490cb412SJustin Chen } 134490cb412SJustin Chen 135490cb412SJustin Chen static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index) 136490cb412SJustin Chen { 
137490cb412SJustin Chen struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index]; 138490cb412SJustin Chen 139490cb412SJustin Chen txcb->skb = NULL; 140490cb412SJustin Chen dma_unmap_addr_set(txcb, dma_addr, 0); 141490cb412SJustin Chen dma_unmap_len_set(txcb, dma_len, 0); 142490cb412SJustin Chen txcb->last = false; 143490cb412SJustin Chen } 144490cb412SJustin Chen 145490cb412SJustin Chen static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt) 146490cb412SJustin Chen { 147490cb412SJustin Chen int next_index, i; 148490cb412SJustin Chen 149490cb412SJustin Chen /* Check if we have enough room for cnt descriptors */ 150490cb412SJustin Chen for (i = 0; i < cnt; i++) { 151490cb412SJustin Chen next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT); 152490cb412SJustin Chen if (next_index == intf->tx_spb_clean_index) 153490cb412SJustin Chen return 1; 154490cb412SJustin Chen } 155490cb412SJustin Chen 156490cb412SJustin Chen return 0; 157490cb412SJustin Chen } 158490cb412SJustin Chen 159490cb412SJustin Chen static struct sk_buff *bcmasp_csum_offload(struct net_device *dev, 160490cb412SJustin Chen struct sk_buff *skb, 161490cb412SJustin Chen bool *csum_hw) 162490cb412SJustin Chen { 1637c10691eSJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 164490cb412SJustin Chen u32 header = 0, header2 = 0, epkt = 0; 165490cb412SJustin Chen struct bcmasp_pkt_offload *offload; 166490cb412SJustin Chen unsigned int header_cnt = 0; 167490cb412SJustin Chen u8 ip_proto; 168490cb412SJustin Chen int ret; 169490cb412SJustin Chen 170490cb412SJustin Chen if (skb->ip_summed != CHECKSUM_PARTIAL) 171490cb412SJustin Chen return skb; 172490cb412SJustin Chen 173490cb412SJustin Chen ret = skb_cow_head(skb, sizeof(*offload)); 1747c10691eSJustin Chen if (ret < 0) { 1757c10691eSJustin Chen intf->mib.tx_realloc_offload_failed++; 176490cb412SJustin Chen goto help; 1777c10691eSJustin Chen } 178490cb412SJustin Chen 179490cb412SJustin Chen switch (skb->protocol) { 180490cb412SJustin Chen case 
htons(ETH_P_IP): 181490cb412SJustin Chen header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf); 182490cb412SJustin Chen header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff); 183490cb412SJustin Chen epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2; 184490cb412SJustin Chen ip_proto = ip_hdr(skb)->protocol; 185490cb412SJustin Chen header_cnt += 2; 186490cb412SJustin Chen break; 187490cb412SJustin Chen case htons(ETH_P_IPV6): 188490cb412SJustin Chen header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf); 189490cb412SJustin Chen header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff); 190490cb412SJustin Chen epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2; 191490cb412SJustin Chen ip_proto = ipv6_hdr(skb)->nexthdr; 192490cb412SJustin Chen header_cnt += 2; 193490cb412SJustin Chen break; 194490cb412SJustin Chen default: 195490cb412SJustin Chen goto help; 196490cb412SJustin Chen } 197490cb412SJustin Chen 198490cb412SJustin Chen switch (ip_proto) { 199490cb412SJustin Chen case IPPROTO_TCP: 200490cb412SJustin Chen header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb)); 201490cb412SJustin Chen epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3; 202490cb412SJustin Chen header_cnt++; 203490cb412SJustin Chen break; 204490cb412SJustin Chen case IPPROTO_UDP: 205490cb412SJustin Chen header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN); 206490cb412SJustin Chen epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3; 207490cb412SJustin Chen header_cnt++; 208490cb412SJustin Chen break; 209490cb412SJustin Chen default: 210490cb412SJustin Chen goto help; 211490cb412SJustin Chen } 212490cb412SJustin Chen 213490cb412SJustin Chen offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload)); 214490cb412SJustin Chen 215490cb412SJustin Chen header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) | 216490cb412SJustin Chen PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN); 217490cb412SJustin Chen epkt |= PKT_OFFLOAD_EPKT_OP; 218490cb412SJustin Chen 219490cb412SJustin 
Chen offload->nop = htonl(PKT_OFFLOAD_NOP); 220490cb412SJustin Chen offload->header = htonl(header); 221490cb412SJustin Chen offload->header2 = htonl(header2); 222490cb412SJustin Chen offload->epkt = htonl(epkt); 223490cb412SJustin Chen offload->end = htonl(PKT_OFFLOAD_END_OP); 224490cb412SJustin Chen *csum_hw = true; 225490cb412SJustin Chen 226490cb412SJustin Chen return skb; 227490cb412SJustin Chen 228490cb412SJustin Chen help: 229490cb412SJustin Chen skb_checksum_help(skb); 230490cb412SJustin Chen 231490cb412SJustin Chen return skb; 232490cb412SJustin Chen } 233490cb412SJustin Chen 234490cb412SJustin Chen static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf) 235490cb412SJustin Chen { 236490cb412SJustin Chen return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID); 237490cb412SJustin Chen } 238490cb412SJustin Chen 239490cb412SJustin Chen static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr) 240490cb412SJustin Chen { 241490cb412SJustin Chen rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ); 242490cb412SJustin Chen } 243490cb412SJustin Chen 244490cb412SJustin Chen static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr) 245490cb412SJustin Chen { 246490cb412SJustin Chen rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ); 247490cb412SJustin Chen } 248490cb412SJustin Chen 249490cb412SJustin Chen static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf) 250490cb412SJustin Chen { 251490cb412SJustin Chen return tx_spb_dma_rq(intf, TX_SPB_DMA_READ); 252490cb412SJustin Chen } 253490cb412SJustin Chen 254490cb412SJustin Chen static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr) 255490cb412SJustin Chen { 256490cb412SJustin Chen tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID); 257490cb412SJustin Chen } 258490cb412SJustin Chen 259490cb412SJustin Chen static const struct bcmasp_intf_ops bcmasp_intf_ops = { 260490cb412SJustin Chen .rx_desc_read = bcmasp_rx_edpkt_dma_rq, 261490cb412SJustin Chen 
.rx_buffer_write = bcmasp_rx_edpkt_cfg_wq, 262490cb412SJustin Chen .rx_desc_write = bcmasp_rx_edpkt_dma_wq, 263490cb412SJustin Chen .tx_read = bcmasp_tx_spb_dma_rq, 264490cb412SJustin Chen .tx_write = bcmasp_tx_spb_dma_wq, 265490cb412SJustin Chen }; 266490cb412SJustin Chen 267490cb412SJustin Chen static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev) 268490cb412SJustin Chen { 269490cb412SJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 270490cb412SJustin Chen unsigned int total_bytes, size; 271490cb412SJustin Chen int spb_index, nr_frags, i, j; 272490cb412SJustin Chen struct bcmasp_tx_cb *txcb; 273490cb412SJustin Chen dma_addr_t mapping, valid; 274490cb412SJustin Chen struct bcmasp_desc *desc; 275490cb412SJustin Chen bool csum_hw = false; 276490cb412SJustin Chen struct device *kdev; 277490cb412SJustin Chen skb_frag_t *frag; 278490cb412SJustin Chen 279490cb412SJustin Chen kdev = &intf->parent->pdev->dev; 280490cb412SJustin Chen 281490cb412SJustin Chen nr_frags = skb_shinfo(skb)->nr_frags; 282490cb412SJustin Chen 283490cb412SJustin Chen if (tx_spb_ring_full(intf, nr_frags + 1)) { 284490cb412SJustin Chen netif_stop_queue(dev); 285490cb412SJustin Chen if (net_ratelimit()) 286490cb412SJustin Chen netdev_err(dev, "Tx Ring Full!\n"); 287490cb412SJustin Chen return NETDEV_TX_BUSY; 288490cb412SJustin Chen } 289490cb412SJustin Chen 290490cb412SJustin Chen /* Save skb len before adding csum offload header */ 291490cb412SJustin Chen total_bytes = skb->len; 292490cb412SJustin Chen skb = bcmasp_csum_offload(dev, skb, &csum_hw); 293490cb412SJustin Chen if (!skb) 294490cb412SJustin Chen return NETDEV_TX_OK; 295490cb412SJustin Chen 296490cb412SJustin Chen spb_index = intf->tx_spb_index; 297490cb412SJustin Chen valid = intf->tx_spb_dma_valid; 298490cb412SJustin Chen for (i = 0; i <= nr_frags; i++) { 299490cb412SJustin Chen if (!i) { 300490cb412SJustin Chen size = skb_headlen(skb); 301490cb412SJustin Chen if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) 
{ 302490cb412SJustin Chen if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN)) 303490cb412SJustin Chen return NETDEV_TX_OK; 304490cb412SJustin Chen size = skb->len; 305490cb412SJustin Chen } 306490cb412SJustin Chen mapping = dma_map_single(kdev, skb->data, size, 307490cb412SJustin Chen DMA_TO_DEVICE); 308490cb412SJustin Chen } else { 309490cb412SJustin Chen frag = &skb_shinfo(skb)->frags[i - 1]; 310490cb412SJustin Chen size = skb_frag_size(frag); 311490cb412SJustin Chen mapping = skb_frag_dma_map(kdev, frag, 0, size, 312490cb412SJustin Chen DMA_TO_DEVICE); 313490cb412SJustin Chen } 314490cb412SJustin Chen 315490cb412SJustin Chen if (dma_mapping_error(kdev, mapping)) { 3167c10691eSJustin Chen intf->mib.tx_dma_failed++; 317490cb412SJustin Chen spb_index = intf->tx_spb_index; 318490cb412SJustin Chen for (j = 0; j < i; j++) { 319490cb412SJustin Chen bcmasp_clean_txcb(intf, spb_index); 320490cb412SJustin Chen spb_index = incr_ring(spb_index, 321490cb412SJustin Chen DESC_RING_COUNT); 322490cb412SJustin Chen } 323490cb412SJustin Chen /* Rewind so we do not have a hole */ 324490cb412SJustin Chen spb_index = intf->tx_spb_index; 325490cb412SJustin Chen return NETDEV_TX_OK; 326490cb412SJustin Chen } 327490cb412SJustin Chen 328490cb412SJustin Chen txcb = &intf->tx_cbs[spb_index]; 329490cb412SJustin Chen desc = &intf->tx_spb_cpu[spb_index]; 330490cb412SJustin Chen memset(desc, 0, sizeof(*desc)); 331490cb412SJustin Chen txcb->skb = skb; 332490cb412SJustin Chen txcb->bytes_sent = total_bytes; 333490cb412SJustin Chen dma_unmap_addr_set(txcb, dma_addr, mapping); 334490cb412SJustin Chen dma_unmap_len_set(txcb, dma_len, size); 335490cb412SJustin Chen if (!i) { 336490cb412SJustin Chen desc->flags |= DESC_SOF; 337490cb412SJustin Chen if (csum_hw) 338490cb412SJustin Chen desc->flags |= DESC_EPKT_CMD; 339490cb412SJustin Chen } 340490cb412SJustin Chen 341490cb412SJustin Chen if (i == nr_frags) { 342490cb412SJustin Chen desc->flags |= DESC_EOF; 343490cb412SJustin Chen txcb->last = true; 
344490cb412SJustin Chen } 345490cb412SJustin Chen 346490cb412SJustin Chen desc->buf = mapping; 347490cb412SJustin Chen desc->size = size; 348490cb412SJustin Chen desc->flags |= DESC_INT_EN; 349490cb412SJustin Chen 350490cb412SJustin Chen netif_dbg(intf, tx_queued, dev, 351490cb412SJustin Chen "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n", 352490cb412SJustin Chen __func__, &mapping, desc->size, desc->flags, 353490cb412SJustin Chen spb_index); 354490cb412SJustin Chen 355490cb412SJustin Chen spb_index = incr_ring(spb_index, DESC_RING_COUNT); 356490cb412SJustin Chen valid = incr_last_byte(valid, intf->tx_spb_dma_addr, 357490cb412SJustin Chen DESC_RING_COUNT); 358490cb412SJustin Chen } 359490cb412SJustin Chen 360490cb412SJustin Chen /* Ensure all descriptors have been written to DRAM for the 361490cb412SJustin Chen * hardware to see up-to-date contents. 362490cb412SJustin Chen */ 363490cb412SJustin Chen wmb(); 364490cb412SJustin Chen 365490cb412SJustin Chen intf->tx_spb_index = spb_index; 366490cb412SJustin Chen intf->tx_spb_dma_valid = valid; 367490cb412SJustin Chen bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid); 368490cb412SJustin Chen 369490cb412SJustin Chen if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1)) 370490cb412SJustin Chen netif_stop_queue(dev); 371490cb412SJustin Chen 372490cb412SJustin Chen return NETDEV_TX_OK; 373490cb412SJustin Chen } 374490cb412SJustin Chen 375490cb412SJustin Chen static void bcmasp_netif_start(struct net_device *dev) 376490cb412SJustin Chen { 377490cb412SJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 378490cb412SJustin Chen 379490cb412SJustin Chen bcmasp_set_rx_mode(dev); 380490cb412SJustin Chen napi_enable(&intf->tx_napi); 381490cb412SJustin Chen napi_enable(&intf->rx_napi); 382490cb412SJustin Chen 383490cb412SJustin Chen bcmasp_enable_rx_irq(intf, 1); 384490cb412SJustin Chen bcmasp_enable_tx_irq(intf, 1); 385490cb412SJustin Chen 386490cb412SJustin Chen phy_start(dev->phydev); 387490cb412SJustin Chen } 
388490cb412SJustin Chen 389490cb412SJustin Chen static void umac_reset(struct bcmasp_intf *intf) 390490cb412SJustin Chen { 391490cb412SJustin Chen umac_wl(intf, 0x0, UMC_CMD); 392490cb412SJustin Chen umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD); 393490cb412SJustin Chen usleep_range(10, 100); 394*4bb7ad11SJustin Chen /* We hold the umac in reset and bring it out of 395*4bb7ad11SJustin Chen * reset when phy link is up. 396*4bb7ad11SJustin Chen */ 397490cb412SJustin Chen } 398490cb412SJustin Chen 399490cb412SJustin Chen static void umac_set_hw_addr(struct bcmasp_intf *intf, 400490cb412SJustin Chen const unsigned char *addr) 401490cb412SJustin Chen { 402490cb412SJustin Chen u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | 403490cb412SJustin Chen addr[3]; 404490cb412SJustin Chen u32 mac1 = (addr[4] << 8) | addr[5]; 405490cb412SJustin Chen 406490cb412SJustin Chen umac_wl(intf, mac0, UMC_MAC0); 407490cb412SJustin Chen umac_wl(intf, mac1, UMC_MAC1); 408490cb412SJustin Chen } 409490cb412SJustin Chen 410490cb412SJustin Chen static void umac_enable_set(struct bcmasp_intf *intf, u32 mask, 411490cb412SJustin Chen unsigned int enable) 412490cb412SJustin Chen { 413490cb412SJustin Chen u32 reg; 414490cb412SJustin Chen 415490cb412SJustin Chen reg = umac_rl(intf, UMC_CMD); 416*4bb7ad11SJustin Chen if (reg & UMC_CMD_SW_RESET) 417*4bb7ad11SJustin Chen return; 418490cb412SJustin Chen if (enable) 419490cb412SJustin Chen reg |= mask; 420490cb412SJustin Chen else 421490cb412SJustin Chen reg &= ~mask; 422490cb412SJustin Chen umac_wl(intf, reg, UMC_CMD); 423490cb412SJustin Chen 424490cb412SJustin Chen /* UniMAC stops on a packet boundary, wait for a full-sized packet 425490cb412SJustin Chen * to be processed (1 msec). 
426490cb412SJustin Chen */ 427490cb412SJustin Chen if (enable == 0) 428490cb412SJustin Chen usleep_range(1000, 2000); 429490cb412SJustin Chen } 430490cb412SJustin Chen 431490cb412SJustin Chen static void umac_init(struct bcmasp_intf *intf) 432490cb412SJustin Chen { 433490cb412SJustin Chen umac_wl(intf, 0x800, UMC_FRM_LEN); 434490cb412SJustin Chen umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL); 435490cb412SJustin Chen umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ); 436490cb412SJustin Chen } 437490cb412SJustin Chen 438490cb412SJustin Chen static int bcmasp_tx_poll(struct napi_struct *napi, int budget) 439490cb412SJustin Chen { 440490cb412SJustin Chen struct bcmasp_intf *intf = 441490cb412SJustin Chen container_of(napi, struct bcmasp_intf, tx_napi); 442490cb412SJustin Chen struct bcmasp_intf_stats64 *stats = &intf->stats64; 443490cb412SJustin Chen struct device *kdev = &intf->parent->pdev->dev; 444490cb412SJustin Chen unsigned long read, released = 0; 445490cb412SJustin Chen struct bcmasp_tx_cb *txcb; 446490cb412SJustin Chen struct bcmasp_desc *desc; 447490cb412SJustin Chen dma_addr_t mapping; 448490cb412SJustin Chen 449490cb412SJustin Chen read = bcmasp_intf_tx_read(intf); 450490cb412SJustin Chen while (intf->tx_spb_dma_read != read) { 451490cb412SJustin Chen txcb = &intf->tx_cbs[intf->tx_spb_clean_index]; 452490cb412SJustin Chen mapping = dma_unmap_addr(txcb, dma_addr); 453490cb412SJustin Chen 454490cb412SJustin Chen dma_unmap_single(kdev, mapping, 455490cb412SJustin Chen dma_unmap_len(txcb, dma_len), 456490cb412SJustin Chen DMA_TO_DEVICE); 457490cb412SJustin Chen 458490cb412SJustin Chen if (txcb->last) { 459490cb412SJustin Chen dev_consume_skb_any(txcb->skb); 460490cb412SJustin Chen 461490cb412SJustin Chen u64_stats_update_begin(&stats->syncp); 462490cb412SJustin Chen u64_stats_inc(&stats->tx_packets); 463490cb412SJustin Chen u64_stats_add(&stats->tx_bytes, txcb->bytes_sent); 464490cb412SJustin Chen u64_stats_update_end(&stats->syncp); 465490cb412SJustin Chen } 466490cb412SJustin 
Chen 467490cb412SJustin Chen desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index]; 468490cb412SJustin Chen 469490cb412SJustin Chen netif_dbg(intf, tx_done, intf->ndev, 470490cb412SJustin Chen "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n", 471490cb412SJustin Chen __func__, &mapping, desc->size, desc->flags, 472490cb412SJustin Chen intf->tx_spb_clean_index); 473490cb412SJustin Chen 474490cb412SJustin Chen bcmasp_clean_txcb(intf, intf->tx_spb_clean_index); 475490cb412SJustin Chen released++; 476490cb412SJustin Chen 477490cb412SJustin Chen intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index, 478490cb412SJustin Chen DESC_RING_COUNT); 479490cb412SJustin Chen intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read, 480490cb412SJustin Chen intf->tx_spb_dma_addr, 481490cb412SJustin Chen DESC_RING_COUNT); 482490cb412SJustin Chen } 483490cb412SJustin Chen 484490cb412SJustin Chen /* Ensure all descriptors have been written to DRAM for the hardware 485490cb412SJustin Chen * to see updated contents. 
486490cb412SJustin Chen */ 487490cb412SJustin Chen wmb(); 488490cb412SJustin Chen 489490cb412SJustin Chen napi_complete(&intf->tx_napi); 490490cb412SJustin Chen 491490cb412SJustin Chen bcmasp_enable_tx_irq(intf, 1); 492490cb412SJustin Chen 493490cb412SJustin Chen if (released) 494490cb412SJustin Chen netif_wake_queue(intf->ndev); 495490cb412SJustin Chen 496490cb412SJustin Chen return 0; 497490cb412SJustin Chen } 498490cb412SJustin Chen 499490cb412SJustin Chen static int bcmasp_rx_poll(struct napi_struct *napi, int budget) 500490cb412SJustin Chen { 501490cb412SJustin Chen struct bcmasp_intf *intf = 502490cb412SJustin Chen container_of(napi, struct bcmasp_intf, rx_napi); 503490cb412SJustin Chen struct bcmasp_intf_stats64 *stats = &intf->stats64; 504490cb412SJustin Chen struct device *kdev = &intf->parent->pdev->dev; 505490cb412SJustin Chen unsigned long processed = 0; 506490cb412SJustin Chen struct bcmasp_desc *desc; 507490cb412SJustin Chen struct sk_buff *skb; 508490cb412SJustin Chen dma_addr_t valid; 509490cb412SJustin Chen void *data; 510490cb412SJustin Chen u64 flags; 511490cb412SJustin Chen u32 len; 512490cb412SJustin Chen 513490cb412SJustin Chen valid = bcmasp_intf_rx_desc_read(intf) + 1; 514490cb412SJustin Chen if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE) 515490cb412SJustin Chen valid = intf->rx_edpkt_dma_addr; 516490cb412SJustin Chen 517490cb412SJustin Chen while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) { 518490cb412SJustin Chen desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index]; 519490cb412SJustin Chen 520490cb412SJustin Chen /* Ensure that descriptor has been fully written to DRAM by 521490cb412SJustin Chen * hardware before reading by the CPU 522490cb412SJustin Chen */ 523490cb412SJustin Chen rmb(); 524490cb412SJustin Chen 525490cb412SJustin Chen /* Calculate virt addr by offsetting from physical addr */ 526490cb412SJustin Chen data = intf->rx_ring_cpu + 527490cb412SJustin Chen (DESC_ADDR(desc->buf) - intf->rx_ring_dma); 
528490cb412SJustin Chen 529490cb412SJustin Chen flags = DESC_FLAGS(desc->buf); 530490cb412SJustin Chen if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) { 531490cb412SJustin Chen if (net_ratelimit()) { 532490cb412SJustin Chen netif_err(intf, rx_status, intf->ndev, 533490cb412SJustin Chen "flags=0x%llx\n", flags); 534490cb412SJustin Chen } 535490cb412SJustin Chen 536490cb412SJustin Chen u64_stats_update_begin(&stats->syncp); 537490cb412SJustin Chen if (flags & DESC_CRC_ERR) 538490cb412SJustin Chen u64_stats_inc(&stats->rx_crc_errs); 539490cb412SJustin Chen if (flags & DESC_RX_SYM_ERR) 540490cb412SJustin Chen u64_stats_inc(&stats->rx_sym_errs); 541490cb412SJustin Chen u64_stats_update_end(&stats->syncp); 542490cb412SJustin Chen 543490cb412SJustin Chen goto next; 544490cb412SJustin Chen } 545490cb412SJustin Chen 546490cb412SJustin Chen dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size, 547490cb412SJustin Chen DMA_FROM_DEVICE); 548490cb412SJustin Chen 549490cb412SJustin Chen len = desc->size; 550490cb412SJustin Chen 551490cb412SJustin Chen skb = napi_alloc_skb(napi, len); 552490cb412SJustin Chen if (!skb) { 553490cb412SJustin Chen u64_stats_update_begin(&stats->syncp); 554490cb412SJustin Chen u64_stats_inc(&stats->rx_dropped); 555490cb412SJustin Chen u64_stats_update_end(&stats->syncp); 5567c10691eSJustin Chen intf->mib.alloc_rx_skb_failed++; 5577c10691eSJustin Chen 558490cb412SJustin Chen goto next; 559490cb412SJustin Chen } 560490cb412SJustin Chen 561490cb412SJustin Chen skb_put(skb, len); 562490cb412SJustin Chen memcpy(skb->data, data, len); 563490cb412SJustin Chen 564490cb412SJustin Chen skb_pull(skb, 2); 565490cb412SJustin Chen len -= 2; 566490cb412SJustin Chen if (likely(intf->crc_fwd)) { 567490cb412SJustin Chen skb_trim(skb, len - ETH_FCS_LEN); 568490cb412SJustin Chen len -= ETH_FCS_LEN; 569490cb412SJustin Chen } 570490cb412SJustin Chen 571490cb412SJustin Chen if ((intf->ndev->features & NETIF_F_RXCSUM) && 572490cb412SJustin Chen (desc->buf & 
DESC_CHKSUM)) 573490cb412SJustin Chen skb->ip_summed = CHECKSUM_UNNECESSARY; 574490cb412SJustin Chen 575490cb412SJustin Chen skb->protocol = eth_type_trans(skb, intf->ndev); 576490cb412SJustin Chen 577490cb412SJustin Chen napi_gro_receive(napi, skb); 578490cb412SJustin Chen 579490cb412SJustin Chen u64_stats_update_begin(&stats->syncp); 580490cb412SJustin Chen u64_stats_inc(&stats->rx_packets); 581490cb412SJustin Chen u64_stats_add(&stats->rx_bytes, len); 582490cb412SJustin Chen u64_stats_update_end(&stats->syncp); 583490cb412SJustin Chen 584490cb412SJustin Chen next: 585490cb412SJustin Chen bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) + 586490cb412SJustin Chen desc->size)); 587490cb412SJustin Chen 588490cb412SJustin Chen processed++; 589490cb412SJustin Chen intf->rx_edpkt_dma_read = 590490cb412SJustin Chen incr_first_byte(intf->rx_edpkt_dma_read, 591490cb412SJustin Chen intf->rx_edpkt_dma_addr, 592490cb412SJustin Chen DESC_RING_COUNT); 593490cb412SJustin Chen intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index, 594490cb412SJustin Chen DESC_RING_COUNT); 595490cb412SJustin Chen } 596490cb412SJustin Chen 597490cb412SJustin Chen bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read); 598490cb412SJustin Chen 599490cb412SJustin Chen if (processed < budget) { 600490cb412SJustin Chen napi_complete_done(&intf->rx_napi, processed); 601490cb412SJustin Chen bcmasp_enable_rx_irq(intf, 1); 602490cb412SJustin Chen } 603490cb412SJustin Chen 604490cb412SJustin Chen return processed; 605490cb412SJustin Chen } 606490cb412SJustin Chen 607490cb412SJustin Chen static void bcmasp_adj_link(struct net_device *dev) 608490cb412SJustin Chen { 609490cb412SJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 610490cb412SJustin Chen struct phy_device *phydev = dev->phydev; 611490cb412SJustin Chen u32 cmd_bits = 0, reg; 612490cb412SJustin Chen int changed = 0; 613490cb412SJustin Chen 614490cb412SJustin Chen if (intf->old_link != phydev->link) { 615490cb412SJustin Chen changed = 
1; 616490cb412SJustin Chen intf->old_link = phydev->link; 617490cb412SJustin Chen } 618490cb412SJustin Chen 619490cb412SJustin Chen if (intf->old_duplex != phydev->duplex) { 620490cb412SJustin Chen changed = 1; 621490cb412SJustin Chen intf->old_duplex = phydev->duplex; 622490cb412SJustin Chen } 623490cb412SJustin Chen 624490cb412SJustin Chen switch (phydev->speed) { 625490cb412SJustin Chen case SPEED_2500: 626490cb412SJustin Chen cmd_bits = UMC_CMD_SPEED_2500; 627490cb412SJustin Chen break; 628490cb412SJustin Chen case SPEED_1000: 629490cb412SJustin Chen cmd_bits = UMC_CMD_SPEED_1000; 630490cb412SJustin Chen break; 631490cb412SJustin Chen case SPEED_100: 632490cb412SJustin Chen cmd_bits = UMC_CMD_SPEED_100; 633490cb412SJustin Chen break; 634490cb412SJustin Chen case SPEED_10: 635490cb412SJustin Chen cmd_bits = UMC_CMD_SPEED_10; 636490cb412SJustin Chen break; 637490cb412SJustin Chen default: 638490cb412SJustin Chen break; 639490cb412SJustin Chen } 640490cb412SJustin Chen cmd_bits <<= UMC_CMD_SPEED_SHIFT; 641490cb412SJustin Chen 642490cb412SJustin Chen if (phydev->duplex == DUPLEX_HALF) 643490cb412SJustin Chen cmd_bits |= UMC_CMD_HD_EN; 644490cb412SJustin Chen 645490cb412SJustin Chen if (intf->old_pause != phydev->pause) { 646490cb412SJustin Chen changed = 1; 647490cb412SJustin Chen intf->old_pause = phydev->pause; 648490cb412SJustin Chen } 649490cb412SJustin Chen 650490cb412SJustin Chen if (!phydev->pause) 651490cb412SJustin Chen cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE; 652490cb412SJustin Chen 653490cb412SJustin Chen if (!changed) 654490cb412SJustin Chen return; 655490cb412SJustin Chen 656490cb412SJustin Chen if (phydev->link) { 657490cb412SJustin Chen reg = umac_rl(intf, UMC_CMD); 658490cb412SJustin Chen reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) | 659490cb412SJustin Chen UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE | 660490cb412SJustin Chen UMC_CMD_TX_PAUSE_IGNORE); 661490cb412SJustin Chen reg |= cmd_bits; 662*4bb7ad11SJustin Chen if 
(reg & UMC_CMD_SW_RESET) { 663*4bb7ad11SJustin Chen reg &= ~UMC_CMD_SW_RESET; 664*4bb7ad11SJustin Chen umac_wl(intf, reg, UMC_CMD); 665*4bb7ad11SJustin Chen udelay(2); 666*4bb7ad11SJustin Chen reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC; 667*4bb7ad11SJustin Chen } 668490cb412SJustin Chen umac_wl(intf, reg, UMC_CMD); 669550e6f34SJustin Chen 670550e6f34SJustin Chen intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0; 671550e6f34SJustin Chen bcmasp_eee_enable_set(intf, intf->eee.eee_active); 672490cb412SJustin Chen } 673490cb412SJustin Chen 674490cb412SJustin Chen reg = rgmii_rl(intf, RGMII_OOB_CNTRL); 675490cb412SJustin Chen if (phydev->link) 676490cb412SJustin Chen reg |= RGMII_LINK; 677490cb412SJustin Chen else 678490cb412SJustin Chen reg &= ~RGMII_LINK; 679490cb412SJustin Chen rgmii_wl(intf, reg, RGMII_OOB_CNTRL); 680490cb412SJustin Chen 681490cb412SJustin Chen if (changed) 682490cb412SJustin Chen phy_print_status(phydev); 683490cb412SJustin Chen } 684490cb412SJustin Chen 685490cb412SJustin Chen static int bcmasp_init_rx(struct bcmasp_intf *intf) 686490cb412SJustin Chen { 687490cb412SJustin Chen struct device *kdev = &intf->parent->pdev->dev; 688490cb412SJustin Chen struct page *buffer_pg; 689490cb412SJustin Chen dma_addr_t dma; 690490cb412SJustin Chen void *p; 691490cb412SJustin Chen u32 reg; 692490cb412SJustin Chen int ret; 693490cb412SJustin Chen 694490cb412SJustin Chen intf->rx_buf_order = get_order(RING_BUFFER_SIZE); 695490cb412SJustin Chen buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order); 696490cb412SJustin Chen 697490cb412SJustin Chen dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE, 698490cb412SJustin Chen DMA_FROM_DEVICE); 699490cb412SJustin Chen if (dma_mapping_error(kdev, dma)) { 700490cb412SJustin Chen __free_pages(buffer_pg, intf->rx_buf_order); 701490cb412SJustin Chen return -ENOMEM; 702490cb412SJustin Chen } 703490cb412SJustin Chen intf->rx_ring_cpu = page_to_virt(buffer_pg); 704490cb412SJustin Chen intf->rx_ring_dma = dma; 
705490cb412SJustin Chen intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1; 706490cb412SJustin Chen 707490cb412SJustin Chen p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr, 708490cb412SJustin Chen GFP_KERNEL); 709490cb412SJustin Chen if (!p) { 710490cb412SJustin Chen ret = -ENOMEM; 711490cb412SJustin Chen goto free_rx_ring; 712490cb412SJustin Chen } 713490cb412SJustin Chen intf->rx_edpkt_cpu = p; 714490cb412SJustin Chen 715490cb412SJustin Chen netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll); 716490cb412SJustin Chen 717490cb412SJustin Chen intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr; 718490cb412SJustin Chen intf->rx_edpkt_index = 0; 719490cb412SJustin Chen 720490cb412SJustin Chen /* Make sure channels are disabled */ 721490cb412SJustin Chen rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE); 722490cb412SJustin Chen 723490cb412SJustin Chen /* Rx SPB */ 724490cb412SJustin Chen rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ); 725490cb412SJustin Chen rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE); 726490cb412SJustin Chen rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE); 727490cb412SJustin Chen rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid, 728490cb412SJustin Chen RX_EDPKT_RING_BUFFER_END); 729490cb412SJustin Chen rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid, 730490cb412SJustin Chen RX_EDPKT_RING_BUFFER_VALID); 731490cb412SJustin Chen 732490cb412SJustin Chen /* EDPKT */ 733490cb412SJustin Chen rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K << 734490cb412SJustin Chen RX_EDPKT_CFG_CFG0_DBUF_SHIFT) | 735490cb412SJustin Chen (RX_EDPKT_CFG_CFG0_64_ALN << 736490cb412SJustin Chen RX_EDPKT_CFG_CFG0_BALN_SHIFT) | 737490cb412SJustin Chen (RX_EDPKT_CFG_CFG0_EFRM_STUF), 738490cb412SJustin Chen RX_EDPKT_CFG_CFG0); 739490cb412SJustin Chen rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE); 740490cb412SJustin Chen rx_edpkt_dma_wq(intf, 
intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ); 741490cb412SJustin Chen rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE); 742490cb412SJustin Chen rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1), 743490cb412SJustin Chen RX_EDPKT_DMA_END); 744490cb412SJustin Chen rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1), 745490cb412SJustin Chen RX_EDPKT_DMA_VALID); 746490cb412SJustin Chen 747490cb412SJustin Chen reg = UMAC2FB_CFG_DEFAULT_EN | 748490cb412SJustin Chen ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT); 749490cb412SJustin Chen reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT); 750490cb412SJustin Chen umac2fb_wl(intf, reg, UMAC2FB_CFG); 751490cb412SJustin Chen 752490cb412SJustin Chen return 0; 753490cb412SJustin Chen 754490cb412SJustin Chen free_rx_ring: 755490cb412SJustin Chen dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE, 756490cb412SJustin Chen DMA_FROM_DEVICE); 757490cb412SJustin Chen __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order); 758490cb412SJustin Chen 759490cb412SJustin Chen return ret; 760490cb412SJustin Chen } 761490cb412SJustin Chen 762490cb412SJustin Chen static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf) 763490cb412SJustin Chen { 764490cb412SJustin Chen struct device *kdev = &intf->parent->pdev->dev; 765490cb412SJustin Chen 766490cb412SJustin Chen dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu, 767490cb412SJustin Chen intf->rx_edpkt_dma_addr); 768490cb412SJustin Chen dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE, 769490cb412SJustin Chen DMA_FROM_DEVICE); 770490cb412SJustin Chen __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order); 771490cb412SJustin Chen } 772490cb412SJustin Chen 773490cb412SJustin Chen static int bcmasp_init_tx(struct bcmasp_intf *intf) 774490cb412SJustin Chen { 775490cb412SJustin Chen struct device *kdev = &intf->parent->pdev->dev; 776490cb412SJustin Chen void *p; 777490cb412SJustin Chen int ret; 
778490cb412SJustin Chen 779490cb412SJustin Chen p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr, 780490cb412SJustin Chen GFP_KERNEL); 781490cb412SJustin Chen if (!p) 782490cb412SJustin Chen return -ENOMEM; 783490cb412SJustin Chen 784490cb412SJustin Chen intf->tx_spb_cpu = p; 785490cb412SJustin Chen intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1; 786490cb412SJustin Chen intf->tx_spb_dma_read = intf->tx_spb_dma_addr; 787490cb412SJustin Chen 788490cb412SJustin Chen intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb), 789490cb412SJustin Chen GFP_KERNEL); 790490cb412SJustin Chen if (!intf->tx_cbs) { 791490cb412SJustin Chen ret = -ENOMEM; 792490cb412SJustin Chen goto free_tx_spb; 793490cb412SJustin Chen } 794490cb412SJustin Chen 795490cb412SJustin Chen intf->tx_spb_index = 0; 796490cb412SJustin Chen intf->tx_spb_clean_index = 0; 797490cb412SJustin Chen 798490cb412SJustin Chen netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll); 799490cb412SJustin Chen 800490cb412SJustin Chen /* Make sure channels are disabled */ 801490cb412SJustin Chen tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE); 802490cb412SJustin Chen tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC); 803490cb412SJustin Chen 804490cb412SJustin Chen /* Tx SPB */ 805490cb412SJustin Chen tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT), 806490cb412SJustin Chen TX_SPB_CTRL_XF_CTRL2); 807490cb412SJustin Chen tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR); 808490cb412SJustin Chen tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT); 809490cb412SJustin Chen tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL); 810490cb412SJustin Chen 811490cb412SJustin Chen tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ); 812490cb412SJustin Chen tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE); 813490cb412SJustin Chen tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END); 814490cb412SJustin Chen 
tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID); 815490cb412SJustin Chen 816490cb412SJustin Chen return 0; 817490cb412SJustin Chen 818490cb412SJustin Chen free_tx_spb: 819490cb412SJustin Chen dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, 820490cb412SJustin Chen intf->tx_spb_dma_addr); 821490cb412SJustin Chen 822490cb412SJustin Chen return ret; 823490cb412SJustin Chen } 824490cb412SJustin Chen 825490cb412SJustin Chen static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf) 826490cb412SJustin Chen { 827490cb412SJustin Chen struct device *kdev = &intf->parent->pdev->dev; 828490cb412SJustin Chen 829490cb412SJustin Chen /* Free descriptors */ 830490cb412SJustin Chen dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, 831490cb412SJustin Chen intf->tx_spb_dma_addr); 832490cb412SJustin Chen 833490cb412SJustin Chen /* Free cbs */ 834490cb412SJustin Chen kfree(intf->tx_cbs); 835490cb412SJustin Chen } 836490cb412SJustin Chen 837490cb412SJustin Chen static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable) 838490cb412SJustin Chen { 839490cb412SJustin Chen u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN | 840490cb412SJustin Chen RGMII_EPHY_CFG_IDDQ_GLOBAL; 841490cb412SJustin Chen u32 reg; 842490cb412SJustin Chen 843490cb412SJustin Chen reg = rgmii_rl(intf, RGMII_EPHY_CNTRL); 844490cb412SJustin Chen if (enable) { 845490cb412SJustin Chen reg &= ~RGMII_EPHY_CK25_DIS; 846490cb412SJustin Chen rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); 847490cb412SJustin Chen mdelay(1); 848490cb412SJustin Chen 849490cb412SJustin Chen reg &= ~mask; 850490cb412SJustin Chen reg |= RGMII_EPHY_RESET; 851490cb412SJustin Chen rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); 852490cb412SJustin Chen mdelay(1); 853490cb412SJustin Chen 854490cb412SJustin Chen reg &= ~RGMII_EPHY_RESET; 855490cb412SJustin Chen } else { 856490cb412SJustin Chen reg |= mask | RGMII_EPHY_RESET; 857490cb412SJustin Chen rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); 858490cb412SJustin 
Chen mdelay(1); 859490cb412SJustin Chen reg |= RGMII_EPHY_CK25_DIS; 860490cb412SJustin Chen } 861490cb412SJustin Chen rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); 862490cb412SJustin Chen mdelay(1); 863490cb412SJustin Chen 864490cb412SJustin Chen /* Set or clear the LED control override to avoid lighting up LEDs 865490cb412SJustin Chen * while the EPHY is powered off and drawing unnecessary current. 866490cb412SJustin Chen */ 867490cb412SJustin Chen reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL); 868490cb412SJustin Chen if (enable) 869490cb412SJustin Chen reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD; 870490cb412SJustin Chen else 871490cb412SJustin Chen reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD; 872490cb412SJustin Chen rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL); 873490cb412SJustin Chen } 874490cb412SJustin Chen 875490cb412SJustin Chen static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable) 876490cb412SJustin Chen { 877490cb412SJustin Chen u32 reg; 878490cb412SJustin Chen 879490cb412SJustin Chen reg = rgmii_rl(intf, RGMII_OOB_CNTRL); 880490cb412SJustin Chen reg &= ~RGMII_OOB_DIS; 881490cb412SJustin Chen if (enable) 882490cb412SJustin Chen reg |= RGMII_MODE_EN; 883490cb412SJustin Chen else 884490cb412SJustin Chen reg &= ~RGMII_MODE_EN; 885490cb412SJustin Chen rgmii_wl(intf, reg, RGMII_OOB_CNTRL); 886490cb412SJustin Chen } 887490cb412SJustin Chen 888490cb412SJustin Chen static void bcmasp_netif_deinit(struct net_device *dev) 889490cb412SJustin Chen { 890490cb412SJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 891490cb412SJustin Chen u32 reg, timeout = 1000; 892490cb412SJustin Chen 893490cb412SJustin Chen napi_disable(&intf->tx_napi); 894490cb412SJustin Chen 895490cb412SJustin Chen bcmasp_enable_tx(intf, 0); 896490cb412SJustin Chen 897490cb412SJustin Chen /* Flush any TX packets in the pipe */ 898490cb412SJustin Chen tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL); 899490cb412SJustin Chen do { 900490cb412SJustin Chen reg = tx_spb_dma_rl(intf, 
TX_SPB_DMA_FIFO_STATUS); 901490cb412SJustin Chen if (!(reg & TX_SPB_DMA_FIFO_FLUSH)) 902490cb412SJustin Chen break; 903490cb412SJustin Chen usleep_range(1000, 2000); 904490cb412SJustin Chen } while (timeout-- > 0); 905490cb412SJustin Chen tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL); 906490cb412SJustin Chen 907490cb412SJustin Chen umac_enable_set(intf, UMC_CMD_TX_EN, 0); 908490cb412SJustin Chen 909490cb412SJustin Chen phy_stop(dev->phydev); 910490cb412SJustin Chen 911490cb412SJustin Chen umac_enable_set(intf, UMC_CMD_RX_EN, 0); 912490cb412SJustin Chen 913490cb412SJustin Chen bcmasp_flush_rx_port(intf); 914490cb412SJustin Chen usleep_range(1000, 2000); 915490cb412SJustin Chen bcmasp_enable_rx(intf, 0); 916490cb412SJustin Chen 917490cb412SJustin Chen napi_disable(&intf->rx_napi); 918490cb412SJustin Chen 919490cb412SJustin Chen /* Disable interrupts */ 920490cb412SJustin Chen bcmasp_enable_tx_irq(intf, 0); 921490cb412SJustin Chen bcmasp_enable_rx_irq(intf, 0); 922490cb412SJustin Chen 923490cb412SJustin Chen netif_napi_del(&intf->tx_napi); 924490cb412SJustin Chen bcmasp_reclaim_free_all_tx(intf); 925490cb412SJustin Chen 926490cb412SJustin Chen netif_napi_del(&intf->rx_napi); 927490cb412SJustin Chen bcmasp_reclaim_free_all_rx(intf); 928490cb412SJustin Chen } 929490cb412SJustin Chen 930490cb412SJustin Chen static int bcmasp_stop(struct net_device *dev) 931490cb412SJustin Chen { 932490cb412SJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 933490cb412SJustin Chen 934490cb412SJustin Chen netif_dbg(intf, ifdown, dev, "bcmasp stop\n"); 935490cb412SJustin Chen 936490cb412SJustin Chen /* Stop tx from updating HW */ 937490cb412SJustin Chen netif_tx_disable(dev); 938490cb412SJustin Chen 939490cb412SJustin Chen bcmasp_netif_deinit(dev); 940490cb412SJustin Chen 941490cb412SJustin Chen phy_disconnect(dev->phydev); 942490cb412SJustin Chen 943490cb412SJustin Chen /* Disable internal EPHY or external PHY */ 944490cb412SJustin Chen if (intf->internal_phy) 945490cb412SJustin 
Chen bcmasp_ephy_enable_set(intf, false); 946490cb412SJustin Chen else 947490cb412SJustin Chen bcmasp_rgmii_mode_en_set(intf, false); 948490cb412SJustin Chen 949490cb412SJustin Chen /* Disable the interface clocks */ 950490cb412SJustin Chen bcmasp_core_clock_set_intf(intf, false); 951490cb412SJustin Chen 952490cb412SJustin Chen clk_disable_unprepare(intf->parent->clk); 953490cb412SJustin Chen 954490cb412SJustin Chen return 0; 955490cb412SJustin Chen } 956490cb412SJustin Chen 957490cb412SJustin Chen static void bcmasp_configure_port(struct bcmasp_intf *intf) 958490cb412SJustin Chen { 959490cb412SJustin Chen u32 reg, id_mode_dis = 0; 960490cb412SJustin Chen 961490cb412SJustin Chen reg = rgmii_rl(intf, RGMII_PORT_CNTRL); 962490cb412SJustin Chen reg &= ~RGMII_PORT_MODE_MASK; 963490cb412SJustin Chen 964490cb412SJustin Chen switch (intf->phy_interface) { 965490cb412SJustin Chen case PHY_INTERFACE_MODE_RGMII: 966490cb412SJustin Chen /* RGMII_NO_ID: TXC transitions at the same time as TXD 967490cb412SJustin Chen * (requires PCB or receiver-side delay) 968490cb412SJustin Chen * RGMII: Add 2ns delay on TXC (90 degree shift) 969490cb412SJustin Chen * 970490cb412SJustin Chen * ID is implicitly disabled for 100Mbps (RG)MII operation. 
971490cb412SJustin Chen */ 972490cb412SJustin Chen id_mode_dis = RGMII_ID_MODE_DIS; 973490cb412SJustin Chen fallthrough; 974490cb412SJustin Chen case PHY_INTERFACE_MODE_RGMII_TXID: 975490cb412SJustin Chen reg |= RGMII_PORT_MODE_EXT_GPHY; 976490cb412SJustin Chen break; 977490cb412SJustin Chen case PHY_INTERFACE_MODE_MII: 978490cb412SJustin Chen reg |= RGMII_PORT_MODE_EXT_EPHY; 979490cb412SJustin Chen break; 980490cb412SJustin Chen default: 981490cb412SJustin Chen break; 982490cb412SJustin Chen } 983490cb412SJustin Chen 984490cb412SJustin Chen if (intf->internal_phy) 985490cb412SJustin Chen reg |= RGMII_PORT_MODE_EPHY; 986490cb412SJustin Chen 987490cb412SJustin Chen rgmii_wl(intf, reg, RGMII_PORT_CNTRL); 988490cb412SJustin Chen 989490cb412SJustin Chen reg = rgmii_rl(intf, RGMII_OOB_CNTRL); 990490cb412SJustin Chen reg &= ~RGMII_ID_MODE_DIS; 991490cb412SJustin Chen reg |= id_mode_dis; 992490cb412SJustin Chen rgmii_wl(intf, reg, RGMII_OOB_CNTRL); 993490cb412SJustin Chen } 994490cb412SJustin Chen 995490cb412SJustin Chen static int bcmasp_netif_init(struct net_device *dev, bool phy_connect) 996490cb412SJustin Chen { 997490cb412SJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 998490cb412SJustin Chen phy_interface_t phy_iface = intf->phy_interface; 999490cb412SJustin Chen u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE | 1000490cb412SJustin Chen PHY_BRCM_DIS_TXCRXC_NOENRGY | 1001490cb412SJustin Chen PHY_BRCM_IDDQ_SUSPEND; 1002490cb412SJustin Chen struct phy_device *phydev = NULL; 1003490cb412SJustin Chen int ret; 1004490cb412SJustin Chen 1005490cb412SJustin Chen /* Always enable interface clocks */ 1006490cb412SJustin Chen bcmasp_core_clock_set_intf(intf, true); 1007490cb412SJustin Chen 1008490cb412SJustin Chen /* Enable internal PHY or external PHY before any MAC activity */ 1009490cb412SJustin Chen if (intf->internal_phy) 1010490cb412SJustin Chen bcmasp_ephy_enable_set(intf, true); 1011490cb412SJustin Chen else 1012490cb412SJustin Chen bcmasp_rgmii_mode_en_set(intf, 
true); 1013490cb412SJustin Chen bcmasp_configure_port(intf); 1014490cb412SJustin Chen 1015490cb412SJustin Chen /* This is an ugly quirk but we have not been correctly 1016490cb412SJustin Chen * interpreting the phy_interface values and we have done that 1017490cb412SJustin Chen * across different drivers, so at least we are consistent in 1018490cb412SJustin Chen * our mistakes. 1019490cb412SJustin Chen * 1020490cb412SJustin Chen * When the Generic PHY driver is in use either the PHY has 1021490cb412SJustin Chen * been strapped or programmed correctly by the boot loader so 1022490cb412SJustin Chen * we should stick to our incorrect interpretation since we 1023490cb412SJustin Chen * have validated it. 1024490cb412SJustin Chen * 1025490cb412SJustin Chen * Now when a dedicated PHY driver is in use, we need to 1026490cb412SJustin Chen * reverse the meaning of the phy_interface_mode values to 1027490cb412SJustin Chen * something that the PHY driver will interpret and act on such 1028490cb412SJustin Chen * that we have two mistakes canceling themselves so to speak. 1029490cb412SJustin Chen * We only do this for the two modes that GENET driver 1030490cb412SJustin Chen * officially supports on Broadcom STB chips: 1031490cb412SJustin Chen * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID. 1032490cb412SJustin Chen * Other modes are not *officially* supported with the boot 1033490cb412SJustin Chen * loader and the scripted environment generating Device Tree 1034490cb412SJustin Chen * blobs for those platforms. 1035490cb412SJustin Chen * 1036490cb412SJustin Chen * Note that internal PHY and fixed-link configurations are not 1037490cb412SJustin Chen * affected because they use different phy_interface_t values 1038490cb412SJustin Chen * or the Generic PHY driver. 
1039490cb412SJustin Chen */ 1040490cb412SJustin Chen switch (phy_iface) { 1041490cb412SJustin Chen case PHY_INTERFACE_MODE_RGMII: 1042490cb412SJustin Chen phy_iface = PHY_INTERFACE_MODE_RGMII_ID; 1043490cb412SJustin Chen break; 1044490cb412SJustin Chen case PHY_INTERFACE_MODE_RGMII_TXID: 1045490cb412SJustin Chen phy_iface = PHY_INTERFACE_MODE_RGMII_RXID; 1046490cb412SJustin Chen break; 1047490cb412SJustin Chen default: 1048490cb412SJustin Chen break; 1049490cb412SJustin Chen } 1050490cb412SJustin Chen 1051490cb412SJustin Chen if (phy_connect) { 1052490cb412SJustin Chen phydev = of_phy_connect(dev, intf->phy_dn, 1053490cb412SJustin Chen bcmasp_adj_link, phy_flags, 1054490cb412SJustin Chen phy_iface); 1055490cb412SJustin Chen if (!phydev) { 1056490cb412SJustin Chen ret = -ENODEV; 1057490cb412SJustin Chen netdev_err(dev, "could not attach to PHY\n"); 1058490cb412SJustin Chen goto err_phy_disable; 1059490cb412SJustin Chen } 1060ae24a16aSFlorian Fainelli 1061ae24a16aSFlorian Fainelli /* Indicate that the MAC is responsible for PHY PM */ 1062ae24a16aSFlorian Fainelli phydev->mac_managed_pm = true; 1063a2f07512SJustin Chen } else if (!intf->wolopts) { 1064490cb412SJustin Chen ret = phy_resume(dev->phydev); 1065490cb412SJustin Chen if (ret) 1066490cb412SJustin Chen goto err_phy_disable; 1067490cb412SJustin Chen } 1068490cb412SJustin Chen 1069490cb412SJustin Chen umac_reset(intf); 1070490cb412SJustin Chen 1071490cb412SJustin Chen umac_init(intf); 1072490cb412SJustin Chen 1073490cb412SJustin Chen umac_set_hw_addr(intf, dev->dev_addr); 1074490cb412SJustin Chen 1075490cb412SJustin Chen intf->old_duplex = -1; 1076490cb412SJustin Chen intf->old_link = -1; 1077490cb412SJustin Chen intf->old_pause = -1; 1078490cb412SJustin Chen 1079490cb412SJustin Chen ret = bcmasp_init_tx(intf); 1080490cb412SJustin Chen if (ret) 1081490cb412SJustin Chen goto err_phy_disconnect; 1082490cb412SJustin Chen 1083490cb412SJustin Chen /* Turn on asp */ 1084490cb412SJustin Chen bcmasp_enable_tx(intf, 1); 
1085490cb412SJustin Chen 1086490cb412SJustin Chen ret = bcmasp_init_rx(intf); 1087490cb412SJustin Chen if (ret) 1088490cb412SJustin Chen goto err_reclaim_tx; 1089490cb412SJustin Chen 1090490cb412SJustin Chen bcmasp_enable_rx(intf, 1); 1091490cb412SJustin Chen 1092490cb412SJustin Chen intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD); 1093490cb412SJustin Chen 1094490cb412SJustin Chen bcmasp_netif_start(dev); 1095490cb412SJustin Chen 1096490cb412SJustin Chen netif_start_queue(dev); 1097490cb412SJustin Chen 1098490cb412SJustin Chen return 0; 1099490cb412SJustin Chen 1100490cb412SJustin Chen err_reclaim_tx: 1101490cb412SJustin Chen bcmasp_reclaim_free_all_tx(intf); 1102490cb412SJustin Chen err_phy_disconnect: 1103490cb412SJustin Chen if (phydev) 1104490cb412SJustin Chen phy_disconnect(phydev); 1105490cb412SJustin Chen err_phy_disable: 1106490cb412SJustin Chen if (intf->internal_phy) 1107490cb412SJustin Chen bcmasp_ephy_enable_set(intf, false); 1108490cb412SJustin Chen else 1109490cb412SJustin Chen bcmasp_rgmii_mode_en_set(intf, false); 1110490cb412SJustin Chen return ret; 1111490cb412SJustin Chen } 1112490cb412SJustin Chen 1113490cb412SJustin Chen static int bcmasp_open(struct net_device *dev) 1114490cb412SJustin Chen { 1115490cb412SJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 1116490cb412SJustin Chen int ret; 1117490cb412SJustin Chen 1118490cb412SJustin Chen netif_dbg(intf, ifup, dev, "bcmasp open\n"); 1119490cb412SJustin Chen 1120490cb412SJustin Chen ret = clk_prepare_enable(intf->parent->clk); 1121490cb412SJustin Chen if (ret) 1122490cb412SJustin Chen return ret; 1123490cb412SJustin Chen 1124490cb412SJustin Chen ret = bcmasp_netif_init(dev, true); 1125490cb412SJustin Chen if (ret) 1126490cb412SJustin Chen clk_disable_unprepare(intf->parent->clk); 1127490cb412SJustin Chen 1128490cb412SJustin Chen return ret; 1129490cb412SJustin Chen } 1130490cb412SJustin Chen 1131490cb412SJustin Chen static void bcmasp_tx_timeout(struct net_device *dev, 
unsigned int txqueue) 1132490cb412SJustin Chen { 1133490cb412SJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 1134490cb412SJustin Chen 1135490cb412SJustin Chen netif_dbg(intf, tx_err, dev, "transmit timeout!\n"); 11367c10691eSJustin Chen intf->mib.tx_timeout_cnt++; 1137490cb412SJustin Chen } 1138490cb412SJustin Chen 1139490cb412SJustin Chen static int bcmasp_get_phys_port_name(struct net_device *dev, 1140490cb412SJustin Chen char *name, size_t len) 1141490cb412SJustin Chen { 1142490cb412SJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 1143490cb412SJustin Chen 1144490cb412SJustin Chen if (snprintf(name, len, "p%d", intf->port) >= len) 1145490cb412SJustin Chen return -EINVAL; 1146490cb412SJustin Chen 1147490cb412SJustin Chen return 0; 1148490cb412SJustin Chen } 1149490cb412SJustin Chen 1150490cb412SJustin Chen static void bcmasp_get_stats64(struct net_device *dev, 1151490cb412SJustin Chen struct rtnl_link_stats64 *stats) 1152490cb412SJustin Chen { 1153490cb412SJustin Chen struct bcmasp_intf *intf = netdev_priv(dev); 1154490cb412SJustin Chen struct bcmasp_intf_stats64 *lstats; 1155490cb412SJustin Chen unsigned int start; 1156490cb412SJustin Chen 1157490cb412SJustin Chen lstats = &intf->stats64; 1158490cb412SJustin Chen 1159490cb412SJustin Chen do { 1160490cb412SJustin Chen start = u64_stats_fetch_begin(&lstats->syncp); 1161490cb412SJustin Chen stats->rx_packets = u64_stats_read(&lstats->rx_packets); 1162490cb412SJustin Chen stats->rx_bytes = u64_stats_read(&lstats->rx_bytes); 1163490cb412SJustin Chen stats->rx_dropped = u64_stats_read(&lstats->rx_dropped); 1164490cb412SJustin Chen stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs); 1165490cb412SJustin Chen stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs); 1166490cb412SJustin Chen stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors; 1167490cb412SJustin Chen 1168490cb412SJustin Chen stats->tx_packets = u64_stats_read(&lstats->tx_packets); 1169490cb412SJustin Chen 
stats->tx_bytes = u64_stats_read(&lstats->tx_bytes); 1170490cb412SJustin Chen } while (u64_stats_fetch_retry(&lstats->syncp, start)); 1171490cb412SJustin Chen } 1172490cb412SJustin Chen 1173490cb412SJustin Chen static const struct net_device_ops bcmasp_netdev_ops = { 1174490cb412SJustin Chen .ndo_open = bcmasp_open, 1175490cb412SJustin Chen .ndo_stop = bcmasp_stop, 1176490cb412SJustin Chen .ndo_start_xmit = bcmasp_xmit, 1177490cb412SJustin Chen .ndo_tx_timeout = bcmasp_tx_timeout, 1178490cb412SJustin Chen .ndo_set_rx_mode = bcmasp_set_rx_mode, 1179490cb412SJustin Chen .ndo_get_phys_port_name = bcmasp_get_phys_port_name, 1180490cb412SJustin Chen .ndo_eth_ioctl = phy_do_ioctl_running, 1181490cb412SJustin Chen .ndo_set_mac_address = eth_mac_addr, 1182490cb412SJustin Chen .ndo_get_stats64 = bcmasp_get_stats64, 1183490cb412SJustin Chen }; 1184490cb412SJustin Chen 1185490cb412SJustin Chen static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf) 1186490cb412SJustin Chen { 1187490cb412SJustin Chen /* Per port */ 1188490cb412SJustin Chen intf->res.umac = priv->base + UMC_OFFSET(intf); 1189490cb412SJustin Chen intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb + 1190490cb412SJustin Chen (intf->port * 0x4)); 1191490cb412SJustin Chen intf->res.rgmii = priv->base + RGMII_OFFSET(intf); 1192490cb412SJustin Chen 1193490cb412SJustin Chen /* Per ch */ 1194490cb412SJustin Chen intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf); 1195490cb412SJustin Chen intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf); 1196490cb412SJustin Chen intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf); 1197490cb412SJustin Chen intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf); 1198490cb412SJustin Chen intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf); 1199490cb412SJustin Chen 1200490cb412SJustin Chen intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf); 1201490cb412SJustin Chen intf->rx_edpkt_cfg = priv->base + 
RX_EDPKT_CFG_OFFSET(intf); 1202490cb412SJustin Chen } 1203490cb412SJustin Chen 1204490cb412SJustin Chen #define MAX_IRQ_STR_LEN 64 1205490cb412SJustin Chen struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, 1206490cb412SJustin Chen struct device_node *ndev_dn, int i) 1207490cb412SJustin Chen { 1208490cb412SJustin Chen struct device *dev = &priv->pdev->dev; 1209490cb412SJustin Chen struct bcmasp_intf *intf; 1210490cb412SJustin Chen struct net_device *ndev; 1211490cb412SJustin Chen int ch, port, ret; 1212490cb412SJustin Chen 1213490cb412SJustin Chen if (of_property_read_u32(ndev_dn, "reg", &port)) { 1214490cb412SJustin Chen dev_warn(dev, "%s: invalid port number\n", ndev_dn->name); 1215490cb412SJustin Chen goto err; 1216490cb412SJustin Chen } 1217490cb412SJustin Chen 1218490cb412SJustin Chen if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) { 1219490cb412SJustin Chen dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name); 1220490cb412SJustin Chen goto err; 1221490cb412SJustin Chen } 1222490cb412SJustin Chen 1223490cb412SJustin Chen ndev = alloc_etherdev(sizeof(struct bcmasp_intf)); 1224490cb412SJustin Chen if (!ndev) { 1225490cb412SJustin Chen dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name); 1226490cb412SJustin Chen goto err; 1227490cb412SJustin Chen } 1228490cb412SJustin Chen intf = netdev_priv(ndev); 1229490cb412SJustin Chen 1230490cb412SJustin Chen intf->parent = priv; 1231490cb412SJustin Chen intf->ndev = ndev; 1232490cb412SJustin Chen intf->channel = ch; 1233490cb412SJustin Chen intf->port = port; 1234490cb412SJustin Chen intf->ndev_dn = ndev_dn; 1235490cb412SJustin Chen intf->index = i; 1236490cb412SJustin Chen 1237490cb412SJustin Chen ret = of_get_phy_mode(ndev_dn, &intf->phy_interface); 1238490cb412SJustin Chen if (ret < 0) { 1239490cb412SJustin Chen dev_err(dev, "invalid PHY mode property\n"); 1240490cb412SJustin Chen goto err_free_netdev; 1241490cb412SJustin Chen } 1242490cb412SJustin Chen 1243490cb412SJustin Chen if 
(intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL) 1244490cb412SJustin Chen intf->internal_phy = true; 1245490cb412SJustin Chen 1246490cb412SJustin Chen intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0); 1247490cb412SJustin Chen if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) { 1248490cb412SJustin Chen ret = of_phy_register_fixed_link(ndev_dn); 1249490cb412SJustin Chen if (ret) { 1250490cb412SJustin Chen dev_warn(dev, "%s: failed to register fixed PHY\n", 1251490cb412SJustin Chen ndev_dn->name); 1252490cb412SJustin Chen goto err_free_netdev; 1253490cb412SJustin Chen } 1254490cb412SJustin Chen intf->phy_dn = ndev_dn; 1255490cb412SJustin Chen } 1256490cb412SJustin Chen 1257490cb412SJustin Chen /* Map resource */ 1258490cb412SJustin Chen bcmasp_map_res(priv, intf); 1259490cb412SJustin Chen 1260490cb412SJustin Chen if ((!phy_interface_mode_is_rgmii(intf->phy_interface) && 1261490cb412SJustin Chen intf->phy_interface != PHY_INTERFACE_MODE_MII && 1262490cb412SJustin Chen intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) || 1263490cb412SJustin Chen (intf->port != 1 && intf->internal_phy)) { 1264490cb412SJustin Chen netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n", 1265490cb412SJustin Chen phy_modes(intf->phy_interface), intf->port); 1266490cb412SJustin Chen ret = -EINVAL; 1267490cb412SJustin Chen goto err_free_netdev; 1268490cb412SJustin Chen } 1269490cb412SJustin Chen 1270490cb412SJustin Chen ret = of_get_ethdev_address(ndev_dn, ndev); 1271490cb412SJustin Chen if (ret) { 1272490cb412SJustin Chen netdev_warn(ndev, "using random Ethernet MAC\n"); 1273490cb412SJustin Chen eth_hw_addr_random(ndev); 1274490cb412SJustin Chen } 1275490cb412SJustin Chen 1276490cb412SJustin Chen SET_NETDEV_DEV(ndev, dev); 1277490cb412SJustin Chen intf->ops = &bcmasp_intf_ops; 1278490cb412SJustin Chen ndev->netdev_ops = &bcmasp_netdev_ops; 1279490cb412SJustin Chen ndev->ethtool_ops = &bcmasp_ethtool_ops; 1280490cb412SJustin Chen intf->msg_enable = netif_msg_init(-1, 
				  NETIF_MSG_DRV |
				  NETIF_MSG_PROBE |
				  NETIF_MSG_LINK);
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			  NETIF_F_RXCSUM;
	ndev->hw_features |= ndev->features;
	/* Reserve headroom for the per-packet offload descriptor prepended
	 * on transmit.
	 */
	ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);

	return intf;

err_free_netdev:
	free_netdev(ndev);
err:
	return NULL;
}

/* Tear down an interface created by bcmasp_interface_create(): unregister
 * the netdev only if registration actually completed, release a fixed-link
 * PHY registered from the device-tree node (if any), then free the netdev.
 * Safe to call on a partially-initialized interface.
 */
void bcmasp_interface_destroy(struct bcmasp_intf *intf)
{
	if (intf->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(intf->ndev);
	if (of_phy_is_fixed_link(intf->ndev_dn))
		of_phy_deregister_fixed_link(intf->ndev_dn);
	free_netdev(intf->ndev);
}

/* Arm the interface for Wake-on-LAN before system suspend, according to
 * intf->wolopts: program magic-packet detection (with optional SecureOn
 * password), suspend network filters for WAKE_FILTER, leave the UniMAC
 * receiver running so wake packets can be seen, and unmask the wake-up
 * interrupt. Caller must only invoke this when intf->wolopts is non-zero.
 */
static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
{
	struct net_device *ndev = intf->ndev;
	u32 reg;

	reg = umac_rl(intf, UMC_MPD_CTRL);
	if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= UMC_MPD_CTRL_MPD_EN;
	/* Default to password matching disabled; re-enabled below only for
	 * WAKE_MAGICSECURE.
	 */
	reg &= ~UMC_MPD_CTRL_PSW_EN;
	if (intf->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password: 6 bytes split as the two
		 * most-significant bytes (UMC_PSW_MS) and the four
		 * least-significant bytes (UMC_PSW_LS), big-endian.
		 */
		umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
			UMC_PSW_MS);
		umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
			UMC_PSW_LS);
		reg |= UMC_MPD_CTRL_PSW_EN;
	}
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->wolopts & WAKE_FILTER)
		bcmasp_netfilt_suspend(intf);

	/* Bring UniMAC out of reset if needed and enable RX */
	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		reg &= ~UMC_CMD_SW_RESET;

	/* Promiscuous RX so wake packets are received regardless of the
	 * programmed MAC filters while suspended.
	 */
	reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
	umac_wl(intf, reg, UMC_CMD);

	umac_enable_set(intf, UMC_CMD_RX_EN, 1);

	/* Unmask all wake-up interrupt sources; re-masked on resume by
	 * bcmasp_resume_from_wol().
	 */
	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_CLEAR);
	}

	netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}

/* System-suspend handler for one interface. Detaches and deinitializes the
 * netif; if Wake-on-LAN is not requested, also suspends the PHY and gates
 * the interface clocks. With WoL requested (and the device allowed to wake),
 * hands off to bcmasp_suspend_to_wol() instead.
 *
 * Returns 0 on success or the phy_suspend() error, in which case the netif
 * is re-initialized and the interface is left running (the parent clock is
 * intentionally not disabled on that path).
 */
int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct net_device *dev = intf->ndev;
	int ret = 0;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmasp_netif_deinit(dev);

	if (!intf->wolopts) {
		ret = phy_suspend(dev->phydev);
		if (ret)
			goto out;

		/* Power down whichever PHY path this port uses */
		if (intf->internal_phy)
			bcmasp_ephy_enable_set(intf, false);
		else
			bcmasp_rgmii_mode_en_set(intf, false);

		/* If Wake-on-LAN is disabled, we can safely
		 * disable the network interface clocks.
		 */
		bcmasp_core_clock_set_intf(intf, false);
	}

	if (device_may_wakeup(kdev) && intf->wolopts)
		bcmasp_suspend_to_wol(intf);

	clk_disable_unprepare(intf->parent->clk);

	return ret;

out:
	/* phy_suspend() failed: undo the deinit and stay up */
	bcmasp_netif_init(dev, false);
	return ret;
}

/* Undo the WoL arming done by bcmasp_suspend_to_wol(): clear magic-packet
 * detection and re-mask the wake-up interrupt sources. Harmless if the
 * interface did not actually enter WoL mode.
 */
static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
	u32 reg;

	reg = umac_rl(intf, UMC_MPD_CTRL);
	reg &= ~UMC_MPD_CTRL_MPD_EN;
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_SET);
	}
}

/* System-resume handler for one interface: re-enable the parent clock,
 * reinitialize the netif, disarm any Wake-on-LAN state, restore EEE if it
 * was enabled before suspend, and reattach the device.
 *
 * Returns 0 on success or a negative errno; on netif-init failure the
 * parent clock is disabled again before returning.
 */
int bcmasp_interface_resume(struct bcmasp_intf *intf)
{
	struct net_device *dev = intf->ndev;
	int ret;

	if (!netif_running(dev))
		return 0;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		return ret;

	ret = bcmasp_netif_init(dev, false);
	if (ret)
		goto out;

	bcmasp_resume_from_wol(intf);

	if (intf->eee.eee_enabled)
		bcmasp_eee_enable_set(intf, true);

	netif_device_attach(dev);

	return 0;

out:
	clk_disable_unprepare(intf->parent->clk);
	return ret;
}