Lines Matching +full:apb3 +full:- +full:bus

9  * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
59 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
125 #include "xgbe-common.h"
129 return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; in xgbe_get_max_frame()
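/*
 * Worked example of the max-frame arithmetic above (a standalone
 * sketch, not driver code): ETH_HLEN is 14, ETH_FCS_LEN is 4 and
 * VLAN_HLEN is 4, so a 1500-byte MTU gives a 1522-byte maximum frame.
 */
#include <stdio.h>

#define ETH_HLEN	14	/* destination MAC + source MAC + ethertype */
#define ETH_FCS_LEN	4	/* frame check sequence */
#define VLAN_HLEN	4	/* 802.1Q tag */

static unsigned int max_frame(unsigned int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

int main(void)
{
	printf("MTU 1500 -> %u-byte frames\n", max_frame(1500));	/* 1522 */
	printf("MTU 9000 -> %u-byte frames\n", max_frame(9000));	/* 9022 */
	return 0;
}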
138 DBGPR("-->xgbe_usec_to_riwt\n"); in xgbe_usec_to_riwt()
140 rate = pdata->sysclk_rate; in xgbe_usec_to_riwt()
150 DBGPR("<--xgbe_usec_to_riwt\n"); in xgbe_usec_to_riwt()
161 DBGPR("-->xgbe_riwt_to_usec\n"); in xgbe_riwt_to_usec()
163 rate = pdata->sysclk_rate; in xgbe_riwt_to_usec()
173 DBGPR("<--xgbe_riwt_to_usec\n"); in xgbe_riwt_to_usec()
184 pbl = pdata->pbl; in xgbe_config_pbl_val()
186 if (pdata->pbl > 32) { in xgbe_config_pbl_val()
191 for (i = 0; i < pdata->channel_count; i++) { in xgbe_config_pbl_val()
192 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8, in xgbe_config_pbl_val()
195 if (pdata->channel[i]->tx_ring) in xgbe_config_pbl_val()
196 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, in xgbe_config_pbl_val()
199 if (pdata->channel[i]->rx_ring) in xgbe_config_pbl_val()
200 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, in xgbe_config_pbl_val()
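/*
 * Sketch of the burst-length split hinted at above: when the requested
 * programmed burst length (PBL) is larger than the register field can
 * hold, the PBLX8 bit is set and the field is programmed with the
 * length divided by 8. The 32-beat field limit and the divide-by-8 are
 * assumptions based on the usual Synopsys DMA behaviour, not values
 * taken from this listing.
 */
struct pbl_cfg {
	unsigned int pblx8;	/* multiply the programmed PBL by 8 */
	unsigned int pbl;	/* value written into the PBL field */
};

static struct pbl_cfg split_pbl(unsigned int requested)
{
	struct pbl_cfg cfg = { .pblx8 = 0, .pbl = requested };

	if (requested > 32) {		/* assumed field limit of 32 beats */
		cfg.pblx8 = 1;
		cfg.pbl = requested / 8;	/* e.g. 256 beats -> 32 * 8 */
	}
	return cfg;
}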
211 for (i = 0; i < pdata->channel_count; i++) { in xgbe_config_osp_mode()
212 if (!pdata->channel[i]->tx_ring) in xgbe_config_osp_mode()
215 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP, in xgbe_config_osp_mode()
216 pdata->tx_osp_mode); in xgbe_config_osp_mode()
226 for (i = 0; i < pdata->rx_q_count; i++) in xgbe_config_rsf_mode()
236 for (i = 0; i < pdata->tx_q_count; i++) in xgbe_config_tsf_mode()
247 for (i = 0; i < pdata->rx_q_count; i++) in xgbe_config_rx_threshold()
258 for (i = 0; i < pdata->tx_q_count; i++) in xgbe_config_tx_threshold()
268 for (i = 0; i < pdata->channel_count; i++) { in xgbe_config_rx_coalesce()
269 if (!pdata->channel[i]->rx_ring) in xgbe_config_rx_coalesce()
272 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT, in xgbe_config_rx_coalesce()
273 pdata->rx_riwt); in xgbe_config_rx_coalesce()
288 for (i = 0; i < pdata->channel_count; i++) { in xgbe_config_rx_buffer_size()
289 if (!pdata->channel[i]->rx_ring) in xgbe_config_rx_buffer_size()
292 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ, in xgbe_config_rx_buffer_size()
293 pdata->rx_buf_size); in xgbe_config_rx_buffer_size()
301 for (i = 0; i < pdata->channel_count; i++) { in xgbe_config_tso_mode()
302 if (!pdata->channel[i]->tx_ring) in xgbe_config_tso_mode()
305 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1); in xgbe_config_tso_mode()
313 for (i = 0; i < pdata->channel_count; i++) { in xgbe_config_sph_mode()
314 if (!pdata->channel[i]->rx_ring) in xgbe_config_sph_mode()
317 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1); in xgbe_config_sph_mode()
327 for (i = 0; i < pdata->channel_count; i++) { in xgbe_disable_sph_mode()
328 if (!pdata->channel[i]->rx_ring) in xgbe_disable_sph_mode()
331 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); in xgbe_disable_sph_mode()
341 mutex_lock(&pdata->rss_mutex); in xgbe_write_rss_reg()
344 ret = -EBUSY; in xgbe_write_rss_reg()
356 while (wait--) { in xgbe_write_rss_reg()
363 ret = -EBUSY; in xgbe_write_rss_reg()
366 mutex_unlock(&pdata->rss_mutex); in xgbe_write_rss_reg()
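/*
 * Shape of the indirect RSS register write above: stage the value in a
 * data register, program the address/type plus an operation-busy (OB)
 * bit, then poll until the hardware clears OB or a bounded wait
 * expires. The register layout below is a self-contained fake so the
 * sketch compiles and runs; it is not the MAC_RSSDR/MAC_RSSAR layout.
 */
#include <errno.h>
#include <stdio.h>

#define RSS_DATA	0
#define RSS_ADDR	1
#define RSS_ADDR_OB	(1u << 31)	/* hypothetical operation-busy bit */

static unsigned int fake_regs[2];

static void write_reg(unsigned int off, unsigned int val)
{
	fake_regs[off] = val;
	/* fake hardware: complete a kicked-off operation immediately */
	if (off == RSS_ADDR && (val & RSS_ADDR_OB))
		fake_regs[RSS_ADDR] &= ~RSS_ADDR_OB;
}

static unsigned int read_reg(unsigned int off)
{
	return fake_regs[off];
}

static int indirect_rss_write(unsigned int index, unsigned int val)
{
	int wait = 1000;

	if (read_reg(RSS_ADDR) & RSS_ADDR_OB)
		return -EBUSY;		/* a previous operation is still pending */

	write_reg(RSS_DATA, val);			/* 1. stage the data */
	write_reg(RSS_ADDR, index | RSS_ADDR_OB);	/* 2. address + OB starts it */

	while (wait--)					/* 3. poll for completion */
		if (!(read_reg(RSS_ADDR) & RSS_ADDR_OB))
			return 0;

	return -EBUSY;
}

int main(void)
{
	printf("rss write -> %d\n", indirect_rss_write(3, 0x6d5a56da));
	return 0;
}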
373 unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); in xgbe_write_rss_hash_key()
374 unsigned int *key = (unsigned int *)&pdata->rss_key; in xgbe_write_rss_hash_key()
377 while (key_regs--) { in xgbe_write_rss_hash_key()
392 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { in xgbe_write_rss_lookup_table()
395 pdata->rss_table[i]); in xgbe_write_rss_lookup_table()
405 memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); in xgbe_set_rss_hash_key()
415 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) in xgbe_set_rss_lookup_table()
416 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); in xgbe_set_rss_lookup_table()
425 if (!pdata->hw_feat.rss) in xgbe_enable_rss()
426 return -EOPNOTSUPP; in xgbe_enable_rss()
439 XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); in xgbe_enable_rss()
449 if (!pdata->hw_feat.rss) in xgbe_disable_rss()
450 return -EOPNOTSUPP; in xgbe_disable_rss()
461 if (!pdata->hw_feat.rss) in xgbe_config_rss()
464 if (pdata->netdev->features & NETIF_F_RXHASH) in xgbe_config_rss()
470 netdev_err(pdata->netdev, in xgbe_config_rss()
481 if (pdata->prio2q_map[prio] != queue) in xgbe_is_pfc_queue()
485 tc = pdata->ets->prio_tc[prio]; in xgbe_is_pfc_queue()
488 if (pdata->pfc->pfc_en & (1 << tc)) in xgbe_is_pfc_queue()
498 XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port); in xgbe_set_vxlan_id()
500 netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n", in xgbe_set_vxlan_id()
501 pdata->vxlan_port); in xgbe_set_vxlan_id()
506 if (!pdata->hw_feat.vxn) in xgbe_enable_vxlan()
512 /* Allow for IPv6/UDP zero-checksum VXLAN packets */ in xgbe_enable_vxlan()
519 netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n"); in xgbe_enable_vxlan()
524 if (!pdata->hw_feat.vxn) in xgbe_disable_vxlan()
530 /* Clear IPv6/UDP zero-checksum VXLAN packets setting */ in xgbe_disable_vxlan()
536 netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n"); in xgbe_disable_vxlan()
544 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) in xgbe_get_fc_queue_count()
547 return min_t(unsigned int, pdata->tx_q_count, max_q_count); in xgbe_get_fc_queue_count()
556 for (i = 0; i < pdata->rx_q_count; i++) in xgbe_disable_tx_flow_control()
575 struct ieee_pfc *pfc = pdata->pfc; in xgbe_enable_tx_flow_control()
576 struct ieee_ets *ets = pdata->ets; in xgbe_enable_tx_flow_control()
581 for (i = 0; i < pdata->rx_q_count; i++) { in xgbe_enable_tx_flow_control()
584 if (pdata->rx_rfd[i]) { in xgbe_enable_tx_flow_control()
596 netif_dbg(pdata, drv, pdata->netdev, in xgbe_enable_tx_flow_control()
636 struct ieee_pfc *pfc = pdata->pfc; in xgbe_config_tx_flow_control()
638 if (pdata->tx_pause || (pfc && pfc->pfc_en)) in xgbe_config_tx_flow_control()
648 struct ieee_pfc *pfc = pdata->pfc; in xgbe_config_rx_flow_control()
650 if (pdata->rx_pause || (pfc && pfc->pfc_en)) in xgbe_config_rx_flow_control()
660 struct ieee_pfc *pfc = pdata->pfc; in xgbe_config_flow_control()
666 (pfc && pfc->pfc_en) ? 1 : 0); in xgbe_config_flow_control()
675 if (pdata->channel_irq_mode) in xgbe_enable_dma_interrupts()
677 pdata->channel_irq_mode); in xgbe_enable_dma_interrupts()
679 ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); in xgbe_enable_dma_interrupts()
681 for (i = 0; i < pdata->channel_count; i++) { in xgbe_enable_dma_interrupts()
682 channel = pdata->channel[i]; in xgbe_enable_dma_interrupts()
689 channel->curr_ier = 0; in xgbe_enable_dma_interrupts()
692 * NIE - Normal Interrupt Summary Enable in xgbe_enable_dma_interrupts()
693 * AIE - Abnormal Interrupt Summary Enable in xgbe_enable_dma_interrupts()
694 * FBEE - Fatal Bus Error Enable in xgbe_enable_dma_interrupts()
697 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1); in xgbe_enable_dma_interrupts()
698 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1); in xgbe_enable_dma_interrupts()
700 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1); in xgbe_enable_dma_interrupts()
701 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1); in xgbe_enable_dma_interrupts()
703 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); in xgbe_enable_dma_interrupts()
705 if (channel->tx_ring) { in xgbe_enable_dma_interrupts()
707 * TIE - Transmit Interrupt Enable (unless using in xgbe_enable_dma_interrupts()
711 if (!pdata->per_channel_irq || pdata->channel_irq_mode) in xgbe_enable_dma_interrupts()
712 XGMAC_SET_BITS(channel->curr_ier, in xgbe_enable_dma_interrupts()
715 if (channel->rx_ring) { in xgbe_enable_dma_interrupts()
717 * RBUE - Receive Buffer Unavailable Enable in xgbe_enable_dma_interrupts()
718 * RIE - Receive Interrupt Enable (unless using in xgbe_enable_dma_interrupts()
722 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); in xgbe_enable_dma_interrupts()
723 if (!pdata->per_channel_irq || pdata->channel_irq_mode) in xgbe_enable_dma_interrupts()
724 XGMAC_SET_BITS(channel->curr_ier, in xgbe_enable_dma_interrupts()
728 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); in xgbe_enable_dma_interrupts()
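/*
 * Sketch of the interrupt-enable decisions described by the comments
 * above: the summary and fatal-bus-error enables are always set, while
 * the per-direction completion enables depend on whether the channel
 * has a ring and on the per-channel-IRQ mode. The bit values here are
 * illustrative placeholders, not the real DMA_CH_IER layout (which
 * also moves the NIE/AIE positions between IP versions).
 */
#include <stdbool.h>

#define IER_NIE		(1u << 0)	/* normal interrupt summary enable */
#define IER_AIE		(1u << 1)	/* abnormal interrupt summary enable */
#define IER_FBEE	(1u << 2)	/* fatal bus error enable */
#define IER_TIE		(1u << 3)	/* transmit interrupt enable */
#define IER_RIE		(1u << 4)	/* receive interrupt enable */
#define IER_RBUE	(1u << 5)	/* receive buffer unavailable enable */

static unsigned int build_channel_ier(bool has_tx_ring, bool has_rx_ring,
				      bool per_channel_irq, bool irq_mode)
{
	unsigned int ier = IER_NIE | IER_AIE | IER_FBEE;

	if (has_tx_ring && (!per_channel_irq || irq_mode))
		ier |= IER_TIE;
	if (has_rx_ring) {
		ier |= IER_RBUE;
		if (!per_channel_irq || irq_mode)
			ier |= IER_RIE;
	}
	return ier;
}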
737 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); in xgbe_enable_mtl_interrupts()
769 if (!pdata->vdata->ecc_support) in xgbe_enable_ecc_interrupts()
842 return -EINVAL; in xgbe_set_speed()
859 /* Check only C-TAG (0x8100) packets */ in xgbe_enable_rx_vlan_stripping()
862 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ in xgbe_enable_rx_vlan_stripping()
889 /* Only filter on the lower 12-bits of the VLAN tag */ in xgbe_enable_rx_vlan_filtering()
943 for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { in xgbe_update_vlan_hash_table()
965 netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n", in xgbe_set_promiscuous_mode()
973 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) in xgbe_set_promiscuous_mode()
988 netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n", in xgbe_set_all_multicast_mode()
1006 mac_addr[0] = ha->addr[0]; in xgbe_set_mac_reg()
1007 mac_addr[1] = ha->addr[1]; in xgbe_set_mac_reg()
1008 mac_addr[2] = ha->addr[2]; in xgbe_set_mac_reg()
1009 mac_addr[3] = ha->addr[3]; in xgbe_set_mac_reg()
1011 mac_addr[0] = ha->addr[4]; in xgbe_set_mac_reg()
1012 mac_addr[1] = ha->addr[5]; in xgbe_set_mac_reg()
1014 netif_dbg(pdata, drv, pdata->netdev, in xgbe_set_mac_reg()
1016 ha->addr, *mac_reg); in xgbe_set_mac_reg()
1029 struct net_device *netdev = pdata->netdev; in xgbe_set_mac_addn_addrs()
1035 addn_macs = pdata->hw_feat.addn_mac; in xgbe_set_mac_addn_addrs()
1042 addn_macs--; in xgbe_set_mac_addn_addrs()
1050 addn_macs--; in xgbe_set_mac_addn_addrs()
1056 while (addn_macs--) in xgbe_set_mac_addn_addrs()
1062 struct net_device *netdev = pdata->netdev; in xgbe_set_mac_hash_table()
1070 hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7); in xgbe_set_mac_hash_table()
1071 hash_table_count = pdata->hw_feat.hash_table_size / 32; in xgbe_set_mac_hash_table()
1076 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); in xgbe_set_mac_hash_table()
1082 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); in xgbe_set_mac_hash_table()
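/*
 * Sketch of the hash-bucket arithmetic used above. With a 256-entry
 * hash table, hash_table_shift = 26 - (256 >> 7) = 24, so the top
 * eight bits of the bit-reversed, inverted CRC-32 select one of 256
 * bits spread over 256 / 32 = 8 registers. The CRC is taken as an
 * input here instead of being recomputed.
 */
struct mac_hash_slot {
	unsigned int reg;	/* which hash-table register */
	unsigned int bit;	/* bit position within that register */
};

static struct mac_hash_slot mac_hash_index(unsigned int crc,
					   unsigned int table_size)
{
	unsigned int shift = 26 - (table_size >> 7);
	unsigned int index = crc >> shift;	/* 0 .. table_size - 1 */

	return (struct mac_hash_slot){
		.reg = index >> 5,
		.bit = index & 0x1f,
	};
}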
1097 if (pdata->hw_feat.hash_table_size) in xgbe_add_mac_addresses()
1121 struct net_device *netdev = pdata->netdev; in xgbe_config_rx_mode()
1124 pr_mode = ((netdev->flags & IFF_PROMISC) != 0); in xgbe_config_rx_mode()
1125 am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); in xgbe_config_rx_mode()
1140 return -EINVAL; in xgbe_clr_gpio()
1155 return -EINVAL; in xgbe_set_gpio()
1175 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); in xgbe_read_mmd_regs_v2()
1182 * The mmio interface is based on 16-bit offsets and values. All in xgbe_read_mmd_regs_v2()
1187 index = mmd_address & ~pdata->xpcs_window_mask; in xgbe_read_mmd_regs_v2()
1188 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); in xgbe_read_mmd_regs_v2()
1190 spin_lock_irqsave(&pdata->xpcs_lock, flags); in xgbe_read_mmd_regs_v2()
1191 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); in xgbe_read_mmd_regs_v2()
1193 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); in xgbe_read_mmd_regs_v2()
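/*
 * Sketch of the windowed XPCS access split computed above: the MMD
 * address is divided by the window mask into a window-select value
 * (written to xpcs_window_sel_reg) and an offset inside the currently
 * mapped window. The address doubling for 16-bit registers is an
 * assumption for illustration, not something shown in this listing.
 */
struct xpcs_access {
	unsigned int window_index;	/* programmed into the window-select register */
	unsigned int mmio_offset;	/* offset added to the window base */
};

static struct xpcs_access xpcs_split(unsigned int mmd, unsigned int reg,
				     unsigned int window_base,
				     unsigned int window_mask)
{
	unsigned int addr = (mmd << 16) | (reg & 0xffff);

	addr <<= 1;	/* assumed: 16-bit registers, one register every two bytes */

	return (struct xpcs_access){
		.window_index = addr & ~window_mask,
		.mmio_offset  = window_base + (addr & window_mask),
	};
}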
1207 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); in xgbe_write_mmd_regs_v2()
1214 * The mmio interface is based on 16-bit offsets and values. All in xgbe_write_mmd_regs_v2()
1219 index = mmd_address & ~pdata->xpcs_window_mask; in xgbe_write_mmd_regs_v2()
1220 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); in xgbe_write_mmd_regs_v2()
1222 spin_lock_irqsave(&pdata->xpcs_lock, flags); in xgbe_write_mmd_regs_v2()
1223 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); in xgbe_write_mmd_regs_v2()
1225 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); in xgbe_write_mmd_regs_v2()
1238 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); in xgbe_read_mmd_regs_v1()
1240 /* The PCS registers are accessed using mmio. The underlying APB3 in xgbe_read_mmd_regs_v1()
1245 * The mmio interface is based on 32-bit offsets and values. All in xgbe_read_mmd_regs_v1()
1249 spin_lock_irqsave(&pdata->xpcs_lock, flags); in xgbe_read_mmd_regs_v1()
1252 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); in xgbe_read_mmd_regs_v1()
1266 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); in xgbe_write_mmd_regs_v1()
1268 /* The PCS registers are accessed using mmio. The underlying APB3 in xgbe_write_mmd_regs_v1()
1273 * The mmio interface is based on 32-bit offsets and values. All in xgbe_write_mmd_regs_v1()
1277 spin_lock_irqsave(&pdata->xpcs_lock, flags); in xgbe_write_mmd_regs_v1()
1280 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); in xgbe_write_mmd_regs_v1()
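/*
 * The APB3-backed PCS access above is a two-phase sequence (an address
 * phase selecting a window, then a 32-bit data phase), which is why the
 * driver holds pdata->xpcs_lock across both MMIO operations. Below is a
 * self-contained sketch of that pattern; the window-select register,
 * the >> 8 / << 2 address split and the fake register file are
 * assumptions for illustration only.
 */
#include <pthread.h>

#define WINDOW_SELECT	0	/* placeholder address-phase register */

static pthread_mutex_t xpcs_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int fake_xpcs[1024];

static void mmio_write32(unsigned int off, unsigned int val)
{
	fake_xpcs[off % 1024] = val;
}

static unsigned int mmio_read32(unsigned int off)
{
	return fake_xpcs[off % 1024];
}

static unsigned int mmd_read(unsigned int mmd, unsigned int reg)
{
	unsigned int mmd_address = (mmd << 16) | (reg & 0xffff);
	unsigned int val;

	pthread_mutex_lock(&xpcs_lock);
	/* address phase: pick the window the data phase will hit */
	mmio_write32(WINDOW_SELECT, mmd_address >> 8);
	/* data phase: 32-bit registers, so the low byte is scaled by 4 */
	val = mmio_read32((mmd_address & 0xff) << 2);
	pthread_mutex_unlock(&xpcs_lock);

	return val;
}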
1286 switch (pdata->vdata->xpcs_access) { in xgbe_read_mmd_regs()
1299 switch (pdata->vdata->xpcs_access) { in xgbe_write_mmd_regs()
1337 reinit_completion(&pdata->mdio_complete); in xgbe_write_ext_mii_regs()
1347 if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) { in xgbe_write_ext_mii_regs()
1348 netdev_err(pdata->netdev, "mdio write operation timed out\n"); in xgbe_write_ext_mii_regs()
1349 return -ETIMEDOUT; in xgbe_write_ext_mii_regs()
1380 reinit_completion(&pdata->mdio_complete); in xgbe_read_ext_mii_regs()
1389 if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) { in xgbe_read_ext_mii_regs()
1390 netdev_err(pdata->netdev, "mdio read operation timed out\n"); in xgbe_read_ext_mii_regs()
1391 return -ETIMEDOUT; in xgbe_read_ext_mii_regs()
1425 return -EINVAL; in xgbe_set_ext_mii_mode()
1431 return -EINVAL; in xgbe_set_ext_mii_mode()
1441 return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN); in xgbe_tx_complete()
1460 struct xgbe_ring_desc *rdesc = rdata->rdesc; in xgbe_tx_desc_reset()
1468 rdesc->desc0 = 0; in xgbe_tx_desc_reset()
1469 rdesc->desc1 = 0; in xgbe_tx_desc_reset()
1470 rdesc->desc2 = 0; in xgbe_tx_desc_reset()
1471 rdesc->desc3 = 0; in xgbe_tx_desc_reset()
1479 struct xgbe_ring *ring = channel->tx_ring; in xgbe_tx_desc_init()
1482 int start_index = ring->cur; in xgbe_tx_desc_init()
1484 DBGPR("-->tx_desc_init\n"); in xgbe_tx_desc_init()
1487 for (i = 0; i < ring->rdesc_count; i++) { in xgbe_tx_desc_init()
1495 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); in xgbe_tx_desc_init()
1500 upper_32_bits(rdata->rdesc_dma)); in xgbe_tx_desc_init()
1502 lower_32_bits(rdata->rdesc_dma)); in xgbe_tx_desc_init()
1504 DBGPR("<--tx_desc_init\n"); in xgbe_tx_desc_init()
1510 struct xgbe_ring_desc *rdesc = rdata->rdesc; in xgbe_rx_desc_reset()
1511 unsigned int rx_usecs = pdata->rx_usecs; in xgbe_rx_desc_reset()
1512 unsigned int rx_frames = pdata->rx_frames; in xgbe_rx_desc_reset()
1534 hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off; in xgbe_rx_desc_reset()
1535 buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off; in xgbe_rx_desc_reset()
1536 rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma)); in xgbe_rx_desc_reset()
1537 rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma)); in xgbe_rx_desc_reset()
1538 rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma)); in xgbe_rx_desc_reset()
1539 rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma)); in xgbe_rx_desc_reset()
1541 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); in xgbe_rx_desc_reset()
1549 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1); in xgbe_rx_desc_reset()
1557 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_rx_desc_init()
1558 struct xgbe_ring *ring = channel->rx_ring; in xgbe_rx_desc_init()
1560 unsigned int start_index = ring->cur; in xgbe_rx_desc_init()
1563 DBGPR("-->rx_desc_init\n"); in xgbe_rx_desc_init()
1566 for (i = 0; i < ring->rdesc_count; i++) { in xgbe_rx_desc_init()
1574 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); in xgbe_rx_desc_init()
1579 upper_32_bits(rdata->rdesc_dma)); in xgbe_rx_desc_init()
1581 lower_32_bits(rdata->rdesc_dma)); in xgbe_rx_desc_init()
1584 rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1); in xgbe_rx_desc_init()
1586 lower_32_bits(rdata->rdesc_dma)); in xgbe_rx_desc_init()
1588 DBGPR("<--rx_desc_init\n"); in xgbe_rx_desc_init()
1601 while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG)) in xgbe_update_tstamp_addend()
1605 netdev_err(pdata->netdev, in xgbe_update_tstamp_addend()
1620 while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT)) in xgbe_set_tstamp_time()
1624 netdev_err(pdata->netdev, "timed out initializing timestamp\n"); in xgbe_set_tstamp_time()
1643 if (pdata->vdata->tx_tstamp_workaround) { in xgbe_get_tx_tstamp()
1666 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) && in xgbe_get_rx_tstamp()
1667 !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) { in xgbe_get_rx_tstamp()
1668 nsec = le32_to_cpu(rdesc->desc1); in xgbe_get_rx_tstamp()
1670 nsec |= le32_to_cpu(rdesc->desc0); in xgbe_get_rx_tstamp()
1672 packet->rx_tstamp = nsec; in xgbe_get_rx_tstamp()
1673 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_get_rx_tstamp()
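/*
 * Sketch of the 64-bit timestamp assembly happening above: desc1
 * supplies the upper 32 bits and desc0 the lower 32 bits of the
 * nanosecond value once both descriptor words have been converted
 * from little-endian order.
 */
#include <stdint.h>

static uint64_t rx_tstamp_ns(uint32_t desc0_cpu, uint32_t desc1_cpu)
{
	uint64_t nsec = desc1_cpu;	/* upper word */

	nsec <<= 32;
	nsec |= desc0_cpu;		/* lower word */
	return nsec;
}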
1682 /* Set one nano-second accuracy */ in xgbe_config_tstamp()
1700 xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend); in xgbe_config_tstamp()
1704 timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, in xgbe_config_tstamp()
1713 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_tx_start_xmit()
1721 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); in xgbe_tx_start_xmit()
1723 lower_32_bits(rdata->rdesc_dma)); in xgbe_tx_start_xmit()
1726 if (pdata->tx_usecs && !channel->tx_timer_active) { in xgbe_tx_start_xmit()
1727 channel->tx_timer_active = 1; in xgbe_tx_start_xmit()
1728 mod_timer(&channel->tx_timer, in xgbe_tx_start_xmit()
1729 jiffies + usecs_to_jiffies(pdata->tx_usecs)); in xgbe_tx_start_xmit()
1732 ring->tx.xmit_more = 0; in xgbe_tx_start_xmit()
1737 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_dev_xmit()
1738 struct xgbe_ring *ring = channel->tx_ring; in xgbe_dev_xmit()
1741 struct xgbe_packet_data *packet = &ring->packet_data; in xgbe_dev_xmit()
1746 int start_index = ring->cur; in xgbe_dev_xmit()
1747 int cur_index = ring->cur; in xgbe_dev_xmit()
1750 DBGPR("-->xgbe_dev_xmit\n"); in xgbe_dev_xmit()
1752 tx_packets = packet->tx_packets; in xgbe_dev_xmit()
1753 tx_bytes = packet->tx_bytes; in xgbe_dev_xmit()
1755 csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_dev_xmit()
1757 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_dev_xmit()
1759 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_dev_xmit()
1761 vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, in xgbe_dev_xmit()
1764 if (tso && (packet->mss != ring->tx.cur_mss)) in xgbe_dev_xmit()
1769 if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)) in xgbe_dev_xmit()
1776 * - Tx frame count exceeds the frame count setting in xgbe_dev_xmit()
1777 * - Addition of Tx frame count to the frame count since the in xgbe_dev_xmit()
1780 * - No frame count setting specified (ethtool -C ethX tx-frames 0) in xgbe_dev_xmit()
1781 * - Addition of Tx frame count to the frame count since the in xgbe_dev_xmit()
1784 ring->coalesce_count += tx_packets; in xgbe_dev_xmit()
1785 if (!pdata->tx_frames) in xgbe_dev_xmit()
1787 else if (tx_packets > pdata->tx_frames) in xgbe_dev_xmit()
1789 else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets) in xgbe_dev_xmit()
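/*
 * Standalone restatement of the Tx interrupt-coalescing decision made
 * just above: the running coalesce counter accumulates packets, and an
 * interrupt is requested when this submission alone exceeds the
 * tx-frames setting or when the counter crosses a multiple of it.
 */
#include <stdbool.h>

static bool tx_needs_interrupt(unsigned int *coalesce_count,
			       unsigned int tx_packets,
			       unsigned int tx_frames_setting)
{
	*coalesce_count += tx_packets;

	if (!tx_frames_setting)			/* ethtool -C ethX tx-frames 0 */
		return false;
	if (tx_packets > tx_frames_setting)	/* one burst already exceeds it */
		return true;
	/* crossed a multiple of the setting since the last interrupt */
	return (*coalesce_count % tx_frames_setting) < tx_packets;
}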
1795 rdesc = rdata->rdesc; in xgbe_dev_xmit()
1800 netif_dbg(pdata, tx_queued, pdata->netdev, in xgbe_dev_xmit()
1802 packet->mss); in xgbe_dev_xmit()
1805 XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2, in xgbe_dev_xmit()
1806 MSS, packet->mss); in xgbe_dev_xmit()
1809 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, in xgbe_dev_xmit()
1813 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, in xgbe_dev_xmit()
1816 ring->tx.cur_mss = packet->mss; in xgbe_dev_xmit()
1820 netif_dbg(pdata, tx_queued, pdata->netdev, in xgbe_dev_xmit()
1822 packet->vlan_ctag); in xgbe_dev_xmit()
1825 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, in xgbe_dev_xmit()
1829 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, in xgbe_dev_xmit()
1830 VT, packet->vlan_ctag); in xgbe_dev_xmit()
1833 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, in xgbe_dev_xmit()
1836 ring->tx.cur_vlan_ctag = packet->vlan_ctag; in xgbe_dev_xmit()
1841 rdesc = rdata->rdesc; in xgbe_dev_xmit()
1845 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); in xgbe_dev_xmit()
1846 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); in xgbe_dev_xmit()
1849 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, in xgbe_dev_xmit()
1850 rdata->skb_dma_len); in xgbe_dev_xmit()
1854 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR, in xgbe_dev_xmit()
1858 if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) in xgbe_dev_xmit()
1859 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1); in xgbe_dev_xmit()
1862 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1); in xgbe_dev_xmit()
1865 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); in xgbe_dev_xmit()
1869 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); in xgbe_dev_xmit()
1873 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1); in xgbe_dev_xmit()
1874 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL, in xgbe_dev_xmit()
1875 packet->tcp_payload_len); in xgbe_dev_xmit()
1876 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN, in xgbe_dev_xmit()
1877 packet->tcp_header_len / 4); in xgbe_dev_xmit()
1879 pdata->ext_stats.tx_tso_packets += tx_packets; in xgbe_dev_xmit()
1882 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); in xgbe_dev_xmit()
1886 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, in xgbe_dev_xmit()
1890 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, in xgbe_dev_xmit()
1891 packet->length); in xgbe_dev_xmit()
1895 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP, in xgbe_dev_xmit()
1898 pdata->ext_stats.tx_vxlan_packets += packet->tx_packets; in xgbe_dev_xmit()
1901 for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) { in xgbe_dev_xmit()
1904 rdesc = rdata->rdesc; in xgbe_dev_xmit()
1907 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); in xgbe_dev_xmit()
1908 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); in xgbe_dev_xmit()
1911 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, in xgbe_dev_xmit()
1912 rdata->skb_dma_len); in xgbe_dev_xmit()
1915 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); in xgbe_dev_xmit()
1918 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); in xgbe_dev_xmit()
1922 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, in xgbe_dev_xmit()
1927 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1); in xgbe_dev_xmit()
1931 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); in xgbe_dev_xmit()
1934 rdata->tx.packets = tx_packets; in xgbe_dev_xmit()
1935 rdata->tx.bytes = tx_bytes; in xgbe_dev_xmit()
1937 pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets; in xgbe_dev_xmit()
1938 pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes; in xgbe_dev_xmit()
1948 rdesc = rdata->rdesc; in xgbe_dev_xmit()
1949 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); in xgbe_dev_xmit()
1953 packet->rdesc_count, 1); in xgbe_dev_xmit()
1958 ring->cur = cur_index + 1; in xgbe_dev_xmit()
1960 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, in xgbe_dev_xmit()
1961 channel->queue_index))) in xgbe_dev_xmit()
1964 ring->tx.xmit_more = 1; in xgbe_dev_xmit()
1967 channel->name, start_index & (ring->rdesc_count - 1), in xgbe_dev_xmit()
1968 (ring->cur - 1) & (ring->rdesc_count - 1)); in xgbe_dev_xmit()
1970 DBGPR("<--xgbe_dev_xmit\n"); in xgbe_dev_xmit()
1975 struct xgbe_prv_data *pdata = channel->pdata; in xgbe_dev_read()
1976 struct xgbe_ring *ring = channel->rx_ring; in xgbe_dev_read()
1979 struct xgbe_packet_data *packet = &ring->packet_data; in xgbe_dev_read()
1980 struct net_device *netdev = pdata->netdev; in xgbe_dev_read()
1983 DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur); in xgbe_dev_read()
1985 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); in xgbe_dev_read()
1986 rdesc = rdata->rdesc; in xgbe_dev_read()
1989 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) in xgbe_dev_read()
1996 xgbe_dump_rx_desc(pdata, ring, ring->cur); in xgbe_dev_read()
1998 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) { in xgbe_dev_read()
2002 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2004 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2010 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0); in xgbe_dev_read()
2013 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) in xgbe_dev_read()
2014 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2018 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { in xgbe_dev_read()
2019 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2021 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, in xgbe_dev_read()
2023 if (rdata->rx.hdr_len) in xgbe_dev_read()
2024 pdata->ext_stats.rx_split_header_packets++; in xgbe_dev_read()
2026 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2031 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) { in xgbe_dev_read()
2032 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2035 packet->rss_hash = le32_to_cpu(rdesc->desc1); in xgbe_dev_read()
2037 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); in xgbe_dev_read()
2043 packet->rss_hash_type = PKT_HASH_TYPE_L4; in xgbe_dev_read()
2046 packet->rss_hash_type = PKT_HASH_TYPE_L3; in xgbe_dev_read()
2051 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) in xgbe_dev_read()
2055 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2059 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); in xgbe_dev_read()
2062 if (netdev->features & NETIF_F_RXCSUM) { in xgbe_dev_read()
2063 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2065 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2070 if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) { in xgbe_dev_read()
2071 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2073 pdata->ext_stats.rx_vxlan_packets++; in xgbe_dev_read()
2075 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); in xgbe_dev_read()
2079 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2086 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); in xgbe_dev_read()
2087 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); in xgbe_dev_read()
2093 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { in xgbe_dev_read()
2094 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2096 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, in xgbe_dev_read()
2099 netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", in xgbe_dev_read()
2100 packet->vlan_ctag); in xgbe_dev_read()
2103 unsigned int tnp = XGMAC_GET_BITS(packet->attributes, in xgbe_dev_read()
2107 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2109 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2111 pdata->ext_stats.rx_csum_errors++; in xgbe_dev_read()
2113 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2115 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, in xgbe_dev_read()
2117 pdata->ext_stats.rx_vxlan_csum_errors++; in xgbe_dev_read()
2119 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, in xgbe_dev_read()
2124 pdata->ext_stats.rxq_packets[channel->queue_index]++; in xgbe_dev_read()
2125 pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len; in xgbe_dev_read()
2127 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name, in xgbe_dev_read()
2128 ring->cur & (ring->rdesc_count - 1), ring->cur); in xgbe_dev_read()
2136 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT); in xgbe_is_context_desc()
2142 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD); in xgbe_is_last_desc()
2150 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); in xgbe_enable_int()
2153 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1); in xgbe_enable_int()
2156 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1); in xgbe_enable_int()
2159 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); in xgbe_enable_int()
2162 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); in xgbe_enable_int()
2165 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1); in xgbe_enable_int()
2168 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); in xgbe_enable_int()
2169 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); in xgbe_enable_int()
2172 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); in xgbe_enable_int()
2175 channel->curr_ier |= channel->saved_ier; in xgbe_enable_int()
2178 return -1; in xgbe_enable_int()
2181 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); in xgbe_enable_int()
2191 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); in xgbe_disable_int()
2194 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0); in xgbe_disable_int()
2197 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0); in xgbe_disable_int()
2200 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); in xgbe_disable_int()
2203 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0); in xgbe_disable_int()
2206 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0); in xgbe_disable_int()
2209 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); in xgbe_disable_int()
2210 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); in xgbe_disable_int()
2213 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0); in xgbe_disable_int()
2216 channel->saved_ier = channel->curr_ier; in xgbe_disable_int()
2217 channel->curr_ier = 0; in xgbe_disable_int()
2220 return -1; in xgbe_disable_int()
2223 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); in xgbe_disable_int()
2232 DBGPR("-->xgbe_exit\n"); in __xgbe_exit()
2239 while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) in __xgbe_exit()
2243 return -EBUSY; in __xgbe_exit()
2245 DBGPR("<--xgbe_exit\n"); in __xgbe_exit()
2268 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) in xgbe_flush_tx_queues()
2271 for (i = 0; i < pdata->tx_q_count; i++) in xgbe_flush_tx_queues()
2275 for (i = 0; i < pdata->tx_q_count; i++) { in xgbe_flush_tx_queues()
2277 while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, in xgbe_flush_tx_queues()
2282 return -EBUSY; in xgbe_flush_tx_queues()
2297 /* Set the System Bus mode */ in xgbe_config_dma_bus()
2299 XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2); in xgbe_config_dma_bus()
2300 XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal); in xgbe_config_dma_bus()
2301 XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1); in xgbe_config_dma_bus()
2302 XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1); in xgbe_config_dma_bus()
2307 if (pdata->vdata->tx_desc_prefetch) in xgbe_config_dma_bus()
2309 pdata->vdata->tx_desc_prefetch); in xgbe_config_dma_bus()
2311 if (pdata->vdata->rx_desc_prefetch) in xgbe_config_dma_bus()
2313 pdata->vdata->rx_desc_prefetch); in xgbe_config_dma_bus()
2318 XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr); in xgbe_config_dma_cache()
2319 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr); in xgbe_config_dma_cache()
2320 if (pdata->awarcr) in xgbe_config_dma_cache()
2321 XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr); in xgbe_config_dma_cache()
2332 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { in xgbe_config_mtl_mode()
2351 if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) { in xgbe_queue_flow_control_threshold()
2353 rfa = pdata->pfc_rfa; in xgbe_queue_flow_control_threshold()
2358 rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT; in xgbe_queue_flow_control_threshold()
2367 pdata->rx_rfa[queue] = 0; in xgbe_queue_flow_control_threshold()
2368 pdata->rx_rfd[queue] = 0; in xgbe_queue_flow_control_threshold()
2374 pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */ in xgbe_queue_flow_control_threshold()
2375 pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ in xgbe_queue_flow_control_threshold()
2380 /* Between 4096 and max-frame */ in xgbe_queue_flow_control_threshold()
2381 pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */ in xgbe_queue_flow_control_threshold()
2382 pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ in xgbe_queue_flow_control_threshold()
2387 /* Between max-frame and 3 max-frames, in xgbe_queue_flow_control_threshold()
2391 rfa = q_fifo_size - frame_fifo_size; in xgbe_queue_flow_control_threshold()
2394 /* Above 3 max-frames - trigger when just over in xgbe_queue_flow_control_threshold()
2403 pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa); in xgbe_queue_flow_control_threshold()
2404 pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd); in xgbe_queue_flow_control_threshold()
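/*
 * Sketch of the threshold case computed above for a fifo between one
 * and three maximum frames: flow control asserts (RFA) when only one
 * full frame of space is left and deasserts (RFD) after another half
 * frame has drained. The fc_encode() register encoding (512-byte units
 * starting at 1 KB of free space) is an assumption for illustration,
 * not taken from this listing.
 */
struct fc_thresholds {
	unsigned int rfa;	/* assert flow control at this much free space (bytes) */
	unsigned int rfd;	/* deassert at this much free space (bytes) */
};

static struct fc_thresholds fc_mid_range(unsigned int q_fifo_size,
					 unsigned int frame_fifo_size)
{
	struct fc_thresholds t;

	t.rfa = q_fifo_size - frame_fifo_size;	/* one frame of headroom left */
	t.rfd = t.rfa + frame_fifo_size / 2;	/* resume half a frame later */
	return t;
}

static unsigned int fc_encode(unsigned int bytes)
{
	return (bytes < 1024) ? 0 : (bytes - 1024) / 512;
}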
2413 for (i = 0; i < pdata->rx_q_count; i++) { in xgbe_calculate_flow_control_threshold()
2424 for (i = 0; i < pdata->rx_q_count; i++) { in xgbe_config_flow_control_threshold()
2426 pdata->rx_rfa[i]); in xgbe_config_flow_control_threshold()
2428 pdata->rx_rfd[i]); in xgbe_config_flow_control_threshold()
2435 return min_t(unsigned int, pdata->tx_max_fifo_size, in xgbe_get_tx_fifo_size()
2436 pdata->hw_feat.tx_fifo_size); in xgbe_get_tx_fifo_size()
2442 return min_t(unsigned int, pdata->rx_max_fifo_size, in xgbe_get_rx_fifo_size()
2443 pdata->hw_feat.rx_fifo_size); in xgbe_get_rx_fifo_size()
2462 p_fifo--; in xgbe_calculate_equal_fifo()
2485 fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1; in xgbe_set_nonprio_fifos()
2486 fifo_size -= XGMAC_FIFO_MIN_ALLOC; in xgbe_set_nonprio_fifos()
2497 if (pdata->pfc->delay) in xgbe_get_pfc_delay()
2498 return pdata->pfc->delay / 8; in xgbe_get_pfc_delay()
2521 if (!pdata->pfc->pfc_en) in xgbe_get_pfc_queues()
2525 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); in xgbe_get_pfc_queues()
2530 pdata->pfcq[i] = 1; in xgbe_get_pfc_queues()
2547 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); in xgbe_calculate_dcb_fifo()
2557 rem_fifo = fifo_size - (q_fifo_size * prio_queues); in xgbe_calculate_dcb_fifo()
2562 pdata->pfc_rfa = xgbe_get_pfc_delay(pdata); in xgbe_calculate_dcb_fifo()
2563 pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa); in xgbe_calculate_dcb_fifo()
2565 if (pdata->pfc_rfa > q_fifo_size) { in xgbe_calculate_dcb_fifo()
2566 addn_fifo = pdata->pfc_rfa - q_fifo_size; in xgbe_calculate_dcb_fifo()
2573 * - distribute remaining fifo between the VLAN priority in xgbe_calculate_dcb_fifo()
2579 i--; in xgbe_calculate_dcb_fifo()
2581 fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1; in xgbe_calculate_dcb_fifo()
2583 if (!pdata->pfcq[i] || !addn_fifo) in xgbe_calculate_dcb_fifo()
2587 netdev_warn(pdata->netdev, in xgbe_calculate_dcb_fifo()
2596 rem_fifo -= addn_fifo; in xgbe_calculate_dcb_fifo()
2616 xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo); in xgbe_config_tx_fifo_size()
2618 for (i = 0; i < pdata->tx_q_count; i++) in xgbe_config_tx_fifo_size()
2621 netif_info(pdata, drv, pdata->netdev, in xgbe_config_tx_fifo_size()
2623 pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); in xgbe_config_tx_fifo_size()
2634 memset(pdata->pfcq, 0, sizeof(pdata->pfcq)); in xgbe_config_rx_fifo_size()
2635 pdata->pfc_rfa = 0; in xgbe_config_rx_fifo_size()
2638 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); in xgbe_config_rx_fifo_size()
2640 /* Assign a minimum fifo to the non-VLAN priority queues */ in xgbe_config_rx_fifo_size()
2641 fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo); in xgbe_config_rx_fifo_size()
2643 if (pdata->pfc && pdata->ets) in xgbe_config_rx_fifo_size()
2648 for (i = 0; i < pdata->rx_q_count; i++) in xgbe_config_rx_fifo_size()
2654 if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) { in xgbe_config_rx_fifo_size()
2655 netif_info(pdata, drv, pdata->netdev, in xgbe_config_rx_fifo_size()
2656 "%u Rx hardware queues\n", pdata->rx_q_count); in xgbe_config_rx_fifo_size()
2657 for (i = 0; i < pdata->rx_q_count; i++) in xgbe_config_rx_fifo_size()
2658 netif_info(pdata, drv, pdata->netdev, in xgbe_config_rx_fifo_size()
2662 netif_info(pdata, drv, pdata->netdev, in xgbe_config_rx_fifo_size()
2664 pdata->rx_q_count, in xgbe_config_rx_fifo_size()
2680 qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; in xgbe_config_queue_mapping()
2681 qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; in xgbe_config_queue_mapping()
2683 for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { in xgbe_config_queue_mapping()
2685 netif_dbg(pdata, drv, pdata->netdev, in xgbe_config_queue_mapping()
2689 pdata->q2tc_map[queue++] = i; in xgbe_config_queue_mapping()
2693 netif_dbg(pdata, drv, pdata->netdev, in xgbe_config_queue_mapping()
2697 pdata->q2tc_map[queue++] = i; in xgbe_config_queue_mapping()
2702 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); in xgbe_config_queue_mapping()
2711 netif_dbg(pdata, drv, pdata->netdev, in xgbe_config_queue_mapping()
2714 pdata->prio2q_map[prio++] = i; in xgbe_config_queue_mapping()
2718 netif_dbg(pdata, drv, pdata->netdev, in xgbe_config_queue_mapping()
2721 pdata->prio2q_map[prio++] = i; in xgbe_config_queue_mapping()
2737 for (i = 0; i < pdata->rx_q_count;) { in xgbe_config_queue_mapping()
2740 if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) in xgbe_config_queue_mapping()
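/*
 * Standalone sketch of the Tx queue to traffic-class distribution set
 * up above: every class gets tx_q_count / tc_cnt queues and the first
 * tx_q_count % tc_cnt classes each get one extra.
 */
#include <stdio.h>

static void map_queues_to_tcs(unsigned int tx_q_count, unsigned int tc_cnt,
			      unsigned int *q2tc_map)
{
	unsigned int qptc = tx_q_count / tc_cnt;
	unsigned int qptc_extra = tx_q_count % tc_cnt;
	unsigned int i, j, queue = 0;

	for (i = 0; i < tc_cnt; i++) {
		for (j = 0; j < qptc; j++)
			q2tc_map[queue++] = i;
		if (i < qptc_extra)		/* spread the remainder */
			q2tc_map[queue++] = i;
	}
}

int main(void)
{
	unsigned int map[10], i;

	map_queues_to_tcs(10, 3, map);		/* 10 queues over 3 TCs: 4/3/3 */
	for (i = 0; i < 10; i++)
		printf("TXq%u -> TC%u\n", i, map[i]);
	return 0;
}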
2755 netdev_reset_tc(pdata->netdev); in xgbe_config_tc()
2756 if (!pdata->num_tcs) in xgbe_config_tc()
2759 netdev_set_num_tc(pdata->netdev, pdata->num_tcs); in xgbe_config_tc()
2761 for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) { in xgbe_config_tc()
2762 while ((queue < pdata->tx_q_count) && in xgbe_config_tc()
2763 (pdata->q2tc_map[queue] == i)) in xgbe_config_tc()
2766 netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n", in xgbe_config_tc()
2767 i, offset, queue - 1); in xgbe_config_tc()
2768 netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset); in xgbe_config_tc()
2772 if (!pdata->ets) in xgbe_config_tc()
2776 netdev_set_prio_tc_map(pdata->netdev, prio, in xgbe_config_tc()
2777 pdata->ets->prio_tc[prio]); in xgbe_config_tc()
2782 struct ieee_ets *ets = pdata->ets; in xgbe_config_dcb_tc()
2796 total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt; in xgbe_config_dcb_tc()
2801 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { in xgbe_config_dcb_tc()
2805 if (ets->prio_tc[prio] == i) in xgbe_config_dcb_tc()
2810 netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n", in xgbe_config_dcb_tc()
2821 switch (ets->tc_tsa[i]) { in xgbe_config_dcb_tc()
2823 netif_dbg(pdata, drv, pdata->netdev, in xgbe_config_dcb_tc()
2829 weight = total_weight * ets->tc_tx_bw[i] / 100; in xgbe_config_dcb_tc()
2832 netif_dbg(pdata, drv, pdata->netdev, in xgbe_config_dcb_tc()
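/*
 * Sketch of the ETS weight computation around the lines above: the
 * weight budget is mtu * tc_cnt and each ETS traffic class receives
 * its configured bandwidth percentage of that budget. The lower bound
 * of roughly 1% of the budget is an assumption here; the listing only
 * shows the percentage calculation itself.
 */
static unsigned int ets_weight(unsigned int mtu, unsigned int tc_cnt,
			       unsigned int tc_tx_bw_percent)
{
	unsigned int total_weight = mtu * tc_cnt;
	unsigned int min_weight = total_weight / 100;
	unsigned int weight;

	if (!min_weight)
		min_weight = 1;

	weight = total_weight * tc_tx_bw_percent / 100;
	if (weight < min_weight)
		weight = min_weight;		/* assumed lower clamp */
	if (weight > total_weight)
		weight = total_weight;
	return weight;
}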
2847 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) { in xgbe_config_dcb_pfc()
2849 netif_tx_stop_all_queues(pdata->netdev); in xgbe_config_dcb_pfc()
2852 pdata->hw_if.disable_rx(pdata); in xgbe_config_dcb_pfc()
2858 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) { in xgbe_config_dcb_pfc()
2860 pdata->hw_if.enable_rx(pdata); in xgbe_config_dcb_pfc()
2863 netif_tx_start_all_queues(pdata->netdev); in xgbe_config_dcb_pfc()
2869 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr); in xgbe_config_mac_address()
2872 if (pdata->hw_feat.hash_table_size) { in xgbe_config_mac_address()
2883 val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0; in xgbe_config_jumbo_enable()
2890 xgbe_set_speed(pdata, pdata->phy_speed); in xgbe_config_mac_speed()
2895 if (pdata->netdev->features & NETIF_F_RXCSUM) in xgbe_config_checksum_offload()
2910 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) in xgbe_config_vlan_support()
2915 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in xgbe_config_vlan_support()
2926 if (pdata->vdata->mmc_64bit) { in xgbe_mmc_read()
2965 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; in xgbe_tx_mmc_int()
2969 stats->txoctetcount_gb += in xgbe_tx_mmc_int()
2973 stats->txframecount_gb += in xgbe_tx_mmc_int()
2977 stats->txbroadcastframes_g += in xgbe_tx_mmc_int()
2981 stats->txmulticastframes_g += in xgbe_tx_mmc_int()
2985 stats->tx64octets_gb += in xgbe_tx_mmc_int()
2989 stats->tx65to127octets_gb += in xgbe_tx_mmc_int()
2993 stats->tx128to255octets_gb += in xgbe_tx_mmc_int()
2997 stats->tx256to511octets_gb += in xgbe_tx_mmc_int()
3001 stats->tx512to1023octets_gb += in xgbe_tx_mmc_int()
3005 stats->tx1024tomaxoctets_gb += in xgbe_tx_mmc_int()
3009 stats->txunicastframes_gb += in xgbe_tx_mmc_int()
3013 stats->txmulticastframes_gb += in xgbe_tx_mmc_int()
3017 stats->txbroadcastframes_g += in xgbe_tx_mmc_int()
3021 stats->txunderflowerror += in xgbe_tx_mmc_int()
3025 stats->txoctetcount_g += in xgbe_tx_mmc_int()
3029 stats->txframecount_g += in xgbe_tx_mmc_int()
3033 stats->txpauseframes += in xgbe_tx_mmc_int()
3037 stats->txvlanframes_g += in xgbe_tx_mmc_int()
3043 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; in xgbe_rx_mmc_int()
3047 stats->rxframecount_gb += in xgbe_rx_mmc_int()
3051 stats->rxoctetcount_gb += in xgbe_rx_mmc_int()
3055 stats->rxoctetcount_g += in xgbe_rx_mmc_int()
3059 stats->rxbroadcastframes_g += in xgbe_rx_mmc_int()
3063 stats->rxmulticastframes_g += in xgbe_rx_mmc_int()
3067 stats->rxcrcerror += in xgbe_rx_mmc_int()
3071 stats->rxrunterror += in xgbe_rx_mmc_int()
3075 stats->rxjabbererror += in xgbe_rx_mmc_int()
3079 stats->rxundersize_g += in xgbe_rx_mmc_int()
3083 stats->rxoversize_g += in xgbe_rx_mmc_int()
3087 stats->rx64octets_gb += in xgbe_rx_mmc_int()
3091 stats->rx65to127octets_gb += in xgbe_rx_mmc_int()
3095 stats->rx128to255octets_gb += in xgbe_rx_mmc_int()
3099 stats->rx256to511octets_gb += in xgbe_rx_mmc_int()
3103 stats->rx512to1023octets_gb += in xgbe_rx_mmc_int()
3107 stats->rx1024tomaxoctets_gb += in xgbe_rx_mmc_int()
3111 stats->rxunicastframes_g += in xgbe_rx_mmc_int()
3115 stats->rxlengtherror += in xgbe_rx_mmc_int()
3119 stats->rxoutofrangetype += in xgbe_rx_mmc_int()
3123 stats->rxpauseframes += in xgbe_rx_mmc_int()
3127 stats->rxfifooverflow += in xgbe_rx_mmc_int()
3131 stats->rxvlanframes_gb += in xgbe_rx_mmc_int()
3135 stats->rxwatchdogerror += in xgbe_rx_mmc_int()
3141 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; in xgbe_read_mmc_stats()
3146 stats->txoctetcount_gb += in xgbe_read_mmc_stats()
3149 stats->txframecount_gb += in xgbe_read_mmc_stats()
3152 stats->txbroadcastframes_g += in xgbe_read_mmc_stats()
3155 stats->txmulticastframes_g += in xgbe_read_mmc_stats()
3158 stats->tx64octets_gb += in xgbe_read_mmc_stats()
3161 stats->tx65to127octets_gb += in xgbe_read_mmc_stats()
3164 stats->tx128to255octets_gb += in xgbe_read_mmc_stats()
3167 stats->tx256to511octets_gb += in xgbe_read_mmc_stats()
3170 stats->tx512to1023octets_gb += in xgbe_read_mmc_stats()
3173 stats->tx1024tomaxoctets_gb += in xgbe_read_mmc_stats()
3176 stats->txunicastframes_gb += in xgbe_read_mmc_stats()
3179 stats->txmulticastframes_gb += in xgbe_read_mmc_stats()
3182 stats->txbroadcastframes_g += in xgbe_read_mmc_stats()
3185 stats->txunderflowerror += in xgbe_read_mmc_stats()
3188 stats->txoctetcount_g += in xgbe_read_mmc_stats()
3191 stats->txframecount_g += in xgbe_read_mmc_stats()
3194 stats->txpauseframes += in xgbe_read_mmc_stats()
3197 stats->txvlanframes_g += in xgbe_read_mmc_stats()
3200 stats->rxframecount_gb += in xgbe_read_mmc_stats()
3203 stats->rxoctetcount_gb += in xgbe_read_mmc_stats()
3206 stats->rxoctetcount_g += in xgbe_read_mmc_stats()
3209 stats->rxbroadcastframes_g += in xgbe_read_mmc_stats()
3212 stats->rxmulticastframes_g += in xgbe_read_mmc_stats()
3215 stats->rxcrcerror += in xgbe_read_mmc_stats()
3218 stats->rxrunterror += in xgbe_read_mmc_stats()
3221 stats->rxjabbererror += in xgbe_read_mmc_stats()
3224 stats->rxundersize_g += in xgbe_read_mmc_stats()
3227 stats->rxoversize_g += in xgbe_read_mmc_stats()
3230 stats->rx64octets_gb += in xgbe_read_mmc_stats()
3233 stats->rx65to127octets_gb += in xgbe_read_mmc_stats()
3236 stats->rx128to255octets_gb += in xgbe_read_mmc_stats()
3239 stats->rx256to511octets_gb += in xgbe_read_mmc_stats()
3242 stats->rx512to1023octets_gb += in xgbe_read_mmc_stats()
3245 stats->rx1024tomaxoctets_gb += in xgbe_read_mmc_stats()
3248 stats->rxunicastframes_g += in xgbe_read_mmc_stats()
3251 stats->rxlengtherror += in xgbe_read_mmc_stats()
3254 stats->rxoutofrangetype += in xgbe_read_mmc_stats()
3257 stats->rxpauseframes += in xgbe_read_mmc_stats()
3260 stats->rxfifooverflow += in xgbe_read_mmc_stats()
3263 stats->rxvlanframes_gb += in xgbe_read_mmc_stats()
3266 stats->rxwatchdogerror += in xgbe_read_mmc_stats()
3269 /* Un-freeze counters */ in xgbe_read_mmc_stats()
3303 netdev_info(pdata->netdev, in xgbe_txq_prepare_tx_stop()
3315 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20) in xgbe_prepare_tx_stop()
3323 tx_qidx = queue - DMA_DSRX_FIRST_QUEUE; in xgbe_prepare_tx_stop()
3346 netdev_info(pdata->netdev, in xgbe_prepare_tx_stop()
3356 for (i = 0; i < pdata->channel_count; i++) { in xgbe_enable_tx()
3357 if (!pdata->channel[i]->tx_ring) in xgbe_enable_tx()
3360 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); in xgbe_enable_tx()
3364 for (i = 0; i < pdata->tx_q_count; i++) in xgbe_enable_tx()
3377 for (i = 0; i < pdata->tx_q_count; i++) in xgbe_disable_tx()
3384 for (i = 0; i < pdata->tx_q_count; i++) in xgbe_disable_tx()
3388 for (i = 0; i < pdata->channel_count; i++) { in xgbe_disable_tx()
3389 if (!pdata->channel[i]->tx_ring) in xgbe_disable_tx()
3392 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); in xgbe_disable_tx()
3417 netdev_info(pdata->netdev, in xgbe_prepare_rx_stop()
3427 for (i = 0; i < pdata->channel_count; i++) { in xgbe_enable_rx()
3428 if (!pdata->channel[i]->rx_ring) in xgbe_enable_rx()
3431 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); in xgbe_enable_rx()
3436 for (i = 0; i < pdata->rx_q_count; i++) in xgbe_enable_rx()
3458 for (i = 0; i < pdata->rx_q_count; i++) in xgbe_disable_rx()
3465 for (i = 0; i < pdata->channel_count; i++) { in xgbe_disable_rx()
3466 if (!pdata->channel[i]->rx_ring) in xgbe_disable_rx()
3469 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); in xgbe_disable_rx()
3478 for (i = 0; i < pdata->channel_count; i++) { in xgbe_powerup_tx()
3479 if (!pdata->channel[i]->tx_ring) in xgbe_powerup_tx()
3482 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); in xgbe_powerup_tx()
3494 for (i = 0; i < pdata->tx_q_count; i++) in xgbe_powerdown_tx()
3501 for (i = 0; i < pdata->channel_count; i++) { in xgbe_powerdown_tx()
3502 if (!pdata->channel[i]->tx_ring) in xgbe_powerdown_tx()
3505 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); in xgbe_powerdown_tx()
3514 for (i = 0; i < pdata->channel_count; i++) { in xgbe_powerup_rx()
3515 if (!pdata->channel[i]->rx_ring) in xgbe_powerup_rx()
3518 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); in xgbe_powerup_rx()
3527 for (i = 0; i < pdata->channel_count; i++) { in xgbe_powerdown_rx()
3528 if (!pdata->channel[i]->rx_ring) in xgbe_powerdown_rx()
3531 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); in xgbe_powerdown_rx()
3537 struct xgbe_desc_if *desc_if = &pdata->desc_if; in xgbe_init()
3540 DBGPR("-->xgbe_init\n"); in xgbe_init()
3545 netdev_err(pdata->netdev, "error flushing TX queues\n"); in xgbe_init()
3561 if (pdata->netdev->features & NETIF_F_RXCSUM) { in xgbe_init()
3566 desc_if->wrapper_tx_desc_init(pdata); in xgbe_init()
3567 desc_if->wrapper_rx_desc_init(pdata); in xgbe_init()
3575 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); in xgbe_init()
3576 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); in xgbe_init()
3577 xgbe_config_tx_threshold(pdata, pdata->tx_threshold); in xgbe_init()
3578 xgbe_config_rx_threshold(pdata, pdata->rx_threshold); in xgbe_init()
3605 DBGPR("<--xgbe_init\n"); in xgbe_init()
3612 DBGPR("-->xgbe_init_function_ptrs\n"); in xgbe_init_function_ptrs_dev()
3614 hw_if->tx_complete = xgbe_tx_complete; in xgbe_init_function_ptrs_dev()
3616 hw_if->set_mac_address = xgbe_set_mac_address; in xgbe_init_function_ptrs_dev()
3617 hw_if->config_rx_mode = xgbe_config_rx_mode; in xgbe_init_function_ptrs_dev()
3619 hw_if->enable_rx_csum = xgbe_enable_rx_csum; in xgbe_init_function_ptrs_dev()
3620 hw_if->disable_rx_csum = xgbe_disable_rx_csum; in xgbe_init_function_ptrs_dev()
3622 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; in xgbe_init_function_ptrs_dev()
3623 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; in xgbe_init_function_ptrs_dev()
3624 hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; in xgbe_init_function_ptrs_dev()
3625 hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; in xgbe_init_function_ptrs_dev()
3626 hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; in xgbe_init_function_ptrs_dev()
3628 hw_if->read_mmd_regs = xgbe_read_mmd_regs; in xgbe_init_function_ptrs_dev()
3629 hw_if->write_mmd_regs = xgbe_write_mmd_regs; in xgbe_init_function_ptrs_dev()
3631 hw_if->set_speed = xgbe_set_speed; in xgbe_init_function_ptrs_dev()
3633 hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode; in xgbe_init_function_ptrs_dev()
3634 hw_if->read_ext_mii_regs_c22 = xgbe_read_ext_mii_regs_c22; in xgbe_init_function_ptrs_dev()
3635 hw_if->write_ext_mii_regs_c22 = xgbe_write_ext_mii_regs_c22; in xgbe_init_function_ptrs_dev()
3636 hw_if->read_ext_mii_regs_c45 = xgbe_read_ext_mii_regs_c45; in xgbe_init_function_ptrs_dev()
3637 hw_if->write_ext_mii_regs_c45 = xgbe_write_ext_mii_regs_c45; in xgbe_init_function_ptrs_dev()
3639 hw_if->set_gpio = xgbe_set_gpio; in xgbe_init_function_ptrs_dev()
3640 hw_if->clr_gpio = xgbe_clr_gpio; in xgbe_init_function_ptrs_dev()
3642 hw_if->enable_tx = xgbe_enable_tx; in xgbe_init_function_ptrs_dev()
3643 hw_if->disable_tx = xgbe_disable_tx; in xgbe_init_function_ptrs_dev()
3644 hw_if->enable_rx = xgbe_enable_rx; in xgbe_init_function_ptrs_dev()
3645 hw_if->disable_rx = xgbe_disable_rx; in xgbe_init_function_ptrs_dev()
3647 hw_if->powerup_tx = xgbe_powerup_tx; in xgbe_init_function_ptrs_dev()
3648 hw_if->powerdown_tx = xgbe_powerdown_tx; in xgbe_init_function_ptrs_dev()
3649 hw_if->powerup_rx = xgbe_powerup_rx; in xgbe_init_function_ptrs_dev()
3650 hw_if->powerdown_rx = xgbe_powerdown_rx; in xgbe_init_function_ptrs_dev()
3652 hw_if->dev_xmit = xgbe_dev_xmit; in xgbe_init_function_ptrs_dev()
3653 hw_if->dev_read = xgbe_dev_read; in xgbe_init_function_ptrs_dev()
3654 hw_if->enable_int = xgbe_enable_int; in xgbe_init_function_ptrs_dev()
3655 hw_if->disable_int = xgbe_disable_int; in xgbe_init_function_ptrs_dev()
3656 hw_if->init = xgbe_init; in xgbe_init_function_ptrs_dev()
3657 hw_if->exit = xgbe_exit; in xgbe_init_function_ptrs_dev()
3660 hw_if->tx_desc_init = xgbe_tx_desc_init; in xgbe_init_function_ptrs_dev()
3661 hw_if->rx_desc_init = xgbe_rx_desc_init; in xgbe_init_function_ptrs_dev()
3662 hw_if->tx_desc_reset = xgbe_tx_desc_reset; in xgbe_init_function_ptrs_dev()
3663 hw_if->rx_desc_reset = xgbe_rx_desc_reset; in xgbe_init_function_ptrs_dev()
3664 hw_if->is_last_desc = xgbe_is_last_desc; in xgbe_init_function_ptrs_dev()
3665 hw_if->is_context_desc = xgbe_is_context_desc; in xgbe_init_function_ptrs_dev()
3666 hw_if->tx_start_xmit = xgbe_tx_start_xmit; in xgbe_init_function_ptrs_dev()
3669 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; in xgbe_init_function_ptrs_dev()
3670 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; in xgbe_init_function_ptrs_dev()
3673 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; in xgbe_init_function_ptrs_dev()
3674 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; in xgbe_init_function_ptrs_dev()
3675 hw_if->usec_to_riwt = xgbe_usec_to_riwt; in xgbe_init_function_ptrs_dev()
3676 hw_if->riwt_to_usec = xgbe_riwt_to_usec; in xgbe_init_function_ptrs_dev()
3679 hw_if->config_rx_threshold = xgbe_config_rx_threshold; in xgbe_init_function_ptrs_dev()
3680 hw_if->config_tx_threshold = xgbe_config_tx_threshold; in xgbe_init_function_ptrs_dev()
3683 hw_if->config_rsf_mode = xgbe_config_rsf_mode; in xgbe_init_function_ptrs_dev()
3684 hw_if->config_tsf_mode = xgbe_config_tsf_mode; in xgbe_init_function_ptrs_dev()
3687 hw_if->config_osp_mode = xgbe_config_osp_mode; in xgbe_init_function_ptrs_dev()
3690 hw_if->tx_mmc_int = xgbe_tx_mmc_int; in xgbe_init_function_ptrs_dev()
3691 hw_if->rx_mmc_int = xgbe_rx_mmc_int; in xgbe_init_function_ptrs_dev()
3692 hw_if->read_mmc_stats = xgbe_read_mmc_stats; in xgbe_init_function_ptrs_dev()
3695 hw_if->config_tstamp = xgbe_config_tstamp; in xgbe_init_function_ptrs_dev()
3696 hw_if->update_tstamp_addend = xgbe_update_tstamp_addend; in xgbe_init_function_ptrs_dev()
3697 hw_if->set_tstamp_time = xgbe_set_tstamp_time; in xgbe_init_function_ptrs_dev()
3698 hw_if->get_tstamp_time = xgbe_get_tstamp_time; in xgbe_init_function_ptrs_dev()
3699 hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; in xgbe_init_function_ptrs_dev()
3702 hw_if->config_tc = xgbe_config_tc; in xgbe_init_function_ptrs_dev()
3703 hw_if->config_dcb_tc = xgbe_config_dcb_tc; in xgbe_init_function_ptrs_dev()
3704 hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; in xgbe_init_function_ptrs_dev()
3707 hw_if->enable_rss = xgbe_enable_rss; in xgbe_init_function_ptrs_dev()
3708 hw_if->disable_rss = xgbe_disable_rss; in xgbe_init_function_ptrs_dev()
3709 hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; in xgbe_init_function_ptrs_dev()
3710 hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; in xgbe_init_function_ptrs_dev()
3713 hw_if->disable_ecc_ded = xgbe_disable_ecc_ded; in xgbe_init_function_ptrs_dev()
3714 hw_if->disable_ecc_sec = xgbe_disable_ecc_sec; in xgbe_init_function_ptrs_dev()
3717 hw_if->enable_vxlan = xgbe_enable_vxlan; in xgbe_init_function_ptrs_dev()
3718 hw_if->disable_vxlan = xgbe_disable_vxlan; in xgbe_init_function_ptrs_dev()
3719 hw_if->set_vxlan_id = xgbe_set_vxlan_id; in xgbe_init_function_ptrs_dev()
3722 hw_if->enable_sph = xgbe_config_sph_mode; in xgbe_init_function_ptrs_dev()
3723 hw_if->disable_sph = xgbe_disable_sph_mode; in xgbe_init_function_ptrs_dev()
3725 DBGPR("<--xgbe_init_function_ptrs\n"); in xgbe_init_function_ptrs_dev()