Lines Matching +full:lo +full:- +full:x2 +full:- +full:en
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
16 return enetc_port_rd(&si->hw, reg); in enetc_port_mac_rd()
22 enetc_port_wr(&si->hw, reg, val); in enetc_port_mac_wr()
23 if (si->hw_features & ENETC_SI_F_QBU) in enetc_port_mac_wr()
24 enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val); in enetc_port_mac_wr()
31 if (!(priv->si->hw_features & ENETC_SI_F_QBU)) in enetc_change_preemptible_tcs()
34 priv->preemptible_tcs = preemptible_tcs; in enetc_change_preemptible_tcs()
40 int num_tx_rings = priv->num_tx_rings; in enetc_num_stack_tx_queues()
42 if (priv->xdp_prog) in enetc_num_stack_tx_queues()
43 return num_tx_rings - num_possible_cpus(); in enetc_num_stack_tx_queues()
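/* Illustrative note, not part of the driver source: when an XDP program is
 * attached, the last num_possible_cpus() TX rings are reserved for XDP_TX /
 * XDP_REDIRECT (see priv->xdp_tx_ring further down in this listing), so only
 * the remaining rings are exposed to the stack as transmit queues.
 */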
51 int index = &priv->tx_ring[tx_ring->index] - priv->xdp_tx_ring; in enetc_rx_ring_from_xdp_tx_ring()
53 return priv->rx_ring[index]; in enetc_rx_ring_from_xdp_tx_ring()
58 if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect) in enetc_tx_swbd_get_skb()
61 return tx_swbd->skb; in enetc_tx_swbd_get_skb()
67 if (tx_swbd->is_xdp_redirect) in enetc_tx_swbd_get_xdp_frame()
68 return tx_swbd->xdp_frame; in enetc_tx_swbd_get_xdp_frame()
80 if (tx_swbd->is_dma_page) in enetc_unmap_tx_buff()
81 dma_unmap_page(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
82 tx_swbd->is_xdp_tx ? PAGE_SIZE : tx_swbd->len, in enetc_unmap_tx_buff()
83 tx_swbd->dir); in enetc_unmap_tx_buff()
85 dma_unmap_single(tx_ring->dev, tx_swbd->dma, in enetc_unmap_tx_buff()
86 tx_swbd->len, tx_swbd->dir); in enetc_unmap_tx_buff()
87 tx_swbd->dma = 0; in enetc_unmap_tx_buff()
96 if (tx_swbd->dma) in enetc_free_tx_frame()
100 xdp_return_frame(tx_swbd->xdp_frame); in enetc_free_tx_frame()
101 tx_swbd->xdp_frame = NULL; in enetc_free_tx_frame()
104 tx_swbd->skb = NULL; in enetc_free_tx_frame()
112 enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); in enetc_update_tx_ring_tail()
126 return -EINVAL; in enetc_ptp_parse()
130 return -EINVAL; in enetc_ptp_parse()
139 *twostep = hdr->flag_field[0] & 0x2; in enetc_ptp_parse()
142 *correction_offset = (u8 *)&hdr->correction - base; in enetc_ptp_parse()
143 *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; in enetc_ptp_parse()
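/* Illustrative note, not part of the driver source: the correctionField
 * starts at byte 8 of the 34-byte PTP common header, so the two offsets
 * computed here always differ by 34 - 8 = 26 bytes, both measured from the
 * same base pointer into the frame.
 */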
151 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); in enetc_map_tx_buffs()
152 struct enetc_hw *hw = &priv->si->hw; in enetc_map_tx_buffs()
165 i = tx_ring->next_to_use; in enetc_map_tx_buffs()
169 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); in enetc_map_tx_buffs()
170 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in enetc_map_tx_buffs()
177 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
178 tx_swbd->dma = dma; in enetc_map_tx_buffs()
179 tx_swbd->len = len; in enetc_map_tx_buffs()
180 tx_swbd->is_dma_page = 0; in enetc_map_tx_buffs()
181 tx_swbd->dir = DMA_TO_DEVICE; in enetc_map_tx_buffs()
185 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { in enetc_map_tx_buffs()
189 WARN_ONCE(1, "Bad packet for one-step timestamping\n"); in enetc_map_tx_buffs()
192 } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) { in enetc_map_tx_buffs()
196 tx_swbd->do_twostep_tstamp = do_twostep_tstamp; in enetc_map_tx_buffs()
197 tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV); in enetc_map_tx_buffs()
198 tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en; in enetc_map_tx_buffs()
203 if (tx_ring->tsd_enable) in enetc_map_tx_buffs()
207 temp_bd.frm_len = cpu_to_le16(skb->len); in enetc_map_tx_buffs()
211 temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns, in enetc_map_tx_buffs()
224 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
226 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
233 temp_bd.ext.tpid = 0; /* < C-TAG */ in enetc_map_tx_buffs()
238 u32 lo, hi, val; in enetc_map_tx_buffs() local
242 lo = enetc_rd_hot(hw, ENETC_SICTR0); in enetc_map_tx_buffs()
244 sec = (u64)hi << 32 | lo; in enetc_map_tx_buffs()
248 temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff); in enetc_map_tx_buffs()
252 * - 48 bits seconds field in enetc_map_tx_buffs()
253 * - 32 bits nanoseconds field in enetc_map_tx_buffs()
262 /* Configure single-step register */ in enetc_map_tx_buffs()
268 enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP, in enetc_map_tx_buffs()
271 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in enetc_map_tx_buffs()
279 frag = &skb_shinfo(skb)->frags[0]; in enetc_map_tx_buffs()
280 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { in enetc_map_tx_buffs()
282 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, in enetc_map_tx_buffs()
284 if (dma_mapping_error(tx_ring->dev, dma)) in enetc_map_tx_buffs()
294 if (unlikely(i == tx_ring->bd_count)) { in enetc_map_tx_buffs()
296 tx_swbd = tx_ring->tx_swbd; in enetc_map_tx_buffs()
304 tx_swbd->dma = dma; in enetc_map_tx_buffs()
305 tx_swbd->len = len; in enetc_map_tx_buffs()
306 tx_swbd->is_dma_page = 1; in enetc_map_tx_buffs()
307 tx_swbd->dir = DMA_TO_DEVICE; in enetc_map_tx_buffs()
316 tx_ring->tx_swbd[i].is_eof = true; in enetc_map_tx_buffs()
317 tx_ring->tx_swbd[i].skb = skb; in enetc_map_tx_buffs()
320 tx_ring->next_to_use = i; in enetc_map_tx_buffs()
329 dev_err(tx_ring->dev, "DMA map error"); in enetc_map_tx_buffs()
332 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_buffs()
335 i = tx_ring->bd_count; in enetc_map_tx_buffs()
336 i--; in enetc_map_tx_buffs()
337 } while (count--); in enetc_map_tx_buffs()
352 addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; in enetc_map_tx_tso_hdr()
368 tx_swbd->len = hdr_len; in enetc_map_tx_tso_hdr()
369 tx_swbd->do_twostep_tstamp = false; in enetc_map_tx_tso_hdr()
370 tx_swbd->check_wb = false; in enetc_map_tx_tso_hdr()
380 tx_swbd = &tx_ring->tx_swbd[*i]; in enetc_map_tx_tso_hdr()
386 txbd_tmp.ext.tpid = 0; /* < C-TAG */ in enetc_map_tx_tso_hdr()
406 addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); in enetc_map_tx_tso_data()
407 if (unlikely(dma_mapping_error(tx_ring->dev, addr))) { in enetc_map_tx_tso_data()
408 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_map_tx_tso_data()
409 return -ENOMEM; in enetc_map_tx_tso_data()
414 tx_swbd->is_eof = 1; in enetc_map_tx_tso_data()
421 tx_swbd->dma = addr; in enetc_map_tx_tso_data()
422 tx_swbd->len = size; in enetc_map_tx_tso_data()
423 tx_swbd->dir = DMA_TO_DEVICE; in enetc_map_tx_tso_data()
436 if (tso->tlen != sizeof(struct udphdr)) { in enetc_tso_hdr_csum()
439 tcph->check = 0; in enetc_tso_hdr_csum()
443 udph->check = 0; in enetc_tso_hdr_csum()
449 if (!tso->ipv6) { in enetc_tso_hdr_csum()
452 iph->check = 0; in enetc_tso_hdr_csum()
453 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); in enetc_tso_hdr_csum()
457 *l4_hdr_len = hdr_len - skb_transport_offset(skb); in enetc_tso_hdr_csum()
468 /* Complete the L4 checksum by appending the pseudo-header to the in enetc_tso_complete_csum()
471 if (!tso->ipv6) in enetc_tso_complete_csum()
472 csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr, in enetc_tso_complete_csum()
473 ip_hdr(skb)->daddr, in enetc_tso_complete_csum()
474 len, ip_hdr(skb)->protocol, sum); in enetc_tso_complete_csum()
476 csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in enetc_tso_complete_csum()
477 &ipv6_hdr(skb)->daddr, in enetc_tso_complete_csum()
478 len, ipv6_hdr(skb)->nexthdr, sum); in enetc_tso_complete_csum()
480 if (tso->tlen != sizeof(struct udphdr)) { in enetc_tso_complete_csum()
483 tcph->check = csum_final; in enetc_tso_complete_csum()
487 udph->check = csum_final; in enetc_tso_complete_csum()
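/* Illustrative note, not part of the driver source: csum_tcpudp_magic() and
 * csum_ipv6_magic() fold the pseudo-header (addresses, length, protocol)
 * into the partial sum over the L4 header and payload, yielding the final
 * complemented 16-bit checksum that is written back into the TCP or UDP
 * header.
 */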
503 total_len = skb->len - hdr_len; in enetc_map_tx_tso_buffs()
504 i = tx_ring->next_to_use; in enetc_map_tx_tso_buffs()
511 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
515 data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len); in enetc_map_tx_tso_buffs()
516 total_len -= data_len; in enetc_map_tx_tso_buffs()
519 hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE; in enetc_map_tx_tso_buffs()
536 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
553 data_len -= size; in enetc_map_tx_tso_buffs()
565 tx_swbd->skb = skb; in enetc_map_tx_tso_buffs()
571 tx_ring->next_to_use = i; in enetc_map_tx_tso_buffs()
577 dev_err(tx_ring->dev, "DMA map error"); in enetc_map_tx_tso_buffs()
581 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_map_tx_tso_buffs()
584 i = tx_ring->bd_count; in enetc_map_tx_tso_buffs()
585 i--; in enetc_map_tx_tso_buffs()
586 } while (count--); in enetc_map_tx_tso_buffs()
598 /* Queue one-step Sync packet if already locked */ in enetc_start_xmit()
599 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { in enetc_start_xmit()
601 &priv->flags)) { in enetc_start_xmit()
602 skb_queue_tail(&priv->tx_skbs, skb); in enetc_start_xmit()
607 tx_ring = priv->tx_ring[skb->queue_mapping]; in enetc_start_xmit()
611 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
619 if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) in enetc_start_xmit()
623 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ in enetc_start_xmit()
625 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
629 if (skb->ip_summed == CHECKSUM_PARTIAL) { in enetc_start_xmit()
643 netif_stop_subqueue(ndev, tx_ring->index); in enetc_start_xmit()
658 /* Mark tx timestamp type on skb->cb[0] if required */ in enetc_xmit()
659 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in enetc_xmit()
660 (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) { in enetc_xmit()
661 skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK; in enetc_xmit()
663 skb->cb[0] = 0; in enetc_xmit()
666 /* Fall back to two-step timestamp if not a one-step Sync packet */ in enetc_xmit()
667 if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) { in enetc_xmit()
671 skb->cb[0] = ENETC_F_TX_TSTAMP; in enetc_xmit()
686 enetc_wr_reg_hot(v->rbier, 0); in enetc_msix()
687 enetc_wr_reg_hot(v->ricr1, v->rx_ictt); in enetc_msix()
689 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) in enetc_msix()
690 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0); in enetc_msix()
694 napi_schedule(&v->napi); in enetc_msix()
703 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); in enetc_rx_dim_work()
707 v->rx_ictt = enetc_usecs_to_cycles(moder.usec); in enetc_rx_dim_work()
708 dim->state = DIM_START_MEASURE; in enetc_rx_dim_work()
715 v->comp_cnt++; in enetc_rx_net_dim()
717 if (!v->rx_napi_work) in enetc_rx_net_dim()
720 dim_update_sample(v->comp_cnt, in enetc_rx_net_dim()
721 v->rx_ring.stats.packets, in enetc_rx_net_dim()
722 v->rx_ring.stats.bytes, in enetc_rx_net_dim()
724 net_dim(&v->rx_dim, dim_sample); in enetc_rx_net_dim()
729 int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; in enetc_bd_ready_count()
731 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; in enetc_bd_ready_count()
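/* Worked example, not part of the driver source: the ternary handles ring
 * wrap-around. With bd_count = 256, ci = 250 and pi = 4, pi < ci, so the
 * number of ready BDs is 256 - 250 + 4 = 10.
 */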
744 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; in enetc_reuse_page()
747 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); in enetc_reuse_page()
756 u32 lo, hi, tstamp_lo; in enetc_get_tx_tstamp() local
758 lo = enetc_rd_hot(hw, ENETC_SICTR0); in enetc_get_tx_tstamp()
760 tstamp_lo = le32_to_cpu(txbd->wb.tstamp); in enetc_get_tx_tstamp()
761 if (lo <= tstamp_lo) in enetc_get_tx_tstamp()
762 hi -= 1; in enetc_get_tx_tstamp()
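/* Illustrative note, not part of the driver source: the BD write-back only
 * carries the low 32 bits of the timestamp. The current counter is read as
 * hi (SICTR1) and lo (SICTR0); if lo has already wrapped past tstamp_lo, the
 * high word was incremented after the packet was stamped, so hi is
 * decremented before being combined with tstamp_lo into a 64-bit value.
 */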
770 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) { in enetc_tstamp_tx()
781 struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); in enetc_recycle_xdp_tx_buff()
783 .dma = tx_swbd->dma, in enetc_recycle_xdp_tx_buff()
784 .page = tx_swbd->page, in enetc_recycle_xdp_tx_buff()
785 .page_offset = tx_swbd->page_offset, in enetc_recycle_xdp_tx_buff()
786 .dir = tx_swbd->dir, in enetc_recycle_xdp_tx_buff()
787 .len = tx_swbd->len, in enetc_recycle_xdp_tx_buff()
797 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, in enetc_recycle_xdp_tx_buff()
802 rx_ring->stats.recycles++; in enetc_recycle_xdp_tx_buff()
807 rx_ring->stats.recycle_failures++; in enetc_recycle_xdp_tx_buff()
809 dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE, in enetc_recycle_xdp_tx_buff()
814 rx_ring->xdp.xdp_tx_in_flight--; in enetc_recycle_xdp_tx_buff()
820 struct net_device *ndev = tx_ring->ndev; in enetc_clean_tx_ring()
827 i = tx_ring->next_to_clean; in enetc_clean_tx_ring()
828 tx_swbd = &tx_ring->tx_swbd[i]; in enetc_clean_tx_ring()
837 bool is_eof = tx_swbd->is_eof; in enetc_clean_tx_ring()
839 if (unlikely(tx_swbd->check_wb)) { in enetc_clean_tx_ring()
842 if (txbd->flags & ENETC_TXBD_FLAGS_W && in enetc_clean_tx_ring()
843 tx_swbd->do_twostep_tstamp) { in enetc_clean_tx_ring()
844 enetc_get_tx_tstamp(&priv->si->hw, txbd, in enetc_clean_tx_ring()
849 if (tx_swbd->qbv_en && in enetc_clean_tx_ring()
850 txbd->wb.status & ENETC_TXBD_STATS_WIN) in enetc_clean_tx_ring()
854 if (tx_swbd->is_xdp_tx) in enetc_clean_tx_ring()
856 else if (likely(tx_swbd->dma)) in enetc_clean_tx_ring()
862 if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) { in enetc_clean_tx_ring()
863 /* Start work to release lock for next one-step in enetc_clean_tx_ring()
867 schedule_work(&priv->tx_onestep_tstamp); in enetc_clean_tx_ring()
875 tx_byte_cnt += tx_swbd->len; in enetc_clean_tx_ring()
881 bds_to_clean--; in enetc_clean_tx_ring()
884 if (unlikely(i == tx_ring->bd_count)) { in enetc_clean_tx_ring()
886 tx_swbd = tx_ring->tx_swbd; in enetc_clean_tx_ring()
892 /* re-arm interrupt source */ in enetc_clean_tx_ring()
893 enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) | in enetc_clean_tx_ring()
894 BIT(16 + tx_ring->index)); in enetc_clean_tx_ring()
901 tx_ring->next_to_clean = i; in enetc_clean_tx_ring()
902 tx_ring->stats.packets += tx_frm_cnt; in enetc_clean_tx_ring()
903 tx_ring->stats.bytes += tx_byte_cnt; in enetc_clean_tx_ring()
904 tx_ring->stats.win_drop += tx_win_drop; in enetc_clean_tx_ring()
907 __netif_subqueue_stopped(ndev, tx_ring->index) && in enetc_clean_tx_ring()
908 !test_bit(ENETC_TX_DOWN, &priv->flags) && in enetc_clean_tx_ring()
910 netif_wake_subqueue(ndev, tx_ring->index); in enetc_clean_tx_ring()
919 bool xdp = !!(rx_ring->xdp.prog); in enetc_new_page()
927 /* For XDP_TX, we forgo dma_unmap -> dma_map */ in enetc_new_page()
928 rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; in enetc_new_page()
930 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir); in enetc_new_page()
931 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { in enetc_new_page()
937 rx_swbd->dma = addr; in enetc_new_page()
938 rx_swbd->page = page; in enetc_new_page()
939 rx_swbd->page_offset = rx_ring->buffer_offset; in enetc_new_page()
950 i = rx_ring->next_to_use; in enetc_refill_rx_ring()
951 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
956 if (unlikely(!rx_swbd->page)) { in enetc_refill_rx_ring()
958 rx_ring->stats.rx_alloc_errs++; in enetc_refill_rx_ring()
964 rxbd->w.addr = cpu_to_le64(rx_swbd->dma + in enetc_refill_rx_ring()
965 rx_swbd->page_offset); in enetc_refill_rx_ring()
967 rxbd->r.lstatus = 0; in enetc_refill_rx_ring()
970 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
974 rx_ring->next_to_alloc = i; /* keep track for page reuse */ in enetc_refill_rx_ring()
975 rx_ring->next_to_use = i; in enetc_refill_rx_ring()
978 enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use); in enetc_refill_rx_ring()
991 struct enetc_hw *hw = &priv->si->hw; in enetc_get_rx_tstamp()
992 u32 lo, hi, tstamp_lo; in enetc_get_rx_tstamp() local
995 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) { in enetc_get_rx_tstamp()
996 lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0); in enetc_get_rx_tstamp()
997 hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1); in enetc_get_rx_tstamp()
999 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp); in enetc_get_rx_tstamp()
1000 if (lo <= tstamp_lo) in enetc_get_rx_tstamp()
1001 hi -= 1; in enetc_get_rx_tstamp()
1005 shhwtstamps->hwtstamp = ns_to_ktime(tstamp); in enetc_get_rx_tstamp()
1013 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); in enetc_get_offloads()
1016 if (rx_ring->ndev->features & NETIF_F_RXCSUM) { in enetc_get_offloads()
1017 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum); in enetc_get_offloads()
1019 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum)); in enetc_get_offloads()
1020 skb->ip_summed = CHECKSUM_COMPLETE; in enetc_get_offloads()
1023 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) { in enetc_get_offloads()
1026 switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) { in enetc_get_offloads()
1034 tpid = htons(enetc_port_rd(&priv->si->hw, in enetc_get_offloads()
1038 tpid = htons(enetc_port_rd(&priv->si->hw, in enetc_get_offloads()
1045 __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt)); in enetc_get_offloads()
1049 if (priv->active_offloads & ENETC_F_RX_TSTAMP) in enetc_get_offloads()
1050 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb); in enetc_get_offloads()
1054 /* This gets called during the non-XDP NAPI poll cycle as well as on XDP_PASS,
1061 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_get_rx_buff()
1063 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, in enetc_get_rx_buff()
1064 rx_swbd->page_offset, in enetc_get_rx_buff()
1065 size, rx_swbd->dir); in enetc_get_rx_buff()
1069 /* Reuse the current page without performing half-page buffer flipping */
1073 size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset; in enetc_put_rx_buff()
1077 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, in enetc_put_rx_buff()
1078 rx_swbd->page_offset, in enetc_put_rx_buff()
1079 buffer_size, rx_swbd->dir); in enetc_put_rx_buff()
1081 rx_swbd->page = NULL; in enetc_put_rx_buff()
1084 /* Reuse the current page by performing half-page buffer flipping */
1088 if (likely(enetc_page_reusable(rx_swbd->page))) { in enetc_flip_rx_buff()
1089 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; in enetc_flip_rx_buff()
1090 page_ref_inc(rx_swbd->page); in enetc_flip_rx_buff()
1094 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_flip_rx_buff()
1095 rx_swbd->dir); in enetc_flip_rx_buff()
1096 rx_swbd->page = NULL; in enetc_flip_rx_buff()
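/* Illustrative note, not part of the driver source: each RX page is split
 * into two half-page buffers of ENETC_RXB_TRUESIZE bytes (assumed 2048 on a
 * 4 KiB page), so XOR-ing page_offset with ENETC_RXB_TRUESIZE flips to the
 * other half while the half just handed to the stack is still in use.
 */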
1107 ba = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_skb()
1108 skb = build_skb(ba - rx_ring->buffer_offset, ENETC_RXB_TRUESIZE); in enetc_map_rx_buff_to_skb()
1110 rx_ring->stats.rx_alloc_errs++; in enetc_map_rx_buff_to_skb()
1114 skb_reserve(skb, rx_ring->buffer_offset); in enetc_map_rx_buff_to_skb()
1127 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, in enetc_add_rx_buff_to_skb()
1128 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); in enetc_add_rx_buff_to_skb()
1140 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
1145 bd_status = le32_to_cpu((*rxbd)->r.lstatus); in enetc_check_bd_errors_and_consume()
1147 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
1151 rx_ring->ndev->stats.rx_dropped++; in enetc_check_bd_errors_and_consume()
1152 rx_ring->ndev->stats.rx_errors++; in enetc_check_bd_errors_and_consume()
1164 size = le16_to_cpu((*rxbd)->r.buf_len); in enetc_build_skb()
1177 bd_status = le32_to_cpu((*rxbd)->r.lstatus); in enetc_build_skb()
1182 size = le16_to_cpu((*rxbd)->r.buf_len); in enetc_build_skb()
1192 skb_record_rx_queue(skb, rx_ring->index); in enetc_build_skb()
1193 skb->protocol = eth_type_trans(skb, rx_ring->ndev); in enetc_build_skb()
1208 i = rx_ring->next_to_clean; in enetc_clean_rx_ring()
1216 cleaned_cnt -= enetc_refill_rx_ring(rx_ring, in enetc_clean_rx_ring()
1220 bd_status = le32_to_cpu(rxbd->r.lstatus); in enetc_clean_rx_ring()
1224 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); in enetc_clean_rx_ring()
1242 rx_byte_cnt += skb->len + ETH_HLEN; in enetc_clean_rx_ring()
1248 rx_ring->next_to_clean = i; in enetc_clean_rx_ring()
1250 rx_ring->stats.packets += rx_frm_cnt; in enetc_clean_rx_ring()
1251 rx_ring->stats.bytes += rx_byte_cnt; in enetc_clean_rx_ring()
1265 txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset); in enetc_xdp_map_tx_buff()
1266 txbd->buf_len = cpu_to_le16(tx_swbd->len); in enetc_xdp_map_tx_buff()
1267 txbd->frm_len = cpu_to_le16(frm_len); in enetc_xdp_map_tx_buff()
1269 memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd)); in enetc_xdp_map_tx_buff()
1279 int i, k, frm_len = tmp_tx_swbd->len; in enetc_xdp_tx()
1284 while (unlikely(!tmp_tx_swbd->is_eof)) { in enetc_xdp_tx()
1286 frm_len += tmp_tx_swbd->len; in enetc_xdp_tx()
1289 i = tx_ring->next_to_use; in enetc_xdp_tx()
1297 if (xdp_tx_swbd->is_eof) { in enetc_xdp_tx()
1300 txbd->flags = ENETC_TXBD_FLAGS_F; in enetc_xdp_tx()
1306 tx_ring->next_to_use = i; in enetc_xdp_tx()
1317 void *data = xdp_frame->data; in enetc_xdp_frame_to_xdp_tx_swbd()
1318 int len = xdp_frame->len; in enetc_xdp_frame_to_xdp_tx_swbd()
1324 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); in enetc_xdp_frame_to_xdp_tx_swbd()
1325 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { in enetc_xdp_frame_to_xdp_tx_swbd()
1326 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_xdp_frame_to_xdp_tx_swbd()
1327 return -1; in enetc_xdp_frame_to_xdp_tx_swbd()
1330 xdp_tx_swbd->dma = dma; in enetc_xdp_frame_to_xdp_tx_swbd()
1331 xdp_tx_swbd->dir = DMA_TO_DEVICE; in enetc_xdp_frame_to_xdp_tx_swbd()
1332 xdp_tx_swbd->len = len; in enetc_xdp_frame_to_xdp_tx_swbd()
1333 xdp_tx_swbd->is_xdp_redirect = true; in enetc_xdp_frame_to_xdp_tx_swbd()
1334 xdp_tx_swbd->is_eof = false; in enetc_xdp_frame_to_xdp_tx_swbd()
1335 xdp_tx_swbd->xdp_frame = NULL; in enetc_xdp_frame_to_xdp_tx_swbd()
1346 for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags; in enetc_xdp_frame_to_xdp_tx_swbd()
1351 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE); in enetc_xdp_frame_to_xdp_tx_swbd()
1352 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { in enetc_xdp_frame_to_xdp_tx_swbd()
1354 while (--n >= 0) in enetc_xdp_frame_to_xdp_tx_swbd()
1357 netdev_err(tx_ring->ndev, "DMA map error\n"); in enetc_xdp_frame_to_xdp_tx_swbd()
1358 return -1; in enetc_xdp_frame_to_xdp_tx_swbd()
1361 xdp_tx_swbd->dma = dma; in enetc_xdp_frame_to_xdp_tx_swbd()
1362 xdp_tx_swbd->dir = DMA_TO_DEVICE; in enetc_xdp_frame_to_xdp_tx_swbd()
1363 xdp_tx_swbd->len = len; in enetc_xdp_frame_to_xdp_tx_swbd()
1364 xdp_tx_swbd->is_xdp_redirect = true; in enetc_xdp_frame_to_xdp_tx_swbd()
1365 xdp_tx_swbd->is_eof = false; in enetc_xdp_frame_to_xdp_tx_swbd()
1366 xdp_tx_swbd->xdp_frame = NULL; in enetc_xdp_frame_to_xdp_tx_swbd()
1372 xdp_tx_arr[n - 1].is_eof = true; in enetc_xdp_frame_to_xdp_tx_swbd()
1373 xdp_tx_arr[n - 1].xdp_frame = xdp_frame; in enetc_xdp_frame_to_xdp_tx_swbd()
1387 if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) in enetc_xdp_xmit()
1388 return -ENETDOWN; in enetc_xdp_xmit()
1392 tx_ring = priv->xdp_tx_ring[smp_processor_id()]; in enetc_xdp_xmit()
1394 prefetchw(ENETC_TXBD(*tx_ring, tx_ring->next_to_use)); in enetc_xdp_xmit()
1408 tx_ring->stats.xdp_tx_drops++; in enetc_xdp_xmit()
1418 tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; in enetc_xdp_xmit()
1430 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_xdp()
1433 rx_swbd->len = size; in enetc_map_rx_buff_to_xdp()
1435 xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset, in enetc_map_rx_buff_to_xdp()
1436 rx_ring->buffer_offset, size, false); in enetc_map_rx_buff_to_xdp()
1447 rx_swbd->len = size; in enetc_add_rx_buff_to_xdp()
1451 shinfo->xdp_frags_size = size; in enetc_add_rx_buff_to_xdp()
1452 shinfo->nr_frags = 0; in enetc_add_rx_buff_to_xdp()
1454 shinfo->xdp_frags_size += size; in enetc_add_rx_buff_to_xdp()
1457 if (page_is_pfmemalloc(rx_swbd->page)) in enetc_add_rx_buff_to_xdp()
1460 frag = &shinfo->frags[shinfo->nr_frags]; in enetc_add_rx_buff_to_xdp()
1461 skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset, in enetc_add_rx_buff_to_xdp()
1464 shinfo->nr_frags++; in enetc_add_rx_buff_to_xdp()
1471 u16 size = le16_to_cpu((*rxbd)->r.buf_len); in enetc_build_xdp_buff()
1473 xdp_init_buff(xdp_buff, ENETC_RXB_TRUESIZE, &rx_ring->xdp.rxq); in enetc_build_xdp_buff()
1481 bd_status = le32_to_cpu((*rxbd)->r.lstatus); in enetc_build_xdp_buff()
1486 size = le16_to_cpu((*rxbd)->r.buf_len); in enetc_build_xdp_buff()
1506 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; in enetc_rx_swbd_to_xdp_tx_swbd()
1510 tx_swbd->dma = rx_swbd->dma; in enetc_rx_swbd_to_xdp_tx_swbd()
1511 tx_swbd->dir = rx_swbd->dir; in enetc_rx_swbd_to_xdp_tx_swbd()
1512 tx_swbd->page = rx_swbd->page; in enetc_rx_swbd_to_xdp_tx_swbd()
1513 tx_swbd->page_offset = rx_swbd->page_offset; in enetc_rx_swbd_to_xdp_tx_swbd()
1514 tx_swbd->len = rx_swbd->len; in enetc_rx_swbd_to_xdp_tx_swbd()
1515 tx_swbd->is_dma_page = true; in enetc_rx_swbd_to_xdp_tx_swbd()
1516 tx_swbd->is_xdp_tx = true; in enetc_rx_swbd_to_xdp_tx_swbd()
1517 tx_swbd->is_eof = false; in enetc_rx_swbd_to_xdp_tx_swbd()
1521 xdp_tx_arr[n - 1].is_eof = true; in enetc_rx_swbd_to_xdp_tx_swbd()
1531 &rx_ring->rx_swbd[rx_ring_first]); in enetc_xdp_drop()
1542 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); in enetc_clean_rx_ring_xdp()
1550 i = rx_ring->next_to_clean; in enetc_clean_rx_ring_xdp()
1561 bd_status = le32_to_cpu(rxbd->r.lstatus); in enetc_clean_rx_ring_xdp()
1565 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index)); in enetc_clean_rx_ring_xdp()
1591 bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act); in enetc_clean_rx_ring_xdp()
1594 trace_xdp_exception(rx_ring->ndev, prog, xdp_act); in enetc_clean_rx_ring_xdp()
1598 rx_ring->stats.xdp_drops++; in enetc_clean_rx_ring_xdp()
1614 tx_ring = priv->xdp_tx_ring[rx_ring->index]; in enetc_clean_rx_ring_xdp()
1615 if (unlikely(test_bit(ENETC_TX_DOWN, &priv->flags))) { in enetc_clean_rx_ring_xdp()
1617 tx_ring->stats.xdp_tx_drops++; in enetc_clean_rx_ring_xdp()
1627 tx_ring->stats.xdp_tx_drops++; in enetc_clean_rx_ring_xdp()
1629 tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; in enetc_clean_rx_ring_xdp()
1630 rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; in enetc_clean_rx_ring_xdp()
1637 * rx_swbd->page. in enetc_clean_rx_ring_xdp()
1640 rx_ring->rx_swbd[orig_i].page = NULL; in enetc_clean_rx_ring_xdp()
1646 err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); in enetc_clean_rx_ring_xdp()
1649 rx_ring->stats.xdp_redirect_failures++; in enetc_clean_rx_ring_xdp()
1653 &rx_ring->rx_swbd[orig_i]); in enetc_clean_rx_ring_xdp()
1657 rx_ring->stats.xdp_redirect++; in enetc_clean_rx_ring_xdp()
1665 rx_ring->next_to_clean = i; in enetc_clean_rx_ring_xdp()
1667 rx_ring->stats.packets += rx_frm_cnt; in enetc_clean_rx_ring_xdp()
1668 rx_ring->stats.bytes += rx_byte_cnt; in enetc_clean_rx_ring_xdp()
1676 if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight) in enetc_clean_rx_ring_xdp()
1677 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) - in enetc_clean_rx_ring_xdp()
1678 rx_ring->xdp.xdp_tx_in_flight); in enetc_clean_rx_ring_xdp()
1687 struct enetc_bdr *rx_ring = &v->rx_ring; in enetc_poll()
1695 for (i = 0; i < v->count_tx_rings; i++) in enetc_poll()
1696 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) in enetc_poll()
1699 prog = rx_ring->xdp.prog; in enetc_poll()
1707 v->rx_napi_work = true; in enetc_poll()
1716 if (likely(v->rx_dim_en)) in enetc_poll()
1719 v->rx_napi_work = false; in enetc_poll()
1722 enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); in enetc_poll()
1724 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) in enetc_poll()
1725 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), in enetc_poll()
1737 struct enetc_hw *hw = &si->hw; in enetc_get_si_caps()
1742 si->num_rx_rings = (val >> 16) & 0xff; in enetc_get_si_caps()
1743 si->num_tx_rings = val & 0xff; in enetc_get_si_caps()
1746 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val); in enetc_get_si_caps()
1747 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE); in enetc_get_si_caps()
1749 si->num_rss = 0; in enetc_get_si_caps()
1755 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss); in enetc_get_si_caps()
1759 si->hw_features |= ENETC_SI_F_QBV; in enetc_get_si_caps()
1762 si->hw_features |= ENETC_SI_F_QBU; in enetc_get_si_caps()
1765 si->hw_features |= ENETC_SI_F_PSFP; in enetc_get_si_caps()
1771 size_t bd_base_size = res->bd_count * res->bd_size; in enetc_dma_alloc_bdr()
1773 res->bd_base = dma_alloc_coherent(res->dev, bd_base_size, in enetc_dma_alloc_bdr()
1774 &res->bd_dma_base, GFP_KERNEL); in enetc_dma_alloc_bdr()
1775 if (!res->bd_base) in enetc_dma_alloc_bdr()
1776 return -ENOMEM; in enetc_dma_alloc_bdr()
1779 if (!IS_ALIGNED(res->bd_dma_base, 128)) { in enetc_dma_alloc_bdr()
1780 dma_free_coherent(res->dev, bd_base_size, res->bd_base, in enetc_dma_alloc_bdr()
1781 res->bd_dma_base); in enetc_dma_alloc_bdr()
1782 return -EINVAL; in enetc_dma_alloc_bdr()
1790 size_t bd_base_size = res->bd_count * res->bd_size; in enetc_dma_free_bdr()
1792 dma_free_coherent(res->dev, bd_base_size, res->bd_base, in enetc_dma_free_bdr()
1793 res->bd_dma_base); in enetc_dma_free_bdr()
1801 res->dev = dev; in enetc_alloc_tx_resource()
1802 res->bd_count = bd_count; in enetc_alloc_tx_resource()
1803 res->bd_size = sizeof(union enetc_tx_bd); in enetc_alloc_tx_resource()
1805 res->tx_swbd = vcalloc(bd_count, sizeof(*res->tx_swbd)); in enetc_alloc_tx_resource()
1806 if (!res->tx_swbd) in enetc_alloc_tx_resource()
1807 return -ENOMEM; in enetc_alloc_tx_resource()
1813 res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE, in enetc_alloc_tx_resource()
1814 &res->tso_headers_dma, in enetc_alloc_tx_resource()
1816 if (!res->tso_headers) { in enetc_alloc_tx_resource()
1817 err = -ENOMEM; in enetc_alloc_tx_resource()
1826 vfree(res->tx_swbd); in enetc_alloc_tx_resource()
1827 res->tx_swbd = NULL; in enetc_alloc_tx_resource()
1834 dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE, in enetc_free_tx_resource()
1835 res->tso_headers, res->tso_headers_dma); in enetc_free_tx_resource()
1837 vfree(res->tx_swbd); in enetc_free_tx_resource()
1846 tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL); in enetc_alloc_tx_resources()
1848 return ERR_PTR(-ENOMEM); in enetc_alloc_tx_resources()
1850 for (i = 0; i < priv->num_tx_rings; i++) { in enetc_alloc_tx_resources()
1851 struct enetc_bdr *tx_ring = priv->tx_ring[i]; in enetc_alloc_tx_resources()
1853 err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev, in enetc_alloc_tx_resources()
1854 tx_ring->bd_count); in enetc_alloc_tx_resources()
1862 while (i-- > 0) in enetc_alloc_tx_resources()
1887 res->dev = dev; in enetc_alloc_rx_resource()
1888 res->bd_count = bd_count; in enetc_alloc_rx_resource()
1889 res->bd_size = sizeof(union enetc_rx_bd); in enetc_alloc_rx_resource()
1891 res->bd_size *= 2; in enetc_alloc_rx_resource()
1893 res->rx_swbd = vcalloc(bd_count, sizeof(struct enetc_rx_swbd)); in enetc_alloc_rx_resource()
1894 if (!res->rx_swbd) in enetc_alloc_rx_resource()
1895 return -ENOMEM; in enetc_alloc_rx_resource()
1899 vfree(res->rx_swbd); in enetc_alloc_rx_resource()
1909 vfree(res->rx_swbd); in enetc_free_rx_resource()
1918 rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL); in enetc_alloc_rx_resources()
1920 return ERR_PTR(-ENOMEM); in enetc_alloc_rx_resources()
1922 for (i = 0; i < priv->num_rx_rings; i++) { in enetc_alloc_rx_resources()
1923 struct enetc_bdr *rx_ring = priv->rx_ring[i]; in enetc_alloc_rx_resources()
1925 err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev, in enetc_alloc_rx_resources()
1926 rx_ring->bd_count, extended); in enetc_alloc_rx_resources()
1934 while (i-- > 0) in enetc_alloc_rx_resources()
1956 tx_ring->bd_base = res ? res->bd_base : NULL; in enetc_assign_tx_resource()
1957 tx_ring->bd_dma_base = res ? res->bd_dma_base : 0; in enetc_assign_tx_resource()
1958 tx_ring->tx_swbd = res ? res->tx_swbd : NULL; in enetc_assign_tx_resource()
1959 tx_ring->tso_headers = res ? res->tso_headers : NULL; in enetc_assign_tx_resource()
1960 tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0; in enetc_assign_tx_resource()
1966 rx_ring->bd_base = res ? res->bd_base : NULL; in enetc_assign_rx_resource()
1967 rx_ring->bd_dma_base = res ? res->bd_dma_base : 0; in enetc_assign_rx_resource()
1968 rx_ring->rx_swbd = res ? res->rx_swbd : NULL; in enetc_assign_rx_resource()
1976 if (priv->tx_res) in enetc_assign_tx_resources()
1977 enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings); in enetc_assign_tx_resources()
1979 for (i = 0; i < priv->num_tx_rings; i++) { in enetc_assign_tx_resources()
1980 enetc_assign_tx_resource(priv->tx_ring[i], in enetc_assign_tx_resources()
1984 priv->tx_res = res; in enetc_assign_tx_resources()
1992 if (priv->rx_res) in enetc_assign_rx_resources()
1993 enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings); in enetc_assign_rx_resources()
1995 for (i = 0; i < priv->num_rx_rings; i++) { in enetc_assign_rx_resources()
1996 enetc_assign_rx_resource(priv->rx_ring[i], in enetc_assign_rx_resources()
2000 priv->rx_res = res; in enetc_assign_rx_resources()
2007 for (i = 0; i < tx_ring->bd_count; i++) { in enetc_free_tx_ring()
2008 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; in enetc_free_tx_ring()
2018 for (i = 0; i < rx_ring->bd_count; i++) { in enetc_free_rx_ring()
2019 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_free_rx_ring()
2021 if (!rx_swbd->page) in enetc_free_rx_ring()
2024 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_free_rx_ring()
2025 rx_swbd->dir); in enetc_free_rx_ring()
2026 __free_page(rx_swbd->page); in enetc_free_rx_ring()
2027 rx_swbd->page = NULL; in enetc_free_rx_ring()
2035 for (i = 0; i < priv->num_rx_rings; i++) in enetc_free_rxtx_rings()
2036 enetc_free_rx_ring(priv->rx_ring[i]); in enetc_free_rxtx_rings()
2038 for (i = 0; i < priv->num_tx_rings; i++) in enetc_free_rxtx_rings()
2039 enetc_free_tx_ring(priv->tx_ring[i]); in enetc_free_rxtx_rings()
2047 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL); in enetc_setup_default_rss_table()
2049 return -ENOMEM; in enetc_setup_default_rss_table()
2052 for (i = 0; i < si->num_rss; i++) in enetc_setup_default_rss_table()
2055 enetc_set_rss_table(si, rss_table, si->num_rss); in enetc_setup_default_rss_table()
2064 struct enetc_si *si = priv->si; in enetc_configure_si()
2065 struct enetc_hw *hw = &si->hw; in enetc_configure_si()
2075 if (si->num_rss) { in enetc_configure_si()
2076 err = enetc_setup_default_rss_table(si, priv->num_rx_rings); in enetc_configure_si()
2087 struct enetc_si *si = priv->si; in enetc_init_si_rings_params()
2090 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE; in enetc_init_si_rings_params()
2091 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE; in enetc_init_si_rings_params()
2095 * TODO: Make # of TX rings run-time configurable in enetc_init_si_rings_params()
2097 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); in enetc_init_si_rings_params()
2098 priv->num_tx_rings = si->num_tx_rings; in enetc_init_si_rings_params()
2099 priv->bdr_int_num = cpus; in enetc_init_si_rings_params()
2100 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL; in enetc_init_si_rings_params()
2101 priv->tx_ictt = ENETC_TXIC_TIMETHR; in enetc_init_si_rings_params()
2107 struct enetc_si *si = priv->si; in enetc_alloc_si_resources()
2109 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), in enetc_alloc_si_resources()
2111 if (!priv->cls_rules) in enetc_alloc_si_resources()
2112 return -ENOMEM; in enetc_alloc_si_resources()
2120 kfree(priv->cls_rules); in enetc_free_si_resources()
2126 int idx = tx_ring->index; in enetc_setup_txbdr()
2130 lower_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
2133 upper_32_bits(tx_ring->bd_dma_base)); in enetc_setup_txbdr()
2135 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ in enetc_setup_txbdr()
2137 ENETC_RTBLENR_LEN(tx_ring->bd_count)); in enetc_setup_txbdr()
2140 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); in enetc_setup_txbdr()
2141 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); in enetc_setup_txbdr()
2146 tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio); in enetc_setup_txbdr()
2147 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) in enetc_setup_txbdr()
2153 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); in enetc_setup_txbdr()
2154 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); in enetc_setup_txbdr()
2155 tx_ring->idr = hw->reg + ENETC_SITXIDR; in enetc_setup_txbdr()
2161 int idx = rx_ring->index; in enetc_setup_rxbdr()
2165 lower_32_bits(rx_ring->bd_dma_base)); in enetc_setup_rxbdr()
2168 upper_32_bits(rx_ring->bd_dma_base)); in enetc_setup_rxbdr()
2170 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ in enetc_setup_rxbdr()
2172 ENETC_RTBLENR_LEN(rx_ring->bd_count)); in enetc_setup_rxbdr()
2174 if (rx_ring->xdp.prog) in enetc_setup_rxbdr()
2189 rx_ring->ext_en = extended; in enetc_setup_rxbdr()
2190 if (rx_ring->ext_en) in enetc_setup_rxbdr()
2193 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) in enetc_setup_rxbdr()
2196 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); in enetc_setup_rxbdr()
2197 rx_ring->idr = hw->reg + ENETC_SIRXIDR; in enetc_setup_rxbdr()
2199 rx_ring->next_to_clean = 0; in enetc_setup_rxbdr()
2200 rx_ring->next_to_use = 0; in enetc_setup_rxbdr()
2201 rx_ring->next_to_alloc = 0; in enetc_setup_rxbdr()
2212 struct enetc_hw *hw = &priv->si->hw; in enetc_setup_bdrs()
2215 for (i = 0; i < priv->num_tx_rings; i++) in enetc_setup_bdrs()
2216 enetc_setup_txbdr(hw, priv->tx_ring[i]); in enetc_setup_bdrs()
2218 for (i = 0; i < priv->num_rx_rings; i++) in enetc_setup_bdrs()
2219 enetc_setup_rxbdr(hw, priv->rx_ring[i], extended); in enetc_setup_bdrs()
2224 int idx = tx_ring->index; in enetc_enable_txbdr()
2234 int idx = rx_ring->index; in enetc_enable_rxbdr()
2244 struct enetc_hw *hw = &priv->si->hw; in enetc_enable_rx_bdrs()
2247 for (i = 0; i < priv->num_rx_rings; i++) in enetc_enable_rx_bdrs()
2248 enetc_enable_rxbdr(hw, priv->rx_ring[i]); in enetc_enable_rx_bdrs()
2253 struct enetc_hw *hw = &priv->si->hw; in enetc_enable_tx_bdrs()
2256 for (i = 0; i < priv->num_tx_rings; i++) in enetc_enable_tx_bdrs()
2257 enetc_enable_txbdr(hw, priv->tx_ring[i]); in enetc_enable_tx_bdrs()
2262 int idx = rx_ring->index; in enetc_disable_rxbdr()
2264 /* disable EN bit on ring */ in enetc_disable_rxbdr()
2270 int idx = rx_ring->index; in enetc_disable_txbdr()
2272 /* disable EN bit on ring */ in enetc_disable_txbdr()
2278 struct enetc_hw *hw = &priv->si->hw; in enetc_disable_rx_bdrs()
2281 for (i = 0; i < priv->num_rx_rings; i++) in enetc_disable_rx_bdrs()
2282 enetc_disable_rxbdr(hw, priv->rx_ring[i]); in enetc_disable_rx_bdrs()
2287 struct enetc_hw *hw = &priv->si->hw; in enetc_disable_tx_bdrs()
2290 for (i = 0; i < priv->num_tx_rings; i++) in enetc_disable_tx_bdrs()
2291 enetc_disable_txbdr(hw, priv->tx_ring[i]); in enetc_disable_tx_bdrs()
2297 int idx = tx_ring->index; in enetc_wait_txbdr()
2307 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", in enetc_wait_txbdr()
2313 struct enetc_hw *hw = &priv->si->hw; in enetc_wait_bdrs()
2316 for (i = 0; i < priv->num_tx_rings; i++) in enetc_wait_bdrs()
2317 enetc_wait_txbdr(hw, priv->tx_ring[i]); in enetc_wait_bdrs()
2322 struct pci_dev *pdev = priv->si->pdev; in enetc_setup_irqs()
2323 struct enetc_hw *hw = &priv->si->hw; in enetc_setup_irqs()
2326 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_setup_irqs()
2328 struct enetc_int_vector *v = priv->int_vector[i]; in enetc_setup_irqs()
2331 snprintf(v->name, sizeof(v->name), "%s-rxtx%d", in enetc_setup_irqs()
2332 priv->ndev->name, i); in enetc_setup_irqs()
2333 err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v); in enetc_setup_irqs()
2335 dev_err(priv->dev, "request_irq() failed!\n"); in enetc_setup_irqs()
2339 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); in enetc_setup_irqs()
2340 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); in enetc_setup_irqs()
2341 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1); in enetc_setup_irqs()
2345 for (j = 0; j < v->count_tx_rings; j++) { in enetc_setup_irqs()
2346 int idx = v->tx_ring[j].index; in enetc_setup_irqs()
2356 while (i--) { in enetc_setup_irqs()
2360 free_irq(irq, priv->int_vector[i]); in enetc_setup_irqs()
2368 struct pci_dev *pdev = priv->si->pdev; in enetc_free_irqs()
2371 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_free_irqs()
2375 free_irq(irq, priv->int_vector[i]); in enetc_free_irqs()
2381 struct enetc_hw *hw = &priv->si->hw; in enetc_setup_interrupts()
2386 if (priv->ic_mode & in enetc_setup_interrupts()
2389 /* init to non-0 minimum, will be adjusted later */ in enetc_setup_interrupts()
2396 for (i = 0; i < priv->num_rx_rings; i++) { in enetc_setup_interrupts()
2402 if (priv->ic_mode & ENETC_IC_TX_MANUAL) in enetc_setup_interrupts()
2407 for (i = 0; i < priv->num_tx_rings; i++) { in enetc_setup_interrupts()
2408 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt); in enetc_setup_interrupts()
2416 struct enetc_hw *hw = &priv->si->hw; in enetc_clear_interrupts()
2419 for (i = 0; i < priv->num_tx_rings; i++) in enetc_clear_interrupts()
2422 for (i = 0; i < priv->num_rx_rings; i++) in enetc_clear_interrupts()
2432 if (!priv->phylink) { in enetc_phylink_connect()
2433 /* phy-less mode */ in enetc_phylink_connect()
2438 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0); in enetc_phylink_connect()
2440 dev_err(&ndev->dev, "could not attach to PHY\n"); in enetc_phylink_connect()
2446 phylink_ethtool_set_eee(priv->phylink, &edata); in enetc_phylink_connect()
2448 phylink_start(priv->phylink); in enetc_phylink_connect()
2460 netif_tx_lock_bh(priv->ndev); in enetc_tx_onestep_tstamp()
2462 clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags); in enetc_tx_onestep_tstamp()
2463 skb = skb_dequeue(&priv->tx_skbs); in enetc_tx_onestep_tstamp()
2465 enetc_start_xmit(skb, priv->ndev); in enetc_tx_onestep_tstamp()
2467 netif_tx_unlock_bh(priv->ndev); in enetc_tx_onestep_tstamp()
2472 INIT_WORK(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp); in enetc_tx_onestep_tstamp_init()
2473 skb_queue_head_init(&priv->tx_skbs); in enetc_tx_onestep_tstamp_init()
2483 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_start()
2484 int irq = pci_irq_vector(priv->si->pdev, in enetc_start()
2487 napi_enable(&priv->int_vector[i]->napi); in enetc_start()
2497 clear_bit(ENETC_TX_DOWN, &priv->flags); in enetc_start()
2508 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); in enetc_open()
2539 enetc_free_tx_resources(tx_res, priv->num_tx_rings); in enetc_open()
2541 if (priv->phylink) in enetc_open()
2542 phylink_disconnect_phy(priv->phylink); in enetc_open()
2555 set_bit(ENETC_TX_DOWN, &priv->flags); in enetc_stop()
2565 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_stop()
2566 int irq = pci_irq_vector(priv->si->pdev, in enetc_stop()
2570 napi_synchronize(&priv->int_vector[i]->napi); in enetc_stop()
2571 napi_disable(&priv->int_vector[i]->napi); in enetc_stop()
2584 if (priv->phylink) { in enetc_close()
2585 phylink_stop(priv->phylink); in enetc_close()
2586 phylink_disconnect_phy(priv->phylink); in enetc_close()
2615 if (!netif_running(priv->ndev)) { in enetc_reconfigure()
2637 enetc_stop(priv->ndev); in enetc_reconfigure()
2650 enetc_start(priv->ndev); in enetc_reconfigure()
2656 enetc_start(priv->ndev); in enetc_reconfigure()
2657 enetc_free_rx_resources(rx_res, priv->num_rx_rings); in enetc_reconfigure()
2659 enetc_free_tx_resources(tx_res, priv->num_tx_rings); in enetc_reconfigure()
2668 for (i = 0; i < priv->num_tx_rings; i++) in enetc_debug_tx_ring_prios()
2669 netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i, in enetc_debug_tx_ring_prios()
2670 priv->tx_ring[i]->prio); in enetc_debug_tx_ring_prios()
2676 struct enetc_hw *hw = &priv->si->hw; in enetc_reset_tc_mqprio()
2685 priv->min_num_stack_tx_queues = num_possible_cpus(); in enetc_reset_tc_mqprio()
2688 for (i = 0; i < priv->num_tx_rings; i++) { in enetc_reset_tc_mqprio()
2689 tx_ring = priv->tx_ring[i]; in enetc_reset_tc_mqprio()
2690 tx_ring->prio = 0; in enetc_reset_tc_mqprio()
2691 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); in enetc_reset_tc_mqprio()
2704 struct tc_mqprio_qopt *qopt = &mqprio->qopt; in enetc_setup_tc_mqprio()
2705 struct enetc_hw *hw = &priv->si->hw; in enetc_setup_tc_mqprio()
2708 u8 num_tc = qopt->num_tc; in enetc_setup_tc_mqprio()
2722 offset = qopt->offset[tc]; in enetc_setup_tc_mqprio()
2723 count = qopt->count[tc]; in enetc_setup_tc_mqprio()
2731 tx_ring = priv->tx_ring[q]; in enetc_setup_tc_mqprio()
2733 * between TX queues based on skb->priority. As such, in enetc_setup_tc_mqprio()
2739 tx_ring->prio = tc; in enetc_setup_tc_mqprio()
2740 enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio); in enetc_setup_tc_mqprio()
2748 priv->min_num_stack_tx_queues = num_stack_tx_queues; in enetc_setup_tc_mqprio()
2752 enetc_change_preemptible_tcs(priv, mqprio->preemptible_tcs); in enetc_setup_tc_mqprio()
2768 old_prog = xchg(&priv->xdp_prog, prog); in enetc_reconfigure_xdp_cb()
2771 err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues); in enetc_reconfigure_xdp_cb()
2773 xchg(&priv->xdp_prog, old_prog); in enetc_reconfigure_xdp_cb()
2780 for (i = 0; i < priv->num_rx_rings; i++) { in enetc_reconfigure_xdp_cb()
2781 struct enetc_bdr *rx_ring = priv->rx_ring[i]; in enetc_reconfigure_xdp_cb()
2783 rx_ring->xdp.prog = prog; in enetc_reconfigure_xdp_cb()
2786 rx_ring->buffer_offset = XDP_PACKET_HEADROOM; in enetc_reconfigure_xdp_cb()
2788 rx_ring->buffer_offset = ENETC_RXB_PAD; in enetc_reconfigure_xdp_cb()
2801 if (priv->min_num_stack_tx_queues + num_xdp_tx_queues > in enetc_setup_xdp_prog()
2802 priv->num_tx_rings) { in enetc_setup_xdp_prog()
2806 priv->min_num_stack_tx_queues, in enetc_setup_xdp_prog()
2807 priv->num_tx_rings); in enetc_setup_xdp_prog()
2808 return -EBUSY; in enetc_setup_xdp_prog()
2811 extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); in enetc_setup_xdp_prog()
2821 switch (bpf->command) { in enetc_setup_bpf()
2823 return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack); in enetc_setup_bpf()
2825 return -EINVAL; in enetc_setup_bpf()
2835 struct net_device_stats *stats = &ndev->stats; in enetc_get_stats()
2840 for (i = 0; i < priv->num_rx_rings; i++) { in enetc_get_stats()
2841 packets += priv->rx_ring[i]->stats.packets; in enetc_get_stats()
2842 bytes += priv->rx_ring[i]->stats.bytes; in enetc_get_stats()
2845 stats->rx_packets = packets; in enetc_get_stats()
2846 stats->rx_bytes = bytes; in enetc_get_stats()
2850 for (i = 0; i < priv->num_tx_rings; i++) { in enetc_get_stats()
2851 packets += priv->tx_ring[i]->stats.packets; in enetc_get_stats()
2852 bytes += priv->tx_ring[i]->stats.bytes; in enetc_get_stats()
2853 tx_dropped += priv->tx_ring[i]->stats.win_drop; in enetc_get_stats()
2856 stats->tx_packets = packets; in enetc_get_stats()
2857 stats->tx_bytes = bytes; in enetc_get_stats()
2858 stats->tx_dropped = tx_dropped; in enetc_get_stats()
2864 static int enetc_set_rss(struct net_device *ndev, int en) in enetc_set_rss() argument
2867 struct enetc_hw *hw = &priv->si->hw; in enetc_set_rss()
2870 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); in enetc_set_rss()
2874 reg |= (en) ? ENETC_SIMR_RSSE : 0; in enetc_set_rss()
2880 static void enetc_enable_rxvlan(struct net_device *ndev, bool en) in enetc_enable_rxvlan() argument
2883 struct enetc_hw *hw = &priv->si->hw; in enetc_enable_rxvlan()
2886 for (i = 0; i < priv->num_rx_rings; i++) in enetc_enable_rxvlan()
2887 enetc_bdr_enable_rxvlan(hw, i, en); in enetc_enable_rxvlan()
2890 static void enetc_enable_txvlan(struct net_device *ndev, bool en) in enetc_enable_txvlan() argument
2893 struct enetc_hw *hw = &priv->si->hw; in enetc_enable_txvlan()
2896 for (i = 0; i < priv->num_tx_rings; i++) in enetc_enable_txvlan()
2897 enetc_bdr_enable_txvlan(hw, i, en); in enetc_enable_txvlan()
2902 netdev_features_t changed = ndev->features ^ features; in enetc_set_features()
2921 int err, new_offloads = priv->active_offloads; in enetc_hwtstamp_set()
2924 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) in enetc_hwtstamp_set()
2925 return -EFAULT; in enetc_hwtstamp_set()
2940 return -ERANGE; in enetc_hwtstamp_set()
2952 if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) { in enetc_hwtstamp_set()
2960 priv->active_offloads = new_offloads; in enetc_hwtstamp_set()
2962 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? in enetc_hwtstamp_set()
2963 -EFAULT : 0; in enetc_hwtstamp_set()
2973 if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) in enetc_hwtstamp_get()
2975 else if (priv->active_offloads & ENETC_F_TX_TSTAMP) in enetc_hwtstamp_get()
2980 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ? in enetc_hwtstamp_get()
2983 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? in enetc_hwtstamp_get()
2984 -EFAULT : 0; in enetc_hwtstamp_get()
2998 if (!priv->phylink) in enetc_ioctl()
2999 return -EOPNOTSUPP; in enetc_ioctl()
3001 return phylink_mii_ioctl(priv->phylink, rq, cmd); in enetc_ioctl()
3007 struct pci_dev *pdev = priv->si->pdev; in enetc_alloc_msix()
3013 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; in enetc_alloc_msix()
3021 return -EPERM; in enetc_alloc_msix()
3024 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; in enetc_alloc_msix()
3026 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_alloc_msix()
3033 err = -ENOMEM; in enetc_alloc_msix()
3037 priv->int_vector[i] = v; in enetc_alloc_msix()
3039 bdr = &v->rx_ring; in enetc_alloc_msix()
3040 bdr->index = i; in enetc_alloc_msix()
3041 bdr->ndev = priv->ndev; in enetc_alloc_msix()
3042 bdr->dev = priv->dev; in enetc_alloc_msix()
3043 bdr->bd_count = priv->rx_bd_count; in enetc_alloc_msix()
3044 bdr->buffer_offset = ENETC_RXB_PAD; in enetc_alloc_msix()
3045 priv->rx_ring[i] = bdr; in enetc_alloc_msix()
3047 err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0); in enetc_alloc_msix()
3053 err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, in enetc_alloc_msix()
3056 xdp_rxq_info_unreg(&bdr->xdp.rxq); in enetc_alloc_msix()
3062 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) { in enetc_alloc_msix()
3063 v->rx_ictt = 0x1; in enetc_alloc_msix()
3064 v->rx_dim_en = true; in enetc_alloc_msix()
3066 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work); in enetc_alloc_msix()
3067 netif_napi_add(priv->ndev, &v->napi, enetc_poll); in enetc_alloc_msix()
3068 v->count_tx_rings = v_tx_rings; in enetc_alloc_msix()
3074 idx = priv->bdr_int_num * j + i; in enetc_alloc_msix()
3075 __set_bit(idx, &v->tx_rings_map); in enetc_alloc_msix()
3076 bdr = &v->tx_ring[j]; in enetc_alloc_msix()
3077 bdr->index = idx; in enetc_alloc_msix()
3078 bdr->ndev = priv->ndev; in enetc_alloc_msix()
3079 bdr->dev = priv->dev; in enetc_alloc_msix()
3080 bdr->bd_count = priv->tx_bd_count; in enetc_alloc_msix()
3081 priv->tx_ring[idx] = bdr; in enetc_alloc_msix()
3087 err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues); in enetc_alloc_msix()
3091 err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings); in enetc_alloc_msix()
3095 priv->min_num_stack_tx_queues = num_possible_cpus(); in enetc_alloc_msix()
3096 first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus(); in enetc_alloc_msix()
3097 priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]; in enetc_alloc_msix()
3102 while (i--) { in enetc_alloc_msix()
3103 struct enetc_int_vector *v = priv->int_vector[i]; in enetc_alloc_msix()
3104 struct enetc_bdr *rx_ring = &v->rx_ring; in enetc_alloc_msix()
3106 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); in enetc_alloc_msix()
3107 xdp_rxq_info_unreg(&rx_ring->xdp.rxq); in enetc_alloc_msix()
3108 netif_napi_del(&v->napi); in enetc_alloc_msix()
3109 cancel_work_sync(&v->rx_dim.work); in enetc_alloc_msix()
3123 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_free_msix()
3124 struct enetc_int_vector *v = priv->int_vector[i]; in enetc_free_msix()
3125 struct enetc_bdr *rx_ring = &v->rx_ring; in enetc_free_msix()
3127 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); in enetc_free_msix()
3128 xdp_rxq_info_unreg(&rx_ring->xdp.rxq); in enetc_free_msix()
3129 netif_napi_del(&v->napi); in enetc_free_msix()
3130 cancel_work_sync(&v->rx_dim.work); in enetc_free_msix()
3133 for (i = 0; i < priv->num_rx_rings; i++) in enetc_free_msix()
3134 priv->rx_ring[i] = NULL; in enetc_free_msix()
3136 for (i = 0; i < priv->num_tx_rings; i++) in enetc_free_msix()
3137 priv->tx_ring[i] = NULL; in enetc_free_msix()
3139 for (i = 0; i < priv->bdr_int_num; i++) { in enetc_free_msix()
3140 kfree(priv->int_vector[i]); in enetc_free_msix()
3141 priv->int_vector[i] = NULL; in enetc_free_msix()
3145 pci_free_irq_vectors(priv->si->pdev); in enetc_free_msix()
3151 char *p = (char *)si - si->pad; in enetc_kfree_si()
3158 if (si->pdev->revision == ENETC_REV1) in enetc_detect_errata()
3159 si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP; in enetc_detect_errata()
3172 return dev_err_probe(&pdev->dev, err, "device enable failed\n"); in enetc_pci_probe()
3175 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in enetc_pci_probe()
3177 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); in enetc_pci_probe()
3183 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); in enetc_pci_probe()
3196 alloc_size += ENETC_SI_ALIGN - 1; in enetc_pci_probe()
3200 err = -ENOMEM; in enetc_pci_probe()
3205 si->pad = (char *)si - (char *)p; in enetc_pci_probe()
3208 si->pdev = pdev; in enetc_pci_probe()
3209 hw = &si->hw; in enetc_pci_probe()
3212 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); in enetc_pci_probe()
3213 if (!hw->reg) { in enetc_pci_probe()
3214 err = -ENXIO; in enetc_pci_probe()
3215 dev_err(&pdev->dev, "ioremap() failed\n"); in enetc_pci_probe()
3219 hw->port = hw->reg + ENETC_PORT_BASE; in enetc_pci_probe()
3221 hw->global = hw->reg + ENETC_GLOBAL_BASE; in enetc_pci_probe()
3242 struct enetc_hw *hw = &si->hw; in enetc_pci_remove()
3244 iounmap(hw->reg); in enetc_pci_remove()