// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_txrx_lib.h"

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}
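/* A worked example of the masking above, using hypothetical values: if
 * next_to_use was 13, prev_ntu = 13 & ~0x7 = 8. A new val of 20 masks to
 * 16, so prev_ntu != val and QRX_TAIL is written with 16. A new val of 14
 * would mask back down to 8 and the MMIO write would be skipped entirely.
 */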
/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns the appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be
 * used by skb_set_hash, based on the PTYPE as parsed by the HW Rx pipeline
 * and carried in the Rx descriptor.
 */
static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
		return PKT_HASH_TYPE_L2;

	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u16 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}
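/* Illustrative mapping: a ptype that decodes to payload layer PAY4 (for
 * example, a plain IPv4/TCP frame) yields PKT_HASH_TYPE_L4, so the RSS
 * hash recorded by skb_set_hash() above is 4-tuple quality and usable by
 * the stack for flow steering (e.g. RPS/RFS).
 */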
/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

	if (ipv6 && (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S)))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}
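/* Illustrative case for the csum_level bump above: for a VXLAN- or
 * GRE-encapsulated TCP packet the decoded tunnel_type is at least
 * ICE_RX_PTYPE_TUNNEL_IP_GRENAT, so csum_level = 1 tells the stack that
 * CHECKSUM_UNNECESSARY applies to the inner L4 checksum, one header level
 * below the outer encapsulation.
 */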
/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u16 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);

	if (rx_ring->ptp_rx)
		ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (with or without a VLAN tag).
 */
void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
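/* Example of the VLAN check above, with a hypothetical tag: vlan_tag
 * 0x2005 carries VID 5 in its low 12 bits (VLAN_VID_MASK), so the tag is
 * pushed into the skb before napi_gro_receive(). A priority-only tag,
 * whose VID bits are all zero, is intentionally not put on the skb here.
 */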
/**
 * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
 * @data: packet data pointer
 * @size: packet data size
 * @xdp_ring: XDP ring for transmission
 */
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	dma_addr_t dma;

	if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return ICE_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return ICE_XDP_CONSUMED;

	tx_buf = &xdp_ring->tx_buf[i];
	tx_buf->bytecount = size;
	tx_buf->gso_segs = 1;
	tx_buf->raw_buf = data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, size);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc = ICE_TX_DESC(xdp_ring, i);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
						      size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_buf->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return ICE_XDP_TX;
}
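/* Ring-wrap example with hypothetical sizes: with count == 512, an i of
 * 511 wraps to 0 after the increment above, so next_to_use always stays
 * in [0, count). The function hands back ICE_XDP_TX on success and
 * ICE_XDP_CONSUMED when the ring is full or DMA mapping fails.
 */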
/**
 * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
 * @xdp: XDP buffer
 * @xdp_ring: XDP Tx ring
 *
 * Returns ICE_XDP_CONSUMED if the conversion or transmit fails,
 * otherwise ICE_XDP_TX.
 */
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return ICE_XDP_CONSUMED;

	return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
}

/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
{
	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & ICE_XDP_TX) {
		struct ice_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->q_index];

		ice_xdp_ring_update_tail(xdp_ring);
	}
}
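/* A sketch of how these helpers fit together in an Rx cleanup loop; the
 * loop body below is illustrative pseudocode, not code from this file:
 *
 *	unsigned int xdp_res = 0;
 *
 *	while (frames_left) {
 *		// run the XDP program, then act on its verdict:
 *		xdp_res |= ice_xmit_xdp_buff(&xdp, xdp_ring);	// XDP_TX case
 *		// or build an skb and hand it to the stack:
 *		ice_process_skb_fields(rx_ring, rx_desc, skb, ptype);
 *		ice_receive_skb(rx_ring, skb, vlan_tag);
 *	}
 *	ice_finalize_xdp_rx(rx_ring, xdp_res);
 *
 * Deferring ice_finalize_xdp_rx() to the end of the batch amortizes the
 * Tx tail MMIO write and the redirect map flush across the napi budget.
 */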