Lines matching refs: rx_swbd. Cross-reference listing of every use of the rx_swbd software buffer descriptors in the enetc Ethernet driver. The leading number on each match is the source line, the trailing "in ...()" names the enclosing function, and "argument"/"local" mark declarations.
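For orientation, a minimal sketch of the software buffer descriptor these matches revolve around. The five fields are exactly the ones referenced below; the types and comments are assumptions (the authoritative definition lives in the driver's headers):

	struct enetc_rx_swbd {
		dma_addr_t dma;			/* DMA address of the mapped page */
		struct page *page;		/* backing page; NULL marks a consumed slot */
		enum dma_data_direction dir;	/* FROM_DEVICE, or BIDIRECTIONAL when XDP is on */
		u16 page_offset;		/* which half of the page is in flight */
		u16 len;			/* received length, set on the XDP paths */
	};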

741 	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];  in enetc_reuse_page()
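A hedged sketch of enetc_reuse_page() built around line 741: the recycled descriptor is copied into the slot at next_to_alloc, which trails the consumer index and is therefore known to be free. The index-increment helper is hypothetical:

	static void enetc_reuse_page(struct enetc_bdr *rx_ring,
				     struct enetc_rx_swbd *old)
	{
		struct enetc_rx_swbd *new;

		new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];

		/* Advance the producer-side index (hypothetical helper). */
		enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);

		/* Move the whole descriptor: page, dma, page_offset, dir. */
		*new = *old;
	}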
779 struct enetc_rx_swbd rx_swbd = { in enetc_recycle_xdp_tx_buff() local
791 enetc_reuse_page(rx_ring, &rx_swbd); in enetc_recycle_xdp_tx_buff()
794 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, in enetc_recycle_xdp_tx_buff()
795 rx_swbd.page_offset, in enetc_recycle_xdp_tx_buff()
797 rx_swbd.dir); in enetc_recycle_xdp_tx_buff()
806 dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE, in enetc_recycle_xdp_tx_buff()
807 rx_swbd.dir); in enetc_recycle_xdp_tx_buff()
808 __free_page(rx_swbd.page); in enetc_recycle_xdp_tx_buff()
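A reconstruction of the XDP_TX recycle path from lines 779-808. In the real driver the RX ring is derived from the XDP_TX ring and a ring-occupancy test guards the reuse; both are simplified here, and enetc_rx_ring_has_room() plus the sync length are hypothetical:

	static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *rx_ring,
					      struct enetc_tx_swbd *tx_swbd)
	{
		/* Rewrap the completed TX buffer as an RX descriptor (line 779). */
		struct enetc_rx_swbd rx_swbd = {
			.dma = tx_swbd->dma,
			.page = tx_swbd->page,
			.page_offset = tx_swbd->page_offset,
			.dir = tx_swbd->dir,
		};

		if (likely(enetc_rx_ring_has_room(rx_ring))) {	/* hypothetical */
			enetc_reuse_page(rx_ring, &rx_swbd);

			/* The device will DMA into this half again; sync it back. */
			dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma,
							 rx_swbd.page_offset,
							 ENETC_RXB_DMA_SIZE_XDP,
							 rx_swbd.dir);
		} else {
			/* No free slot: undo the mapping and release the page. */
			dma_unmap_page(rx_ring->dev, rx_swbd.dma, PAGE_SIZE,
				       rx_swbd.dir);
			__free_page(rx_swbd.page);
		}
	}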
913 struct enetc_rx_swbd *rx_swbd) in enetc_new_page() argument
924 rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; in enetc_new_page()
926 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir); in enetc_new_page()
933 rx_swbd->dma = addr; in enetc_new_page()
934 rx_swbd->page = page; in enetc_new_page()
935 rx_swbd->page_offset = rx_ring->buffer_offset; in enetc_new_page()
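Lines 913-935 give nearly the whole of enetc_new_page(); the sketch below fills the gaps (allocation and error handling) with assumptions. The point of rx_swbd->dir is visible here: with an XDP program attached, XDP_TX may transmit straight out of this page, so the mapping must be bidirectional:

	static bool enetc_new_page(struct enetc_bdr *rx_ring,
				   struct enetc_rx_swbd *rx_swbd)
	{
		bool xdp = !!rx_ring->xdp.prog;	/* assumed XDP-enabled test */
		struct page *page;
		dma_addr_t addr;

		page = dev_alloc_page();
		if (unlikely(!page))
			return false;

		rx_swbd->dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;

		addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, rx_swbd->dir);
		if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
			__free_page(page);
			return false;
		}

		rx_swbd->dma = addr;
		rx_swbd->page = page;
		rx_swbd->page_offset = rx_ring->buffer_offset;

		return true;
	}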
942 struct enetc_rx_swbd *rx_swbd; in enetc_refill_rx_ring() local
947 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
952 if (unlikely(!rx_swbd->page)) { in enetc_refill_rx_ring()
953 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { in enetc_refill_rx_ring()
960 rxbd->w.addr = cpu_to_le64(rx_swbd->dma + in enetc_refill_rx_ring()
961 rx_swbd->page_offset); in enetc_refill_rx_ring()
966 rx_swbd = &rx_ring->rx_swbd[i]; in enetc_refill_rx_ring()
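A sketch of the refill loop around lines 942-966, assuming conventional next_to_use/bd_count ring bookkeeping and an enetc_rxbd() accessor; only slots whose page was consumed (left NULL) get a fresh allocation, and the DMA address plus half-page offset is published to the hardware BD:

	static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, int buff_cnt)
	{
		int i = rx_ring->next_to_use;		/* assumed producer index */
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
		union enetc_rx_bd *rxbd = enetc_rxbd(rx_ring, i); /* assumed accessor */

		while (buff_cnt--) {
			if (unlikely(!rx_swbd->page)) {
				if (unlikely(!enetc_new_page(rx_ring, rx_swbd)))
					break;	/* OOM: retry on the next poll */
			}

			/* Tell the hardware where this buffer lives. */
			rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
						   rx_swbd->page_offset);

			/* Assumed wraparound bookkeeping. */
			if (unlikely(++i == rx_ring->bd_count))
				i = 0;
			rxbd = enetc_rxbd(rx_ring, i);
			rx_swbd = &rx_ring->rx_swbd[i];
		}

		rx_ring->next_to_use = i;
		return i;
	}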
1057 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_get_rx_buff() local
1059 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, in enetc_get_rx_buff()
1060 rx_swbd->page_offset, in enetc_get_rx_buff()
1061 size, rx_swbd->dir); in enetc_get_rx_buff()
1062 return rx_swbd; in enetc_get_rx_buff()
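Lines 1057-1062 are effectively the entire consumer-side lookup; the only detail worth calling out is that the sync is partial, covering just the received bytes at this half-page's offset:

	static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
						       int i, u16 size)
	{
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

		/* Sync only 'size' bytes back to the CPU before reading them. */
		dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
					      rx_swbd->page_offset,
					      size, rx_swbd->dir);

		return rx_swbd;
	}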
1067 struct enetc_rx_swbd *rx_swbd) in enetc_put_rx_buff() argument
1071 enetc_reuse_page(rx_ring, rx_swbd); in enetc_put_rx_buff()
1073 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, in enetc_put_rx_buff()
1074 rx_swbd->page_offset, in enetc_put_rx_buff()
1075 buffer_size, rx_swbd->dir); in enetc_put_rx_buff()
1077 rx_swbd->page = NULL; in enetc_put_rx_buff()
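The matching producer-side release, per lines 1067-1077: requeue the buffer via enetc_reuse_page(), hand it back to the device with a sync, and NULL the page pointer so the slot reads as consumed. The buffer_size expression is an assumption:

	static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
				      struct enetc_rx_swbd *rx_swbd)
	{
		/* Assumed: usable buffer = truesize minus the headroom offset. */
		size_t buffer_size = ENETC_RXB_TRUESIZE - rx_ring->buffer_offset;

		enetc_reuse_page(rx_ring, rx_swbd);

		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
						 rx_swbd->page_offset,
						 buffer_size, rx_swbd->dir);

		rx_swbd->page = NULL;
	}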
1082 struct enetc_rx_swbd *rx_swbd) in enetc_flip_rx_buff() argument
1084 if (likely(enetc_page_reusable(rx_swbd->page))) { in enetc_flip_rx_buff()
1085 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; in enetc_flip_rx_buff()
1086 page_ref_inc(rx_swbd->page); in enetc_flip_rx_buff()
1088 enetc_put_rx_buff(rx_ring, rx_swbd); in enetc_flip_rx_buff()
1090 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_flip_rx_buff()
1091 rx_swbd->dir); in enetc_flip_rx_buff()
1092 rx_swbd->page = NULL; in enetc_flip_rx_buff()
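Lines 1082-1092 show the half-page flip in full. enetc_page_reusable() appears verbatim in the listing; its internals (presumably a refcount-of-one plus pfmemalloc test) do not, so treat that as an assumption:

	static void enetc_flip_rx_buff(struct enetc_bdr *rx_ring,
				       struct enetc_rx_swbd *rx_swbd)
	{
		if (likely(enetc_page_reusable(rx_swbd->page))) {
			/* XOR flips between the two halves of the page. */
			rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
			/* The half we just handed up the stack keeps a ref. */
			page_ref_inc(rx_swbd->page);

			enetc_put_rx_buff(rx_ring, rx_swbd);
		} else {
			/* Someone else still holds the page: stop tracking it. */
			dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
				       rx_swbd->dir);

			rx_swbd->page = NULL;
		}
	}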
1099 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_map_rx_buff_to_skb() local
1103 ba = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_skb()
1113 enetc_flip_rx_buff(rx_ring, rx_swbd); in enetc_map_rx_buff_to_skb()
1121 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_add_rx_buff_to_skb() local
1123 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, in enetc_add_rx_buff_to_skb()
1124 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); in enetc_add_rx_buff_to_skb()
1126 enetc_flip_rx_buff(rx_ring, rx_swbd); in enetc_add_rx_buff_to_skb()
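Of the two skb-building callers (lines 1099-1126), the fragment path is the more self-contained; a sketch, assuming the usual flow: the received bytes are attached as a page fragment at truesize ENETC_RXB_TRUESIZE (half a page), then the buffer is flipped for reuse. The first-buffer variant instead reads from page_address(page) + page_offset to build the skb head:

	static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
					     u16 size, struct sk_buff *skb)
	{
		struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
				rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);

		enetc_flip_rx_buff(rx_ring, rx_swbd);
	}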
1136 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
1143 enetc_put_rx_buff(rx_ring, &rx_ring->rx_swbd[*i]); in enetc_check_bd_errors_and_consume()
1422 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_map_rx_buff_to_xdp() local
1423 void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; in enetc_map_rx_buff_to_xdp()
1426 rx_swbd->len = size; in enetc_map_rx_buff_to_xdp()
1436 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); in enetc_add_rx_buff_to_xdp() local
1440 rx_swbd->len = size; in enetc_add_rx_buff_to_xdp()
1450 if (page_is_pfmemalloc(rx_swbd->page)) in enetc_add_rx_buff_to_xdp()
1454 skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset, in enetc_add_rx_buff_to_xdp()
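The XDP analogues (lines 1422-1454) mirror the skb pair: the head buffer's hard_start comes from page_address(page) + page_offset, and later buffers become frags of the xdp_buff. A sketch of the frag case, with the shared-info bookkeeping assumed:

	static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
					     u16 size, struct xdp_buff *xdp_buff)
	{
		struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
		struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
		skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];

		/* Remember the length for a later XDP_TX handoff. */
		rx_swbd->len = size;

		/* pfmemalloc pages must be flagged, never silently reused. */
		if (page_is_pfmemalloc(rx_swbd->page))
			xdp_buff_set_frag_pfmemalloc(xdp_buff);

		skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset,
					size);
		shinfo->nr_frags++;
	}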
1499 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; in enetc_rx_swbd_to_xdp_tx_swbd() local
1503 tx_swbd->dma = rx_swbd->dma; in enetc_rx_swbd_to_xdp_tx_swbd()
1504 tx_swbd->dir = rx_swbd->dir; in enetc_rx_swbd_to_xdp_tx_swbd()
1505 tx_swbd->page = rx_swbd->page; in enetc_rx_swbd_to_xdp_tx_swbd()
1506 tx_swbd->page_offset = rx_swbd->page_offset; in enetc_rx_swbd_to_xdp_tx_swbd()
1507 tx_swbd->len = rx_swbd->len; in enetc_rx_swbd_to_xdp_tx_swbd()
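Lines 1499-1507 show why rx_swbd->len exists: on XDP_TX, ownership of the already-mapped page moves to a TX software BD by a plain field copy, with no remapping and no data copy. The recycle marker on tx_swbd is an assumption:

	static void enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *tx_swbd,
						 struct enetc_rx_swbd *rx_swbd)
	{
		tx_swbd->dma = rx_swbd->dma;
		tx_swbd->dir = rx_swbd->dir;
		tx_swbd->page = rx_swbd->page;
		tx_swbd->page_offset = rx_swbd->page_offset;
		tx_swbd->len = rx_swbd->len;
		tx_swbd->is_xdp_tx = true;	/* assumed: route TX completion
						 * back to enetc_recycle_xdp_tx_buff() */
	}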
1524 &rx_ring->rx_swbd[rx_ring_first]); in enetc_xdp_drop()
1627 rx_ring->rx_swbd[orig_i].page = NULL; in enetc_clean_rx_ring_xdp()
1640 &rx_ring->rx_swbd[orig_i]); in enetc_clean_rx_ring_xdp()
1880 res->rx_swbd = vcalloc(bd_count, sizeof(struct enetc_rx_swbd)); in enetc_alloc_rx_resource()
1881 if (!res->rx_swbd) in enetc_alloc_rx_resource()
1886 vfree(res->rx_swbd); in enetc_alloc_rx_resource()
1896 vfree(res->rx_swbd); in enetc_free_rx_resource()
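The lifetime bracket, per lines 1880-1896: the per-ring rx_swbd array is vcalloc'd (zeroed, so every page pointer starts NULL and the refill loop populates the ring lazily) and vfree'd on both the error path and the normal free path. The BD-ring allocation it is paired with is assumed:

	static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res,
					   size_t bd_count)
	{
		int err;

		res->rx_swbd = vcalloc(bd_count, sizeof(struct enetc_rx_swbd));
		if (!res->rx_swbd)
			return -ENOMEM;

		/* Assumed: the hardware BD ring is allocated next. */
		err = enetc_dma_alloc_bdr(res, sizeof(union enetc_rx_bd));
		if (err) {
			vfree(res->rx_swbd);	/* unwind (line 1886) */
			return err;
		}

		return 0;
	}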
1955 rx_ring->rx_swbd = res ? res->rx_swbd : NULL; in enetc_assign_rx_resource()
2006 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; in enetc_free_rx_ring() local
2008 if (!rx_swbd->page) in enetc_free_rx_ring()
2011 dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE, in enetc_free_rx_ring()
2012 rx_swbd->dir); in enetc_free_rx_ring()
2013 __free_page(rx_swbd->page); in enetc_free_rx_ring()
2014 rx_swbd->page = NULL; in enetc_free_rx_ring()
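Finally, the teardown loop of lines 2006-2014 undoes exactly what enetc_new_page() did for every slot that still owns a page; a sketch, with the loop bounds assumed:

	static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
	{
		int i;

		for (i = 0; i < rx_ring->bd_count; i++) {
			struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

			if (!rx_swbd->page)
				continue;

			/* Unmap with the direction recorded at map time. */
			dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
				       rx_swbd->dir);
			__free_page(rx_swbd->page);
			rx_swbd->page = NULL;
		}
	}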