Lines matching refs:rx
101 if (priv->rx) { in gve_get_stats()
105 u64_stats_fetch_begin(&priv->rx[ring].statss); in gve_get_stats()
106 packets = priv->rx[ring].rpackets; in gve_get_stats()
107 bytes = priv->rx[ring].rbytes; in gve_get_stats()
108 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, in gve_get_stats()
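The four matches above are the reader side of the standard u64_stats seqcount loop; the opening do sits outside the match window. A minimal sketch of the whole loop, assuming only the usual pattern from include/linux/u64_stats_sync.h plus the field names visible above:

    /* Reader loop: retry until no writer updated the counters meanwhile. */
    unsigned int start;
    u64 packets, bytes;

    do {
            start = u64_stats_fetch_begin(&priv->rx[ring].statss);
            packets = priv->rx[ring].rpackets;
            bytes = priv->rx[ring].rbytes;
    } while (u64_stats_fetch_retry(&priv->rx[ring].statss, start));

On 32-bit builds the seqcount is what keeps these 64-bit counters tear-free; on 64-bit it compiles down to plain loads.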
264 if (block->rx) { in gve_napi_poll()
284 if (block->rx) in gve_napi_poll()
285 reschedule |= gve_rx_work_pending(block->rx); in gve_napi_poll()
307 if (block->rx) { in gve_napi_poll_dqo()
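gve_napi_poll() and gve_napi_poll_dqo() gate all RX work on block->rx being non-NULL (a notify block may carry only a TX queue), and line 285 rechecks for pending work after the napi completes. A hedged sketch of that RX-side shape; gve_rx_poll() is assumed here to return true while RX work remains, and the interrupt re-arm details are omitted:

    static int gve_napi_poll_rx_sketch(struct napi_struct *napi, int budget)
    {
            struct gve_notify_block *block =
                    container_of(napi, struct gve_notify_block, napi);
            bool reschedule = false;

            if (block->rx)
                    reschedule |= gve_rx_poll(block->rx, budget);
            if (reschedule)
                    return budget;          /* more work: stay scheduled */

            napi_complete(napi);
            /* ... re-arm the block's interrupt doorbell here ... */

            /* Close the race between napi_complete() and the re-arm. */
            if (block->rx && gve_rx_work_pending(block->rx))
                    napi_schedule(napi);
            return 0;
    }

The second gve_rx_work_pending() check (the match at line 285) exists precisely because a descriptor can land between napi_complete() and the interrupt being re-enabled.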
756 gve_rx_write_doorbell(priv, &priv->rx[i]); in gve_create_rings()
760 gve_rx_post_buffers_dqo(&priv->rx[i]); in gve_create_rings()
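Lines 756 and 760 are the two ring-format arms of RX bring-up in gve_create_rings(): GQI rings are armed with a doorbell write, DQO rings by posting buffers. A sketch of that loop, with the gve_is_gqi() format check assumed (it sits outside the match window):

    for (i = 0; i < priv->rx_cfg.num_queues; i++) {
            if (gve_is_gqi(priv))
                    gve_rx_write_doorbell(priv, &priv->rx[i]);
            else
                    gve_rx_post_buffers_dqo(&priv->rx[i]);
    }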
802 u64_stats_init(&priv->rx[i].statss); in add_napi_init_sync_stats()
803 priv->rx[i].ntfy_id = ntfy_idx; in add_napi_init_sync_stats()
852 priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx), in gve_alloc_rings()
854 if (!priv->rx) { in gve_alloc_rings()
874 kvfree(priv->rx); in gve_alloc_rings()
875 priv->rx = NULL; in gve_alloc_rings()
966 if (priv->rx) { in gve_free_rings()
972 kvfree(priv->rx); in gve_free_rings()
973 priv->rx = NULL; in gve_free_rings()
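Lines 802-803, 852-875 and 966-973 together form the lifecycle of the priv->rx array: kvcalloc() sizes it from the configured maximum, each ring's stats seqcount and notify-block id are set before the ring sees traffic, and the free path NULLs the pointer so every `if (priv->rx)` guard above stays safe. A condensed sketch, with error unwinding and the TX mirror omitted; gve_rx_idx_to_ntfy() is assumed as the source of the ntfy_idx seen at line 803:

    priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx),
                        GFP_KERNEL);
    if (!priv->rx)
            return -ENOMEM;

    for (i = 0; i < priv->rx_cfg.num_queues; i++) {
            u64_stats_init(&priv->rx[i].statss);
            priv->rx[i].ntfy_id = gve_rx_idx_to_ntfy(priv, i); /* helper assumed */
    }

    /* ... and on teardown: */
    kvfree(priv->rx);
    priv->rx = NULL;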
1192 struct gve_rx_ring *rx; in gve_reg_xdp_info() local
1201 rx = &priv->rx[i]; in gve_reg_xdp_info()
1202 napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_reg_xdp_info()
1204 err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i, in gve_reg_xdp_info()
1208 err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, in gve_reg_xdp_info()
1212 rx->xsk_pool = xsk_get_pool_from_qid(dev, i); in gve_reg_xdp_info()
1213 if (rx->xsk_pool) { in gve_reg_xdp_info()
1214 err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i, in gve_reg_xdp_info()
1218 err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq, in gve_reg_xdp_info()
1222 xsk_pool_set_rxq_info(rx->xsk_pool, in gve_reg_xdp_info()
1223 &rx->xsk_rxq); in gve_reg_xdp_info()
1235 rx = &priv->rx[j]; in gve_reg_xdp_info()
1236 if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) in gve_reg_xdp_info()
1237 xdp_rxq_info_unreg(&rx->xdp_rxq); in gve_reg_xdp_info()
1238 if (xdp_rxq_info_is_reg(&rx->xsk_rxq)) in gve_reg_xdp_info()
1239 xdp_rxq_info_unreg(&rx->xsk_rxq); in gve_reg_xdp_info()
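gve_reg_xdp_info() registers one xdp_rxq per RX queue, plus a second xsk_rxq when an AF_XDP pool is already attached to that qid, and on failure unwinds every queue registered so far (lines 1235-1239). A sketch of that register-then-unwind shape; the memory-model arguments and the label name are assumptions beyond what the matches show:

    for (i = 0; i < priv->rx_cfg.num_queues; i++) {
            struct gve_rx_ring *rx = &priv->rx[i];
            struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

            err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i, napi->napi_id);
            if (err)
                    goto err_unreg;
            err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
                                             MEM_TYPE_PAGE_SHARED, NULL);
            if (err)
                    goto err_unreg;

            rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
            if (rx->xsk_pool) {
                    /* Dedicated rxq_info for the zero-copy path. */
                    err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
                                           napi->napi_id);
                    if (err)
                            goto err_unreg;
                    err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
                                                     MEM_TYPE_XSK_BUFF_POOL,
                                                     NULL);
                    if (err)
                            goto err_unreg;
                    xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xsk_rxq);
            }
    }
    return 0;

    err_unreg:
    for (j = i; j >= 0; j--) {
            struct gve_rx_ring *rx = &priv->rx[j];

            if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
                    xdp_rxq_info_unreg(&rx->xdp_rxq);
            if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
                    xdp_rxq_info_unreg(&rx->xsk_rxq);
    }
    return err;

The xdp_rxq_info_is_reg() checks matter because queue i may be only half-registered when the loop bails out.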
1252 struct gve_rx_ring *rx = &priv->rx[i]; in gve_unreg_xdp_info() local
1254 xdp_rxq_info_unreg(&rx->xdp_rxq); in gve_unreg_xdp_info()
1255 if (rx->xsk_pool) { in gve_unreg_xdp_info()
1256 xdp_rxq_info_unreg(&rx->xsk_rxq); in gve_unreg_xdp_info()
1257 rx->xsk_pool = NULL; in gve_unreg_xdp_info()
1273 nc = &priv->rx[i].page_cache; in gve_drain_page_cache()
1512 struct gve_rx_ring *rx; in gve_xsk_pool_enable() local
1535 rx = &priv->rx[qid]; in gve_xsk_pool_enable()
1536 napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_xsk_pool_enable()
1537 err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id); in gve_xsk_pool_enable()
1541 err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq, in gve_xsk_pool_enable()
1546 xsk_pool_set_rxq_info(pool, &rx->xsk_rxq); in gve_xsk_pool_enable()
1547 rx->xsk_pool = pool; in gve_xsk_pool_enable()
1554 if (xdp_rxq_info_is_reg(&rx->xsk_rxq)) in gve_xsk_pool_enable()
1555 xdp_rxq_info_unreg(&rx->xsk_rxq); in gve_xsk_pool_enable()
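gve_xsk_pool_enable() registers the queue's xsk_rxq against the napi_id, attaches the XSK_BUFF_POOL memory model, and only then publishes the pool through rx->xsk_pool (line 1547), so readers never see a pool with a half-registered rxq. A sketch of that ordering; the DMA mapping of the pool that precedes this in practice is omitted, and the error label is assumed:

    rx = &priv->rx[qid];
    napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

    err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
    if (err)
            goto err;
    err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
                                     MEM_TYPE_XSK_BUFF_POOL, NULL);
    if (err)
            goto err;

    xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
    rx->xsk_pool = pool;            /* publish last */
    return 0;

    err:
    if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
            xdp_rxq_info_unreg(&rx->xsk_rxq);
    return err;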
1583 priv->rx[qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1584 xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); in gve_xsk_pool_disable()
1589 napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi; in gve_xsk_pool_disable()
1595 priv->rx[qid].xsk_pool = NULL; in gve_xsk_pool_disable()
1596 xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq); in gve_xsk_pool_disable()
1601 if (gve_rx_work_pending(&priv->rx[qid])) in gve_xsk_pool_disable()
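gve_xsk_pool_disable() appears twice in the matches because there are two teardown paths: a fast path when the device is down (1583-1584) and a quiesced path that brackets the same two steps with napi_disable()/napi_enable() when traffic may be in flight (1589-1601). A sketch of both; the netif_running() gate on the fast path is an assumption:

    if (!netif_running(dev)) {
            priv->rx[qid].xsk_pool = NULL;
            xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
            return 0;
    }

    napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
    napi_disable(napi_rx);          /* let in-flight RX processing finish */

    priv->rx[qid].xsk_pool = NULL;
    xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);

    napi_enable(napi_rx);
    if (gve_rx_work_pending(&priv->rx[qid]))
            napi_schedule(napi_rx);  /* pick up work that raced in */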
1980 if (priv->rx) { in gve_handle_report_stats()
1984 .value = cpu_to_be64(priv->rx[idx].desc.seqno), in gve_handle_report_stats()
1989 .value = cpu_to_be64(priv->rx[0].fill_cnt), in gve_handle_report_stats()
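The last matches build RX entries for the device stats report: a per-queue entry carrying the next expected descriptor sequence number, plus a fill-count entry. A sketch of how such an entry is assembled, assuming the gve stats-report layout (a struct stats with big-endian stat_name/queue_id/value fields) and the RX_NEXT_EXPECTED_SEQUENCE / RX_BUFFERS_POSTED enums from gve_adminq.h; those names are assumptions beyond what the matches show:

    if (priv->rx) {
            for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
                    stats[stats_idx++] = (struct stats) {
                            .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
                            .queue_id = cpu_to_be32(idx),
                            .value = cpu_to_be64(priv->rx[idx].desc.seqno),
                    };
                    stats[stats_idx++] = (struct stats) {
                            .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
                            .queue_id = cpu_to_be32(idx),
                            .value = cpu_to_be64(priv->rx[0].fill_cnt), /* ring 0, as at line 1989 */
                    };
            }
    }

The cpu_to_be64() conversions seen at lines 1984 and 1989 are there because the report buffer is shared with the device, which expects big-endian fields.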