Lines matching "+full:0 +full:xa" in net/core/xdp.c (Linux kernel)

24 #define REG_STATE_NEW		0x0
 25 #define REG_STATE_REGISTERED	0x1
 26 #define REG_STATE_UNREGISTERED	0x2
 27 #define REG_STATE_UNUSED		0x3
31 #define MEM_ID_MAX 0xFFFE
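
These four REG_STATE_* values track the registration lifecycle of an
xdp_rxq_info (the reg_state field in include/net/xdp.h), and MEM_ID_MAX caps
the IDA that hands out mem.id values. A minimal sketch of how the core code
gates on the state before attaching a memory model (hedged reconstruction;
see xdp_rxq_info_reg_mem_model() in the file):

        if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
                WARN(1, "Missing register, driver bug");
                return -EFAULT;
        }
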
53 const struct xdp_mem_allocator *xa = ptr; in xdp_mem_id_cmp() local
56 return xa->mem.id != mem_id; in xdp_mem_id_cmp()
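
xdp_mem_id_cmp() is the obj_cmpfn of the mem_id_ht rhashtable, which is keyed
by mem.id. A sketch of how it plugs into the table parameters (field values
here are illustrative; the file defines the authoritative mem_id_rht_params):

        static const struct rhashtable_params mem_id_rht_params = {
                .nelem_hint  = 64,      /* illustrative sizing */
                .head_offset = offsetof(struct xdp_mem_allocator, node),
                .key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
                .key_len     = sizeof_field(struct xdp_mem_allocator, mem.id),
                .max_size    = MEM_ID_MAX,
                .obj_cmpfn   = xdp_mem_id_cmp,  /* the match above */
        };
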
73 struct xdp_mem_allocator *xa; in __xdp_mem_allocator_rcu_free() local
75 xa = container_of(rcu, struct xdp_mem_allocator, rcu); in __xdp_mem_allocator_rcu_free()
78 ida_simple_remove(&mem_id_pool, xa->mem.id); in __xdp_mem_allocator_rcu_free()
80 kfree(xa); in __xdp_mem_allocator_rcu_free()
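
The three fragments above reassemble into the RCU callback: by the time it
runs, a grace period has elapsed, so no lockless reader can still hold the
allocator, and both the id and the object can be released. A commented
reconstruction:

        static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
        {
                struct xdp_mem_allocator *xa =
                        container_of(rcu, struct xdp_mem_allocator, rcu);

                ida_simple_remove(&mem_id_pool, xa->mem.id);    /* recycle the id */
                kfree(xa);
        }
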
83 static void mem_xa_remove(struct xdp_mem_allocator *xa) in mem_xa_remove() argument
85 trace_mem_disconnect(xa); in mem_xa_remove()
87 if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params)) in mem_xa_remove()
88 call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); in mem_xa_remove()
93 struct xdp_mem_allocator *xa; in mem_allocator_disconnect() local
102 while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) { in mem_allocator_disconnect()
103 if (xa->allocator == allocator) in mem_allocator_disconnect()
104 mem_xa_remove(xa); in mem_allocator_disconnect()
109 } while (xa == ERR_PTR(-EAGAIN)); in mem_allocator_disconnect()
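
Together these matches form the canonical resize-safe rhashtable walk:
rhashtable_walk_next() returns ERR_PTR(-EAGAIN) when the table was resized
mid-walk, and the whole pass simply restarts. A sketch of the surrounding
loop (the real function also serializes against registration with a mutex):

        struct rhashtable_iter iter;

        rhashtable_walk_enter(mem_id_ht, &iter);
        do {
                rhashtable_walk_start(&iter);

                while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
                        if (xa->allocator == allocator)
                                mem_xa_remove(xa);
                }

                rhashtable_walk_stop(&iter);
        } while (xa == ERR_PTR(-EAGAIN));       /* resized: walk again */
        rhashtable_walk_exit(&iter);
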
117 struct xdp_mem_allocator *xa; in xdp_unreg_mem_model() local
122 mem->id = 0; in xdp_unreg_mem_model()
123 mem->type = 0; in xdp_unreg_mem_model()
125 if (id == 0) in xdp_unreg_mem_model()
129 xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params); in xdp_unreg_mem_model()
130 page_pool_destroy(xa->page_pool); in xdp_unreg_mem_model()
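
Read together, the xdp_unreg_mem_model() matches give the teardown order:
reset the public fields first, bail out if the model was never registered
(id 0 is reserved as "unset"), then drop the page_pool reference taken at
registration. A sketch of that flow for the page_pool case (the file wraps
the lookup in rcu_read_lock()):

        int id = mem->id;

        mem->id = 0;            /* reset mem info to defaults */
        mem->type = 0;
        if (id == 0)            /* never registered: nothing to undo */
                return;

        xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
        page_pool_destroy(xa->page_pool);
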
161 memset(xdp_rxq, 0, sizeof(*xdp_rxq)); in xdp_rxq_info_init()
164 /* Returns 0 on success, negative on failure */
192 return 0; in __xdp_rxq_info_reg()
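
On the driver side, the 0-on-success contract of __xdp_rxq_info_reg() is
typically consumed through the xdp_rxq_info_reg() wrapper, roughly as below
(a sketch; rxq and its fields are hypothetical driver state, and the napi_id
argument assumes a kernel where the wrapper takes four parameters):

        err = xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, rxq->queue_index, napi_id);
        if (err)
                return err;     /* negative errno, per the comment above */
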
214 return 0; in __mem_id_init_hash_table()
221 if (ret < 0) { in __mem_id_init_hash_table()
229 return 0; in __mem_id_init_hash_table()
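
__mem_id_init_hash_table() lazily allocates the global mem_id_ht on the first
registration; the matched ret < 0 branch is the unwind path that frees the
half-built table. A sketch of that shape (reconstruction):

        rht = kzalloc(sizeof(*rht), GFP_KERNEL);
        if (!rht)
                return -ENOMEM;

        ret = rhashtable_init(rht, &mem_id_rht_params);
        if (ret < 0) {
                kfree(rht);     /* init failed: drop the allocation */
                return ret;
        }
        mem_id_ht = rht;
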
244 if (id < 0) { in __mem_id_cyclic_get()
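
__mem_id_cyclic_get() hands out mem.id values cyclically from an IDA; the
matched id < 0 branch is where a full pool wraps the cursor back to the
minimum and retries once before giving up. Roughly (reconstruction;
MEM_ID_MIN and the retry counter are defined in the file):

        again:
        id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
        if (id < 0) {
                if (id == -ENOSPC && retries--) {
                        mem_id_next = MEM_ID_MIN;       /* wrap the cursor */
                        goto again;
                }
                return id;      /* propagate the errno */
        }
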
295 if (ret < 0) in __xdp_reg_mem_model()
305 if (id < 0) { in __xdp_reg_mem_model()
317 mem->id = 0; in __xdp_reg_mem_model()
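
The three __xdp_reg_mem_model() matches are its unwind chain: hash-table init
failure, id-space exhaustion, and a failed rhashtable insert, the last of
which returns the id and resets mem->id to the unregistered value. A sketch
of that final branch (reconstruction):

        ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
        if (IS_ERR(ptr)) {
                ida_simple_remove(&mem_id_pool, mem->id);
                mem->id = 0;    /* back to the unregistered state */
                errno = PTR_ERR(ptr);
                goto err;
        }
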
342 return 0; in xdp_reg_mem_model()
362 return 0; in xdp_rxq_info_reg_mem_model()
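
The usual caller pairing for these two 0-returning registration paths is a
driver attaching a page_pool to an already-registered rxq (a sketch; pool and
the error label are hypothetical driver state):

        err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, pool);
        if (err)
                goto err_unreg_rxq;     /* undo xdp_rxq_info_reg() */
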
383 /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) in __xdp_return()
415 for (i = 0; i < sinfo->nr_frags; i++) { in xdp_return_frame()
434 for (i = 0; i < sinfo->nr_frags; i++) { in xdp_return_frame_rx_napi()
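
Both return paths run the same loop: with XDP multi-buffer, fragments live in
an skb_shared_info at the frame's tail, and each fragment page goes back to
the memory model before the head does. A sketch of the loop these matches
come from (reconstruction; napi_direct is false for the non-NAPI variant):

        struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);

        for (i = 0; i < sinfo->nr_frags; i++) {
                struct page *page = skb_frag_page(&sinfo->frags[i]);

                __xdp_return(page_address(page), &xdpf->mem, false, NULL);
        }
        __xdp_return(xdpf->data, &xdpf->mem, false, NULL);      /* head last */
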
456 struct xdp_mem_allocator *xa = bq->xa; in xdp_flush_frame_bulk() local
458 if (unlikely(!xa || !bq->count)) in xdp_flush_frame_bulk()
461 page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count); in xdp_flush_frame_bulk()
462 /* bq->xa is not cleared to save lookup, if mem.id same in next bulk */ in xdp_flush_frame_bulk()
463 bq->count = 0; in xdp_flush_frame_bulk()
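
xdp_flush_frame_bulk() drains whatever xdp_return_frame_bulk() has batched;
as the quoted comment says, bq->xa survives the flush so the next bulk can
skip the hash lookup when the mem.id repeats. Typical driver-side usage looks
roughly like this (get_next_completed_frame() is a hypothetical helper; the
rcu_read_lock() protects the cached allocator pointer):

        struct xdp_frame_bulk bq;

        xdp_frame_bulk_init(&bq);       /* bq.xa = NULL, bq.count = 0 */

        rcu_read_lock();
        while ((xdpf = get_next_completed_frame(txr)))  /* hypothetical */
                xdp_return_frame_bulk(xdpf, &bq);
        xdp_flush_frame_bulk(&bq);      /* return the remainder */
        rcu_read_unlock();
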
472 struct xdp_mem_allocator *xa; in xdp_return_frame_bulk() local
479 xa = bq->xa; in xdp_return_frame_bulk()
480 if (unlikely(!xa)) { in xdp_return_frame_bulk()
481 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); in xdp_return_frame_bulk()
482 bq->count = 0; in xdp_return_frame_bulk()
483 bq->xa = xa; in xdp_return_frame_bulk()
489 if (unlikely(mem->id != xa->mem.id)) { in xdp_return_frame_bulk()
491 bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); in xdp_return_frame_bulk()
499 for (i = 0; i < sinfo->nr_frags; i++) { in xdp_return_frame_bulk()
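
Inside xdp_return_frame_bulk(), the matched mem.id comparison is the batching
boundary: a frame from a different page_pool forces a flush before the cached
allocator is swapped. Roughly (reconstruction):

        if (unlikely(mem->id != xa->mem.id)) {
                xdp_flush_frame_bulk(bq);       /* old pool's frames first */
                bq->xa = rhashtable_lookup(mem_id_ht, &mem->id,
                                           mem_id_rht_params);
        }
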
520 for (i = 0; i < sinfo->nr_frags; i++) { in xdp_return_buff()
548 metasize = xdp_data_meta_unsupported(xdp) ? 0 : in xdp_convert_zc_to_xdp_frame()
561 memset(xdpf, 0, sizeof(*xdpf)); in xdp_convert_zc_to_xdp_frame()
569 xdpf->headroom = 0; in xdp_convert_zc_to_xdp_frame()
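
xdp_convert_zc_to_xdp_frame() copies a zero-copy (XSK) buffer into freshly
allocated memory, since umem pages cannot outlive the driver's RX path; that
is why the new frame ends up with headroom 0 and its metadata rebuilt.
Callers normally reach it through the generic converter, as in this sketch
(the error value is illustrative):

        xdpf = xdp_convert_buff_to_frame(xdp); /* ZC path for MEM_TYPE_XSK_BUFF_POOL */
        if (unlikely(!xdpf))
                return -ENOMEM;
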
592 return 0; in xdp_alloc_skb_bulk()
661 memset(skb, 0, offsetof(struct sk_buff, tail)); in xdp_build_skb_from_frame()
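
xdp_build_skb_from_frame() allocates the skb head itself and zeroes it up to
the tail offset (the memset above) before delegating to
__xdp_build_skb_from_frame(). A caller-side sketch, returning the frame to
its pool on failure (hypothetical caller; napi delivery is illustrative):

        skb = xdp_build_skb_from_frame(xdpf, dev);
        if (unlikely(!skb)) {
                xdp_return_frame(xdpf);         /* give the pages back */
                return -ENOMEM;
        }
        napi_gro_receive(napi, skb);
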
690 nxdpf->mem.id = 0; in xdpf_clone()
705 * * Returns 0 on success or ``-errno`` on error.
727 * * Returns 0 on success or ``-errno`` on error.