Lines Matching +full:dma +full:- +full:pool
1 /* SPDX-License-Identifier: GPL-2.0
16 #include <linux/dma-direction.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/page-flags.h>
33 #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
35 #define recycle_stat_inc(pool, __stat) \
37 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
38 this_cpu_inc(s->__stat); \
41 #define recycle_stat_add(pool, __stat, val) \
43 struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
44 this_cpu_add(s->__stat, val); \
62 * page_pool_get_stats() - fetch page pool stats
63 * @pool: pool from which page was allocated
72 bool page_pool_get_stats(struct page_pool *pool,
81 stats->alloc_stats.fast += pool->alloc_stats.fast;
82 stats->alloc_stats.slow += pool->alloc_stats.slow;
83 stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
84 stats->alloc_stats.empty += pool->alloc_stats.empty;
85 stats->alloc_stats.refill += pool->alloc_stats.refill;
86 stats->alloc_stats.waive += pool->alloc_stats.waive;
90 per_cpu_ptr(pool->recycle_stats, cpu);
92 stats->recycle_stats.cached += pcpu->cached;
93 stats->recycle_stats.cache_full += pcpu->cache_full;
94 stats->recycle_stats.ring += pcpu->ring;
95 stats->recycle_stats.ring_full += pcpu->ring_full;
96 stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
126 *data++ = pool_stats->alloc_stats.fast;
127 *data++ = pool_stats->alloc_stats.slow;
128 *data++ = pool_stats->alloc_stats.slow_high_order;
129 *data++ = pool_stats->alloc_stats.empty;
130 *data++ = pool_stats->alloc_stats.refill;
131 *data++ = pool_stats->alloc_stats.waive;
132 *data++ = pool_stats->recycle_stats.cached;
133 *data++ = pool_stats->recycle_stats.cache_full;
134 *data++ = pool_stats->recycle_stats.ring;
135 *data++ = pool_stats->recycle_stats.ring_full;
136 *data++ = pool_stats->recycle_stats.released_refcnt;
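These matching lines come from the kernel's page pool implementation (net/core/page_pool.c). For context, here is a minimal sketch of how a driver might feed these counters to ethtool; the my_drv_priv structure, its rxq array and num_rx_queues field are hypothetical, while page_pool_get_stats() and page_pool_ethtool_stats_get() are the real helpers shown above (available only with CONFIG_PAGE_POOL_STATS=y, from <net/page_pool.h> in kernels of this vintage).

/* Hypothetical driver-side sketch, not part of the file above. */
#include <linux/ethtool.h>
#include <net/page_pool.h>

static void my_drv_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct my_drv_priv *priv = netdev_priv(dev);	/* hypothetical */
	struct page_pool_stats pp_stats = { };
	int i;

	/* page_pool_get_stats() accumulates, so one struct can sum all queues. */
	for (i = 0; i < priv->num_rx_queues; i++)
		page_pool_get_stats(priv->rxq[i].page_pool, &pp_stats);

	/* Writes one u64 per string reported by
	 * page_pool_ethtool_stats_get_strings() and returns the advanced
	 * data pointer, matching the *data++ sequence above.
	 */
	data = page_pool_ethtool_stats_get(data, &pp_stats);
}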
143 #define alloc_stat_inc(pool, __stat)
144 #define recycle_stat_inc(pool, __stat)
145 #define recycle_stat_add(pool, __stat, val)
148 static bool page_pool_producer_lock(struct page_pool *pool)
149 __acquires(&pool->ring.producer_lock)
154 spin_lock(&pool->ring.producer_lock);
156 spin_lock_bh(&pool->ring.producer_lock);
161 static void page_pool_producer_unlock(struct page_pool *pool,
163 __releases(&pool->ring.producer_lock)
166 spin_unlock(&pool->ring.producer_lock);
168 spin_unlock_bh(&pool->ring.producer_lock);
171 static int page_pool_init(struct page_pool *pool,
176 memcpy(&pool->p, params, sizeof(pool->p));
179 if (pool->p.flags & ~(PP_FLAG_ALL))
180 return -EINVAL;
182 if (pool->p.pool_size)
183 ring_qsize = pool->p.pool_size;
187 return -E2BIG;
189 /* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
190 * DMA_BIDIRECTIONAL is for allowing page used for DMA sending,
191 * which is the XDP_TX use-case.
193 if (pool->p.flags & PP_FLAG_DMA_MAP) {
194 if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
195 (pool->p.dma_dir != DMA_BIDIRECTIONAL))
196 return -EINVAL;
199 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
200 /* In order to request DMA-sync-for-device the page
203 if (!(pool->p.flags & PP_FLAG_DMA_MAP))
204 return -EINVAL;
206 if (!pool->p.max_len)
207 return -EINVAL;
209 /* pool->p.offset has to be set according to the address
210 * offset used by the DMA engine to start copying rx data
215 pool->p.flags & PP_FLAG_PAGE_FRAG)
216 return -EINVAL;
219 pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
220 if (!pool->recycle_stats)
221 return -ENOMEM;
224 if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
226 free_percpu(pool->recycle_stats);
228 return -ENOMEM;
231 atomic_set(&pool->pages_state_release_cnt, 0);
234 refcount_set(&pool->user_cnt, 1);
236 if (pool->p.flags & PP_FLAG_DMA_MAP)
237 get_device(pool->p.dev);
243 * page_pool_create() - create a page pool.
248 struct page_pool *pool;
251 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
252 if (!pool)
253 return ERR_PTR(-ENOMEM);
255 err = page_pool_init(pool, params);
258 kfree(pool);
262 return pool;
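A hedged example of the setup path documented above: a driver fills struct page_pool_params and calls page_pool_create(). The my_drv_rxq type and the concrete values (ring size, headroom) are illustrative; the constraints they satisfy are the ones page_pool_init() enforces above (dma_dir must be DMA_FROM_DEVICE or DMA_BIDIRECTIONAL when PP_FLAG_DMA_MAP is set, max_len must be non-zero when PP_FLAG_DMA_SYNC_DEV is set, .dev must point at the DMA device).

#include <net/page_pool.h>

/* Illustrative only: 'rxq' and its fields are hypothetical driver state. */
static int my_drv_create_pool(struct my_drv_rxq *rxq, struct device *dma_dev,
			      struct napi_struct *napi)
{
	struct page_pool_params pp = {
		.order		= 0,				/* single pages */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= 1024,				/* ptr_ring size, bounded by the -E2BIG check above */
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,			/* required for PP_FLAG_DMA_MAP */
		.napi		= napi,				/* optional, enables NAPI-aware recycling */
		.dma_dir	= DMA_FROM_DEVICE,		/* or DMA_BIDIRECTIONAL for XDP_TX */
		.offset		= XDP_PACKET_HEADROOM,		/* where HW starts writing RX data */
		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM, /* largest area HW may write past .offset */
	};

	rxq->page_pool = page_pool_create(&pp);
	if (IS_ERR(rxq->page_pool))
		return PTR_ERR(rxq->page_pool);
	return 0;
}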
266 static void page_pool_return_page(struct page_pool *pool, struct page *page);
269 static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
271 struct ptr_ring *r = &pool->ring;
277 alloc_stat_inc(pool, empty);
282 * assumes CPU refilling driver RX-ring will also run RX-NAPI.
285 pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
287 /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
298 pool->alloc.cache[pool->alloc.count++] = page;
301 * (1) release 1 page to page-allocator and
305 page_pool_return_page(pool, page);
306 alloc_stat_inc(pool, waive);
310 } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
313 if (likely(pool->alloc.count > 0)) {
314 page = pool->alloc.cache[--pool->alloc.count];
315 alloc_stat_inc(pool, refill);
322 static struct page *__page_pool_get_cached(struct page_pool *pool)
326 /* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
327 if (likely(pool->alloc.count)) {
328 /* Fast-path */
329 page = pool->alloc.cache[--pool->alloc.count];
330 alloc_stat_inc(pool, fast);
332 page = page_pool_refill_alloc_cache(pool);
338 static void page_pool_dma_sync_for_device(struct page_pool *pool,
344 dma_sync_size = min(dma_sync_size, pool->p.max_len);
345 dma_sync_single_range_for_device(pool->p.dev, dma_addr,
346 pool->p.offset, dma_sync_size,
347 pool->p.dma_dir);
350 static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
352 dma_addr_t dma;
354 /* Setup DMA mapping: use 'struct page' area for storing DMA-addr
356 * into page private data (i.e 32bit cpu with 64bit DMA caps)
357 * This mapping is kept for lifetime of page, until leaving pool.
359 dma = dma_map_page_attrs(pool->p.dev, page, 0,
360 (PAGE_SIZE << pool->p.order),
361 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC |
363 if (dma_mapping_error(pool->p.dev, dma))
366 page_pool_set_dma_addr(page, dma);
368 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
369 page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
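The two helpers above set the convention used by the rest of the file: the DMA address is mapped once per page, stored in struct page, and kept until the page leaves the pool, while the optional device sync only covers the window the hardware may actually write. A small annotated illustration of that window, assuming the offset/max_len values from the creation sketch earlier:

/* Illustrative only: the region handed to dma_sync_single_range_for_device()
 * for a page owned by the pool; dma_addr is what page_pool_get_dma_addr()
 * returns.
 *
 *   synced range = [dma_addr + pool->p.offset,
 *                   dma_addr + pool->p.offset + min(dma_sync_size, pool->p.max_len))
 *
 * With offset = XDP_PACKET_HEADROOM and max_len = PAGE_SIZE - offset, the
 * headroom is never synced and the sync never runs past the end of the page.
 */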
374 static void page_pool_set_pp_info(struct page_pool *pool,
377 page->pp = pool;
378 page->pp_magic |= PP_SIGNATURE;
379 if (pool->p.init_callback)
380 pool->p.init_callback(page, pool->p.init_arg);
385 page->pp_magic = 0;
386 page->pp = NULL;
389 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
395 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
399 if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
400 unlikely(!page_pool_dma_map(pool, page))) {
405 alloc_stat_inc(pool, slow_high_order);
406 page_pool_set_pp_info(pool, page);
408 /* Track how many pages are held 'in-flight' */
409 pool->pages_state_hold_cnt++;
410 trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
416 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
420 unsigned int pp_flags = pool->p.flags;
421 unsigned int pp_order = pool->p.order;
425 /* Don't support bulk alloc for high-order pages */
427 return __page_pool_alloc_page_order(pool, gfp);
430 if (unlikely(pool->alloc.count > 0))
431 return pool->alloc.cache[--pool->alloc.count];
434 memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
436 nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
437 pool->alloc.cache);
442 * page element have not been (possibly) DMA mapped.
445 page = pool->alloc.cache[i];
447 unlikely(!page_pool_dma_map(pool, page))) {
452 page_pool_set_pp_info(pool, page);
453 pool->alloc.cache[pool->alloc.count++] = page;
454 /* Track how many pages are held 'in-flight' */
455 pool->pages_state_hold_cnt++;
456 trace_page_pool_state_hold(pool, page,
457 pool->pages_state_hold_cnt);
461 if (likely(pool->alloc.count > 0)) {
462 page = pool->alloc.cache[--pool->alloc.count];
463 alloc_stat_inc(pool, slow);
475 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
479 /* Fast-path: Get a page from cache */
480 page = __page_pool_get_cached(pool);
484 /* Slow-path: cache empty, do real allocation */
485 page = __page_pool_alloc_pages_slow(pool, gfp);
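page_pool_alloc_pages() is the driver-facing entry point for the two paths shown above (cache hit versus slow bulk allocation). A minimal, hypothetical RX-ring refill loop; the descriptor posting helper and the rxq layout are made up, but page_pool_alloc_pages(), page_pool_get_dma_addr() and the GFP_ATOMIC softirq context reflect the expected usage.

/* Hypothetical refill loop run from NAPI context. */
static int my_drv_refill_rx(struct my_drv_rxq *rxq, int budget)
{
	int filled = 0;

	while (filled < budget) {
		struct page *page = page_pool_alloc_pages(rxq->page_pool, GFP_ATOMIC);
		dma_addr_t dma;

		if (unlikely(!page))
			break;

		/* The mapping was done by the pool (PP_FLAG_DMA_MAP); read it
		 * back and point the descriptor past the configured headroom.
		 */
		dma = page_pool_get_dma_addr(page) + rxq->page_pool->p.offset;
		my_drv_post_rx_buffer(rxq, dma);	/* hypothetical HW descriptor write */
		filled++;
	}
	return filled;
}

Drivers that always allocate from softirq context can use the page_pool_dev_alloc_pages() wrapper, which supplies an atomic GFP mask for them.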
493 #define _distance(a, b) (s32)((a) - (b))
495 static s32 page_pool_inflight(struct page_pool *pool)
497 u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
498 u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
503 trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
504 WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);
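The hold and release counters above are plain u32 values that are allowed to wrap; the _distance() macro at line 493 makes the subtraction wrap-safe by interpreting the unsigned difference as s32. A short worked example (illustrative numbers, not from the file):

/*   hold_cnt    = 0x00000005   (pages_state_hold_cnt has wrapped past 2^32)
 *   release_cnt = 0xfffffffe
 *
 *   (u32)(hold_cnt - release_cnt) = 0x00000007
 *   cast to s32                   = 7   ->  7 pages still in flight
 *
 * Comparing the raw counters directly would instead report a huge bogus
 * in-flight count right after the wrap, which is what the WARN() guards
 * against for genuine accounting bugs.
 */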
512 * page-allocator via put_page).
514 static void page_pool_return_page(struct page_pool *pool, struct page *page)
516 dma_addr_t dma;
519 if (!(pool->p.flags & PP_FLAG_DMA_MAP))
525 dma = page_pool_get_dma_addr(page);
527 /* When page is unmapped, it cannot be returned to our pool */
528 dma_unmap_page_attrs(pool->p.dev, dma,
529 PAGE_SIZE << pool->p.order, pool->p.dma_dir,
535 /* This may be the last page returned, releasing the pool, so
536 * it is not safe to reference pool afterwards.
538 count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
539 trace_page_pool_state_release(pool, page, count);
542 /* An optimization would be to call __free_pages(page, pool->p.order)
543 * knowing page is not part of page-cache (thus avoiding a
548 static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
553 ret = ptr_ring_produce(&pool->ring, page);
555 ret = ptr_ring_produce_bh(&pool->ring, page);
558 recycle_stat_inc(pool, ring);
566 * alloc side cache. E.g. during RX-NAPI processing for XDP_DROP use-case.
571 struct page_pool *pool)
573 if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
574 recycle_stat_inc(pool, cache_full);
579 pool->alloc.cache[pool->alloc.count++] = page;
580 recycle_stat_inc(pool, cached);
585 * if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
586 * the configured size min(dma_sync_size, pool->max_len).
591 __page_pool_put_page(struct page_pool *pool, struct page *page,
597 * one-frame-per-page, but have fallbacks that act like the
608 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
609 page_pool_dma_sync_for_device(pool, page,
613 page_pool_recycle_in_cache(page, pool))
619 /* Fallback/non-XDP mode: API user has an elevated refcnt.
624 * switching between XDP/non-XDP.
626 * In-case page_pool maintains the DMA mapping, API user must
628 * case, the DMA is unmapped/released, as driver is likely
632 recycle_stat_inc(pool, released_refcnt);
633 page_pool_return_page(pool, page);
638 void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
641 page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
642 if (page && !page_pool_recycle_in_ring(pool, page)) {
644 recycle_stat_inc(pool, ring_full);
645 page_pool_return_page(pool, page);
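__page_pool_put_page() and page_pool_put_defragged_page() implement the recycle decision shown above: direct alloc-cache, ptr_ring, or full return to the page allocator. Drivers normally reach this path through small wrappers; a hedged sketch, assuming the common case of whole-page (non-fragmented) buffers, with the rxq structure being hypothetical:

/* Hypothetical completion path for an RX buffer the stack is done with,
 * running in NAPI/softirq context: the page may go straight back into the
 * lockless alloc cache ("direct" recycling).
 */
static void my_drv_reuse_rx_page(struct my_drv_rxq *rxq, struct page *page)
{
	page_pool_recycle_direct(rxq->page_pool, page);
}

/* Outside NAPI (e.g. an error or teardown path): let the pool choose ring
 * versus release, and sync the full configured length for device reuse.
 */
static void my_drv_drop_rx_page(struct my_drv_rxq *rxq, struct page *page)
{
	page_pool_put_full_page(rxq->page_pool, page, false);
}

For packets handed to the stack as skbs, skb_mark_for_recycle() lets the core call back into this put path automatically when the skb is freed, so the driver does not recycle those pages itself.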
651 * page_pool_put_page_bulk() - release references on multiple pages
652 * @pool: pool from which pages were allocated
665 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
675 if (!page_pool_is_last_frag(pool, page))
678 page = __page_pool_put_page(pool, page, -1, false);
688 in_softirq = page_pool_producer_lock(pool);
690 if (__ptr_ring_produce(&pool->ring, data[i])) {
692 recycle_stat_inc(pool, ring_full);
696 recycle_stat_add(pool, ring, i);
697 page_pool_producer_unlock(pool, in_softirq);
707 page_pool_return_page(pool, data[i]);
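page_pool_put_page_bulk() amortizes the producer lock across many pages; in-tree it is mainly used by the XDP frame-return bulking (xdp_return_frame_bulk()). A minimal, hypothetical caller that batches completed pages before releasing them; the completion-walk helper and batch size are illustrative only.

#define MY_DRV_BULK 16	/* hypothetical batch size */

static void my_drv_flush_completed(struct my_drv_txq *txq)
{
	void *batch[MY_DRV_BULK];
	struct page *page;
	int n = 0;

	/* my_drv_next_completed_page() stands in for walking the driver's
	 * completion ring.
	 */
	while ((page = my_drv_next_completed_page(txq))) {
		batch[n++] = page;
		if (n == MY_DRV_BULK) {
			page_pool_put_page_bulk(txq->page_pool, batch, n);
			n = 0;
		}
	}
	if (n)
		page_pool_put_page_bulk(txq->page_pool, batch, n);
}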
711 static struct page *page_pool_drain_frag(struct page_pool *pool,
714 long drain_count = BIAS_MAX - pool->frag_users;
721 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
722 page_pool_dma_sync_for_device(pool, page, -1);
727 page_pool_return_page(pool, page);
731 static void page_pool_free_frag(struct page_pool *pool)
733 long drain_count = BIAS_MAX - pool->frag_users;
734 struct page *page = pool->frag_page;
736 pool->frag_page = NULL;
741 page_pool_return_page(pool, page);
744 struct page *page_pool_alloc_frag(struct page_pool *pool,
748 unsigned int max_size = PAGE_SIZE << pool->p.order;
749 struct page *page = pool->frag_page;
751 if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
756 *offset = pool->frag_offset;
759 page = page_pool_drain_frag(pool, page);
761 alloc_stat_inc(pool, fast);
767 page = page_pool_alloc_pages(pool, gfp);
769 pool->frag_page = NULL;
773 pool->frag_page = page;
776 pool->frag_users = 1;
778 pool->frag_offset = size;
783 pool->frag_users++;
784 pool->frag_offset = *offset + size;
785 alloc_stat_inc(pool, fast);
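page_pool_alloc_frag() splits one (possibly high-order) pool page into several smaller buffers, counting users in frag_users and advancing frag_offset; it requires a pool created with PP_FLAG_PAGE_FRAG. A hedged sketch of a driver requesting 2 KiB fragments; the rxq structure is hypothetical.

/* Hypothetical: fill one RX descriptor with a 2 KiB fragment. */
static int my_drv_alloc_rx_frag(struct my_drv_rxq *rxq, dma_addr_t *dma)
{
	unsigned int offset;
	struct page *page;

	page = page_pool_alloc_frag(rxq->page_pool, &offset,
				    2048, GFP_ATOMIC);
	if (unlikely(!page))
		return -ENOMEM;

	/* One DMA mapping per page; each fragment is addressed by the
	 * offset the pool returned within that mapping.
	 */
	*dma = page_pool_get_dma_addr(page) + offset;
	return 0;
}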
790 static void page_pool_empty_ring(struct page_pool *pool)
795 while ((page = ptr_ring_consume_bh(&pool->ring))) {
801 page_pool_return_page(pool, page);
805 static void page_pool_free(struct page_pool *pool)
807 if (pool->disconnect)
808 pool->disconnect(pool);
810 ptr_ring_cleanup(&pool->ring, NULL);
812 if (pool->p.flags & PP_FLAG_DMA_MAP)
813 put_device(pool->p.dev);
816 free_percpu(pool->recycle_stats);
818 kfree(pool);
821 static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
825 if (pool->destroy_cnt)
829 * no-longer in use, and page_pool_alloc_pages() cannot be
832 while (pool->alloc.count) {
833 page = pool->alloc.cache[--pool->alloc.count];
834 page_pool_return_page(pool, page);
838 static void page_pool_scrub(struct page_pool *pool)
840 page_pool_empty_alloc_cache_once(pool);
841 pool->destroy_cnt++;
844 * be in-flight.
846 page_pool_empty_ring(pool);
849 static int page_pool_release(struct page_pool *pool)
853 page_pool_scrub(pool);
854 inflight = page_pool_inflight(pool);
856 page_pool_free(pool);
864 struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
867 inflight = page_pool_release(pool);
872 if (time_after_eq(jiffies, pool->defer_warn)) {
873 int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
875 pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
877 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
881 schedule_delayed_work(&pool->release_dw, DEFER_TIME);
884 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
887 refcount_inc(&pool->user_cnt);
888 pool->disconnect = disconnect;
889 pool->xdp_mem_id = mem->id;
892 void page_pool_unlink_napi(struct page_pool *pool)
894 if (!pool->p.napi)
898 * pool and NAPI are unlinked when NAPI is disabled.
900 WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
901 READ_ONCE(pool->p.napi->list_owner) != -1);
903 WRITE_ONCE(pool->p.napi, NULL);
907 void page_pool_destroy(struct page_pool *pool)
909 if (!pool)
912 if (!page_pool_put(pool))
915 page_pool_unlink_napi(pool);
916 page_pool_free_frag(pool);
918 if (!page_pool_release(pool))
921 pool->defer_start = jiffies;
922 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
924 INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
925 schedule_delayed_work(&pool->release_dw, DEFER_TIME);
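page_pool_destroy() is the normal driver teardown entry point: it drops the user reference, flushes the frag page and caches, and defers final freeing to the delayed work above while pages are still in flight. A hedged teardown sketch; the rxq structure is hypothetical, and the xdp_rxq_info call is shown because registering the pool as an XDP memory model is what sets pool->disconnect in the first place.

/* Hypothetical queue teardown, after the HW ring has been stopped and all
 * outstanding RX buffers have been returned to the pool or freed.
 */
static void my_drv_destroy_rxq(struct my_drv_rxq *rxq)
{
	/* If the pool was registered via xdp_rxq_info_reg_mem_model(),
	 * unregistering disconnects it from the XDP memory allocator.
	 */
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	page_pool_destroy(rxq->page_pool);	/* safe on NULL */
	rxq->page_pool = NULL;
}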
930 void page_pool_update_nid(struct page_pool *pool, int new_nid)
934 trace_page_pool_update_nid(pool, new_nid);
935 pool->p.nid = new_nid;
937 /* Flush pool alloc cache, as refill will check NUMA node */
938 while (pool->alloc.count) {
939 page = pool->alloc.cache[--pool->alloc.count];
940 page_pool_return_page(pool, page);
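page_pool_update_nid() lets a driver re-home future allocations when its RX processing moves to another NUMA node; the alloc cache is flushed so the next refill honours the new node. A short hedged example, e.g. from an IRQ affinity notifier; the callback and rxq structure are hypothetical.

/* Hypothetical: called when the queue's interrupt moves to another CPU. */
static void my_drv_irq_moved(struct my_drv_rxq *rxq, int new_cpu)
{
	page_pool_update_nid(rxq->page_pool, cpu_to_node(new_cpu));
}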