/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool/helpers.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * The page_pool allocator is optimized for the XDP mode that
 * uses one frame per page, but it can fall back on the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should use
 * page_pool_dev_alloc_pages() in place of dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users know
 * when it is safe to free a page_pool object.  Thus, API users
 * must call page_pool_put_page() to free the page, or attach
 * the page to a page_pool-aware object like skbs marked with
 * skb_mark_for_recycle().
 *
 * API users must call page_pool_put_page() exactly once per page, as it
 * will either recycle the page, or, in the refcnt > 1 case, release the
 * DMA mapping and in-flight state accounting.
 */
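
/*
 * Example: a minimal Rx setup sketch.  The device pointer in @pdev, the
 * pool size and the error handling are hypothetical driver details; only
 * the page_pool_*() calls and PP_FLAG_* flags are part of the real API:
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP,
 *		.pool_size	= 256,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *	};
 *	struct page_pool *pool;
 *	struct page *page;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	page = page_pool_dev_alloc_pages(pool);
 *	...
 *	page_pool_put_full_page(pool, page, false);
 *	page_pool_destroy(pool);
 */
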
#ifndef _NET_PAGE_POOL_HELPERS_H
#define _NET_PAGE_POOL_HELPERS_H

#include <net/page_pool/types.h>

#ifdef CONFIG_PAGE_POOL_STATS
int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);

/*
 * Drivers that wish to harvest page pool stats and report them to users
 * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
 * struct page_pool_stats and call page_pool_get_stats() to get stats
 * for the specified pool.
 */
bool page_pool_get_stats(struct page_pool *pool,
			 struct page_pool_stats *stats);
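
/*
 * Example: sketch of the ethtool glue.  The mydrv_*() function, the
 * private struct and its @pool member are made-up driver details; only
 * page_pool_get_stats() and page_pool_ethtool_stats_get() are part of
 * the real API:
 *
 *	static void mydrv_get_ethtool_stats(struct net_device *netdev,
 *					    struct ethtool_stats *st,
 *					    u64 *data)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(netdev);
 *		struct page_pool_stats stats = { };
 *
 *		if (page_pool_get_stats(priv->pool, &stats))
 *			data = page_pool_ethtool_stats_get(data, &stats);
 *	}
 */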
#else
static inline int page_pool_ethtool_stats_get_count(void)
{
	return 0;
}

static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	return data;
}

static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
{
	return data;
}
#endif

/**
 * page_pool_dev_alloc_pages() - allocate a page.
 * @pool:	pool from which to allocate
 *
 * Get a page from the page allocator or page_pool caches.
 */
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

/**
 * page_pool_dev_alloc_frag() - allocate a page fragment.
 * @pool:	pool from which to allocate
 * @offset:	offset to the allocated page
 * @size:	requested size
 *
 * Get a page fragment from the page allocator or page_pool caches.
 */
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}
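
/*
 * Example: sketch of a frag-based Rx buffer refill, assuming a pool
 * created with PP_FLAG_PAGE_FRAG and a made-up 2KB buffer size; the
 * rx_buf structure is a hypothetical driver detail:
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc_frag(pool, &offset, 2048);
 *	if (!page)
 *		return -ENOMEM;
 *	rx_buf->page   = page;
 *	rx_buf->offset = offset;
 */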

/**
 * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
 * @pool:	pool from which page was allocated
 *
 * Get the stored DMA direction. A driver might decide to store this locally
 * and avoid the extra cache line from page_pool to determine the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}
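
/*
 * Example: per the note above, a driver can cache the direction at ring
 * setup time instead of re-reading it in the hot path (the rx_ring
 * structure is a hypothetical driver detail):
 *
 *	rx_ring->dma_dir = page_pool_get_dma_dir(rx_ring->pool);
 */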

/* pp_frag_count represents the number of writers who can update the page
 * either by updating skb->data or via DMA mappings for the device.
 * We can't rely on the page refcnt for that as we don't know who might be
 * holding page references and we can't reliably destroy or sync DMA mappings
 * of the fragments.
 *
 * When pp_frag_count reaches 0 we can either recycle the page if the page
 * refcnt is 1 or return it back to the memory allocator and destroy any
 * mappings we have.
 */
static inline void page_pool_fragment_page(struct page *page, long nr)
{
	atomic_long_set(&page->pp_frag_count, nr);
}

static inline long page_pool_defrag_page(struct page *page, long nr)
{
	long ret;

	/* If nr == pp_frag_count then we have cleared all remaining
	 * references to the page. No need to actually overwrite it, instead
	 * we can leave this to be overwritten by the calling function.
	 *
	 * The main advantage to doing this is that an atomic_read is
	 * generally a much cheaper operation than an atomic update,
	 * especially when dealing with a page that may be partitioned
	 * into only 2 or 3 pieces.
	 */
	if (atomic_long_read(&page->pp_frag_count) == nr)
		return 0;

	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
	WARN_ON(ret < 0);
	return ret;
}

static inline bool page_pool_is_last_frag(struct page_pool *pool,
					  struct page *page)
{
	/* If fragments aren't enabled or count is 0 we were the last user */
	return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
	       (page_pool_defrag_page(page, 1) == 0);
}
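
/*
 * Example: sketch of splitting one page into N equal Rx buffers up
 * front, assuming the pool was created with PP_FLAG_PAGE_FRAG; N and
 * the rx_buf array are hypothetical driver details.  Each consumer then
 * calls page_pool_put_page() on its own, and the page is only recycled
 * or unmapped once the last fragment reference drains:
 *
 *	page_pool_fragment_page(page, N);
 *	for (i = 0; i < N; i++)
 *		rx_buf[i].page = page;
 */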

/**
 * page_pool_put_page() - release a reference to a page pool page
 * @pool:	pool from which page was allocated
 * @page:	page to release a reference on
 * @dma_sync_size: how much of the page may have been touched by the device
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * The outcome of this depends on the page refcnt. If the driver bumps
 * the refcnt > 1 this will unmap the page. If the page refcnt is 1
 * the allocator owns the page and will try to recycle it in one of the pool
 * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
 * using dma_sync_single_range_for_device().
 */
static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	if (!page_pool_is_last_frag(pool, page))
		return;

	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
#endif
}

/**
 * page_pool_put_full_page() - release a reference on a page pool page
 * @pool:	pool from which page was allocated
 * @page:	page to release a reference on
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * Similar to page_pool_put_page(), but will DMA sync the entire memory area
 * as configured in &page_pool_params.max_len.
 */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	page_pool_put_page(pool, page, -1, allow_direct);
}

/**
 * page_pool_recycle_direct() - release a reference on a page pool page
 * @pool:	pool from which page was allocated
 * @page:	page to release a reference on
 *
 * Similar to page_pool_put_full_page(), but the caller must guarantee safe
 * context (e.g. NAPI), since it will recycle the page directly into the pool
 * fast cache.
 */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}
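
/*
 * Example: sketch of an Rx completion path running in NAPI context,
 * where direct recycling is safe; the mydrv_*() names and descriptor
 * handling are hypothetical:
 *
 *	static int mydrv_clean_rx(struct mydrv_ring *ring, int budget)
 *	{
 *		...
 *		if (desc_has_error(desc)) {
 *			page_pool_recycle_direct(ring->pool, page);
 *			continue;
 *		}
 *		skb_mark_for_recycle(skb);
 *		...
 *	}
 *
 * Outside NAPI/softirq context, use page_pool_put_full_page() with
 * @allow_direct set to false instead.
 */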

#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
		(sizeof(dma_addr_t) > sizeof(unsigned long))

/**
 * page_pool_get_dma_addr() - Retrieve the stored DMA address.
 * @page:	page allocated from a page pool
 *
 * Fetch the DMA address of the page. The page pool to which the page belongs
 * must have been created with PP_FLAG_DMA_MAP.
 */
static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr;

	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		/* Shift in two steps to avoid a shift-count overflow
		 * warning in configs where dma_addr_t is only 32 bits
		 * wide (this branch is still compiled there).
		 */
		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;

	return ret;
}
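
/*
 * Example: sketch of syncing received data for the CPU before reading
 * it, using the stored address; the offset/len values are hypothetical
 * and the pool must have been created with PP_FLAG_DMA_MAP:
 *
 *	dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *	dma_sync_single_for_cpu(pool->p.dev, dma + offset, len,
 *				page_pool_get_dma_dir(pool));
 */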

static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr = addr;
	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
		page->dma_addr_upper = upper_32_bits(addr);
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
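
/*
 * Example: a driver can keep the pool's pages local to the CPU that is
 * currently running its Rx NAPI poll by calling this once per poll
 * (ring->pool is a hypothetical driver detail):
 *
 *	page_pool_nid_changed(ring->pool, numa_mem_id());
 */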

#endif /* _NET_PAGE_POOL_HELPERS_H */