/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _NET_PAGE_POOL_TYPES_H
#define _NET_PAGE_POOL_TYPES_H

#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set, all pages that the driver
					* gets from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* the device driver's responsibility.
					*/
#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
				 PP_FLAG_DMA_SYNC_DEV |\
				 PP_FLAG_PAGE_FRAG)
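
/*
 * Typical flag usage (an illustrative sketch, not a requirement): a
 * driver that wants page_pool to own both the DMA mapping and the
 * device-side sync of each page ORs the two DMA flags together in its
 * page_pool_params:
 *
 *	.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *
 * PP_FLAG_DMA_SYNC_DEV is only accepted together with PP_FLAG_DMA_MAP,
 * and DMA-sync-for-CPU remains the driver's responsibility either way.
 */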

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects is due to the XDP_DROP use-case.
 * XDP_DROP allows the opportunity to recycle objects directly into
 * this array, as it shares the same softirq/NAPI protection.  If the
 * cache is already full (or partly full), the XDP_DROP recycles would
 * have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};

/**
 * struct page_pool_params - page pool parameters
 * @flags:	PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
 * @order:	2^order pages on allocation
 * @pool_size:	size of the ptr_ring
 * @nid:	NUMA node id to allocate pages from
 * @dev:	device, for DMA pre-mapping purposes
 * @napi:	NAPI which is the sole consumer of pages, otherwise NULL
 * @dma_dir:	DMA mapping direction
 * @max_len:	max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
 * @offset:	DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
 */
struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;
	struct device	*dev;
	struct napi_struct *napi;
	enum dma_data_direction dma_dir;
	unsigned int	max_len;
	unsigned int	offset;
/* private: used by test code only */
	void (*init_callback)(struct page *page, void *arg);
	void *init_arg;
};
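
/*
 * Example: how an RX driver might fill in struct page_pool_params and
 * create a pool with page_pool_create() (declared further down).  This
 * is only an illustrative sketch; the pool size of 256 and the
 * pdev/rx_ring names are hypothetical, driver-specific choices:
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 256,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.napi		= &rx_ring->napi,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */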

#ifdef CONFIG_PAGE_POOL_STATS
/**
 * struct page_pool_alloc_stats - allocation statistics
 * @fast:	successful fast path allocations
 * @slow:	slow path order-0 allocations
 * @slow_high_order: slow path high order allocations
 * @empty:	ptr ring is empty, so a slow path allocation was forced
 * @refill:	an allocation which triggered a refill of the cache
 * @waive:	pages obtained from the ptr ring that cannot be added to
 *		the cache due to a NUMA mismatch
 */
struct page_pool_alloc_stats {
	u64 fast;
	u64 slow;
	u64 slow_high_order;
	u64 empty;
	u64 refill;
	u64 waive;
};

/**
 * struct page_pool_recycle_stats - recycling (freeing) statistics
 * @cached:	recycling placed page in the page pool cache
 * @cache_full:	page pool cache was full
 * @ring:	page placed into the ptr ring
 * @ring_full:	page released from page pool because the ptr ring was full
 * @released_refcnt:	page released (and not recycled) because refcnt > 1
 */
struct page_pool_recycle_stats {
	u64 cached;
	u64 cache_full;
	u64 ring;
	u64 ring_full;
	u64 released_refcnt;
};

/**
 * struct page_pool_stats - combined page pool use statistics
 * @alloc_stats:	see struct page_pool_alloc_stats
 * @recycle_stats:	see struct page_pool_recycle_stats
 *
 * Wrapper struct for combining page pool stats with different storage
 * requirements.
 */
struct page_pool_stats {
	struct page_pool_alloc_stats alloc_stats;
	struct page_pool_recycle_stats recycle_stats;
};
#endif
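
/*
 * Example: an ethtool stats callback might fold a pool's counters into
 * this wrapper via page_pool_get_stats(), which the page_pool helper
 * header provides when CONFIG_PAGE_POOL_STATS is enabled (a rough
 * sketch, not a complete implementation):
 *
 *	struct page_pool_stats stats = { };
 *
 *	if (page_pool_get_stats(pool, &stats))
 *		pr_info("fast allocs %llu, ring recycles %llu\n",
 *			stats.alloc_stats.fast, stats.recycle_stats.ring);
 */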

struct page_pool {
	struct page_pool_params p;

	long frag_users;
	struct page *frag_page;
	unsigned int frag_offset;
	u32 pages_state_hold_cnt;

	struct delayed_work release_dw;
	void (*disconnect)(void *pool);
	unsigned long defer_start;
	unsigned long defer_warn;

#ifdef CONFIG_PAGE_POOL_STATS
	/* these stats are incremented while in softirq context */
	struct page_pool_alloc_stats alloc_stats;
#endif
	u32 xdp_mem_id;

	/*
	 * Data structure for the allocation side
	 *
	 * The driver's allocation side usually already performs some
	 * kind of resource protection.  Piggyback on this protection,
	 * and require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, as the RX-queue is already protected by
	 * softirq/BH scheduling and napi_schedule.  NAPI scheduling
	 * guarantees that a single napi_struct will only be scheduled
	 * on a single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization
	 * wise, because frees can happen on remote CPUs, with no
	 * association with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

#ifdef CONFIG_PAGE_POOL_STATS
	/* recycle stats are per-cpu to avoid locking */
	struct page_pool_recycle_stats __percpu *recycle_stats;
#endif
	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache.  The
	 * purpose of this refcnt is to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
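
/*
 * Example: the allocation side of an RX refill loop (an illustrative
 * sketch; rx_buf_len and the GFP flags shown are hypothetical,
 * driver-chosen values).  Both allocators return NULL on failure:
 *
 *	struct page *page;
 *	unsigned int offset;
 *
 *	page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
 *
 * or, with PP_FLAG_PAGE_FRAG set, a sub-page fragment:
 *
 *	page = page_pool_alloc_frag(pool, &offset, rx_buf_len, GFP_ATOMIC);
 *	if (!page)
 *		return -ENOMEM;
 */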

struct xdp_mem_info;

#ifdef CONFIG_PAGE_POOL
void page_pool_unlink_napi(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_unlink_napi(struct page_pool *pool)
{
}

static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *),
					 struct xdp_mem_info *mem)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif

void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
				  unsigned int dma_sync_size,
				  bool allow_direct);
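
/*
 * Example: handing back a page whose pool fragment count has already
 * been dropped, from the pool's own NAPI/softirq context (a sketch;
 * most drivers go through the higher-level put helpers in the
 * page_pool helper header rather than calling this directly).  A
 * dma_sync_size of -1 requests a sync of the full max_len area, and
 * allow_direct permits use of the lockless per-NAPI cache:
 *
 *	page_pool_put_defragged_page(pool, page, -1, true);
 */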

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
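
/*
 * Example: a driver whose RX interrupt affinity has moved to another
 * NUMA node can re-target allocations from its NAPI poll loop (a
 * sketch; numa_mem_id() reflects the node of the CPU currently running
 * the poll, and the page_pool helper header offers a
 * page_pool_nid_changed() wrapper around this check):
 *
 *	if (pool->p.nid != numa_mem_id())
 *		page_pool_update_nid(pool, numa_mem_id());
 */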

#endif /* _NET_PAGE_POOL_TYPES_H */