/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct skb_array' data structure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	Limited-size FIFO of skbs. Can be used more or less whenever
 *	sk_buff_head can be used, except you need to know the queue size in
 *	advance.
 *	Implemented as a type-safe wrapper around ptr_ring.
 *
 *	An illustrative usage sketch appears at the bottom of this header.
 */

#ifndef _LINUX_SKB_ARRAY_H
#define _LINUX_SKB_ARRAY_H 1

#ifdef __KERNEL__
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#endif

struct skb_array {
	struct ptr_ring ring;
};

/* Might be slightly faster than skb_array_full below, but callers invoking
 * this in a loop must use a compiler barrier, for example cpu_relax();
 * see the producer sketch after skb_array_produce() below.
 */
static inline bool __skb_array_full(struct skb_array *a)
{
	return __ptr_ring_full(&a->ring);
}

static inline bool skb_array_full(struct skb_array *a)
{
	return ptr_ring_full(&a->ring);
}

static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce(&a->ring, skb);
}

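/* Illustrative sketch, not part of the original header: a sole producer
 * may busy-wait on __skb_array_full() before queueing, with cpu_relax()
 * serving as the compiler barrier required above. With multiple producers,
 * loop on the return value of skb_array_produce() instead. The function
 * name is hypothetical.
 */
static inline void example_skb_array_produce_spin(struct skb_array *a,
						  struct sk_buff *skb)
{
	while (__skb_array_full(a))
		cpu_relax();
	/* Cannot fail here: only this producer adds entries. */
	skb_array_produce(a, skb);
}
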
static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_irq(&a->ring, skb);
}

static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(&a->ring, skb);
}

static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_any(&a->ring, skb);
}

/* Might be slightly faster than skb_array_empty below, but only safe if the
 * array is never resized. Also, callers invoking this in a loop must take care
 * to use a compiler barrier, for example cpu_relax(); see the consumer
 * sketch after skb_array_consume() below.
 */
static inline bool __skb_array_empty(struct skb_array *a)
{
	return __ptr_ring_empty(&a->ring);
}

static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
{
	return __ptr_ring_peek(&a->ring);
}

static inline bool skb_array_empty(struct skb_array *a)
{
	return ptr_ring_empty(&a->ring);
}

static inline bool skb_array_empty_bh(struct skb_array *a)
{
	return ptr_ring_empty_bh(&a->ring);
}

static inline bool skb_array_empty_irq(struct skb_array *a)
{
	return ptr_ring_empty_irq(&a->ring);
}

static inline bool skb_array_empty_any(struct skb_array *a)
{
	return ptr_ring_empty_any(&a->ring);
}

static inline struct sk_buff *__skb_array_consume(struct skb_array *a)
{
	return __ptr_ring_consume(&a->ring);
}

static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
	return ptr_ring_consume(&a->ring);
}

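/* Illustrative sketch, not part of the original header: a sole consumer
 * may poll __skb_array_empty() with cpu_relax() as the compiler barrier,
 * then dequeue. Unsafe if the array can be resized concurrently. The
 * function name is hypothetical.
 */
static inline struct sk_buff *example_skb_array_consume_spin(struct skb_array *a)
{
	while (__skb_array_empty(a))
		cpu_relax();
	/* Cannot return NULL here: only this consumer removes entries. */
	return skb_array_consume(a);
}
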
static inline int skb_array_consume_batched(struct skb_array *a,
					    struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched(&a->ring, (void **)array, n);
}

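/* Illustrative sketch, not part of the original header: drain up to one
 * batch of skbs in a single call and drop them. The batch size of 16 and
 * the kfree_skb() disposal are assumptions made for the example.
 */
static inline void example_skb_array_drop_batch(struct skb_array *a)
{
	struct sk_buff *skbs[16];
	int i, n;

	n = skb_array_consume_batched(a, skbs, ARRAY_SIZE(skbs));
	for (i = 0; i < n; i++)
		kfree_skb(skbs[i]);
}
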
static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
	return ptr_ring_consume_irq(&a->ring);
}

static inline int skb_array_consume_batched_irq(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
	return ptr_ring_consume_any(&a->ring);
}

static inline int skb_array_consume_batched_any(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
	return ptr_ring_consume_bh(&a->ring);
}

static inline int skb_array_consume_batched_bh(struct skb_array *a,
					       struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
}

/* Length the skb occupies on the wire: if a VLAN tag is still pending in
 * skb metadata rather than in the packet data, account for it as well.
 */
static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
	if (likely(skb)) {
		int len = skb->len;

		if (skb_vlan_tag_present(skb))
			len += VLAN_HLEN;

		return len;
	} else {
		return 0;
	}
}

static inline int skb_array_peek_len(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
}

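/* Illustrative sketch, not part of the original header: consume packets
 * while a byte budget lasts, peeking at the next length before dequeuing.
 * Assumes a single consumer (so the peeked skb is the one consumed); the
 * budget logic and kfree_skb() disposal are assumptions for the example.
 */
static inline int example_skb_array_consume_budget(struct skb_array *a,
						   int budget)
{
	int bytes = 0;
	int len;

	while ((len = skb_array_peek_len(a)) && bytes + len <= budget) {
		struct sk_buff *skb = skb_array_consume(a);

		if (!skb)
			break;
		bytes += len;
		kfree_skb(skb);	/* stand-in for real processing */
	}
	return bytes;
}
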
static inline int skb_array_peek_len_irq(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_bh(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_any(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_init(&a->ring, size, gfp);
}

/* Destructor callback used below to free skbs left queued in the ring. */
static void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}

/* Return consumed skbs to the ring; entries that no longer fit are freed. */
static inline void skb_array_unconsume(struct skb_array *a,
				       struct sk_buff **skbs, int n)
{
	ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
}

static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}

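/* Illustrative sketch, not part of the original header: grow a ring at
 * runtime, e.g. when a device queue length changes; skbs that do not fit
 * in the new ring are freed. Doubling is an assumption for the example.
 */
static inline int example_skb_array_double(struct skb_array *a, int cur_size,
					   gfp_t gfp)
{
	return skb_array_resize(a, cur_size * 2, gfp);
}
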
static inline int skb_array_resize_multiple(struct skb_array **rings,
					    int nrings, unsigned int size,
					    gfp_t gfp)
{
	/* The cast below is only valid while ring is the first member. */
	BUILD_BUG_ON(offsetof(struct skb_array, ring));
	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
					nrings, size, gfp,
					__skb_array_destroy_skb);
}

static inline void skb_array_cleanup(struct skb_array *a)
{
	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
}

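/* Illustrative usage sketch, not part of the original header: the queue
 * size must be chosen at init time, as noted at the top of this file.
 * The size of 128 and the error handling are assumptions for the example.
 */
static inline int example_skb_array_lifecycle(struct sk_buff *skb)
{
	struct skb_array a;
	int err;

	err = skb_array_init(&a, 128, GFP_KERNEL);
	if (err)
		return err;

	if (skb_array_produce(&a, skb))	/* -ENOSPC when full */
		kfree_skb(skb);

	skb = skb_array_consume(&a);	/* NULL when empty */
	if (skb)
		kfree_skb(skb);

	/* Frees any skbs still queued, via __skb_array_destroy_skb(). */
	skb_array_cleanup(&a);
	return 0;
}
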
#endif /* _LINUX_SKB_ARRAY_H */