// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port.  This allows a decision to be made after finding
 * the first socket.  An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
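
/*
 * Userspace view (a minimal sketch, not part of this kernel file; needs
 * <sys/socket.h> and <netinet/in.h>): a reuseport group is formed by
 * several sockets that enable SO_REUSEPORT and bind the same address and
 * port.  The helper name and the port are hypothetical; error handling is
 * omitted.
 *
 *	int make_listener(void)
 *	{
 *		int one = 1;
 *		struct sockaddr_in addr = {
 *			.sin_family = AF_INET,
 *			.sin_port = htons(8080),
 *			.sin_addr.s_addr = htonl(INADDR_ANY),
 *		};
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *		bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *		listen(fd, SOMAXCONN);
 *		return fd;
 *	}
 *
 * Every socket created this way joins the same group, and the kernel picks
 * one listener per incoming packet in reuseport_select_sock() below.
 */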

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

static DEFINE_IDA(reuseport_ida);

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
		      sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;
	int id, ret = 0;

	/* The bh-disabling lock is used since this function may be called
	 * before the hlist lock is taken, either in the softirq of the
	 * receive path or from setsockopt in process context.
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		/* Only set reuse->bind_inany if bind_inany is true.
		 * Otherwise it would overwrite the reuse->bind_inany
		 * that was already set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		ret = -ENOMEM;
		goto out;
	}

	id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
	if (id < 0) {
		kfree(reuse);
		ret = id;
		goto out;
	}

	reuse->reuseport_id = id;
	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuse->bind_inany = bind_inany;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return ret;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}
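
/*
 * Note on capacity (derived from the U16_MAX check above): starting from
 * INIT_SOCKS (128), the array doubles through powers of two.  Growing a
 * 32768-entry array would need 65536 slots, which exceeds U16_MAX, so a
 * single reuseport group tops out at 32768 sockets.
 */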

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	ida_free(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return -ENOMEM and fail to add the socket to the group under
 *  memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					     lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);
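
/*
 * A minimal userspace sketch (using the hypothetical helper from the
 * comment at the top of this file, error handling omitted): a second
 * socket bound to the same port joins the existing group, reaching
 * reuseport_add_sock() via the protocol's hash path.
 *
 *	int a = make_listener();	// first socket: reuseport_alloc()
 *	int b = make_listener();	// same port: reuseport_add_sock()
 */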

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* Notify the bpf side. The sk may be added to a sockarray
	 * map. If so, sockarray logic will remove it from the map.
	 *
	 * Other bpf map types that work with reuseport, like sockmap,
	 * don't need an explicit callback from here. They override sk
	 * unhash/close ops to remove the sk from the map before we
	 * get to this point.
	 */
	bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);
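
/*
 * Removal is symmetric from userspace (sketch, continuing the hypothetical
 * example above): close(b) unhashes the socket, which brings the kernel
 * here; the socket is swapped with the last array entry, and closing the
 * final member frees the group via RCU.
 *
 *	close(b);	// reuseport_detach_sock(): group shrinks to one
 *	close(a);	// last member: group freed by reuseport_free_rcu()
 */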

/* Run a classic BPF program (attached with SO_ATTACH_REUSEPORT_CBPF) over
 * the skb; its return value selects an index into the socket array.
 */
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2) {
			int i, j;

			i = j = reciprocal_scale(hash, socks);
			while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
				i++;
				if (i >= reuse->num_socks)
					i = 0;
				if (i == j)
					goto out;
			}
			sk2 = reuse->socks[i];
		}
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
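
/*
 * Worked example of the hash fallback above: reciprocal_scale(hash, socks)
 * maps the 32-bit hash onto [0, socks) as ((u64)hash * socks) >> 32, so
 * hash 0x80000000 with 4 sockets starts the search at index 2.  The loop
 * then skips entries in TCP_ESTABLISHED state (connected UDP sockets in
 * the group, which should not receive new flows) and gives up after one
 * full pass around the array.
 */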

int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		int err = reuseport_alloc(sk, false);

		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);
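
/*
 * A minimal userspace sketch (hypothetical, needs <linux/filter.h>, error
 * handling omitted) of attaching a classic BPF program that steers every
 * packet to socket index 0 in the group:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0 },	// return index 0
 *	};
 *	struct sock_fprog fprog = {
 *		.len = 1,
 *		.filter = code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 *
 * An eBPF program of type BPF_PROG_TYPE_SK_REUSEPORT can be attached with
 * SO_ATTACH_REUSEPORT_EBPF instead, passing the program fd as an int.
 */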

int reuseport_detach_prog(struct sock *sk)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return sk->sk_reuseport ? -ENOENT : -EINVAL;

	old_prog = NULL;
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_replace_pointer(reuse->prog, old_prog,
				       lockdep_is_held(&reuseport_lock));
	spin_unlock_bh(&reuseport_lock);

	if (!old_prog)
		return -ENOENT;

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);
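
/*
 * Userspace sketch: the attached program is removed with
 * SO_DETACH_REUSEPORT_BPF (the option value itself is ignored, but an
 * int-sized value must still be supplied):
 *
 *	int dummy = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
 *		   &dummy, sizeof(dummy));
 */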