/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port.  This allows a decision to be made after finding
 * the first socket.  An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

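/* Illustrative userspace sketch (not part of this file): each listener
 * opts in with SO_REUSEPORT before bind(), e.g.:
 *
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 16);
 *
 * All sockets bound this way to the same address/port form one group,
 * tracked by the code below.
 */
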
#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

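/* Serializes all group mutations (allocation, growth, socket add and
 * remove, BPF program attach); lookups run lock-free under RCU.
 */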
static DEFINE_SPINLOCK(reuseport_lock);

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
		      sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

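/* Allocate the initial group for @sk and publish it through
 * sk->sk_reuseport_cb.  Returns 0 on success, including when another
 * context won the allocation race, or -ENOMEM.
 *
 * Illustrative caller sketch (an assumption, not this file's code): a
 * protocol's bind/hash path typically starts a group for the first
 * listener on a port and joins later listeners to it:
 *
 *	if (sk2)			(matching listener found)
 *		err = reuseport_add_sock(sk, sk2);
 *	else
 *		err = reuseport_alloc(sk);
 */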
int reuseport_alloc(struct sock *sk)
{
	struct sock_reuseport *reuse;

	/* Use the bh-disabling lock: this function may run in softirq
	 * context on the receive path (before the hlist lock is taken)
	 * or in process context via setsockopt().
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	if (rcu_dereference_protected(sk->sk_reuseport_cb,
				      lockdep_is_held(&reuseport_lock)))
		goto out;

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

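/* Double the socket array when it fills.  Starting from INIT_SOCKS the
 * capacity grows 128 -> 256 -> ... -> 32768; the next doubling (65536)
 * would overflow the u16 num_socks/max_socks fields, so growth stops
 * there and further adds fail with -ENOMEM.
 */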
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  May return -ENOMEM and fail to add the socket to the group under
 *  memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2)
{
	struct sock_reuseport *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
					    lockdep_is_held(&reuseport_lock)),
		  "socket already in reuseport group");

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

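/* Publication order in reuseport_add_sock() matters: the new pointer
 * is stored in socks[] before num_socks is bumped (smp_wmb()), and
 * readers load num_socks before the array slots (smp_rmb() in
 * reuseport_select_sock()), so a reader that observes the new count
 * also observes the new pointer.
 */

/* Runs after an RCU grace period, so no reader can still be executing
 * the old BPF program when its reference is dropped.
 */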
static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	if (reuse->prog)
		bpf_prog_destroy(reuse->prog);
	kfree(reuse);
}

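/* Remove @sk from its group.  The last array entry is swapped into the
 * vacated slot so the array stays dense; concurrent RCU readers may
 * briefly see either layout, which is harmless since every socket in
 * the group is a valid target.  The last socket out frees the group
 * after a grace period.
 */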
void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

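/* Run @prog over @skb and use its return value as an index into the
 * socket array; an out-of-range index selects no socket and the packet
 * is dropped by the caller.
 *
 * Illustrative sketch (assumed userspace, not part of this file): a
 * classic BPF program that spreads load by receiving CPU could be:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
 *		{ BPF_RET | BPF_A, 0, 0, 0 },
 *	};
 */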
static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (prog && skb)
			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
		else
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
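
/* Note on the hash fallback above: reciprocal_scale(hash, socks) maps
 * the 32-bit hash onto [0, socks) without a division, computing
 * (u32)(((u64)hash * socks) >> 32), so the spread is only as uniform
 * as the supplied hash.
 */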

struct bpf_prog *
reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	return old_prog;
}
EXPORT_SYMBOL(reuseport_attach_prog);
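
/* Illustrative userspace sketch (not part of this file): the program
 * is attached through setsockopt() on any socket of the group, e.g.:
 *
 *	struct sock_fprog prog = { .len = len, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &prog, sizeof(prog));
 *
 * SO_ATTACH_REUSEPORT_EBPF works the same way but takes the file
 * descriptor of a loaded eBPF program instead of a sock_fprog.  The
 * old program returned here is released by the caller.
 */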