// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);

int reuseport_get_id(struct sock_reuseport *reuse)
{
	int id;

	if (reuse->reuseport_id)
		return reuse->reuseport_id;

	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
			    /* Called under reuseport_lock */
			    GFP_ATOMIC);
	if (id < 0)
		return id;

	reuse->reuseport_id = id;

	return reuse->reuseport_id;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;

	/* Take the bh-disabling lock variant since this call may precede
	 * the hlist lock, either from soft irq in the receive path or from
	 * setsockopt in process context.
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		/* Only set reuse->bind_inany when bind_inany is true;
		 * otherwise we would overwrite the value already set by
		 * the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuse->bind_inany = bind_inany;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);
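
/* Usage sketch (userspace, illustrative only -- not part of this file):
 * a reuseport group comes into being implicitly.  Each socket sets
 * SO_REUSEPORT before bind(); the first bind on a port reaches
 * reuseport_alloc() above, and later binds join via reuseport_add_sock().
 *
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, SOMAXCONN);
 *
 * Repeating the same calls on another socket (same effective uid) joins
 * the existing group instead of failing with EADDRINUSE.
 */
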
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	if (reuse->reuseport_id)
		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 * May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuse->reuseport_id is only set once at least one socket in the
	 * group has been added to a bpf map.  Notify the bpf side; the bpf
	 * map logic will remove the sk if it is indeed in a map.
	 */
	if (reuse->reuseport_id)
		bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);
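
/* Kernel-side usage sketch (illustrative only): a protocol's bind/hash
 * path joins a new listener to the group of an already-hashed socket on
 * the same port.  find_same_port_sk() is a hypothetical stand-in for the
 * protocol's own lookup; inet_rcv_saddr_any() derives bind_inany:
 *
 *	struct sock *sk2 = find_same_port_sk(sk);	// hypothetical helper
 *	int err;
 *
 *	if (sk2)
 *		err = reuseport_add_sock(sk, sk2, inet_rcv_saddr_any(sk));
 *	else
 *		err = reuseport_alloc(sk, inet_rcv_saddr_any(sk));
 *
 * Capacity: a group starts with room for INIT_SOCKS (128) sockets and
 * reuseport_grow() doubles that on demand, 128 -> 256 -> ... -> 32768.
 * A further doubling would exceed U16_MAX, so reuseport_add_sock()
 * returns -ENOMEM once a 32768-entry group is full.
 */
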
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: First socket in the group.
 * @hash: When no BPF filter is available, use this hash to select.
 * @skb: skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *   the skb does not yet point at the payload, this parameter represents
 *   how far the pointer needs to advance to reach the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
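
/* Usage sketch (userspace, illustrative only): a classic BPF program
 * attached with SO_ATTACH_REUSEPORT_CBPF feeds run_bpf_filter() above.
 * Its return value is taken as an index into the group's socket array;
 * an out-of-range index falls back to hash-based selection.  This one
 * steers each packet to the socket whose index matches the current CPU:
 *
 *	struct sock_filter code[] = {
 *		// A = current CPU number
 *		{ BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
 *		// return A as the socket index
 *		{ BPF_RET | BPF_A, 0, 0, 0 },
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 */
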
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		int err = reuseport_alloc(sk, false);

		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);

int reuseport_detach_prog(struct sock *sk)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return sk->sk_reuseport ? -ENOENT : -EINVAL;

	old_prog = NULL;
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	rcu_swap_protected(reuse->prog, old_prog,
			   lockdep_is_held(&reuseport_lock));
	spin_unlock_bh(&reuseport_lock);

	if (!old_prog)
		return -ENOENT;

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);
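
/* Usage sketch (userspace, illustrative only): attaching and detaching an
 * eBPF selection program.  prog_fd is assumed to be a loaded program of
 * type BPF_PROG_TYPE_SK_REUSEPORT (or a socket filter returning an index):
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *		   &prog_fd, sizeof(prog_fd));	// -> reuseport_attach_prog()
 *
 *	int off = 0;	// an int must be passed, but its value is ignored
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
 *		   &off, sizeof(off));		// -> reuseport_detach_prog()
 *
 * Detaching when no program is attached fails with ENOENT, matching the
 * reuseport_detach_prog() logic above.
 */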