// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);

int reuseport_get_id(struct sock_reuseport *reuse)
{
	int id;

	if (reuse->reuseport_id)
		return reuse->reuseport_id;

	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
			    /* Called under reuseport_lock */
			    GFP_ATOMIC);
	if (id < 0)
		return id;

	reuse->reuseport_id = id;

	return reuse->reuseport_id;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk)
{
	struct sock_reuseport *reuse;

	/* The bh-safe lock is needed because this function may be called
	 * both from the receive path in softirq context (where it can run
	 * before the hlist lock is taken) and from setsockopt() in process
	 * context.
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path. Nothing to do when we lose the race.
	 */
	if (rcu_dereference_protected(sk->sk_reuseport_cb,
				      lockdep_is_held(&reuseport_lock)))
		goto out;

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}
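
/* A reuseport group is normally formed from userspace by opening
 * multiple sockets with SO_REUSEPORT set before bind().  A minimal
 * sketch (userspace C, error handling omitted; addr is assumed to be
 * an already-initialized struct sockaddr_in):
 *
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * The first such bind() leads to reuseport_alloc(); each further
 * socket bound to the same address and port joins the group via
 * reuseport_add_sock() below, with reuseport_grow() above doubling
 * the array from INIT_SOCKS for as long as the doubled size still
 * fits in a u16.
 */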

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	if (reuse->prog)
		bpf_prog_destroy(reuse->prog);
	if (reuse->reuseport_id)
		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * May return ENOMEM and not add socket to group under memory pressure,
 * or EBUSY if @sk already belongs to a group containing other sockets.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuseport_id is only assigned once at least one sk in this
	 * group has been added to a bpf map.  Notify the bpf side; the
	 * bpf map logic will remove the sk if it was indeed added to a
	 * map.
	 */
	if (reuse->reuseport_id)
		bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}
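
/* The program consulted by run_bpf() above returns an index into
 * socks[].  A minimal classic-BPF sketch (userspace, attached with
 * SO_ATTACH_REUSEPORT_CBPF on a group member) that steers each packet
 * to the socket whose index matches the CPU handling it:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
 *		{ BPF_RET | BPF_A, 0, 0, 0 },
 *	};
 *	struct sock_fprog fprog = { .len = 2, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 *
 * The first instruction is an ancillary load of the current CPU
 * number into the accumulator; the second returns it as the socket
 * index.  An index >= socks makes run_bpf() return NULL, and
 * reuseport_select_sock() below then falls back to hash selection.
 */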

/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk:      First socket in the group.
 * @hash:    When no BPF filter is available, use this hash to select.
 * @skb:     skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *           the skb does not yet point at the payload, this parameter
 *           represents how far the pointer needs to advance to reach
 *           the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (prog && skb)
			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);

		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

struct bpf_prog *
reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	return old_prog;
}
EXPORT_SYMBOL(reuseport_attach_prog);
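
/* The old program handed back by reuseport_attach_prog() is owned by
 * the caller, which is expected to release it after swapping in the
 * new one.  A sketch of the expected setsockopt-side pattern
 * (assuming a classic-BPF-derived prog, cf. reuseport_free_rcu()):
 *
 *	old_prog = reuseport_attach_prog(sk, prog);
 *	if (old_prog)
 *		bpf_prog_destroy(old_prog);
 */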