// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);

int reuseport_get_id(struct sock_reuseport *reuse)
{
	int id;

	if (reuse->reuseport_id)
		return reuse->reuseport_id;

	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
			    /* Called under reuseport_lock */
			    GFP_ATOMIC);
	if (id < 0)
		return id;

	reuse->reuseport_id = id;

	return reuse->reuseport_id;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path. Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuse->bind_inany = bind_inany;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}
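/*
 * Usage sketch (userspace, illustrative only -- not part of this file):
 * a reuseport group comes into existence when several sockets set
 * SO_REUSEPORT before bind()ing to the same address/port.  The first
 * bind reaches reuseport_alloc() above; each later one reaches
 * reuseport_add_sock() below.  An AF_INET TCP listener is assumed and
 * error handling is omitted for brevity.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, SOMAXCONN);
 *	// repeat in other processes/threads with the same addr:port
 */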
static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	if (reuse->prog)
		bpf_prog_destroy(reuse->prog);
	if (reuse->reuseport_id)
		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 * May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* At least one of the sk in this reuseport group is added to
	 * a bpf map. Notify the bpf side. The bpf map logic will
	 * remove the sk if it is indeed added to a bpf map.
	 */
	if (reuse->reuseport_id)
		bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}
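/*
 * Illustrative sketch of the program contract run_bpf() enforces: the
 * attached filter returns an index into the socks[] array, and any
 * out-of-range result makes run_bpf() return NULL so that
 * reuseport_select_sock() below falls back to hash-based selection.
 * For example, a classic BPF filter that steers each packet to the
 * socket whose index matches the current CPU (cf. the reuseport
 * selftests):
 *
 *	struct sock_filter code[] = {
 *		// A = raw_smp_processor_id()
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU),
 *		// return A as the socket index
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= ARRAY_SIZE(code),
 *		.filter	= code,
 *	};
 */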
/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: First socket in the group.
 * @hash: When no BPF filter is available, use this hash to select.
 * @skb: skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data. If
 * the skb does not yet point at the payload, this parameter represents
 * how far the pointer needs to advance to reach the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (prog && skb)
			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);

		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

struct bpf_prog *
reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	return old_prog;
}
EXPORT_SYMBOL(reuseport_attach_prog);
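/*
 * Attach sketch (userspace, illustrative only): a filter like the one
 * sketched above run_bpf() reaches reuseport_attach_prog() via the
 * SO_ATTACH_REUSEPORT_CBPF socket option (SO_ATTACH_REUSEPORT_EBPF
 * takes an eBPF program fd instead).  The previous program, if any, is
 * returned to the caller of reuseport_attach_prog() for destruction.
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 */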