// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

/* CA ops that a bpf congestion control implementation may leave unset. */
static u32 optional_ops[] = {
	offsetof(struct tcp_congestion_ops, init),
	offsetof(struct tcp_congestion_ops, release),
	offsetof(struct tcp_congestion_ops, set_state),
	offsetof(struct tcp_congestion_ops, cwnd_event),
	offsetof(struct tcp_congestion_ops, in_ack_event),
	offsetof(struct tcp_congestion_ops, pkts_acked),
	offsetof(struct tcp_congestion_ops, min_tso_segs),
	offsetof(struct tcp_congestion_ops, sndbuf_expand),
	offsetof(struct tcp_congestion_ops, cong_control),
};

/* CA ops that cannot be implemented from bpf. */
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

static int btf_sk_storage_get_ids[5];
static struct bpf_func_proto btf_sk_storage_get_proto __read_mostly;

static int btf_sk_storage_delete_ids[5];
static struct bpf_func_proto btf_sk_storage_delete_proto __read_mostly;

/* Copy a socket helper's proto, retargeting its ARG_PTR_TO_SOCKET
 * arguments to a BTF-typed "struct tcp_sock *".
 */
static void convert_sk_func_proto(struct bpf_func_proto *to, int *to_btf_ids,
				  const struct bpf_func_proto *from)
{
	int i;

	*to = *from;
	to->btf_id = to_btf_ids;
	for (i = 0; i < ARRAY_SIZE(to->arg_type); i++) {
		if (to->arg_type[i] == ARG_PTR_TO_SOCKET) {
			to->arg_type[i] = ARG_PTR_TO_BTF_ID;
			to->btf_id[i] = tcp_sock_id;
		}
	}
}

static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	convert_sk_func_proto(&btf_sk_storage_get_proto,
			      btf_sk_storage_get_ids,
			      &bpf_sk_storage_get_proto);
	convert_sk_func_proto(&btf_sk_storage_delete_proto,
			      btf_sk_storage_delete_ids,
			      &bpf_sk_storage_delete_proto);

	return 0;
}

static bool is_optional(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
		if (member_offset == optional_ops[i])
			return true;
	}

	return false;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;

static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	if (!btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct btf_type *t, int off,
					int size, enum bpf_access_type atype,
					u32 *next_btf_id)
{
	size_t end;

	if (atype == BPF_READ)
		return btf_struct_access(log, t, off,
					 size, atype, next_btf_id);

	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	/* Only a few tcp_sock fields may be written by a bpf CA. */
	switch (off) {
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return NOT_INIT;
}

BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
	.btf_id		= &tcp_sock_id,
};

static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &btf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &btf_sk_storage_delete_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	int prog_fd;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		if (tcp_ca_find(utcp_ca->name))
			return -EEXIST;
		return 1;
	}

	if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
		return 0;

	/* Ensure bpf_prog is provided for compulsory func ptr */
	prog_fd = (int)(*(unsigned long *)(udata + moff));
	if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
		return -EINVAL;

	return 0;
}

static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member)
{
	if (is_unsupported(btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

/* Avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops	= &bpf_tcp_ca_verifier_ops,
	.reg		= bpf_tcp_ca_reg,
	.unreg		= bpf_tcp_ca_unreg,
	.check_member	= bpf_tcp_ca_check_member,
	.init_member	= bpf_tcp_ca_init_member,
	.init		= bpf_tcp_ca_init,
	.name		= "tcp_congestion_ops",
};
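
/* Usage sketch (illustrative, not compiled here): the bpf-program side of
 * a congestion control registered through the struct_ops support above.
 * This is a minimal skeleton following the libbpf conventions used by the
 * kernel selftests (e.g. tools/testing/selftests/bpf/progs/bpf_dctcp.c);
 * the "ca_*" and "bpf_reno_lite" names are made up for the example.  Note
 * how writes to tp->snd_cwnd rely on the whitelist in
 * bpf_tcp_ca_btf_struct_access(), and how a "struct sock *" context arg
 * may be used as "struct tcp_sock *" because bpf_tcp_ca_is_valid_access()
 * promotes its BTF id:
 *
 *	#include <vmlinux.h>
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	char _license[] SEC("license") = "GPL";
 *
 *	SEC("struct_ops/ca_ssthresh")
 *	__u32 BPF_PROG(ca_ssthresh, struct sock *sk)
 *	{
 *		const struct tcp_sock *tp = (struct tcp_sock *)sk;
 *
 *		return tp->snd_cwnd > 4 ? tp->snd_cwnd >> 1 : 2;
 *	}
 *
 *	SEC("struct_ops/ca_cong_avoid")
 *	void BPF_PROG(ca_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
 *	{
 *		struct tcp_sock *tp = (struct tcp_sock *)sk;
 *
 *		if (tp->snd_cwnd < tp->snd_ssthresh)
 *			tp->snd_cwnd += acked;
 *	}
 *
 *	SEC("struct_ops/ca_undo_cwnd")
 *	__u32 BPF_PROG(ca_undo_cwnd, struct sock *sk)
 *	{
 *		return ((struct tcp_sock *)sk)->snd_cwnd;
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops bpf_reno_lite = {
 *		.ssthresh	= (void *)ca_ssthresh,
 *		.cong_avoid	= (void *)ca_cong_avoid,
 *		.undo_cwnd	= (void *)ca_undo_cwnd,
 *		.name		= "bpf_reno_lite",
 *	};
 *
 * Userspace loads the object with libbpf and attaches the struct_ops map
 * with bpf_map__attach_struct_ops(), which lands in bpf_tcp_ca_reg() above.
 */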