// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

/* "extern" is to avoid a sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

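/* Callbacks that a BPF congestion control cannot implement.  get_info()
 * is used by inet_diag to dump CC-private state to user space, which a
 * BPF implementation has no supported way to provide.
 */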
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

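/* Cache the BTF ids of "struct sock" and "struct tcp_sock" once, so the
 * verifier callbacks below can compare register types against them
 * without repeated lookups.
 */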
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

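/* Context accesses follow the normal tracing BTF rules.  In addition, a
 * trusted PTR_TO_BTF_ID to "struct sock" is promoted to "struct
 * tcp_sock", so the program can dereference tcp_sock fields directly.
 */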
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
		return false;

	if (base_type(info->reg_type) == PTR_TO_BTF_ID &&
	    !bpf_type_has_unsafe_modifiers(info->reg_type) &&
	    info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

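/* Writes through a PTR_TO_BTF_ID are only allowed into the tcp_sock
 * fields whitelisted below, and only if the access does not run past
 * the end of the member.  Everything else is read-only.
 */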
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct bpf_reg_state *reg,
					int off, int size)
{
	const struct btf_type *t;
	size_t end;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case offsetof(struct sock, sk_pacing_rate):
		end = offsetofend(struct sock, sk_pacing_rate);
		break;
	case offsetof(struct sock, sk_pacing_status):
		end = offsetofend(struct sock, sk_pacing_status);
		break;
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	case offsetof(struct tcp_sock, app_limited):
		end = offsetofend(struct tcp_sock, app_limited);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return 0;
}

BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report an error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

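/* A struct_ops program's expected_attach_type holds the index of the
 * tcp_congestion_ops member it implements; translate that index back
 * into the member's byte offset.
 */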
static u32 prog_ops_moff(const struct bpf_prog *prog)
{
	const struct btf_member *m;
	const struct btf_type *t;
	u32 midx;

	midx = prog->expected_attach_type;
	t = bpf_tcp_congestion_ops.type;
	m = &btf_type_member(t)[midx];

	return __btf_member_bit_offset(t, m) / 8;
}

static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		/* Do not allow release() to call setsockopt().
		 * release() is called when the current bpf-tcp-cc
		 * is retiring, and it must not call setsockopt()
		 * to make further changes that may potentially
		 * allocate new resources.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		/* Since get/setsockopt are usually expected to be
		 * available together, also disable getsockopt for
		 * release() to avoid a usage surprise.  The
		 * bpf-tcp-cc already has a more powerful way to
		 * read tcp_sock: through the PTR_TO_BTF_ID.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_getsockopt_proto;
		return NULL;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

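/* Kernel functions that a BPF congestion control may call directly as
 * kfuncs.  They are the reno building blocks, so a BPF CC can reuse
 * the stock slow-start and AIMD logic instead of reimplementing it.
 */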
BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_tcp_ca_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

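/* Copy the user-provided flags and name into the kernel's
 * tcp_congestion_ops.  Returning 1 tells the generic struct_ops code
 * that this member has been handled; returning 0 leaves the member to
 * the default handling, which turns function-pointer members into
 * trampolines that call the attached BPF programs.
 */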
static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		return 1;
	}

	return 0;
}

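/* Reject attachment to the callbacks listed in unsupported_ops. */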
static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member,
				   const struct bpf_prog *prog)
{
	if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

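/* The remaining callbacks map straight onto the kernel's congestion
 * control registration API; kdata is the kernel-side tcp_congestion_ops
 * filled in by the generic struct_ops code.
 */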
static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

static int bpf_tcp_ca_update(void *kdata, void *old_kdata)
{
	return tcp_update_congestion_control(kdata, old_kdata);
}

static int bpf_tcp_ca_validate(void *kdata)
{
	return tcp_validate_congestion_control(kdata);
}

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.update = bpf_tcp_ca_update,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.validate = bpf_tcp_ca_validate,
	.name = "tcp_congestion_ops",
};

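/* Make the kfuncs in bpf_tcp_ca_check_kfunc_ids callable from
 * BPF_PROG_TYPE_STRUCT_OPS programs.
 */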
static int __init bpf_tcp_ca_kfunc_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
}
late_initcall(bpf_tcp_ca_kfunc_init);
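
/* Usage sketch (illustrative, not part of this file): a minimal BPF
 * congestion control that this infrastructure enables.  It follows the
 * style of the kernel selftests (e.g.
 * tools/testing/selftests/bpf/progs/bpf_dctcp.c); names such as
 * "bpf_simple_cc" are made up for the example, and it assumes a
 * vmlinux.h generated by bpftool plus libbpf's SEC("struct_ops")
 * convention.
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	char _license[] SEC("license") = "GPL";
 *
 *	// Reuse the reno kfuncs registered by bpf_tcp_ca_kfunc_init().
 *	extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack,
 *					__u32 acked) __ksym;
 *	extern __u32 tcp_reno_ssthresh(struct sock *sk) __ksym;
 *	extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
 *
 *	SEC("struct_ops/bpf_simple_cc_cong_avoid")
 *	void BPF_PROG(bpf_simple_cc_cong_avoid, struct sock *sk,
 *		      __u32 ack, __u32 acked)
 *	{
 *		tcp_reno_cong_avoid(sk, ack, acked);
 *	}
 *
 *	SEC("struct_ops/bpf_simple_cc_ssthresh")
 *	__u32 BPF_PROG(bpf_simple_cc_ssthresh, struct sock *sk)
 *	{
 *		return tcp_reno_ssthresh(sk);
 *	}
 *
 *	SEC("struct_ops/bpf_simple_cc_undo_cwnd")
 *	__u32 BPF_PROG(bpf_simple_cc_undo_cwnd, struct sock *sk)
 *	{
 *		return tcp_reno_undo_cwnd(sk);
 *	}
 *
 *	// Loading this map registers the CC; ssthresh, undo_cwnd and
 *	// cong_avoid are the members bpf_tcp_ca_validate() requires.
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops bpf_simple_cc = {
 *		.cong_avoid	= (void *)bpf_simple_cc_cong_avoid,
 *		.ssthresh	= (void *)bpf_simple_cc_ssthresh,
 *		.undo_cwnd	= (void *)bpf_simple_cc_undo_cwnd,
 *		.name		= "bpf_simple_cc",
 *	};
 */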