/* xref: /openbmc/linux/net/ipv4/bpf_tcp_ca.c (revision 6c9111bc) */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

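/* Hooks listed in optional_ops may be left unimplemented by a bpf
 * tcp_congestion_ops; all other func ptrs are compulsory.  get_info
 * is unsupported and cannot be implemented from bpf at all.
 */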
static u32 optional_ops[] = {
	offsetof(struct tcp_congestion_ops, init),
	offsetof(struct tcp_congestion_ops, release),
	offsetof(struct tcp_congestion_ops, set_state),
	offsetof(struct tcp_congestion_ops, cwnd_event),
	offsetof(struct tcp_congestion_ops, in_ack_event),
	offsetof(struct tcp_congestion_ops, pkts_acked),
	offsetof(struct tcp_congestion_ops, min_tso_segs),
	offsetof(struct tcp_congestion_ops, sndbuf_expand),
	offsetof(struct tcp_congestion_ops, cong_control),
};

static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

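/* Resolve the vmlinux BTF ids of "sock" and "tcp_sock" once at
 * struct_ops init time; they are used below for ctx promotion and
 * for the bpf_tcp_send_ack() arg check.
 */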
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

static bool is_optional(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
		if (member_offset == optional_ops[i])
			return true;
	}

	return false;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;

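/* The ctx of a tcp-ca prog is the arg array of the struct_ops hook
 * being called.  A "struct sock *" arg is promoted to
 * "struct tcp_sock *" so the prog can read tcp_sock fields directly.
 */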
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	if (!btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

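/* Reads from any kernel struct go through the generic
 * btf_struct_access().  Writes are only allowed to the tcp_sock
 * (and inet_connection_sock) members whitelisted below.
 */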
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct btf_type *t, int off,
					int size, enum bpf_access_type atype,
					u32 *next_btf_id)
{
	size_t end;

	if (atype == BPF_READ)
		return btf_struct_access(log, t, off, size, atype, next_btf_id);

	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return NOT_INIT;
}

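/* Expose __tcp_send_ack() to bpf as the bpf_tcp_send_ack() helper */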
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

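/* Copy and validate the non func-ptr members (flags, name) from the
 * user's struct_ops map value.  Returning 1 tells bpf_struct_ops
 * that the member has been fully handled here.
 */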
static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	int prog_fd;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		if (tcp_ca_find(utcp_ca->name))
			return -EEXIST;
		return 1;
	}

	if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
		return 0;

	/* Ensure bpf_prog is provided for compulsory func ptr */
	prog_fd = (int)(*(unsigned long *)(udata + moff));
	if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
		return -EINVAL;

	return 0;
}

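/* Reject at load time any attempt to implement an unsupported hook */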
static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member)
{
	if (is_unsupported(btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

/* Avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.name = "tcp_congestion_ops",
};
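
/* For illustration only, not part of the kernel build: a minimal
 * sketch of the bpf-side program that attaches through this
 * struct_ops.  All "nocong_*" names are hypothetical, and tcp_sk()
 * is assumed to be the plain-cast helper from the selftests'
 * bpf_tcp_helpers.h (safe here because bpf_tcp_ca_is_valid_access()
 * promotes the sock arg to tcp_sock).  Hook signatures follow
 * tools/testing/selftests/bpf/progs/bpf_dctcp.c.
 *
 *	SEC("struct_ops/nocong_ssthresh")
 *	__u32 BPF_PROG(nocong_ssthresh, struct sock *sk)
 *	{
 *		return tcp_sk(sk)->snd_ssthresh;
 *	}
 *
 *	SEC("struct_ops/nocong_undo_cwnd")
 *	__u32 BPF_PROG(nocong_undo_cwnd, struct sock *sk)
 *	{
 *		return tcp_sk(sk)->snd_cwnd;
 *	}
 *
 *	SEC("struct_ops/nocong_cong_avoid")
 *	void BPF_PROG(nocong_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
 *	{
 *		tcp_sk(sk)->snd_cwnd += acked;	// writable per the whitelist above
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops nocong = {
 *		.ssthresh	= (void *)nocong_ssthresh,
 *		.undo_cwnd	= (void *)nocong_undo_cwnd,
 *		.cong_avoid	= (void *)nocong_cong_avoid,
 *		.name		= "bpf_nocong",
 *	};
 *
 * ssthresh, undo_cwnd and one of cong_avoid/cong_control are the
 * func ptrs that tcp_register_congestion_control() insists on, which
 * is why none of them appear in optional_ops.
 */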