xref: /openbmc/linux/kernel/bpf/tcx.c (revision 2f4503f9)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */

#include <linux/bpf.h>
#include <linux/bpf_mprog.h>
#include <linux/netdevice.h>

#include <net/tcx.h>

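/* Attach handler for the BPF_PROG_ATTACH command on a tcx hook
 * (BPF_TCX_INGRESS/BPF_TCX_EGRESS): look up the target device under RTNL,
 * optionally resolve the program to be replaced (BPF_F_REPLACE), fetch or
 * create the per-direction bpf_mprog entry and attach the program to it.
 * On success the updated entry is published to the data path; on failure a
 * freshly created entry is torn down again.
 */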
int tcx_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	bool created, ingress = attr->attach_type == BPF_TCX_INGRESS;
	struct net *net = current->nsproxy->net_ns;
	struct bpf_mprog_entry *entry, *entry_new;
	struct bpf_prog *replace_prog = NULL;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = __dev_get_by_index(net, attr->target_ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}
	if (attr->attach_flags & BPF_F_REPLACE) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd,
						 prog->type);
		if (IS_ERR(replace_prog)) {
			ret = PTR_ERR(replace_prog);
			replace_prog = NULL;
			goto out;
		}
	}
	entry = tcx_entry_fetch_or_create(dev, ingress, &created);
	if (!entry) {
		ret = -ENOMEM;
		goto out;
	}
	ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, replace_prog,
			       attr->attach_flags, attr->relative_fd,
			       attr->expected_revision);
	if (!ret) {
		if (entry != entry_new) {
			tcx_entry_update(dev, entry_new, ingress);
			tcx_entry_sync();
			tcx_skeys_inc(ingress);
		}
		bpf_mprog_commit(entry);
	} else if (created) {
		tcx_entry_free(entry);
	}
out:
	if (replace_prog)
		bpf_prog_put(replace_prog);
	rtnl_unlock();
	return ret;
}

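/* Detach handler for the BPF_PROG_DETACH command: remove the program from
 * the device's ingress/egress mprog entry. If the resulting entry carries
 * no programs and is otherwise unused, it is unpublished and freed.
 */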
int tcx_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	bool ingress = attr->attach_type == BPF_TCX_INGRESS;
	struct net *net = current->nsproxy->net_ns;
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = __dev_get_by_index(net, attr->target_ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}
	entry = tcx_entry_fetch(dev, ingress);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_detach(entry, &entry_new, prog, NULL, attr->attach_flags,
			       attr->relative_fd, attr->expected_revision);
	if (!ret) {
		if (!tcx_entry_is_active(entry_new))
			entry_new = NULL;
		tcx_entry_update(dev, entry_new, ingress);
		tcx_entry_sync();
		tcx_skeys_dec(ingress);
		bpf_mprog_commit(entry);
		if (!entry_new)
			tcx_entry_free(entry);
	}
out:
	rtnl_unlock();
	return ret;
}

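/* Tear down the tcx hook for one direction of a device, dropping all program
 * and link references. If the clsact miniq is still active, an emptied entry
 * stays published for it; otherwise the entry is unpublished and freed.
 */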
void tcx_uninstall(struct net_device *dev, bool ingress)
{
	struct bpf_mprog_entry *entry, *entry_new = NULL;
	struct bpf_tuple tuple = {};
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	bool active;

	entry = tcx_entry_fetch(dev, ingress);
	if (!entry)
		return;
	active = tcx_entry(entry)->miniq_active;
	if (active)
		bpf_mprog_clear_all(entry, &entry_new);
	tcx_entry_update(dev, entry_new, ingress);
	tcx_entry_sync();
	bpf_mprog_foreach_tuple(entry, fp, cp, tuple) {
		if (tuple.link)
			tcx_link(tuple.link)->dev = NULL;
		else
			bpf_prog_put(tuple.prog);
		tcx_skeys_dec(ingress);
	}
	if (!active)
		tcx_entry_free(entry);
}

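/* Query handler for the BPF_PROG_QUERY command: report the programs and
 * links currently attached to the device's ingress or egress hook.
 */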
int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
	bool ingress = attr->query.attach_type == BPF_TCX_INGRESS;
	struct net *net = current->nsproxy->net_ns;
	struct bpf_mprog_entry *entry;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = __dev_get_by_index(net, attr->query.target_ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}
	entry = tcx_entry_fetch(dev, ingress);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_query(attr, uattr, entry);
out:
	rtnl_unlock();
	return ret;
}

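/* Link-based counterpart to tcx_prog_attach(): attach link->prog through the
 * given bpf_link. Callers must hold RTNL.
 */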
static int tcx_link_prog_attach(struct bpf_link *link, u32 flags, u32 id_or_fd,
				u64 revision)
{
	struct tcx_link *tcx = tcx_link(link);
	bool created, ingress = tcx->location == BPF_TCX_INGRESS;
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev = tcx->dev;
	int ret;

	ASSERT_RTNL();
	entry = tcx_entry_fetch_or_create(dev, ingress, &created);
	if (!entry)
		return -ENOMEM;
	ret = bpf_mprog_attach(entry, &entry_new, link->prog, link, NULL, flags,
			       id_or_fd, revision);
	if (!ret) {
		if (entry != entry_new) {
			tcx_entry_update(dev, entry_new, ingress);
			tcx_entry_sync();
			tcx_skeys_inc(ingress);
		}
		bpf_mprog_commit(entry);
	} else if (created) {
		tcx_entry_free(entry);
	}
	return ret;
}

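/* bpf_link release callback: detach the link's program from the device if it
 * is still alive (tcx->dev is cleared by tcx_uninstall() when the device goes
 * away) and drop the link's device back-pointer afterwards.
 */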
static void tcx_link_release(struct bpf_link *link)
{
	struct tcx_link *tcx = tcx_link(link);
	bool ingress = tcx->location == BPF_TCX_INGRESS;
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = tcx->dev;
	if (!dev)
		goto out;
	entry = tcx_entry_fetch(dev, ingress);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_detach(entry, &entry_new, link->prog, link, 0, 0, 0);
	if (!ret) {
		if (!tcx_entry_is_active(entry_new))
			entry_new = NULL;
		tcx_entry_update(dev, entry_new, ingress);
		tcx_entry_sync();
		tcx_skeys_dec(ingress);
		bpf_mprog_commit(entry);
		if (!entry_new)
			tcx_entry_free(entry);
		tcx->dev = NULL;
	}
out:
	WARN_ON_ONCE(ret);
	rtnl_unlock();
}

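/* bpf_link update callback: replace the link's current program in place with
 * nprog (BPF_F_REPLACE keyed by the old program's id via BPF_F_ID), leaving
 * its position in the mprog array unchanged.
 */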
static int tcx_link_update(struct bpf_link *link, struct bpf_prog *nprog,
			   struct bpf_prog *oprog)
{
	struct tcx_link *tcx = tcx_link(link);
	bool ingress = tcx->location == BPF_TCX_INGRESS;
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = tcx->dev;
	if (!dev) {
		ret = -ENOLINK;
		goto out;
	}
	if (oprog && link->prog != oprog) {
		ret = -EPERM;
		goto out;
	}
	oprog = link->prog;
	if (oprog == nprog) {
		bpf_prog_put(nprog);
		goto out;
	}
	entry = tcx_entry_fetch(dev, ingress);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_attach(entry, &entry_new, nprog, link, oprog,
			       BPF_F_REPLACE | BPF_F_ID,
			       link->prog->aux->id, 0);
	if (!ret) {
		WARN_ON_ONCE(entry != entry_new);
		oprog = xchg(&link->prog, nprog);
		bpf_prog_put(oprog);
		bpf_mprog_commit(entry);
	}
out:
	rtnl_unlock();
	return ret;
}

static void tcx_link_dealloc(struct bpf_link *link)
{
	kfree(tcx_link(link));
}

static void tcx_link_fdinfo(const struct bpf_link *link, struct seq_file *seq)
{
	const struct tcx_link *tcx = tcx_link_const(link);
	u32 ifindex = 0;

	rtnl_lock();
	if (tcx->dev)
		ifindex = tcx->dev->ifindex;
	rtnl_unlock();

	seq_printf(seq, "ifindex:\t%u\n", ifindex);
	seq_printf(seq, "attach_type:\t%u (%s)\n",
		   tcx->location,
		   tcx->location == BPF_TCX_INGRESS ? "ingress" : "egress");
}

static int tcx_link_fill_info(const struct bpf_link *link,
			      struct bpf_link_info *info)
{
	const struct tcx_link *tcx = tcx_link_const(link);
	u32 ifindex = 0;

	rtnl_lock();
	if (tcx->dev)
		ifindex = tcx->dev->ifindex;
	rtnl_unlock();

	info->tcx.ifindex = ifindex;
	info->tcx.attach_type = tcx->location;
	return 0;
}

static int tcx_link_detach(struct bpf_link *link)
{
	tcx_link_release(link);
	return 0;
}

static const struct bpf_link_ops tcx_link_lops = {
	.release	= tcx_link_release,
	.detach		= tcx_link_detach,
	.dealloc	= tcx_link_dealloc,
	.update_prog	= tcx_link_update,
	.show_fdinfo	= tcx_link_fdinfo,
	.fill_link_info	= tcx_link_fill_info,
};

static int tcx_link_init(struct tcx_link *tcx,
			 struct bpf_link_primer *link_primer,
			 const union bpf_attr *attr,
			 struct net_device *dev,
			 struct bpf_prog *prog)
{
	bpf_link_init(&tcx->link, BPF_LINK_TYPE_TCX, &tcx_link_lops, prog);
	tcx->location = attr->link_create.attach_type;
	tcx->dev = dev;
	return bpf_link_prime(&tcx->link, link_primer);
}

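/* Link creation handler for BPF_LINK_CREATE: allocate and initialize the
 * tcx_link, attach the program through it and hand the link fd to user
 * space. On attach failure, the device back-pointer is dropped and the
 * primed link is cleaned up again.
 */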
int tcx_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_link_primer link_primer;
	struct net_device *dev;
	struct tcx_link *tcx;
	int ret;

	rtnl_lock();
	dev = __dev_get_by_index(net, attr->link_create.target_ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}
	tcx = kzalloc(sizeof(*tcx), GFP_USER);
	if (!tcx) {
		ret = -ENOMEM;
		goto out;
	}
	ret = tcx_link_init(tcx, &link_primer, attr, dev, prog);
	if (ret) {
		kfree(tcx);
		goto out;
	}
	ret = tcx_link_prog_attach(&tcx->link, attr->link_create.flags,
				   attr->link_create.tcx.relative_fd,
				   attr->link_create.tcx.expected_revision);
	if (ret) {
		tcx->dev = NULL;
		bpf_link_cleanup(&link_primer);
		goto out;
	}
	ret = bpf_link_settle(&link_primer);
out:
	rtnl_unlock();
	return ret;
}