1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2b2197755SDaniel Borkmann /*
3b2197755SDaniel Borkmann * Minimal file system backend for holding eBPF maps and programs,
4b2197755SDaniel Borkmann * used by bpf(2) object pinning.
5b2197755SDaniel Borkmann *
6b2197755SDaniel Borkmann * Authors:
7b2197755SDaniel Borkmann *
8b2197755SDaniel Borkmann * Daniel Borkmann <daniel@iogearbox.net>
9b2197755SDaniel Borkmann */
10b2197755SDaniel Borkmann
11a536a6e1SPaul Gortmaker #include <linux/init.h>
12b2197755SDaniel Borkmann #include <linux/magic.h>
13b2197755SDaniel Borkmann #include <linux/major.h>
14b2197755SDaniel Borkmann #include <linux/mount.h>
15b2197755SDaniel Borkmann #include <linux/namei.h>
16b2197755SDaniel Borkmann #include <linux/fs.h>
17d2935de7SDavid Howells #include <linux/fs_context.h>
18d2935de7SDavid Howells #include <linux/fs_parser.h>
19b2197755SDaniel Borkmann #include <linux/kdev_t.h>
20b2197755SDaniel Borkmann #include <linux/filter.h>
21b2197755SDaniel Borkmann #include <linux/bpf.h>
22a67edbf4SDaniel Borkmann #include <linux/bpf_trace.h>
23d71fa5c9SAlexei Starovoitov #include "preload/bpf_preload.h"
24b2197755SDaniel Borkmann
/* Kind of BPF object pinned behind a bpffs inode. */
enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,	/* not a pinned object (or not yet known) */
	BPF_TYPE_PROG,		/* struct bpf_prog */
	BPF_TYPE_MAP,		/* struct bpf_map */
	BPF_TYPE_LINK,		/* struct bpf_link */
};
31b2197755SDaniel Borkmann
bpf_any_get(void * raw,enum bpf_type type)32b2197755SDaniel Borkmann static void *bpf_any_get(void *raw, enum bpf_type type)
33b2197755SDaniel Borkmann {
34b2197755SDaniel Borkmann switch (type) {
35b2197755SDaniel Borkmann case BPF_TYPE_PROG:
3685192dbfSAndrii Nakryiko bpf_prog_inc(raw);
37b2197755SDaniel Borkmann break;
38b2197755SDaniel Borkmann case BPF_TYPE_MAP:
391e0bd5a0SAndrii Nakryiko bpf_map_inc_with_uref(raw);
40b2197755SDaniel Borkmann break;
4170ed506cSAndrii Nakryiko case BPF_TYPE_LINK:
4270ed506cSAndrii Nakryiko bpf_link_inc(raw);
4370ed506cSAndrii Nakryiko break;
44b2197755SDaniel Borkmann default:
45b2197755SDaniel Borkmann WARN_ON_ONCE(1);
46b2197755SDaniel Borkmann break;
47b2197755SDaniel Borkmann }
48b2197755SDaniel Borkmann
49b2197755SDaniel Borkmann return raw;
50b2197755SDaniel Borkmann }
51b2197755SDaniel Borkmann
bpf_any_put(void * raw,enum bpf_type type)52b2197755SDaniel Borkmann static void bpf_any_put(void *raw, enum bpf_type type)
53b2197755SDaniel Borkmann {
54b2197755SDaniel Borkmann switch (type) {
55b2197755SDaniel Borkmann case BPF_TYPE_PROG:
56b2197755SDaniel Borkmann bpf_prog_put(raw);
57b2197755SDaniel Borkmann break;
58b2197755SDaniel Borkmann case BPF_TYPE_MAP:
59c9da161cSDaniel Borkmann bpf_map_put_with_uref(raw);
60b2197755SDaniel Borkmann break;
6170ed506cSAndrii Nakryiko case BPF_TYPE_LINK:
6270ed506cSAndrii Nakryiko bpf_link_put(raw);
6370ed506cSAndrii Nakryiko break;
64b2197755SDaniel Borkmann default:
65b2197755SDaniel Borkmann WARN_ON_ONCE(1);
66b2197755SDaniel Borkmann break;
67b2197755SDaniel Borkmann }
68b2197755SDaniel Borkmann }
69b2197755SDaniel Borkmann
/* Resolve user-supplied fd @ufd to a BPF object, probing map, program
 * and link lookups in that order.  On success *@type is set to the kind
 * found and the object is returned holding the reference taken by the
 * respective *_get() helper.  Returns ERR_PTR(-EINVAL) if @ufd is none
 * of the three.
 */
static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *obj;

	obj = bpf_map_get_with_uref(ufd);
	if (!IS_ERR(obj)) {
		*type = BPF_TYPE_MAP;
		return obj;
	}

	obj = bpf_prog_get(ufd);
	if (!IS_ERR(obj)) {
		*type = BPF_TYPE_PROG;
		return obj;
	}

	obj = bpf_link_get_from_fd(ufd);
	if (!IS_ERR(obj)) {
		*type = BPF_TYPE_LINK;
		return obj;
	}

	return ERR_PTR(-EINVAL);
}
9470ed506cSAndrii Nakryiko
/* Forward declaration: bpf_lookup()/bpf_mkdir() below and this table
 * reference each other. */
static const struct inode_operations bpf_dir_iops;

/* Deliberately empty inode_operations tables.  They exist so that
 * bpf_inode_type() can identify what kind of object a bpffs inode pins
 * simply by comparing inode->i_op against these addresses. */
static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };
static const struct inode_operations bpf_link_iops = { };
100b2197755SDaniel Borkmann
/* Allocate and initialize a fresh bpffs inode of kind @mode under
 * directory @dir.  Only directories, regular files and symlinks are
 * valid in bpffs; anything else yields ERR_PTR(-EINVAL).  Returns
 * ERR_PTR(-ENOSPC) when inode allocation fails.
 */
static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	if (!S_ISDIR(mode) && !S_ISREG(mode) && !S_ISLNK(mode))
		return ERR_PTR(-EINVAL);

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	/* Stamp ctime and mirror it into atime/mtime. */
	inode->i_atime = inode_set_ctime_current(inode);
	inode->i_mtime = inode->i_atime;

	/* Owner/group/mode derived from the parent directory. */
	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);

	return inode;
}
128b2197755SDaniel Borkmann
bpf_inode_type(const struct inode * inode,enum bpf_type * type)129b2197755SDaniel Borkmann static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
130b2197755SDaniel Borkmann {
131b2197755SDaniel Borkmann *type = BPF_TYPE_UNSPEC;
132b2197755SDaniel Borkmann if (inode->i_op == &bpf_prog_iops)
133b2197755SDaniel Borkmann *type = BPF_TYPE_PROG;
134b2197755SDaniel Borkmann else if (inode->i_op == &bpf_map_iops)
135b2197755SDaniel Borkmann *type = BPF_TYPE_MAP;
13670ed506cSAndrii Nakryiko else if (inode->i_op == &bpf_link_iops)
13770ed506cSAndrii Nakryiko *type = BPF_TYPE_LINK;
138b2197755SDaniel Borkmann else
139b2197755SDaniel Borkmann return -EACCES;
140b2197755SDaniel Borkmann
141b2197755SDaniel Borkmann return 0;
142b2197755SDaniel Borkmann }
143b2197755SDaniel Borkmann
/* Attach @inode to @dentry, pin the dentry with an extra reference so
 * the entry survives in the dcache, and bump the parent directory's
 * ctime/mtime to reflect the new entry.
 */
static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = inode_set_ctime_current(dir);
}
1520f98621bSDaniel Borkmann
/* ->mkdir for bpffs directories: create a subdirectory inode under
 * @dir with permissions @mode.  Returns 0 or a negative errno from
 * bpf_get_inode().
 */
static int bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
		     struct dentry *dentry, umode_t mode)
{
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	/* "." in the new dir, plus ".." back-link from it to @dir. */
	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}
171b2197755SDaniel Borkmann
/* Per-open iteration state for "cat"-ing a pinned map via seq_file. */
struct map_iter {
	void *key;	/* buffer of map->key_size bytes, holds current key */
	bool done;	/* set once map_get_next_key() reports exhaustion */
};
176a26ca7c9SMartin KaFai Lau
/* Fetch the map_iter stashed in the seq_file's private pointer
 * (set up in bpffs_map_open()). */
static struct map_iter *map_iter(struct seq_file *m)
{
	return m->private;
}
181a26ca7c9SMartin KaFai Lau
/* The pinned map lives in the inode's i_private (set by bpf_mkmap()). */
static struct bpf_map *seq_file_to_map(struct seq_file *m)
{
	return file_inode(m->file)->i_private;
}
186a26ca7c9SMartin KaFai Lau
map_iter_free(struct map_iter * iter)187a26ca7c9SMartin KaFai Lau static void map_iter_free(struct map_iter *iter)
188a26ca7c9SMartin KaFai Lau {
189a26ca7c9SMartin KaFai Lau if (iter) {
190a26ca7c9SMartin KaFai Lau kfree(iter->key);
191a26ca7c9SMartin KaFai Lau kfree(iter);
192a26ca7c9SMartin KaFai Lau }
193a26ca7c9SMartin KaFai Lau }
194a26ca7c9SMartin KaFai Lau
map_iter_alloc(struct bpf_map * map)195a26ca7c9SMartin KaFai Lau static struct map_iter *map_iter_alloc(struct bpf_map *map)
196a26ca7c9SMartin KaFai Lau {
197a26ca7c9SMartin KaFai Lau struct map_iter *iter;
198a26ca7c9SMartin KaFai Lau
199a26ca7c9SMartin KaFai Lau iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
200a26ca7c9SMartin KaFai Lau if (!iter)
201a26ca7c9SMartin KaFai Lau goto error;
202a26ca7c9SMartin KaFai Lau
203a26ca7c9SMartin KaFai Lau iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
204a26ca7c9SMartin KaFai Lau if (!iter->key)
205a26ca7c9SMartin KaFai Lau goto error;
206a26ca7c9SMartin KaFai Lau
207a26ca7c9SMartin KaFai Lau return iter;
208a26ca7c9SMartin KaFai Lau
209a26ca7c9SMartin KaFai Lau error:
210a26ca7c9SMartin KaFai Lau map_iter_free(iter);
211a26ca7c9SMartin KaFai Lau return NULL;
212a26ca7c9SMartin KaFai Lau }
213a26ca7c9SMartin KaFai Lau
/*
 * seq_file ->next: advance to the next map key.
 *
 * On the first call @v is SEQ_START_TOKEN, so iteration starts from a
 * NULL prev_key; afterwards the shared iter->key buffer itself holds
 * the previous key and is overwritten in place by map_get_next_key().
 * Returns the key buffer, or NULL (marking the iterator done) once the
 * map reports no further keys.
 */
static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;
	void *prev_key;

	/* Position advances unconditionally, even once exhausted. */
	(*pos)++;
	if (map_iter(m)->done)
		return NULL;

	if (unlikely(v == SEQ_START_TOKEN))
		prev_key = NULL;
	else
		prev_key = key;

	/* RCU guards the key walk against concurrent map updates. */
	rcu_read_lock();
	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;
		key = NULL;
	}
	rcu_read_unlock();
	return key;
}
237a26ca7c9SMartin KaFai Lau
/* seq_file ->start: resume iteration.  A fresh walk (*pos == 0) hands
 * back SEQ_START_TOKEN so ->show can print the header; a resumed walk
 * continues from the current key.  NULL once the map is exhausted.
 */
static void *map_seq_start(struct seq_file *m, loff_t *pos)
{
	struct map_iter *iter = map_iter(m);

	if (iter->done)
		return NULL;

	return *pos ? iter->key : SEQ_START_TOKEN;
}
245a26ca7c9SMartin KaFai Lau
/* seq_file ->stop: nothing to release; state lives in map_iter and is
 * torn down in bpffs_map_release(). */
static void map_seq_stop(struct seq_file *m, void *v)
{
}
249a26ca7c9SMartin KaFai Lau
map_seq_show(struct seq_file * m,void * v)250a26ca7c9SMartin KaFai Lau static int map_seq_show(struct seq_file *m, void *v)
251a26ca7c9SMartin KaFai Lau {
252a26ca7c9SMartin KaFai Lau struct bpf_map *map = seq_file_to_map(m);
253a26ca7c9SMartin KaFai Lau void *key = map_iter(m)->key;
254a26ca7c9SMartin KaFai Lau
255a26ca7c9SMartin KaFai Lau if (unlikely(v == SEQ_START_TOKEN)) {
256a26ca7c9SMartin KaFai Lau seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
257a26ca7c9SMartin KaFai Lau seq_puts(m, "# WARNING!! The output format will change\n");
258a26ca7c9SMartin KaFai Lau } else {
259a26ca7c9SMartin KaFai Lau map->ops->map_seq_show_elem(map, key, m);
260a26ca7c9SMartin KaFai Lau }
261a26ca7c9SMartin KaFai Lau
262a26ca7c9SMartin KaFai Lau return 0;
263a26ca7c9SMartin KaFai Lau }
264a26ca7c9SMartin KaFai Lau
/* seq_file callbacks backing read() on a pinned, show-capable map. */
static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,
};
271a26ca7c9SMartin KaFai Lau
bpffs_map_open(struct inode * inode,struct file * file)272a26ca7c9SMartin KaFai Lau static int bpffs_map_open(struct inode *inode, struct file *file)
273a26ca7c9SMartin KaFai Lau {
274a26ca7c9SMartin KaFai Lau struct bpf_map *map = inode->i_private;
275a26ca7c9SMartin KaFai Lau struct map_iter *iter;
276a26ca7c9SMartin KaFai Lau struct seq_file *m;
277a26ca7c9SMartin KaFai Lau int err;
278a26ca7c9SMartin KaFai Lau
279a26ca7c9SMartin KaFai Lau iter = map_iter_alloc(map);
280a26ca7c9SMartin KaFai Lau if (!iter)
281a26ca7c9SMartin KaFai Lau return -ENOMEM;
282a26ca7c9SMartin KaFai Lau
283a26ca7c9SMartin KaFai Lau err = seq_open(file, &bpffs_map_seq_ops);
284a26ca7c9SMartin KaFai Lau if (err) {
285a26ca7c9SMartin KaFai Lau map_iter_free(iter);
286a26ca7c9SMartin KaFai Lau return err;
287a26ca7c9SMartin KaFai Lau }
288a26ca7c9SMartin KaFai Lau
289a26ca7c9SMartin KaFai Lau m = file->private_data;
290a26ca7c9SMartin KaFai Lau m->private = iter;
291a26ca7c9SMartin KaFai Lau
292a26ca7c9SMartin KaFai Lau return 0;
293a26ca7c9SMartin KaFai Lau }
294a26ca7c9SMartin KaFai Lau
bpffs_map_release(struct inode * inode,struct file * file)295a26ca7c9SMartin KaFai Lau static int bpffs_map_release(struct inode *inode, struct file *file)
296a26ca7c9SMartin KaFai Lau {
297a26ca7c9SMartin KaFai Lau struct seq_file *m = file->private_data;
298a26ca7c9SMartin KaFai Lau
299a26ca7c9SMartin KaFai Lau map_iter_free(map_iter(m));
300a26ca7c9SMartin KaFai Lau
301a26ca7c9SMartin KaFai Lau return seq_release(inode, file);
302a26ca7c9SMartin KaFai Lau }
303a26ca7c9SMartin KaFai Lau
/* bpffs_map_fops should only implement the basic
 * read operation for a BPF map. The purpose is to
 * provide a simple user intuitive way to do
 * "cat bpffs/pathto/a-pinned-map".
 *
 * Other operations (e.g. write, lookup...) should be realized by
 * the userspace tools (e.g. bpftool) through the
 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
 * interface.
 */
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.read		= seq_read,
	.release	= bpffs_map_release,
};
319a26ca7c9SMartin KaFai Lau
/* ->open for pinned objects without a readable representation: refuse
 * with -EIO so "cat" on such a pin fails cleanly. */
static int bpffs_obj_open(struct inode *inode, struct file *file)
{
	return -EIO;
}
324b1655857SDaniel Borkmann
/* Fallback fops for pins that cannot be read as text. */
static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,
};
328b1655857SDaniel Borkmann
/* Common worker for pinning any BPF object: create an inode under the
 * dentry's parent, tag it with the class-identifying @iops table and
 * @fops, and stash the object pointer in i_private.  Returns 0 or a
 * negative errno from bpf_get_inode().
 */
static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_fop = fops;
	/* The pinned object itself; freed via bpf_free_inode(). */
	inode->i_private = raw;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}
345b2197755SDaniel Borkmann
/* vfs_mkobj() callback: pin a bpf_prog.  Programs have no readable
 * form, so they get the -EIO fallback fops. */
static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
			     &bpffs_obj_fops);
}
351a4a0683fSAl Viro
/* vfs_mkobj() callback: pin a bpf_map.  Maps whose implementation can
 * render elements get the seq_file-backed read fops; others fall back
 * to the -EIO fops. */
static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_map *map = arg;
	const struct file_operations *fops;

	fops = bpf_map_support_seq_show(map) ? &bpffs_map_fops
					     : &bpffs_obj_fops;
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops, fops);
}
360b2197755SDaniel Borkmann
/* vfs_mkobj() callback: pin a bpf_link.  Iterator links are readable
 * through bpf_iter_fops; all other links get the -EIO fallback. */
static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_link *link = arg;
	const struct file_operations *fops;

	fops = bpf_link_is_iter(link) ? &bpf_iter_fops : &bpffs_obj_fops;
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops, fops);
}
36970ed506cSAndrii Nakryiko
/* ->lookup for bpffs directories: mostly simple_lookup(), but names
 * containing dots are rejected for user-writable directories. */
static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
	 * extensions. That allows populate_bpffs() create special files.
	 */
	if ((dir->i_mode & S_IALLUGO) &&
	    strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}
382bb35a6efSDaniel Borkmann
bpf_symlink(struct mnt_idmap * idmap,struct inode * dir,struct dentry * dentry,const char * target)3837a77db95SChristian Brauner static int bpf_symlink(struct mnt_idmap *idmap, struct inode *dir,
384549c7297SChristian Brauner struct dentry *dentry, const char *target)
3850f98621bSDaniel Borkmann {
3860f98621bSDaniel Borkmann char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
3870f98621bSDaniel Borkmann struct inode *inode;
3880f98621bSDaniel Borkmann
3890f98621bSDaniel Borkmann if (!link)
3900f98621bSDaniel Borkmann return -ENOMEM;
3910f98621bSDaniel Borkmann
3920f98621bSDaniel Borkmann inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
3930f98621bSDaniel Borkmann if (IS_ERR(inode)) {
3940f98621bSDaniel Borkmann kfree(link);
3950f98621bSDaniel Borkmann return PTR_ERR(inode);
3960f98621bSDaniel Borkmann }
3970f98621bSDaniel Borkmann
3980f98621bSDaniel Borkmann inode->i_op = &simple_symlink_inode_operations;
3990f98621bSDaniel Borkmann inode->i_link = link;
4000f98621bSDaniel Borkmann
4010f98621bSDaniel Borkmann bpf_dentry_finalize(dentry, inode, dir);
4020f98621bSDaniel Borkmann return 0;
4030f98621bSDaniel Borkmann }
4040f98621bSDaniel Borkmann
/* Directory inode operations: custom lookup/mkdir/symlink above, the
 * rest delegated to libfs simple_* helpers.  This table also serves as
 * the bpffs-directory marker checked by bpf_obj_do_pin(). */
static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};
414b2197755SDaniel Borkmann
/* pin iterator link into bpffs */
/* Kernel-side pinning (used by BPF preload): create @name under
 * @parent holding iterator link @link, readable only by root
 * (S_IRUSR).  The parent inode lock is held across the lookup and
 * creation; the dentry reference from lookup_one_len() is dropped
 * before returning.  Returns 0 or a negative errno.
 */
static int bpf_iter_link_pin_kernel(struct dentry *parent,
				    const char *name, struct bpf_link *link)
{
	umode_t mode = S_IFREG | S_IRUSR;
	struct dentry *dentry;
	int ret;

	inode_lock(parent->d_inode);
	dentry = lookup_one_len(name, parent, strlen(name));
	if (IS_ERR(dentry)) {
		inode_unlock(parent->d_inode);
		return PTR_ERR(dentry);
	}
	ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
			    &bpf_iter_fops);
	dput(dentry);
	inode_unlock(parent->d_inode);
	return ret;
}
435d71fa5c9SAlexei Starovoitov
/*
 * Create a new bpffs entry at @pathname (relative to @path_fd) pinning
 * the BPF object @raw of kind @type.  The parent directory must be on
 * bpffs (identified by its i_op table), and the LSM path_mknod hook is
 * consulted before the object inode is created.  Returns 0 or a
 * negative errno; on failure the caller drops its object reference.
 */
static int bpf_obj_do_pin(int path_fd, const char __user *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	int ret;

	/* Resolves the parent, allocates the dentry, and leaves the
	 * parent dir locked until done_path_create(). */
	dentry = user_path_create(path_fd, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Pinning is only allowed inside a bpffs directory. */
	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	/* Regular file, owner read/write, honoring the umask. */
	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
	ret = security_path_mknod(&path, dentry, mode, 0);
	if (ret)
		goto out;

	switch (type) {
	case BPF_TYPE_PROG:
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		break;
	case BPF_TYPE_MAP:
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		break;
	case BPF_TYPE_LINK:
		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
		break;
	default:
		ret = -EPERM;
	}
out:
	done_path_create(&path, dentry);
	return ret;
}
477b2197755SDaniel Borkmann
/* BPF_OBJ_PIN entry point: resolve @ufd to a prog/map/link (taking a
 * reference) and pin it at @pathname relative to @path_fd.  The
 * reference is kept by the pin on success and dropped on failure.
 */
int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname)
{
	enum bpf_type type;
	void *raw;
	int ret;

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	ret = bpf_obj_do_pin(path_fd, pathname, raw, type);
	if (ret)
		bpf_any_put(raw, type);

	return ret;
}
494b2197755SDaniel Borkmann
/*
 * Look up the pinned BPF object at @pathname (relative to @path_fd),
 * check permissions corresponding to the open @flags, and return the
 * object with a fresh reference taken by bpf_any_get().  *@type is set
 * to the kind found.  Returns an ERR_PTR on any failure.
 */
static void *bpf_obj_do_get(int path_fd, const char __user *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = user_path_at(path_fd, pathname, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	/* Enforce read/write permission matching the requested flags. */
	ret = path_permission(&path, ACC_MODE(flags));
	if (ret)
		goto out;

	/* -EACCES here means the inode pins no BPF object. */
	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}
526b2197755SDaniel Borkmann
/* BPF_OBJ_GET entry point: fetch the pinned object at @pathname and
 * install a new fd for it.  Link pins additionally require O_RDWR.
 * The reference taken by bpf_obj_do_get() is owned by the new fd on
 * success and dropped when fd creation fails.
 */
int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	int f_flags;
	void *raw;
	int ret;

	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	raw = bpf_obj_do_get(path_fd, pathname, &type, f_flags);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	switch (type) {
	case BPF_TYPE_PROG:
		ret = bpf_prog_new_fd(raw);
		break;
	case BPF_TYPE_MAP:
		ret = bpf_map_new_fd(raw, f_flags);
		break;
	case BPF_TYPE_LINK:
		ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
		break;
	default:
		/* Unreachable: bpf_obj_do_get() only succeeds for the
		 * three kinds above. */
		return -ENOENT;
	}

	if (ret < 0)
		bpf_any_put(raw, type);
	return ret;
}
555040ee692SAl Viro
/* Resolve a bpffs inode to the bpf_prog it pins, for kernel users.
 * Requires MAY_READ permission on the inode, rejects map/link pins
 * with -EINVAL and non-bpffs inodes with -EACCES, runs the LSM
 * security_bpf_prog() hook, and verifies the program matches the
 * expected @type.  Returns the program with an extra reference, or an
 * ERR_PTR.
 */
static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(&nop_mnt_idmap, inode, MAY_READ);
	if (ret)
		return ERR_PTR(ret);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op == &bpf_link_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Checks program type compatibility (attach_type ignored here). */
	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	bpf_prog_inc(prog);
	return prog;
}
582040ee692SAl Viro
bpf_prog_get_type_path(const char * name,enum bpf_prog_type type)583040ee692SAl Viro struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
584040ee692SAl Viro {
585040ee692SAl Viro struct bpf_prog *prog;
586040ee692SAl Viro struct path path;
587040ee692SAl Viro int ret = kern_path(name, LOOKUP_FOLLOW, &path);
588040ee692SAl Viro if (ret)
589040ee692SAl Viro return ERR_PTR(ret);
590040ee692SAl Viro prog = __get_prog_inode(d_backing_inode(path.dentry), type);
591040ee692SAl Viro if (!IS_ERR(prog))
592040ee692SAl Viro touch_atime(&path);
593040ee692SAl Viro path_put(&path);
594040ee692SAl Viro return prog;
595040ee692SAl Viro }
596040ee692SAl Viro EXPORT_SYMBOL(bpf_prog_get_type_path);
597b2197755SDaniel Borkmann
/*
 * Display the mount options in /proc/mounts.
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	/* Permission bits of the root dir, ignoring the sticky bit. */
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	/* Only emit "mode=" when it differs from the 0777 default. */
	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);
	return 0;
}
6094cc7c186SDavid Howells
/* super_operations ->free_inode: release whatever the inode carried —
 * the kstrdup'd symlink target, or the reference on the pinned BPF
 * object identified via bpf_inode_type(). */
static void bpf_free_inode(struct inode *inode)
{
	enum bpf_type type;

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
	free_inode_nonrcu(inode);
}
6201da6c4d9SDaniel Borkmann
/* Superblock operations: libfs defaults plus our option display and
 * inode teardown.  generic_delete_inode means inodes are never cached
 * once their last reference drops. */
static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.free_inode	= bpf_free_inode,
};
627b2197755SDaniel Borkmann
/* Mount-option tokens for the fs_context parser. */
enum {
	OPT_MODE,	/* "mode=" — root directory permission bits */
};

/* bpffs understands a single octal "mode" option. */
static const struct fs_parameter_spec bpf_fs_parameters[] = {
	fsparam_u32oct	("mode",	OPT_MODE),
	{}
};

/* Parsed mount options, carried in fc->fs_private. */
struct bpf_mount_opts {
	umode_t mode;	/* permission bits for the bpffs root */
};
640a3af5f80SDaniel Borkmann
bpf_parse_param(struct fs_context * fc,struct fs_parameter * param)641d2935de7SDavid Howells static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
642a3af5f80SDaniel Borkmann {
643d2935de7SDavid Howells struct bpf_mount_opts *opts = fc->fs_private;
644d2935de7SDavid Howells struct fs_parse_result result;
645d2935de7SDavid Howells int opt;
646a3af5f80SDaniel Borkmann
647d7167b14SAl Viro opt = fs_parse(fc, bpf_fs_parameters, param, &result);
6481e9d7466SYafang Shao if (opt < 0) {
649a3af5f80SDaniel Borkmann /* We might like to report bad mount options here, but
650a3af5f80SDaniel Borkmann * traditionally we've ignored all mount options, so we'd
651a3af5f80SDaniel Borkmann * better continue to ignore non-existing options for bpf.
652a3af5f80SDaniel Borkmann */
6531e9d7466SYafang Shao if (opt == -ENOPARAM) {
6541e9d7466SYafang Shao opt = vfs_parse_fs_param_source(fc, param);
6551e9d7466SYafang Shao if (opt != -ENOPARAM)
6561e9d7466SYafang Shao return opt;
6571e9d7466SYafang Shao
6581e9d7466SYafang Shao return 0;
6591e9d7466SYafang Shao }
6601e9d7466SYafang Shao
6611e9d7466SYafang Shao if (opt < 0)
6621e9d7466SYafang Shao return opt;
6631e9d7466SYafang Shao }
664d2935de7SDavid Howells
665d2935de7SDavid Howells switch (opt) {
666d2935de7SDavid Howells case OPT_MODE:
667d2935de7SDavid Howells opts->mode = result.uint_32 & S_IALLUGO;
668d2935de7SDavid Howells break;
669a3af5f80SDaniel Borkmann }
670a3af5f80SDaniel Borkmann
671a3af5f80SDaniel Borkmann return 0;
672a3af5f80SDaniel Borkmann }
673a3af5f80SDaniel Borkmann
/* Populated by the bpf_preload module (or its built-in __init function);
 * NULL until the module is available.
 */
struct bpf_preload_ops *bpf_preload_ops;
EXPORT_SYMBOL_GPL(bpf_preload_ops);
676d71fa5c9SAlexei Starovoitov
bpf_preload_mod_get(void)677d71fa5c9SAlexei Starovoitov static bool bpf_preload_mod_get(void)
678d71fa5c9SAlexei Starovoitov {
679d71fa5c9SAlexei Starovoitov /* If bpf_preload.ko wasn't loaded earlier then load it now.
680d71fa5c9SAlexei Starovoitov * When bpf_preload is built into vmlinux the module's __init
681d71fa5c9SAlexei Starovoitov * function will populate it.
682d71fa5c9SAlexei Starovoitov */
683d71fa5c9SAlexei Starovoitov if (!bpf_preload_ops) {
684d71fa5c9SAlexei Starovoitov request_module("bpf_preload");
685d71fa5c9SAlexei Starovoitov if (!bpf_preload_ops)
686d71fa5c9SAlexei Starovoitov return false;
687d71fa5c9SAlexei Starovoitov }
688d71fa5c9SAlexei Starovoitov /* And grab the reference, so the module doesn't disappear while the
689d71fa5c9SAlexei Starovoitov * kernel is interacting with the kernel module and its UMD.
690d71fa5c9SAlexei Starovoitov */
691d71fa5c9SAlexei Starovoitov if (!try_module_get(bpf_preload_ops->owner)) {
692d71fa5c9SAlexei Starovoitov pr_err("bpf_preload module get failed.\n");
693d71fa5c9SAlexei Starovoitov return false;
694d71fa5c9SAlexei Starovoitov }
695d71fa5c9SAlexei Starovoitov return true;
696d71fa5c9SAlexei Starovoitov }
697d71fa5c9SAlexei Starovoitov
bpf_preload_mod_put(void)698d71fa5c9SAlexei Starovoitov static void bpf_preload_mod_put(void)
699d71fa5c9SAlexei Starovoitov {
700d71fa5c9SAlexei Starovoitov if (bpf_preload_ops)
701d71fa5c9SAlexei Starovoitov /* now user can "rmmod bpf_preload" if necessary */
702d71fa5c9SAlexei Starovoitov module_put(bpf_preload_ops->owner);
703d71fa5c9SAlexei Starovoitov }
704d71fa5c9SAlexei Starovoitov
/* Serializes all kernel interactions with the bpf_preload module. */
static DEFINE_MUTEX(bpf_preload_lock);

/*
 * Pin the preloaded BPF iterator links under @parent (the bpffs root).
 *
 * Returns 0 on success, or when the bpf_preload module is unavailable
 * (best effort); a negative errno if preloading or pinning fails.
 */
static int populate_bpffs(struct dentry *parent)
{
	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
	int err = 0, i;

	/* grab the mutex to make sure the kernel interactions with bpf_preload
	 * are serialized
	 */
	mutex_lock(&bpf_preload_lock);

	/* if bpf_preload.ko wasn't built into vmlinux then load it */
	if (!bpf_preload_mod_get())
		goto out;

	err = bpf_preload_ops->preload(objs);
	if (err)
		goto out_put;
	for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
		/* take a reference for the pinned inode; dropped again if
		 * pinning this link fails
		 */
		bpf_link_inc(objs[i].link);
		err = bpf_iter_link_pin_kernel(parent,
					       objs[i].link_name, objs[i].link);
		if (err) {
			bpf_link_put(objs[i].link);
			goto out_put;
		}
	}
out_put:
	bpf_preload_mod_put();
out:
	mutex_unlock(&bpf_preload_lock);
	return err;
}
739d71fa5c9SAlexei Starovoitov
/*
 * Fill in a freshly-allocated bpffs superblock: build the (empty) root
 * directory, install the bpf operations and apply the mode= option.
 */
static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts *opts = fc->fs_private;
	struct inode *inode;
	int ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	/* best effort: a preload failure does not fail the mount */
	populate_bpffs(sb->s_root);
	/* sticky bit plus the mode requested via the mode= option */
	inode->i_mode |= S_ISVTX | opts->mode;
	return 0;
}
760b2197755SDaniel Borkmann
/* Every mount gets its own independent superblock (no backing device). */
static int bpf_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, bpf_fill_super);
}
765d2935de7SDavid Howells
/* Release the mount options allocated by bpf_init_fs_context(). */
static void bpf_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}
770d2935de7SDavid Howells
/* fs_context operations wiring option parsing and superblock creation. */
static const struct fs_context_operations bpf_context_ops = {
	.free		= bpf_free_fc,
	.parse_param	= bpf_parse_param,
	.get_tree	= bpf_get_tree,
};
776d2935de7SDavid Howells
777d2935de7SDavid Howells /*
778d2935de7SDavid Howells * Set up the filesystem mount context.
779d2935de7SDavid Howells */
bpf_init_fs_context(struct fs_context * fc)780d2935de7SDavid Howells static int bpf_init_fs_context(struct fs_context *fc)
781d2935de7SDavid Howells {
782d2935de7SDavid Howells struct bpf_mount_opts *opts;
783d2935de7SDavid Howells
784d2935de7SDavid Howells opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
785d2935de7SDavid Howells if (!opts)
786d2935de7SDavid Howells return -ENOMEM;
787d2935de7SDavid Howells
788d2935de7SDavid Howells opts->mode = S_IRWXUGO;
789d2935de7SDavid Howells
790d2935de7SDavid Howells fc->fs_private = opts;
791d2935de7SDavid Howells fc->ops = &bpf_context_ops;
792d2935de7SDavid Howells return 0;
793b2197755SDaniel Borkmann }
794b2197755SDaniel Borkmann
/* Registration record for the "bpf" filesystem type. */
static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.init_fs_context = bpf_init_fs_context,
	.parameters	= bpf_fs_parameters,
	.kill_sb	= kill_litter_super,
};
802b2197755SDaniel Borkmann
bpf_init(void)803b2197755SDaniel Borkmann static int __init bpf_init(void)
804b2197755SDaniel Borkmann {
805b2197755SDaniel Borkmann int ret;
806b2197755SDaniel Borkmann
807b2197755SDaniel Borkmann ret = sysfs_create_mount_point(fs_kobj, "bpf");
808b2197755SDaniel Borkmann if (ret)
809b2197755SDaniel Borkmann return ret;
810b2197755SDaniel Borkmann
811b2197755SDaniel Borkmann ret = register_filesystem(&bpf_fs_type);
812b2197755SDaniel Borkmann if (ret)
813b2197755SDaniel Borkmann sysfs_remove_mount_point(fs_kobj, "bpf");
814b2197755SDaniel Borkmann
815b2197755SDaniel Borkmann return ret;
816b2197755SDaniel Borkmann }
817b2197755SDaniel Borkmann fs_initcall(bpf_init);
818