/* xref: /openbmc/linux/net/netfilter/nf_bpf_link.c (revision 37a826d8) */
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/netfilter.h>

#include <net/netfilter/nf_bpf_link.h>
#include <uapi/linux/netfilter_ipv4.h>

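/* Netfilter hook entry point for an attached BPF program: the per-hook
 * private data carries the bpf_prog, a bpf_nf_ctx is built from the skb and
 * hook state, and the program's return value is used as the netfilter
 * verdict (NF_ACCEPT, NF_DROP, ...).
 */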
static unsigned int nf_hook_run_bpf(void *bpf_prog, struct sk_buff *skb,
				    const struct nf_hook_state *s)
{
	const struct bpf_prog *prog = bpf_prog;
	struct bpf_nf_ctx ctx = {
		.state = s,
		.skb = skb,
	};

	return bpf_prog_run(prog, &ctx);
}

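/* Per-link state: the generic bpf_link plus the nf_hook_ops registered on
 * behalf of the program, the owning network namespace, and a "dead" flag so
 * that release and detach cannot both unregister the hook.
 */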
struct bpf_nf_link {
	struct bpf_link link;
	struct nf_hook_ops hook_ops;
	struct net *net;
	u32 dead;
};

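/* Runs when the last reference to the link goes away (and via detach);
 * unregisters the netfilter hook exactly once.
 */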
static void bpf_nf_link_release(struct bpf_link *link)
{
	struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);

	if (nf_link->dead)
		return;

	/* prevent hook-not-found warning splat from netfilter core when
	 * .detach was already called
	 */
	if (!cmpxchg(&nf_link->dead, 0, 1))
		nf_unregister_net_hook(nf_link->net, &nf_link->hook_ops);
}

static void bpf_nf_link_dealloc(struct bpf_link *link)
{
	struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);

	kfree(nf_link);
}

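/* BPF_LINK_DETACH: same teardown as release, triggered explicitly by
 * userspace instead of by dropping the last reference.
 */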
static int bpf_nf_link_detach(struct bpf_link *link)
{
	bpf_nf_link_release(link);
	return 0;
}

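/* Backs /proc/<pid>/fdinfo/<fd> for the link fd: dumps the protocol family,
 * hook number and priority of the registered hook.
 */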
static void bpf_nf_link_show_info(const struct bpf_link *link,
				  struct seq_file *seq)
{
	struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);

	seq_printf(seq, "pf:\t%u\thooknum:\t%u\tprio:\t%d\n",
		   nf_link->hook_ops.pf, nf_link->hook_ops.hooknum,
		   nf_link->hook_ops.priority);
}

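/* Fills struct bpf_link_info for BPF_OBJ_GET_INFO_BY_FD, exposing the same
 * pf/hooknum/priority tuple to userspace (e.g. bpftool link show).
 */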
static int bpf_nf_link_fill_link_info(const struct bpf_link *link,
				      struct bpf_link_info *info)
{
	struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);

	info->netfilter.pf = nf_link->hook_ops.pf;
	info->netfilter.hooknum = nf_link->hook_ops.hooknum;
	info->netfilter.priority = nf_link->hook_ops.priority;
	info->netfilter.flags = 0;

	return 0;
}

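/* Replacing the program on a live netfilter link (BPF_LINK_UPDATE) is not
 * supported yet.
 */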
static int bpf_nf_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	return -EOPNOTSUPP;
}

static const struct bpf_link_ops bpf_nf_link_lops = {
	.release = bpf_nf_link_release,
	.dealloc = bpf_nf_link_dealloc,
	.detach = bpf_nf_link_detach,
	.show_fdinfo = bpf_nf_link_show_info,
	.fill_link_info = bpf_nf_link_fill_link_info,
	.update_prog = bpf_nf_link_update,
};

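/* Validate an attach request: only NFPROTO_IPV4/IPV6 with a hook number below
 * NF_INET_NUMHOOKS are accepted, no netfilter-specific flags are supported,
 * and the extreme priorities are rejected so a program can neither run before
 * the first-priority hooks nor after conntrack confirm.
 */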
static int bpf_nf_check_pf_and_hooks(const union bpf_attr *attr)
{
	switch (attr->link_create.netfilter.pf) {
	case NFPROTO_IPV4:
	case NFPROTO_IPV6:
		if (attr->link_create.netfilter.hooknum >= NF_INET_NUMHOOKS)
			return -EPROTO;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (attr->link_create.netfilter.flags)
		return -EOPNOTSUPP;

	/* make sure conntrack confirm is always last.
	 *
	 * In the future, if userspace can e.g. request defrag, then
	 * "defrag_requested && prio before NF_IP_PRI_CONNTRACK_DEFRAG"
	 * should fail.
	 */
	switch (attr->link_create.netfilter.priority) {
	case NF_IP_PRI_FIRST: return -ERANGE; /* sabotage_in and other warts */
	case NF_IP_PRI_LAST: return -ERANGE; /* e.g. conntrack confirm */
	}

	return 0;
}

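/* Attach path for BPF_LINK_CREATE with a BPF_PROG_TYPE_NETFILTER program:
 * validate the attributes, allocate and initialise the link, register the
 * netfilter hook in the caller's network namespace and return a link fd.
 *
 * A minimal userspace sketch, assuming a libbpf recent enough to provide
 * bpf_program__attach_netfilter() and an already-loaded program "prog":
 *
 *	LIBBPF_OPTS(bpf_netfilter_opts, opts,
 *		    .pf = NFPROTO_IPV4,
 *		    .hooknum = NF_INET_LOCAL_IN,
 *		    .priority = -128);
 *	struct bpf_link *l = bpf_program__attach_netfilter(prog, &opts);
 */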
int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_link_primer link_primer;
	struct bpf_nf_link *link;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	err = bpf_nf_check_pf_and_hooks(attr);
	if (err)
		return err;

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link)
		return -ENOMEM;

	bpf_link_init(&link->link, BPF_LINK_TYPE_NETFILTER, &bpf_nf_link_lops, prog);

	link->hook_ops.hook = nf_hook_run_bpf;
	link->hook_ops.hook_ops_type = NF_HOOK_OP_BPF;
	link->hook_ops.priv = prog;

	link->hook_ops.pf = attr->link_create.netfilter.pf;
	link->hook_ops.priority = attr->link_create.netfilter.priority;
	link->hook_ops.hooknum = attr->link_create.netfilter.hooknum;

	link->net = net;
	link->dead = false;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		return err;
	}

	err = nf_register_net_hook(net, &link->hook_ops);
	if (err) {
		bpf_link_cleanup(&link_primer);
		return err;
	}

	return bpf_link_settle(&link_primer);
}

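/* test_run support so BPF_PROG_TYPE_NETFILTER programs can be exercised with
 * the BPF_PROG_TEST_RUN command.
 */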
const struct bpf_prog_ops netfilter_prog_ops = {
	.test_run = bpf_prog_test_run_nf,
};

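/* Resolve a struct name in the vmlinux BTF and type the context field as a
 * trusted PTR_TO_BTF_ID, so the verifier lets the pointer be used where the
 * real kernel type is expected.
 */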
static bool nf_ptr_to_btf_id(struct bpf_insn_access_aux *info, const char *name)
{
	struct btf *btf;
	s32 type_id;

	btf = bpf_get_btf_vmlinux();
	if (IS_ERR_OR_NULL(btf))
		return false;

	type_id = btf_find_by_name_kind(btf, name, BTF_KIND_STRUCT);
	if (WARN_ON_ONCE(type_id < 0))
		return false;

	info->btf = btf;
	info->btf_id = type_id;
	info->reg_type = PTR_TO_BTF_ID | PTR_TRUSTED;
	return true;
}

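/* Verifier callback for context accesses: only full-width reads of ctx->skb
 * and ctx->state are allowed, writes are rejected, and each pointer is typed
 * via its vmlinux BTF id.
 */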
static bool nf_is_valid_access(int off, int size, enum bpf_access_type type,
			       const struct bpf_prog *prog,
			       struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct bpf_nf_ctx))
		return false;

	if (type == BPF_WRITE)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_nf_ctx, skb):
		if (size != sizeof_field(struct bpf_nf_ctx, skb))
			return false;

		return nf_ptr_to_btf_id(info, "sk_buff");
	case bpf_ctx_range(struct bpf_nf_ctx, state):
		if (size != sizeof_field(struct bpf_nf_ctx, state))
			return false;

		return nf_ptr_to_btf_id(info, "nf_hook_state");
	default:
		return false;
	}

	return false;
}

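/* Only the base set of BPF helpers is available to netfilter programs. */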
static const struct bpf_func_proto *
bpf_nf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return bpf_base_func_proto(func_id);
}

const struct bpf_verifier_ops netfilter_verifier_ops = {
	.is_valid_access	= nf_is_valid_access,
	.get_func_proto		= bpf_nf_func_proto,
};