xref: /openbmc/linux/net/sched/cls_basic.c (revision 0c6dfa75)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_basic.c	Basic Packet Classifier.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/idr.h>
#include <linux/percpu.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

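/*
 * Per-tcf_proto state: the list of installed filters, an IDR used to
 * allocate and look up filter handles, and an rcu_head so the structure
 * can be freed after an RCU grace period in basic_destroy().
 */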
struct basic_head {
	struct list_head	flist;
	struct idr		handle_idr;
	struct rcu_head		rcu;
};

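/*
 * One "basic" filter instance: its handle, attached extensions (actions),
 * the ematch tree that decides whether a packet matches, the classification
 * result, per-CPU match counters and an rcu_work for deferred destruction.
 */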
struct basic_filter {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_result	res;
	struct tcf_proto	*tp;
	struct list_head	link;
	struct tc_basic_pcnt __percpu *pf;
	struct rcu_work		rwork;
};

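/*
 * Classification path: walk the filter list under RCU, count every filter
 * that is evaluated (rcnt) and every one whose ematch tree matches (rhit),
 * then hand the packet to the filter's extensions.  A negative result from
 * tcf_exts_exec() means "keep looking"; -1 is returned if nothing matched.
 */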
TC_INDIRECT_SCOPE int basic_classify(struct sk_buff *skb,
				     const struct tcf_proto *tp,
				     struct tcf_result *res)
{
	int r;
	struct basic_head *head = rcu_dereference_bh(tp->root);
	struct basic_filter *f;

	list_for_each_entry_rcu(f, &head->flist, link) {
		__this_cpu_inc(f->pf->rcnt);
		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;
		__this_cpu_inc(f->pf->rhit);
		*res = f->res;
		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

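/* Look up a filter by handle; called with the RTNL lock held. */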
static void *basic_get(struct tcf_proto *tp, u32 handle)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f;

	list_for_each_entry(f, &head->flist, link) {
		if (f->handle == handle) {
			return f;
		}
	}

	return NULL;
}

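/* Allocate the per-tcf_proto head when the classifier is first attached. */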
static int basic_init(struct tcf_proto *tp)
{
	struct basic_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->flist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

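/*
 * Filter teardown.  __basic_delete_filter() does the actual freeing;
 * basic_delete_filter_work() is the deferred variant, run from a workqueue
 * after an RCU grace period with the RTNL lock held.
 */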
static void __basic_delete_filter(struct basic_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	free_percpu(f->pf);
	kfree(f);
}

static void basic_delete_filter_work(struct work_struct *work)
{
	struct basic_filter *f = container_of(to_rcu_work(work),
					      struct basic_filter,
					      rwork);
	rtnl_lock();
	__basic_delete_filter(f);
	rtnl_unlock();
}

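/*
 * Tear down the whole classifier instance: unlink every filter, defer its
 * destruction past an RCU grace period where needed, then free the head.
 */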
static void basic_destroy(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f, *n;

	list_for_each_entry_safe(f, n, &head->flist, link) {
		list_del_rcu(&f->link);
		tcf_unbind_filter(tp, &f->res);
		idr_remove(&head->handle_idr, f->handle);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, basic_delete_filter_work);
		else
			__basic_delete_filter(f);
	}
	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

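/* Remove a single filter; *last tells the caller whether the list is now empty. */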
static int basic_delete(struct tcf_proto *tp, void *arg, bool *last,
			bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f = arg;

	list_del_rcu(&f->link);
	tcf_unbind_filter(tp, &f->res);
	idr_remove(&head->handle_idr, f->handle);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, basic_delete_filter_work);
	*last = list_empty(&head->flist);
	return 0;
}

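/* Netlink attribute policy for the TCA_BASIC_* options parsed in basic_change(). */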
static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
	[TCA_BASIC_CLASSID]	= { .type = NLA_U32 },
	[TCA_BASIC_EMATCHES]	= { .type = NLA_NESTED },
};

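/*
 * Apply the netlink-supplied parameters to a filter: validate the actions
 * and the ematch tree, and bind the filter to a class if a classid was given.
 */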
static int basic_set_parms(struct net *net, struct tcf_proto *tp,
			   struct basic_filter *f, unsigned long base,
			   struct nlattr **tb,
			   struct nlattr *est, u32 flags,
			   struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &f->ematches);
	if (err < 0)
		return err;

	if (tb[TCA_BASIC_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	f->tp = tp;
	return 0;
}

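/*
 * Create a new filter or replace an existing one.  A replacement always
 * builds a fresh basic_filter, swaps it into the list and IDR under RTNL,
 * and queues the old one for destruction after an RCU grace period, so
 * concurrent readers in basic_classify() always see a consistent filter.
 */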
static int basic_change(struct net *net, struct sk_buff *in_skb,
			struct tcf_proto *tp, unsigned long base, u32 handle,
			struct nlattr **tca, void **arg,
			u32 flags, struct netlink_ext_ack *extack)
{
	int err;
	struct basic_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_BASIC_MAX + 1];
	struct basic_filter *fold = (struct basic_filter *) *arg;
	struct basic_filter *fnew;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_BASIC_MAX, tca[TCA_OPTIONS],
					  basic_policy, NULL);
	if (err < 0)
		return err;

	if (fold != NULL) {
		if (handle && fold->handle != handle)
			return -EINVAL;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_exts_init(&fnew->exts, net, TCA_BASIC_ACT, TCA_BASIC_POLICE);
	if (err < 0)
		goto errout;

	if (!handle) {
		handle = 1;
		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!fold) {
		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
				    handle, GFP_KERNEL);
	}
	if (err)
		goto errout;
	fnew->handle = handle;
	fnew->pf = alloc_percpu(struct tc_basic_pcnt);
	if (!fnew->pf) {
		err = -ENOMEM;
		goto errout;
	}

	err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], flags,
			      extack);
	if (err < 0) {
		if (!fold)
			idr_remove(&head->handle_idr, fnew->handle);
		goto errout;
	}

	*arg = fnew;

	if (fold) {
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->link, &fnew->link);
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, basic_delete_filter_work);
	} else {
		list_add_rcu(&fnew->link, &head->flist);
	}

	return 0;
errout:
	free_percpu(fnew->pf);
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
	return err;
}

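/* Iterate over all filters on behalf of a netlink dump/walk operation. */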
static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		       bool rtnl_held)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f;

	list_for_each_entry(f, &head->flist, link) {
		if (!tc_cls_stats_dump(tp, arg, f))
			break;
	}
}

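/* Rebind the filter's result when the class it points to changes; see tc_cls_bind_class(). */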
static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			     unsigned long base)
{
	struct basic_filter *f = fh;

	tc_cls_bind_class(classid, cl, q, &f->res, base);
}

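/*
 * Dump one filter to netlink: the classid (if bound), the per-CPU packet
 * counters summed over all possible CPUs, the extensions and the ematch
 * tree, followed by the extension statistics.
 */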
static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
		      struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_basic_pcnt gpf = {};
	struct basic_filter *f = fh;
	struct nlattr *nest;
	int cpu;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu);

		gpf.rcnt += pf->rcnt;
		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_BASIC_PCNT,
			  sizeof(struct tc_basic_pcnt),
			  &gpf, TCA_BASIC_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0 ||
	    tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_basic_ops __read_mostly = {
	.kind		=	"basic",
	.classify	=	basic_classify,
	.init		=	basic_init,
	.destroy	=	basic_destroy,
	.get		=	basic_get,
	.change		=	basic_change,
	.delete		=	basic_delete,
	.walk		=	basic_walk,
	.dump		=	basic_dump,
	.bind_class	=	basic_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_basic(void)
{
	return register_tcf_proto_ops(&cls_basic_ops);
}

static void __exit exit_basic(void)
{
	unregister_tcf_proto_ops(&cls_basic_ops);
}

module_init(init_basic)
module_exit(exit_basic)
MODULE_LICENSE("GPL");
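
/*
 * Usage sketch (illustrative only; the device name, handles and ematch
 * expression below are placeholders, not taken from this file):
 *
 *   tc qdisc add dev eth0 root handle 1: htb
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 basic \
 *           match 'meta(priority eq 1)' classid 1:10
 *
 * tc parses the ematch expression in user space and delivers it as the
 * TCA_BASIC_EMATCHES attribute consumed by basic_set_parms() above.
 */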