// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

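/*
 * Matchall matches every packet on the qdisc it is attached to, which
 * makes it useful for port mirroring and for exercising hardware
 * offload. An illustrative iproute2 invocation (exact syntax may vary
 * by iproute2 version):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall \
 *		action mirred egress mirror dev eth1
 */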
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

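/*
 * A matchall instance holds at most one filter, so all classifier
 * state lives in a single head: the attached actions (exts), the
 * cached classification result, the user-visible handle and flags,
 * the number of hardware callbacks holding the rule, per-CPU hit
 * counters and an RCU work item for deferred destruction.
 */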
struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
	bool deleting;
};

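/*
 * Fast path. Matchall matches unconditionally: unless the filter is
 * skip_sw, copy the cached result, bump this CPU's hit counter and
 * run the attached actions.
 */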
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))
		return -1;

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

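/* Nothing to allocate up front; the head is created by mall_change(). */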
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

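/* Free everything the head owns. Runs with RTNL held. */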
static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

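/*
 * Deferred teardown, run from the RCU work queue once concurrent
 * readers of the old head are guaranteed to be gone.
 */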
static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

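/* Ask the drivers bound to this block to remove the hardware rule. */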
static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
			    &head->flags, &head->in_hw_count, true);
}

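/*
 * Offer the rule to the drivers bound to this block. For a skip_sw
 * filter a hardware failure is fatal; otherwise the error is ignored
 * and the software path keeps classifying.
 */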
static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
				      cls_mall.common.extack);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);

		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
			      skip_sw, &head->flags, &head->in_hw_count, true);
	tc_cleanup_offload_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

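/*
 * Tear down the (single) filter: unbind it from its class, remove the
 * hardware rule, then free the head, deferring the free through the
 * RCU work queue while the actions still hold a netns reference.
 */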
static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

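/* There is only one filter; match it by handle or return nothing. */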
static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}

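/* Netlink attribute policy for the TCA_MATCHALL_* options. */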
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
	[TCA_MATCHALL_FLAGS]		= { .type = NLA_U32 },
};

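/*
 * Validate the actions and, when a classid was supplied, cache the
 * classification result and bind the filter to that class.
 */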
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, u32 flags, u32 fl_flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate_ex(net, tp, tb, est, &head->exts, flags,
				   fl_flags, extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

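/*
 * Create the filter. Since matchall supports a single filter per
 * instance, a second change request fails with -EEXIST instead of
 * replacing the existing head in place.
 */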
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 userflags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		userflags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(userflags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = userflags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE],
			     flags, new->flags, extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

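/*
 * The head doubles as the instance's root, so deletion only marks it
 * here; reporting *last lets the core tear down the whole tcf_proto,
 * which frees the head via mall_destroy().
 */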
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	head->deleting = true;
	*last = true;
	return 0;
}

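/* Walk the one filter, honouring the iterator's skip/count state. */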
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head || head->deleting)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

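/*
 * Replay the rule to a newly (un)bound hardware callback so driver
 * state stays in sync with the software table.
 */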
static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
				      cls_mall.common.extack);
	if (err) {
		kfree(cls_mall.rule);

		return add && tc_skip_sw(head->flags) ? err : 0;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
				    &cls_mall, cb_priv, &head->flags,
				    &head->in_hw_count);
	tc_cleanup_offload_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	return err;
}

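/*
 * Pull byte/packet counters from the drivers and fold them into the
 * software stats before dumping.
 */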
static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

	tcf_exts_hw_stats_update(&head->exts, cls_mall.stats.bytes,
				 cls_mall.stats.pkts, cls_mall.stats.drops,
				 cls_mall.stats.lastused,
				 cls_mall.stats.used_hw_stats,
				 cls_mall.stats.used_hw_stats_valid);
}

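/*
 * Dump the filter to user space: handle, classid, flags, the per-CPU
 * hit counters summed into one tc_matchall_pcnt, and the actions.
 */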
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

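/* Update the cached result when the class it points at is re-bound. */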
static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			    unsigned long base)
{
	struct cls_mall_head *head = fh;

	tc_cls_bind_class(classid, cl, q, &head->res, base);
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");