xref: /openbmc/linux/net/sched/cls_matchall.c (revision e5c86679)
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

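/*
 * The matchall classifier matches every packet that reaches it and hands
 * the packet to its attached actions.  An illustrative use (not taken from
 * this file) is port mirroring, e.g.:
 *
 *   tc qdisc add dev eth0 handle ffff: ingress
 *   tc filter add dev eth0 parent ffff: matchall skip_sw \
 *           action mirred egress mirror dev eth1
 *
 * The skip_sw/skip_hw flags correspond to the TCA_CLS_FLAGS_* values
 * handled below.
 */
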
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

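/*
 * Per-classifier state: there is at most one matchall filter per tcf_proto,
 * so the head itself carries the single filter's actions, classification
 * result, handle and offload flags.  Freed via RCU (see mall_destroy_rcu()).
 */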
struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	struct rcu_head	rcu;
};

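/* Classify: every packet matches.  If the filter is hardware-only
 * (skip_sw), refuse to classify in software; otherwise run the actions.
 */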
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (tc_skip_sw(head->flags))
		return -1;

	return tcf_exts_exec(skb, &head->exts, res);
}

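/* Nothing to set up until the first filter is added. */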
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

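/* RCU callback: release the filter's actions and the head itself once no
 * readers can still see it.
 */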
static void mall_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
						  rcu);

	tcf_exts_destroy(&head->exts);
	kfree(head);
}

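/* Ask the device to install the matchall rule in hardware via
 * ndo_setup_tc().  On success, mark the filter as present in hardware.
 */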
static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_to_netdev offload;
	struct tc_cls_matchall_offload mall_offload = {0};
	int err;

	offload.type = TC_SETUP_MATCHALL;
	offload.cls_mall = &mall_offload;
	offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
	offload.cls_mall->exts = &head->exts;
	offload.cls_mall->cookie = cookie;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					    &offload);
	if (!err)
		head->flags |= TCA_CLS_FLAGS_IN_HW;

	return err;
}

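/* Ask the device to remove the offloaded rule; the return value is ignored
 * since the software state is torn down regardless.
 */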
static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_to_netdev offload;
	struct tc_cls_matchall_offload mall_offload = {0};

	offload.type = TC_SETUP_MATCHALL;
	offload.cls_mall = &mall_offload;
	offload.cls_mall->command = TC_CLSMATCHALL_DESTROY;
	offload.cls_mall->exts = NULL;
	offload.cls_mall->cookie = cookie;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					     &offload);
}

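/* Tear down the classifier instance: remove any hardware rule and free the
 * head after an RCU grace period.
 */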
static bool mall_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct net_device *dev = tp->q->dev_queue->dev;

	if (!head)
		return true;

	if (tc_should_offload(dev, tp, head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head);

	call_rcu(&head->rcu, mall_destroy_rcu);
	return true;
}

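/* Per-handle lookup is not implemented; always report "no such filter". */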
static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

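/* Netlink attribute policy for the TCA_MATCHALL_* options. */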
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
};

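/* Validate and install the filter's actions and, if given, bind the
 * requested class ID.  On failure the temporary actions are released.
 */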
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
	if (err)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		goto errout;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}

	tcf_exts_change(tp, &head->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}

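/* Create the (single) matchall filter: parse the netlink options, set up
 * the actions and, when the device supports it, offload the rule to
 * hardware.  A hardware offload failure is fatal only for skip_sw filters.
 * Replacing an existing filter is rejected with -EEXIST.
 */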
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct net_device *dev = tp->q->dev_queue->dev;
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
			       tca[TCA_OPTIONS], mall_policy);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto err_set_parms;

	if (tc_should_offload(dev, tp, flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long) new);
		if (err) {
			if (tc_skip_sw(flags))
				goto err_replace_hw_filter;
			else
				err = 0;
		}
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = (unsigned long) head;
	rcu_assign_pointer(tp->root, new);
	if (head)
		call_rcu(&head->rcu, mall_destroy_rcu);
	return 0;

err_replace_hw_filter:
err_set_parms:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

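/* Individual deletion is not supported; the filter goes away only when the
 * whole classifier is destroyed.
 */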
static int mall_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

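/* Walk over the (at most one) filter, honoring the caller's skip count. */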
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;
	if (arg->fn(tp, (unsigned long) head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

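/* Dump the filter's handle, class ID, flags, actions and action statistics
 * back to user space over netlink.
 */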
static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_mall_head *head = (struct cls_mall_head *) fh;
	struct nlattr *nest;

	if (!head)
		return skb->len;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.dump		= mall_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");