xref: /openbmc/linux/net/sched/act_ctinfo.c (revision f5c27da4)
// SPDX-License-Identifier: GPL-2.0+
/* net/sched/act_ctinfo.c  netfilter ctinfo connmark actions
 *
 * Copyright (c) 2019 Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
 */
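/* The ctinfo action copies values previously stashed in a packet's conntrack
 * mark back into the packet itself: DSCP mode rewrites the IPv4/IPv6 DS field
 * from a masked region of ct->mark, and CPMARK mode restores skb->mark from
 * ct->mark.
 *
 * Illustrative userspace invocation (iproute2 tc syntax; this example is an
 * assumption added for documentation and is not taken from this file):
 *
 *   tc filter add dev eth0 parent ffff: protocol ip prio 10 u32 \
 *	match u32 0 0 flowid 1:1 \
 *	action ctinfo dscp 0xfc000000 0x01000000
 */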

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <uapi/linux/tc_act/tc_ctinfo.h>
#include <net/tc_act/tc_ctinfo.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>

static struct tc_action_ops act_ctinfo_ops;

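/* tcf_ctinfo_dscp_set - restore the packet's DS field from the conntrack mark
 *
 * Extract the DSCP value stored in the masked region of ct->mark, shift it
 * into the upper six bits of the DS field and, if it differs from what the
 * packet currently carries, rewrite the IPv4/IPv6 header while leaving the
 * two ECN bits untouched.  Successful rewrites and failures to make the
 * header writable are counted in the dscp_set/dscp_error statistics.
 */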
static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
				struct tcf_ctinfo_params *cp,
				struct sk_buff *skb, int wlen, int proto)
{
	u8 dscp, newdscp;

	newdscp = (((ct->mark & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
		     ~INET_ECN_MASK;

	switch (proto) {
	case NFPROTO_IPV4:
		dscp = ipv4_get_dsfield(ip_hdr(skb)) & ~INET_ECN_MASK;
		if (dscp != newdscp) {
			if (likely(!skb_try_make_writable(skb, wlen))) {
				ipv4_change_dsfield(ip_hdr(skb),
						    INET_ECN_MASK,
						    newdscp);
				ca->stats_dscp_set++;
			} else {
				ca->stats_dscp_error++;
			}
		}
		break;
	case NFPROTO_IPV6:
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & ~INET_ECN_MASK;
		if (dscp != newdscp) {
			if (likely(!skb_try_make_writable(skb, wlen))) {
				ipv6_change_dsfield(ipv6_hdr(skb),
						    INET_ECN_MASK,
						    newdscp);
				ca->stats_dscp_set++;
			} else {
				ca->stats_dscp_error++;
			}
		}
		break;
	default:
		break;
	}
}

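/* tcf_ctinfo_cpmark_set - restore skb->mark from the conntrack mark, masked
 * by the configured cpmark mask, and bump the cpmark_set counter.
 */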
static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
				  struct tcf_ctinfo_params *cp,
				  struct sk_buff *skb)
{
	ca->stats_cpmark_set++;
	skb->mark = ct->mark & cp->cpmarkmask;
}

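/* tcf_ctinfo_act - per-packet action handler
 *
 * Locate the conntrack entry for the packet, falling back to an explicit
 * tuple lookup when no entry is attached to the skb (typical on ingress),
 * then apply whichever of the DSCP and CPMARK modes were configured; DSCP
 * mode is additionally gated on the statemask bit when one is set.  Any
 * reference taken by the tuple lookup is dropped before returning the
 * configured control action.
 */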
static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	const struct nf_conntrack_tuple_hash *thash = NULL;
	struct tcf_ctinfo *ca = to_ctinfo(a);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_zone zone;
	enum ip_conntrack_info ctinfo;
	struct tcf_ctinfo_params *cp;
	struct nf_conn *ct;
	int proto, wlen;
	int action;

	cp = rcu_dereference_bh(ca->params);

	tcf_lastuse_update(&ca->tcf_tm);
	bstats_update(&ca->tcf_bstats, skb);
	action = READ_ONCE(ca->tcf_action);

	wlen = skb_network_offset(skb);
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		wlen += sizeof(struct iphdr);
		if (!pskb_may_pull(skb, wlen))
			goto out;

		proto = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		wlen += sizeof(struct ipv6hdr);
		if (!pskb_may_pull(skb, wlen))
			goto out;

		proto = NFPROTO_IPV6;
		break;
	default:
		goto out;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct) { /* look harder, usually ingress */
		if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
				       proto, cp->net, &tuple))
			goto out;
		zone.id = cp->zone;
		zone.dir = NF_CT_DEFAULT_ZONE_DIR;

		thash = nf_conntrack_find_get(cp->net, &zone, &tuple);
		if (!thash)
			goto out;

		ct = nf_ct_tuplehash_to_ctrack(thash);
	}

	if (cp->mode & CTINFO_MODE_DSCP)
		if (!cp->dscpstatemask || (ct->mark & cp->dscpstatemask))
			tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto);

	if (cp->mode & CTINFO_MODE_CPMARK)
		tcf_ctinfo_cpmark_set(ct, ca, cp, skb);

	if (thash)
		nf_ct_put(ct);
out:
	return action;
}

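/* netlink attribute policy enforced by nla_parse_nested() in tcf_ctinfo_init()
 * before any allocation takes place.
 */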
static const struct nla_policy ctinfo_policy[TCA_CTINFO_MAX + 1] = {
	[TCA_CTINFO_ACT]		  =
		NLA_POLICY_EXACT_LEN(sizeof(struct tc_ctinfo)),
	[TCA_CTINFO_ZONE]		  = { .type = NLA_U16 },
	[TCA_CTINFO_PARMS_DSCP_MASK]	  = { .type = NLA_U32 },
	[TCA_CTINFO_PARMS_DSCP_STATEMASK] = { .type = NLA_U32 },
	[TCA_CTINFO_PARMS_CPMARK_MASK]	  = { .type = NLA_U32 },
};

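/* tcf_ctinfo_init - create or update a ctinfo action instance
 *
 * Validate the netlink attributes (the DSCP mask must be six contiguous bits
 * and must not overlap the statemask), look up or allocate the action index,
 * build a fresh parameter block and swap it in under tcf_lock, so that the
 * datapath only ever observes a complete parameter set via RCU; the previous
 * block, if any, is freed after a grace period.
 */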
static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	u32 dscpmask = 0, dscpstatemask, index;
	struct nlattr *tb[TCA_CTINFO_MAX + 1];
	struct tcf_ctinfo_params *cp_new;
	struct tcf_chain *goto_ch = NULL;
	struct tc_ctinfo *actparm;
	struct tcf_ctinfo *ci;
	u8 dscpmaskshift;
	int ret = 0, err;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "ctinfo requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CTINFO_MAX, nla, ctinfo_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CTINFO_ACT]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing required TCA_CTINFO_ACT attribute");
		return -EINVAL;
	}
	actparm = nla_data(tb[TCA_CTINFO_ACT]);

	/* do some basic validation here before dynamically allocating
	 * things that we would otherwise have to clean up.
	 */
	if (tb[TCA_CTINFO_PARMS_DSCP_MASK]) {
		dscpmask = nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_MASK]);
		/* need contiguous 6 bit mask */
		dscpmaskshift = dscpmask ? __ffs(dscpmask) : 0;
		if ((~0 & (dscpmask >> dscpmaskshift)) != 0x3f) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_CTINFO_PARMS_DSCP_MASK],
					    "dscp mask must be 6 contiguous bits");
			return -EINVAL;
		}
		dscpstatemask = tb[TCA_CTINFO_PARMS_DSCP_STATEMASK] ?
			nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK]) : 0;
		/* mask & statemask must not overlap */
		if (dscpmask & dscpstatemask) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_CTINFO_PARMS_DSCP_STATEMASK],
					    "dscp statemask must not overlap dscp mask");
			return -EINVAL;
		}
	}

	/* done the validation: now to the actual action allocation */
	index = actparm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_ctinfo_ops, bind, false, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return 0;
		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(actparm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	ci = to_ctinfo(*a);

	cp_new = kzalloc(sizeof(*cp_new), GFP_KERNEL);
	if (unlikely(!cp_new)) {
		err = -ENOMEM;
		goto put_chain;
	}

	cp_new->net = net;
	cp_new->zone = tb[TCA_CTINFO_ZONE] ?
			nla_get_u16(tb[TCA_CTINFO_ZONE]) : 0;
	if (dscpmask) {
		cp_new->dscpmask = dscpmask;
		cp_new->dscpmaskshift = dscpmaskshift;
		cp_new->dscpstatemask = dscpstatemask;
		cp_new->mode |= CTINFO_MODE_DSCP;
	}

	if (tb[TCA_CTINFO_PARMS_CPMARK_MASK]) {
		cp_new->cpmarkmask =
				nla_get_u32(tb[TCA_CTINFO_PARMS_CPMARK_MASK]);
		cp_new->mode |= CTINFO_MODE_CPMARK;
	}

	spin_lock_bh(&ci->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, actparm->action, goto_ch);
	cp_new = rcu_replace_pointer(ci->params, cp_new,
				     lockdep_is_held(&ci->tcf_lock));
	spin_unlock_bh(&ci->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (cp_new)
		kfree_rcu(cp_new, rcu);

	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

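/* tcf_ctinfo_dump - report the action's configuration and statistics to
 * userspace; the parameters, zone, masks and the three per-action counters
 * are emitted under tcf_lock.
 */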
static int tcf_ctinfo_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	struct tcf_ctinfo *ci = to_ctinfo(a);
	struct tc_ctinfo opt = {
		.index   = ci->tcf_index,
		.refcnt  = refcount_read(&ci->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&ci->tcf_bindcnt) - bind,
	};
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ctinfo_params *cp;
	struct tcf_t t;

	spin_lock_bh(&ci->tcf_lock);
	cp = rcu_dereference_protected(ci->params,
				       lockdep_is_held(&ci->tcf_lock));

	tcf_tm_dump(&t, &ci->tcf_tm);
	if (nla_put_64bit(skb, TCA_CTINFO_TM, sizeof(t), &t, TCA_CTINFO_PAD))
		goto nla_put_failure;

	opt.action = ci->tcf_action;
	if (nla_put(skb, TCA_CTINFO_ACT, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u16(skb, TCA_CTINFO_ZONE, cp->zone))
		goto nla_put_failure;

	if (cp->mode & CTINFO_MODE_DSCP) {
		if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_MASK,
				cp->dscpmask))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_STATEMASK,
				cp->dscpstatemask))
			goto nla_put_failure;
	}

	if (cp->mode & CTINFO_MODE_CPMARK) {
		if (nla_put_u32(skb, TCA_CTINFO_PARMS_CPMARK_MASK,
				cp->cpmarkmask))
			goto nla_put_failure;
	}

	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_SET,
			      ci->stats_dscp_set, TCA_CTINFO_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_ERROR,
			      ci->stats_dscp_error, TCA_CTINFO_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_CPMARK_SET,
			      ci->stats_cpmark_set, TCA_CTINFO_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&ci->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ci->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

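/* tcf_ctinfo_cleanup - release the parameter block once the action is torn
 * down; freed via kfree_rcu() so in-flight readers finish first.
 */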
static void tcf_ctinfo_cleanup(struct tc_action *a)
{
	struct tcf_ctinfo *ci = to_ctinfo(a);
	struct tcf_ctinfo_params *cp;

	cp = rcu_dereference_protected(ci->params, 1);
	if (cp)
		kfree_rcu(cp, rcu);
}

static struct tc_action_ops act_ctinfo_ops = {
	.kind	= "ctinfo",
	.id	= TCA_ID_CTINFO,
	.owner	= THIS_MODULE,
	.act	= tcf_ctinfo_act,
	.dump	= tcf_ctinfo_dump,
	.init	= tcf_ctinfo_init,
	.cleanup = tcf_ctinfo_cleanup,
	.size	= sizeof(struct tcf_ctinfo),
};

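/* Per-network-namespace setup and teardown of the shared action index table */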
static __net_init int ctinfo_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_ctinfo_ops.net_id);

	return tc_action_net_init(net, tn, &act_ctinfo_ops);
}

static void __net_exit ctinfo_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_ctinfo_ops.net_id);
}

static struct pernet_operations ctinfo_net_ops = {
	.init		= ctinfo_init_net,
	.exit_batch	= ctinfo_exit_net,
	.id		= &act_ctinfo_ops.net_id,
	.size		= sizeof(struct tc_action_net),
};

static int __init ctinfo_init_module(void)
{
	return tcf_register_action(&act_ctinfo_ops, &ctinfo_net_ops);
}

static void __exit ctinfo_cleanup_module(void)
{
	tcf_unregister_action(&act_ctinfo_ops, &ctinfo_net_ops);
}

module_init(ctinfo_init_module);
module_exit(ctinfo_cleanup_module);
MODULE_AUTHOR("Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>");
MODULE_DESCRIPTION("Connection tracking mark actions");
MODULE_LICENSE("GPL");