xref: /openbmc/linux/net/sched/act_ipt.c (revision cd2a9e62c8a3c5cae7691982667d79a0edc65283)
/*
 * net/sched/act_ipt.c		iptables target interface
 *
 * TODO: Add other tables. For now we only support the ipv4 table targets.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Copyright:	Jamal Hadi Salim (2002-13)
 */
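
/*
 * Illustrative use from user space (assumed iproute2 syntax, not defined in
 * this file): the "ipt"/"xt" actions hand matched packets to an iptables
 * target, e.g.
 *
 *	tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
 *		action ipt -j DROP
 */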

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>

#include <linux/netfilter_ipv4/ip_tables.h>

#define IPT_TAB_MASK     15

static int ipt_net_id;

static int xt_net_id;

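/*
 * Resolve the requested xt target by name/revision and validate the
 * user-supplied target data; on success a reference to the target module is
 * held via t->u.kernel.target.
 */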
static int ipt_init_target(struct xt_entry_target *t, char *table,
			   unsigned int hook)
{
	struct xt_tgchk_param par;
	struct xt_target *target;
	int ret = 0;

	target = xt_request_find_target(AF_INET, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target))
		return PTR_ERR(target);

	t->u.kernel.target = target;
	par.table     = table;
	par.entryinfo = NULL;
	par.target    = target;
	par.targinfo  = t->data;
	par.hook_mask = hook;
	par.family    = NFPROTO_IPV4;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		module_put(t->u.kernel.target->me);
		return ret;
	}
	return 0;
}

static void ipt_destroy_target(struct xt_entry_target *t)
{
	struct xt_tgdtor_param par = {
		.target   = t->u.kernel.target,
		.targinfo = t->data,
		.family   = NFPROTO_IPV4,
	};

	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

static void tcf_ipt_release(struct tc_action *a, int bind)
{
	struct tcf_ipt *ipt = to_ipt(a);

	ipt_destroy_target(ipt->tcfi_t);
	kfree(ipt->tcfi_tname);
	kfree(ipt->tcfi_t);
}

static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_IPT_HOOK]	= { .type = NLA_U32 },
	[TCA_IPT_INDEX]	= { .type = NLA_U32 },
	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
};

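/*
 * Common setup for the "ipt" and "xt" actions: parse the TCA_IPT_* netlink
 * attributes, create or overwrite the action instance, and bind it to the
 * requested iptables target and table.
 */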
static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
			  struct nlattr *est, struct tc_action *a, int ovr,
			  int bind)
{
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct xt_entry_target *td, *t;
	char *tname;
	int ret = 0, err, exists = 0;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	exists = tcf_hash_check(tn, index, a, bind);
	if (exists && bind)
		return 0;

	if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
		if (exists)
			tcf_hash_release(a, bind);
		return -EINVAL;
	}

	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
		return -EINVAL;

	if (!tcf_hash_check(tn, index, a, bind)) {
		ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
				      false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind) /* don't override defaults */
			return 0;
		tcf_hash_release(a, bind);

		if (!ovr)
			return -EEXIST;
	}
	ipt = to_ipt(a);

	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(t, tname, hook);
	if (err < 0)
		goto err3;

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t     = t;
	ipt->tcfi_hook  = hook;
	spin_unlock_bh(&ipt->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, a);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	if (ret == ACT_P_CREATED)
		tcf_hash_cleanup(a, est);
	return err;
}

static int tcf_ipt_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action *a, int ovr,
			int bind)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return __tcf_ipt_init(tn, nla, est, a, ovr, bind);
}

static int tcf_xt_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action *a, int ovr,
		       int bind)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return __tcf_ipt_init(tn, nla, est, a, ovr, bind);
}

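/*
 * Per-packet handler: run the configured iptables target on the skb and map
 * its netfilter verdict to a TC result (NF_ACCEPT -> TC_ACT_OK, NF_DROP ->
 * TC_ACT_SHOT, XT_CONTINUE -> TC_ACT_PIPE); anything else is treated as
 * ACCEPT.
 */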
static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
		   struct tcf_result *res)
{
	int ret = 0, result = 0;
	struct tcf_ipt *ipt = a->priv;
	struct xt_action_param par;

	if (skb_unclone(skb, GFP_ATOMIC))
		return TC_ACT_UNSPEC;

	spin_lock(&ipt->tcf_lock);

	tcf_lastuse_update(&ipt->tcf_tm);
	bstats_update(&ipt->tcf_bstats, skb);

	/* Yes, we have to worry about both the in and the out device.
	 * Worry about it later - danger - this API seems to have changed
	 * from earlier kernels.
	 */
	par.net	     = dev_net(skb->dev);
	par.in       = skb->dev;
	par.out      = NULL;
	par.hooknum  = ipt->tcfi_hook;
	par.target   = ipt->tcfi_t->u.kernel.target;
	par.targinfo = ipt->tcfi_t->data;
	par.family   = NFPROTO_IPV4;
	ret = par.target->target(skb, &par);

	switch (ret) {
	case NF_ACCEPT:
		result = TC_ACT_OK;
		break;
	case NF_DROP:
		result = TC_ACT_SHOT;
		ipt->tcf_qstats.drops++;
		break;
	case XT_CONTINUE:
		result = TC_ACT_PIPE;
		break;
	default:
		net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
				       ret);
		result = TC_ACT_OK;
		break;
	}
	spin_unlock(&ipt->tcf_lock);
	return result;
}

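/*
 * Dump the action's configuration (target, index, hook, counters, table and
 * timestamps) back to user space as TCA_IPT_* attributes.
 */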
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = a->priv;
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* For simple targets the kernel size equals the user size and the
	 * user name equals the target name. To be foolproof, do not assume
	 * this holds.
	 */
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = ipt->tcf_bindcnt - bind;
	c.refcnt = ipt->tcf_refcnt - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	kfree(t);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}

static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  struct tc_action *a)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, a);
}

static int tcf_ipt_search(struct net *net, struct tc_action *a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tcf_hash_search(tn, a, index);
}

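/*
 * The "ipt" and "xt" action kinds share the same act/dump/cleanup
 * implementation; they differ only in the kind they register under and the
 * per-net ID used for their action tables.
 */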
static struct tc_action_ops act_ipt_ops = {
	.kind		=	"ipt",
	.type		=	TCA_ACT_IPT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ipt,
	.dump		=	tcf_ipt_dump,
	.cleanup	=	tcf_ipt_release,
	.init		=	tcf_ipt_init,
	.walk		=	tcf_ipt_walker,
	.lookup		=	tcf_ipt_search,
};

static __net_init int ipt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	return tc_action_net_init(tn, &act_ipt_ops, IPT_TAB_MASK);
}

static void __net_exit ipt_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ipt_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations ipt_net_ops = {
	.init = ipt_init_net,
	.exit = ipt_exit_net,
	.id   = &ipt_net_id,
	.size = sizeof(struct tc_action_net),
};

static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 struct tc_action *a)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_generic_walker(tn, skb, cb, type, a);
}

static int tcf_xt_search(struct net *net, struct tc_action *a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_xt_ops = {
	.kind		=	"xt",
	.type		=	TCA_ACT_XT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ipt,
	.dump		=	tcf_ipt_dump,
	.cleanup	=	tcf_ipt_release,
	.init		=	tcf_xt_init,
	.walk		=	tcf_xt_walker,
	.lookup		=	tcf_xt_search,
};

static __net_init int xt_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	return tc_action_net_init(tn, &act_xt_ops, IPT_TAB_MASK);
}

static void __net_exit xt_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, xt_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations xt_net_ops = {
	.init = xt_init_net,
	.exit = xt_exit_net,
	.id   = &xt_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim (2002-13)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
MODULE_ALIAS("act_xt");

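/*
 * Module init: the "xt" alias is registered first and the primary "ipt"
 * action second; loading succeeds as long as at least one of the two
 * registrations does.
 */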
static int __init ipt_init_module(void)
{
	int ret1, ret2;

	ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
	if (ret1 < 0)
		pr_err("Failed to load xt action\n");

	ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
	if (ret2 < 0)
		pr_err("Failed to load ipt action\n");

	if (ret1 < 0 && ret2 < 0)
		return ret1;
	else
		return 0;
}

static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
	tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}

module_init(ipt_init_module);
module_exit(ipt_cleanup_module);
425