xref: /openbmc/linux/net/sched/act_nat.c (revision a8fe58ce)
/*
 * Stateless NAT actions
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

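/*
 * Example (iproute2 syntax, illustrative only; assumes a qdisc with
 * handle 1: already exists on eth0):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 10 u32 \
 *           match ip src 10.0.0.1/32 \
 *           action nat egress 10.0.0.1 192.0.2.1
 *
 * rewrites the source address on egress; "ingress" rewrites the
 * destination address instead.
 */
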
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tc_act/tc_nat.h>
#include <net/act_api.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/tc_act/tc_nat.h>
#include <net/tcp.h>
#include <net/udp.h>


#define NAT_TAB_MASK	15

static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
	[TCA_NAT_PARMS]	= { .len = sizeof(struct tc_nat) },
};

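/*
 * Netlink "init" handler: validate TCA_NAT_PARMS and either create a new
 * action instance or update an existing one (looked up by index) with the
 * new old/new address, mask, flags and verdict.
 */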
static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
			struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_NAT_MAX + 1];
	struct tc_nat *parm;
	int ret = 0, err;
	struct tcf_nat *p;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_NAT_MAX, nla, nat_policy);
	if (err < 0)
		return err;

	if (tb[TCA_NAT_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_NAT_PARMS]);

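	/* Look up an existing action by index, or create a fresh one. */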
	if (!tcf_hash_check(parm->index, a, bind)) {
		ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
				      bind, false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;
		tcf_hash_release(a, bind);
		if (!ovr)
			return -EEXIST;
	}
	p = to_tcf_nat(a);

	spin_lock_bh(&p->tcf_lock);
	p->old_addr = parm->old_addr;
	p->new_addr = parm->new_addr;
	p->mask = parm->mask;
	p->flags = parm->flags;

	p->tcf_action = parm->action;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(a);

	return ret;
}

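/*
 * Packet path: translate the source (egress) or destination (ingress)
 * IPv4 address when it matches old_addr under the configured mask, then
 * patch the IP header checksum and any affected transport checksum.
 * ICMP errors get the embedded IP header translated as well.
 */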
static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
		   struct tcf_result *res)
{
	struct tcf_nat *p = a->priv;
	struct iphdr *iph;
	__be32 old_addr;
	__be32 new_addr;
	__be32 mask;
	__be32 addr;
	int egress;
	int action;
	int ihl;
	int noff;

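	/* Snapshot the parameters and update stats under the action lock;
	 * the actual packet rewrite below runs without it.
	 */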
	spin_lock(&p->tcf_lock);

	p->tcf_tm.lastuse = jiffies;
	old_addr = p->old_addr;
	new_addr = p->new_addr;
	mask = p->mask;
	egress = p->flags & TCA_NAT_FLAG_EGRESS;
	action = p->tcf_action;

	bstats_update(&p->tcf_bstats, skb);

	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	noff = skb_network_offset(skb);
	if (!pskb_may_pull(skb, sizeof(*iph) + noff))
		goto drop;

	iph = ip_hdr(skb);

	if (egress)
		addr = iph->saddr;
	else
		addr = iph->daddr;

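	/* Rewrite the outer header when the masked address matches old_addr.
	 * A non-matching packet is only of further interest if it is an ICMP
	 * error whose embedded header may still need translating.
	 */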
	if (!((old_addr ^ addr) & mask)) {
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + noff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* Rewrite IP header */
		iph = ip_hdr(skb);
		if (egress)
			iph->saddr = new_addr;
		else
			iph->daddr = new_addr;

		csum_replace4(&iph->check, addr, new_addr);
	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
		   iph->protocol != IPPROTO_ICMP) {
		goto out;
	}

	ihl = iph->ihl * 4;

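	/* TCP and UDP checksums cover a pseudo-header that includes the IP
	 * addresses, so they must be patched too.  Non-first fragments carry
	 * no transport header, hence the 0 key below.
	 */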
	/* It would be nice to share code with stateful NAT. */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_TCP:
	{
		struct tcphdr *tcph;

		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		tcph = (void *)(skb_network_header(skb) + ihl);
		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
					 true);
		break;
	}
	case IPPROTO_UDP:
	{
		struct udphdr *udph;

		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		udph = (void *)(skb_network_header(skb) + ihl);
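		/* A zero UDP checksum means "none"; only patch a real one
		 * (or one still to be computed), and never write 0 back.
		 */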
		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
			inet_proto_csum_replace4(&udph->check, skb, addr,
						 new_addr, true);
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
		break;
	}
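	/* ICMP errors embed the offending IP header; translate the mirror of
	 * the address rewritten in the outer header, then fix the ICMP
	 * checksum, which covers the payload but no pseudo-header.
	 */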
	case IPPROTO_ICMP:
	{
		struct icmphdr *icmph;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);

		if ((icmph->type != ICMP_DEST_UNREACH) &&
		    (icmph->type != ICMP_TIME_EXCEEDED) &&
		    (icmph->type != ICMP_PARAMETERPROB))
			break;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
					noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);
		if (egress)
			addr = iph->daddr;
		else
			addr = iph->saddr;

		if ((old_addr ^ addr) & mask)
			break;

		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, ihl + sizeof(*icmph) +
					     sizeof(*iph) + noff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* XXX Fix up the inner checksums. */
		if (egress)
			iph->daddr = new_addr;
		else
			iph->saddr = new_addr;

		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
					 false);
		break;
	}
	default:
		break;
	}

out:
	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}

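/*
 * Dump the action's parameters and timestamps back to user space over
 * netlink (the inverse of tcf_nat_init).
 */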
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = a->priv;
	struct tc_nat opt = {
		.old_addr = p->old_addr,
		.new_addr = p->new_addr,
		.mask     = p->mask,
		.flags    = p->flags,

		.index    = p->tcf_index,
		.action   = p->tcf_action,
		.refcnt   = p->tcf_refcnt - ref,
		.bindcnt  = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

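/* Hook the handlers above into the generic tc action framework. */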
static struct tc_action_ops act_nat_ops = {
	.kind		=	"nat",
	.type		=	TCA_ACT_NAT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_nat,
	.dump		=	tcf_nat_dump,
	.init		=	tcf_nat_init,
};

MODULE_DESCRIPTION("Stateless NAT actions");
MODULE_LICENSE("GPL");

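/* Register/unregister the "nat" action on module load and removal. */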
static int __init nat_init_module(void)
{
	return tcf_register_action(&act_nat_ops, NAT_TAB_MASK);
}

static void __exit nat_cleanup_module(void)
{
	tcf_unregister_action(&act_nat_ops);
}

module_init(nat_init_module);
module_exit(nat_cleanup_module);