xref: /openbmc/linux/net/sched/act_nat.c (revision bbde9fc1824aab58bc78c084163007dd6c03fe5b)
/*
 * Stateless NAT actions
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
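/*
 * Example usage (iproute2 syntax; illustrative only):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *		match ip src 192.0.2.1/32 \
 *		action nat egress 192.0.2.1/32 203.0.113.1
 *
 * This rewrites the source address of matching packets from 192.0.2.1 to
 * 203.0.113.1 and fixes up the affected checksums.
 */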

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tc_act/tc_nat.h>
#include <net/act_api.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/tc_act/tc_nat.h>
#include <net/tcp.h>
#include <net/udp.h>

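/* Hash mask for the action table: NAT_TAB_MASK + 1 (16) buckets. */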
#define NAT_TAB_MASK	15

static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
	[TCA_NAT_PARMS]	= { .len = sizeof(struct tc_nat) },
};

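/*
 * Parse the netlink attributes and either create a new NAT action instance
 * or update an existing one identified by parm->index.
 */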
static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
			struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_NAT_MAX + 1];
	struct tc_nat *parm;
	int ret = 0, err;
	struct tcf_nat *p;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_NAT_MAX, nla, nat_policy);
	if (err < 0)
		return err;

	if (tb[TCA_NAT_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_NAT_PARMS]);

	if (!tcf_hash_check(parm->index, a, bind)) {
		ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
				      bind, false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;
		tcf_hash_release(a, bind);
		if (!ovr)
			return -EEXIST;
	}
	p = to_tcf_nat(a);

	spin_lock_bh(&p->tcf_lock);
	p->old_addr = parm->old_addr;
	p->new_addr = parm->new_addr;
	p->mask = parm->mask;
	p->flags = parm->flags;

	p->tcf_action = parm->action;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(a);

	return ret;
}

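/*
 * Packet path: rewrite the source (egress) or destination (ingress) address
 * of matching IPv4 packets and fix up the IP header checksum as well as any
 * affected TCP, UDP or ICMP checksums.
 */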
static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
		   struct tcf_result *res)
{
	struct tcf_nat *p = a->priv;
	struct iphdr *iph;
	__be32 old_addr;
	__be32 new_addr;
	__be32 mask;
	__be32 addr;
	int egress;
	int action;
	int ihl;
	int noff;

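	/*
	 * Snapshot the NAT parameters under the action lock so the packet
	 * can be rewritten without holding it.
	 */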
	spin_lock(&p->tcf_lock);

	p->tcf_tm.lastuse = jiffies;
	old_addr = p->old_addr;
	new_addr = p->new_addr;
	mask = p->mask;
	egress = p->flags & TCA_NAT_FLAG_EGRESS;
	action = p->tcf_action;

	bstats_update(&p->tcf_bstats, skb);

	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	noff = skb_network_offset(skb);
	if (!pskb_may_pull(skb, sizeof(*iph) + noff))
		goto drop;

	iph = ip_hdr(skb);

	if (egress)
		addr = iph->saddr;
	else
		addr = iph->daddr;

	if (!((old_addr ^ addr) & mask)) {
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + noff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* Rewrite IP header */
		iph = ip_hdr(skb);
		if (egress)
			iph->saddr = new_addr;
		else
			iph->daddr = new_addr;

		csum_replace4(&iph->check, addr, new_addr);
	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
		   iph->protocol != IPPROTO_ICMP) {
		goto out;
	}

	ihl = iph->ihl * 4;

	/* It would be nice to share code with stateful NAT. */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_TCP:
	{
		struct tcphdr *tcph;

		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		tcph = (void *)(skb_network_header(skb) + ihl);
		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
		break;
	}
	case IPPROTO_UDP:
	{
		struct udphdr *udph;

		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		udph = (void *)(skb_network_header(skb) + ihl);
		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
			inet_proto_csum_replace4(&udph->check, skb, addr,
						 new_addr, 1);
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
		break;
	}
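	/*
	 * ICMP errors embed the offending packet's IP header; rewrite the
	 * inner address as well so the error still matches the translated
	 * flow, and adjust the ICMP checksum.
	 */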
	case IPPROTO_ICMP:
	{
		struct icmphdr *icmph;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);

		if ((icmph->type != ICMP_DEST_UNREACH) &&
		    (icmph->type != ICMP_TIME_EXCEEDED) &&
		    (icmph->type != ICMP_PARAMETERPROB))
			break;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
					noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);
		if (egress)
			addr = iph->daddr;
		else
			addr = iph->saddr;

		if ((old_addr ^ addr) & mask)
			break;

		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, ihl + sizeof(*icmph) +
					     sizeof(*iph) + noff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* XXX Fix up the inner checksums. */
		if (egress)
			iph->daddr = new_addr;
		else
			iph->saddr = new_addr;

		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
					 0);
		break;
	}
	default:
		break;
	}

out:
	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}

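/* Dump the action parameters and timestamps back to user space. */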
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = a->priv;
	struct tc_nat opt = {
		.old_addr = p->old_addr,
		.new_addr = p->new_addr,
		.mask     = p->mask,
		.flags    = p->flags,

		.index    = p->tcf_index,
		.action   = p->tcf_action,
		.refcnt   = p->tcf_refcnt - ref,
		.bindcnt  = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_nat_ops = {
	.kind		=	"nat",
	.type		=	TCA_ACT_NAT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_nat,
	.dump		=	tcf_nat_dump,
	.init		=	tcf_nat_init,
};

MODULE_DESCRIPTION("Stateless NAT actions");
MODULE_LICENSE("GPL");

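/* Register the "nat" action with the tc action subsystem on module load. */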
static int __init nat_init_module(void)
{
	return tcf_register_action(&act_nat_ops, NAT_TAB_MASK);
}

static void __exit nat_cleanup_module(void)
{
	tcf_unregister_action(&act_nat_ops);
}

module_init(nat_init_module);
module_exit(nat_cleanup_module);