/*
 * xfrm4_policy.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific portion
 *
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/inetdevice.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>

static struct xfrm_policy_afinfo xfrm4_policy_afinfo;

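/*
 * Resolve an IPv4 route for the given destination (and optional source)
 * address and TOS, and return its dst_entry, or an ERR_PTR() if the
 * route lookup fails.  When @saddr is NULL the routing code picks the
 * source address itself.
 */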
static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
					  xfrm_address_t *saddr,
					  xfrm_address_t *daddr)
{
	struct flowi fl = {
		.nl_u = {
			.ip4_u = {
				.tos = tos,
				.daddr = daddr->a4,
			},
		},
	};
	struct rtable *rt;
	int err;

	if (saddr)
		fl.fl4_src = saddr->a4;

	err = __ip_route_output_key(net, &rt, &fl);
	if (err)
		return ERR_PTR(err);

	return &rt->u.dst;
}

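/*
 * Pick a local source address for traffic towards @daddr by doing a
 * plain route lookup and copying the source address the routing code
 * selected for that route.
 */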
static int xfrm4_get_saddr(struct net *net,
			   xfrm_address_t *saddr, xfrm_address_t *daddr)
{
	struct dst_entry *dst;
	struct rtable *rt;

	dst = xfrm4_dst_lookup(net, 0, NULL, daddr);
	if (IS_ERR(dst))
		return -EHOSTUNREACH;

	rt = (struct rtable *)dst;
	saddr->a4 = rt->rt_src;
	dst_release(dst);
	return 0;
}

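/* The IPv4 TOS of the flow is used directly as the route lookup TOS. */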
static int xfrm4_get_tos(struct flowi *fl)
{
	return fl->fl4_tos;
}

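/* No address-family specific setup is needed for IPv4 bundle paths. */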
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
			   int nfheader_len)
{
	return 0;
}

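/*
 * Copy the routing metadata (output device, inet peer, route flags,
 * addresses) from the route underlying @xdst into the xfrm route itself,
 * so the bundle can stand in for a regular struct rtable.
 */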
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
			  struct flowi *fl)
{
	struct rtable *rt = (struct rtable *)xdst->route;

	xdst->u.rt.fl = *fl;

	xdst->u.dst.dev = dev;
	dev_hold(dev);

	xdst->u.rt.idev = in_dev_get(dev);
	if (!xdst->u.rt.idev)
		return -ENODEV;

	xdst->u.rt.peer = rt->peer;
	if (rt->peer)
		atomic_inc(&rt->peer->refcnt);

	/* Preserve only the route flags that matter for an xfrm dst; this
	 * logic was lost at some point and still needs an audit. */
	xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
					      RTCF_LOCAL);
	xdst->u.rt.rt_type = rt->rt_type;
	xdst->u.rt.rt_src = rt->rt_src;
	xdst->u.rt.rt_dst = rt->rt_dst;
	xdst->u.rt.rt_gateway = rt->rt_gateway;
	xdst->u.rt.rt_spec_dst = rt->rt_spec_dst;

	return 0;
}

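/*
 * Build a flow key (struct flowi) from an IPv4 packet for policy lookup.
 * Upper-layer selectors (ports, ICMP type/code, IPsec SPI) are only
 * extracted from non-fragmented packets or the first fragment; @reverse
 * swaps source and destination so the reply direction can be matched.
 */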
static void
_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
{
	struct iphdr *iph = ip_hdr(skb);
	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;

	memset(fl, 0, sizeof(struct flowi));
	if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports = (__be16 *)xprth;

				fl->fl_ip_sport = ports[!!reverse];
				fl->fl_ip_dport = ports[!reverse];
			}
			break;

		case IPPROTO_ICMP:
			if (pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp = xprth;

				fl->fl_icmp_type = icmp[0];
				fl->fl_icmp_code = icmp[1];
			}
			break;

		case IPPROTO_ESP:
			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be32 *ehdr = (__be32 *)xprth;

				fl->fl_ipsec_spi = ehdr[0];
			}
			break;

		case IPPROTO_AH:
			if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
				__be32 *ah_hdr = (__be32 *)xprth;

				fl->fl_ipsec_spi = ah_hdr[1];
			}
			break;

		case IPPROTO_COMP:
			if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ipcomp_hdr = (__be16 *)xprth;

				fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
			}
			break;
		default:
			fl->fl_ipsec_spi = 0;
			break;
		}
	}
	fl->proto = iph->protocol;
	fl->fl4_dst = reverse ? iph->saddr : iph->daddr;
	fl->fl4_src = reverse ? iph->daddr : iph->saddr;
	fl->fl4_tos = iph->tos;
}

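/*
 * dst_ops garbage collector: flush stale xfrm bundles and report memory
 * pressure once the number of cached entries exceeds twice gc_thresh.
 */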
static inline int xfrm4_garbage_collect(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);

	xfrm4_policy_afinfo.garbage_collect(net);
	return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
}

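/* Propagate a PMTU update to the route the xfrm bundle is built on. */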
static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct dst_entry *path = xdst->route;

	path->ops->update_pmtu(path, mtu);
}

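/* Drop the inet device and peer references taken in xfrm4_fill_dst(). */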
static void xfrm4_dst_destroy(struct dst_entry *dst)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

	if (likely(xdst->u.rt.idev))
		in_dev_put(xdst->u.rt.idev);
	if (likely(xdst->u.rt.peer))
		inet_putpeer(xdst->u.rt.peer);
	xfrm_dst_destroy(xdst);
}

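/*
 * On device unregistration, walk the bundle and repoint every idev
 * reference held on the departing device at the loopback device of the
 * same namespace before handing off to the generic ifdown handling.
 */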
static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			     int unregister)
{
	struct xfrm_dst *xdst;

	if (!unregister)
		return;

	xdst = (struct xfrm_dst *)dst;
	if (xdst->u.rt.idev->dev == dev) {
		struct in_device *loopback_idev =
			in_dev_get(dev_net(dev)->loopback_dev);
		BUG_ON(!loopback_idev);

		do {
			in_dev_put(xdst->u.rt.idev);
			xdst->u.rt.idev = loopback_idev;
			in_dev_hold(loopback_idev);
			xdst = (struct xfrm_dst *)xdst->u.dst.child;
		} while (xdst->u.dst.xfrm);

		__in_dev_put(loopback_idev);
	}

	xfrm_dst_ifdown(dst, dev);
}

static struct dst_ops xfrm4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			xfrm4_garbage_collect,
	.update_pmtu =		xfrm4_update_pmtu,
	.destroy =		xfrm4_dst_destroy,
	.ifdown =		xfrm4_dst_ifdown,
	.local_out =		__ip_local_out,
	.gc_thresh =		1024,
	.entries =		ATOMIC_INIT(0),
};

static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
	.family =		AF_INET,
	.dst_ops =		&xfrm4_dst_ops,
	.dst_lookup =		xfrm4_dst_lookup,
	.get_saddr =		xfrm4_get_saddr,
	.decode_session =	_decode_session4,
	.get_tos =		xfrm4_get_tos,
	.init_path =		xfrm4_init_path,
	.fill_dst =		xfrm4_fill_dst,
};

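/*
 * Expose xfrm4_gc_thresh as a sysctl (registered from xfrm4_init() below
 * under the net.ipv4 path) so the garbage-collection threshold of the
 * init namespace can be tuned at run time.
 */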
#ifdef CONFIG_SYSCTL
static struct ctl_table xfrm4_policy_table[] = {
	{
		.procname       = "xfrm4_gc_thresh",
		.data           = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec,
	},
	{ }
};

static struct ctl_table_header *sysctl_hdr;
#endif

static void __init xfrm4_policy_init(void)
{
	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
}

static void __exit xfrm4_policy_fini(void)
{
#ifdef CONFIG_SYSCTL
	if (sysctl_hdr)
		unregister_net_sysctl_table(sysctl_hdr);
#endif
	xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
}

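/*
 * Boot-time setup of the IPv4 xfrm infrastructure: state and policy
 * afinfo registration plus the sysctl knob.  @rt_max_size is the routing
 * cache limit used to derive the initial gc_thresh.
 */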
void __init xfrm4_init(int rt_max_size)
{
	/*
	 * Select a default value for gc_thresh based on the main route
	 * table hash size.  The worst case scenario is IPsec in transport
	 * mode, where we create one dst_entry per socket.  The xfrm gc
	 * algorithm starts trying to remove entries at gc_thresh and
	 * prevents new allocations at 2*gc_thresh, so set the initial
	 * xfrm gc_thresh to rt_max_size/2.  That lets us store one IPsec
	 * connection per route table entry and start cleaning when the
	 * table is half full.
	 */
	xfrm4_dst_ops.gc_thresh = rt_max_size/2;

	xfrm4_state_init();
	xfrm4_policy_init();
#ifdef CONFIG_SYSCTL
	sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
						xfrm4_policy_table);
#endif
}