/*
 * Copyright (C)2003,2004 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors	Mitsuru KANDA <mk@linux-ipv6.org>
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 * Based on net/ipv4/xfrm4_tunnel.c
 *
 */
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/list.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/mutex.h>

/*
 * xfrm_tunnel_spi things are for allocating unique id ("spi")
 * per xfrm_address_t.
 */
struct xfrm6_tunnel_spi {
	struct hlist_node	list_byaddr;
	struct hlist_node	list_byspi;
	xfrm_address_t		addr;
	u32			spi;
	atomic_t		refcnt;
};

static DEFINE_RWLOCK(xfrm6_tunnel_spi_lock);

static u32 xfrm6_tunnel_spi;

#define XFRM6_TUNNEL_SPI_MIN	1
#define XFRM6_TUNNEL_SPI_MAX	0xffffffff

static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;

#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256

static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];

static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
{
	unsigned h;

	h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
	h ^= h >> 16;
	h ^= h >> 8;
	h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;

	return h;
}

static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}


static int xfrm6_tunnel_spi_init(void)
{
	int i;

	xfrm6_tunnel_spi = 0;
	xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
						  sizeof(struct xfrm6_tunnel_spi),
						  0, SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!xfrm6_tunnel_spi_kmem)
		return -ENOMEM;

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
		INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
	return 0;
}

static void xfrm6_tunnel_spi_fini(void)
{
	int i;

	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
		if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
			return;
	}
	for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
		if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
			return;
	}
	kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
	xfrm6_tunnel_spi_kmem = NULL;
}

static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos;

	hlist_for_each_entry(x6spi, pos,
			     &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
			     list_byaddr) {
		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
			return x6spi;
	}

	return NULL;
}
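
/*
 * Each xfrm6_tunnel_spi entry is linked into both hash tables: by tunnel
 * source address (list_byaddr) and by SPI value (list_byspi).  The exported
 * helpers below look up, allocate (with reference counting) and release the
 * per-address SPIs, all under xfrm6_tunnel_spi_lock.
 */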

__be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	read_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
	spi = x6spi ? x6spi->spi : 0;
	read_unlock_bh(&xfrm6_tunnel_spi_lock);
	return htonl(spi);
}

EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);

static int __xfrm6_tunnel_spi_check(u32 spi)
{
	struct xfrm6_tunnel_spi *x6spi;
	int index = xfrm6_tunnel_spi_hash_byspi(spi);
	struct hlist_node *pos;

	hlist_for_each_entry(x6spi, pos,
			     &xfrm6_tunnel_spi_byspi[index],
			     list_byspi) {
		if (x6spi->spi == spi)
			return -1;
	}
	return index;
}

static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
	u32 spi;
	struct xfrm6_tunnel_spi *x6spi;
	int index;

	if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
	    xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
		xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
	else
		xfrm6_tunnel_spi++;

	for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
		index = __xfrm6_tunnel_spi_check(spi);
		if (index >= 0)
			goto alloc_spi;
	}
	for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) {
		index = __xfrm6_tunnel_spi_check(spi);
		if (index >= 0)
			goto alloc_spi;
	}
	spi = 0;
	goto out;
alloc_spi:
	xfrm6_tunnel_spi = spi;
	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
	if (!x6spi)
		goto out;

	memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
	x6spi->spi = spi;
	atomic_set(&x6spi->refcnt, 1);

	hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]);

	index = xfrm6_tunnel_spi_hash_byaddr(saddr);
	hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
out:
	return spi;
}

__be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	u32 spi;

	write_lock_bh(&xfrm6_tunnel_spi_lock);
	x6spi = __xfrm6_tunnel_spi_lookup(saddr);
	if (x6spi) {
		atomic_inc(&x6spi->refcnt);
		spi = x6spi->spi;
	} else
		spi = __xfrm6_tunnel_alloc_spi(saddr);
	write_unlock_bh(&xfrm6_tunnel_spi_lock);

	return htonl(spi);
}

EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);

void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
{
	struct xfrm6_tunnel_spi *x6spi;
	struct hlist_node *pos, *n;

	write_lock_bh(&xfrm6_tunnel_spi_lock);

	hlist_for_each_entry_safe(x6spi, pos, n,
				  &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
				  list_byaddr)
	{
		if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
			if (atomic_dec_and_test(&x6spi->refcnt)) {
				hlist_del(&x6spi->list_byaddr);
				hlist_del(&x6spi->list_byspi);
				kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi);
				break;
			}
		}
	}
	write_unlock_bh(&xfrm6_tunnel_spi_lock);
}

EXPORT_SYMBOL(xfrm6_tunnel_free_spi);
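
/*
 * Usage sketch for the exported helpers above (illustrative only; the
 * variable names are hypothetical, the semantics follow the definitions
 * in this file):
 *
 *	xfrm_address_t *saddr = ...;		tunnel source address
 *	__be32 spi;
 *
 *	spi = xfrm6_tunnel_alloc_spi(saddr);	new entry, or extra reference
 *	spi = xfrm6_tunnel_spi_lookup(saddr);	same SPI; 0 if none allocated
 *	xfrm6_tunnel_free_spi(saddr);		drop reference, free on last one
 */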

static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}

static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
	return skb_network_header(skb)[IP6CB(skb)->nhoff];
}

static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	__be32 spi;

	spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
}

static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			    u8 type, u8 code, int offset, __be32 info)
{
	/* xfrm6_tunnel native err handling */
	switch (type) {
	case ICMPV6_DEST_UNREACH:
		switch (code) {
		case ICMPV6_NOROUTE:
		case ICMPV6_ADM_PROHIBITED:
		case ICMPV6_NOT_NEIGHBOUR:
		case ICMPV6_ADDR_UNREACH:
		case ICMPV6_PORT_UNREACH:
		default:
			break;
		}
		break;
	case ICMPV6_PKT_TOOBIG:
		break;
	case ICMPV6_TIME_EXCEED:
		switch (code) {
		case ICMPV6_EXC_HOPLIMIT:
			break;
		case ICMPV6_EXC_FRAGTIME:
		default:
			break;
		}
		break;
	case ICMPV6_PARAMPROB:
		switch (code) {
		case ICMPV6_HDR_FIELD: break;
		case ICMPV6_UNK_NEXTHDR: break;
		case ICMPV6_UNK_OPTION: break;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int xfrm6_tunnel_init_state(struct xfrm_state *x)
{
	if (x->props.mode != XFRM_MODE_TUNNEL)
		return -EINVAL;

	if (x->encap)
		return -EINVAL;

	x->props.header_len = sizeof(struct ipv6hdr);

	return 0;
}

static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
	xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
}

static const struct xfrm_type xfrm6_tunnel_type = {
	.description	= "IP6IP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_IPV6,
	.init_state	= xfrm6_tunnel_init_state,
	.destructor	= xfrm6_tunnel_destroy,
	.input		= xfrm6_tunnel_input,
	.output		= xfrm6_tunnel_output,
};

static struct xfrm6_tunnel xfrm6_tunnel_handler = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.priority	= 2,
};

static struct xfrm6_tunnel xfrm46_tunnel_handler = {
	.handler	= xfrm6_tunnel_rcv,
	.err_handler	= xfrm6_tunnel_err,
	.priority	= 2,
};

static int __init xfrm6_tunnel_init(void)
{
	if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0)
		goto err;
	if (xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6))
		goto unreg;
	if (xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET))
		goto dereg6;
	if (xfrm6_tunnel_spi_init() < 0)
		goto dereg46;
	return 0;

dereg46:
	xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
dereg6:
	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
unreg:
	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
err:
	return -EAGAIN;
}

static void __exit xfrm6_tunnel_fini(void)
{
	xfrm6_tunnel_spi_fini();
	xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
}

module_init(xfrm6_tunnel_init);
module_exit(xfrm6_tunnel_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);