1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #include <linux/kernel.h> 3 #include <linux/init.h> 4 #include <linux/module.h> 5 #include <linux/seqlock.h> 6 #include <linux/netlink.h> 7 #include <linux/netfilter.h> 8 #include <linux/netfilter/nf_tables.h> 9 #include <net/netfilter/nf_tables.h> 10 #include <net/dst_metadata.h> 11 #include <net/ip_tunnels.h> 12 #include <net/vxlan.h> 13 #include <net/erspan.h> 14 15 struct nft_tunnel { 16 enum nft_tunnel_keys key:8; 17 enum nft_registers dreg:8; 18 enum nft_tunnel_mode mode:8; 19 }; 20 21 static void nft_tunnel_get_eval(const struct nft_expr *expr, 22 struct nft_regs *regs, 23 const struct nft_pktinfo *pkt) 24 { 25 const struct nft_tunnel *priv = nft_expr_priv(expr); 26 u32 *dest = ®s->data[priv->dreg]; 27 struct ip_tunnel_info *tun_info; 28 29 tun_info = skb_tunnel_info(pkt->skb); 30 31 switch (priv->key) { 32 case NFT_TUNNEL_PATH: 33 if (!tun_info) { 34 nft_reg_store8(dest, false); 35 return; 36 } 37 if (priv->mode == NFT_TUNNEL_MODE_NONE || 38 (priv->mode == NFT_TUNNEL_MODE_RX && 39 !(tun_info->mode & IP_TUNNEL_INFO_TX)) || 40 (priv->mode == NFT_TUNNEL_MODE_TX && 41 (tun_info->mode & IP_TUNNEL_INFO_TX))) 42 nft_reg_store8(dest, true); 43 else 44 nft_reg_store8(dest, false); 45 break; 46 case NFT_TUNNEL_ID: 47 if (!tun_info) { 48 regs->verdict.code = NFT_BREAK; 49 return; 50 } 51 if (priv->mode == NFT_TUNNEL_MODE_NONE || 52 (priv->mode == NFT_TUNNEL_MODE_RX && 53 !(tun_info->mode & IP_TUNNEL_INFO_TX)) || 54 (priv->mode == NFT_TUNNEL_MODE_TX && 55 (tun_info->mode & IP_TUNNEL_INFO_TX))) 56 *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id)); 57 else 58 regs->verdict.code = NFT_BREAK; 59 break; 60 default: 61 WARN_ON(1); 62 regs->verdict.code = NFT_BREAK; 63 } 64 } 65 66 static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = { 67 [NFTA_TUNNEL_KEY] = { .type = NLA_U32 }, 68 [NFTA_TUNNEL_DREG] = { .type = NLA_U32 }, 69 [NFTA_TUNNEL_MODE] = { .type = NLA_U32 }, 70 }; 71 72 static int 
nft_tunnel_get_init(const struct nft_ctx *ctx, 73 const struct nft_expr *expr, 74 const struct nlattr * const tb[]) 75 { 76 struct nft_tunnel *priv = nft_expr_priv(expr); 77 u32 len; 78 79 if (!tb[NFTA_TUNNEL_KEY] || 80 !tb[NFTA_TUNNEL_DREG]) 81 return -EINVAL; 82 83 priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY])); 84 switch (priv->key) { 85 case NFT_TUNNEL_PATH: 86 len = sizeof(u8); 87 break; 88 case NFT_TUNNEL_ID: 89 len = sizeof(u32); 90 break; 91 default: 92 return -EOPNOTSUPP; 93 } 94 95 priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]); 96 97 if (tb[NFTA_TUNNEL_MODE]) { 98 priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE])); 99 if (priv->mode > NFT_TUNNEL_MODE_MAX) 100 return -EOPNOTSUPP; 101 } else { 102 priv->mode = NFT_TUNNEL_MODE_NONE; 103 } 104 105 return nft_validate_register_store(ctx, priv->dreg, NULL, 106 NFT_DATA_VALUE, len); 107 } 108 109 static int nft_tunnel_get_dump(struct sk_buff *skb, 110 const struct nft_expr *expr) 111 { 112 const struct nft_tunnel *priv = nft_expr_priv(expr); 113 114 if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key))) 115 goto nla_put_failure; 116 if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg)) 117 goto nla_put_failure; 118 if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode))) 119 goto nla_put_failure; 120 return 0; 121 122 nla_put_failure: 123 return -1; 124 } 125 126 static struct nft_expr_type nft_tunnel_type; 127 static const struct nft_expr_ops nft_tunnel_get_ops = { 128 .type = &nft_tunnel_type, 129 .size = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)), 130 .eval = nft_tunnel_get_eval, 131 .init = nft_tunnel_get_init, 132 .dump = nft_tunnel_get_dump, 133 }; 134 135 static struct nft_expr_type nft_tunnel_type __read_mostly = { 136 .name = "tunnel", 137 .ops = &nft_tunnel_get_ops, 138 .policy = nft_tunnel_policy, 139 .maxattr = NFTA_TUNNEL_MAX, 140 .owner = THIS_MODULE, 141 }; 142 143 struct nft_tunnel_opts { 144 union { 145 struct vxlan_metadata vxlan; 146 struct erspan_metadata erspan; 147 
} u; 148 u32 len; 149 __be16 flags; 150 }; 151 152 struct nft_tunnel_obj { 153 struct metadata_dst *md; 154 struct nft_tunnel_opts opts; 155 }; 156 157 static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = { 158 [NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 }, 159 [NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 }, 160 }; 161 162 static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx, 163 const struct nlattr *attr, 164 struct ip_tunnel_info *info) 165 { 166 struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1]; 167 int err; 168 169 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr, 170 nft_tunnel_ip_policy, NULL); 171 if (err < 0) 172 return err; 173 174 if (!tb[NFTA_TUNNEL_KEY_IP_DST]) 175 return -EINVAL; 176 177 if (tb[NFTA_TUNNEL_KEY_IP_SRC]) 178 info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]); 179 if (tb[NFTA_TUNNEL_KEY_IP_DST]) 180 info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]); 181 182 return 0; 183 } 184 185 static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = { 186 [NFTA_TUNNEL_KEY_IP6_SRC] = { .len = sizeof(struct in6_addr), }, 187 [NFTA_TUNNEL_KEY_IP6_DST] = { .len = sizeof(struct in6_addr), }, 188 [NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, } 189 }; 190 191 static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx, 192 const struct nlattr *attr, 193 struct ip_tunnel_info *info) 194 { 195 struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1]; 196 int err; 197 198 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr, 199 nft_tunnel_ip6_policy, NULL); 200 if (err < 0) 201 return err; 202 203 if (!tb[NFTA_TUNNEL_KEY_IP6_DST]) 204 return -EINVAL; 205 206 if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) { 207 memcpy(&info->key.u.ipv6.src, 208 nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]), 209 sizeof(struct in6_addr)); 210 } 211 if (tb[NFTA_TUNNEL_KEY_IP6_DST]) { 212 memcpy(&info->key.u.ipv6.dst, 213 nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]), 214 sizeof(struct 
in6_addr)); 215 } 216 if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]) 217 info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]); 218 219 info->mode |= IP_TUNNEL_INFO_IPV6; 220 221 return 0; 222 } 223 224 static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = { 225 [NFTA_TUNNEL_KEY_VXLAN_GBP] = { .type = NLA_U32 }, 226 }; 227 228 static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr, 229 struct nft_tunnel_opts *opts) 230 { 231 struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1]; 232 int err; 233 234 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr, 235 nft_tunnel_opts_vxlan_policy, NULL); 236 if (err < 0) 237 return err; 238 239 if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP]) 240 return -EINVAL; 241 242 opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP])); 243 244 opts->len = sizeof(struct vxlan_metadata); 245 opts->flags = TUNNEL_VXLAN_OPT; 246 247 return 0; 248 } 249 250 static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = { 251 [NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 }, 252 [NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 }, 253 [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 }, 254 [NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 }, 255 }; 256 257 static int nft_tunnel_obj_erspan_init(const struct nlattr *attr, 258 struct nft_tunnel_opts *opts) 259 { 260 struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1]; 261 uint8_t hwid, dir; 262 int err, version; 263 264 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX, 265 attr, nft_tunnel_opts_erspan_policy, 266 NULL); 267 if (err < 0) 268 return err; 269 270 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]) 271 return -EINVAL; 272 273 version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])); 274 switch (version) { 275 case ERSPAN_VERSION: 276 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]) 277 return -EINVAL; 278 279 opts->u.erspan.u.index = 280 
nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]); 281 break; 282 case ERSPAN_VERSION2: 283 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] || 284 !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]) 285 return -EINVAL; 286 287 hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]); 288 dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]); 289 290 set_hwid(&opts->u.erspan.u.md2, hwid); 291 opts->u.erspan.u.md2.dir = dir; 292 break; 293 default: 294 return -EOPNOTSUPP; 295 } 296 opts->u.erspan.version = version; 297 298 opts->len = sizeof(struct erspan_metadata); 299 opts->flags = TUNNEL_ERSPAN_OPT; 300 301 return 0; 302 } 303 304 static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = { 305 [NFTA_TUNNEL_KEY_OPTS_VXLAN] = { .type = NLA_NESTED, }, 306 [NFTA_TUNNEL_KEY_OPTS_ERSPAN] = { .type = NLA_NESTED, }, 307 }; 308 309 static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx, 310 const struct nlattr *attr, 311 struct ip_tunnel_info *info, 312 struct nft_tunnel_opts *opts) 313 { 314 struct nlattr *tb[NFTA_TUNNEL_KEY_OPTS_MAX + 1]; 315 int err; 316 317 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_OPTS_MAX, attr, 318 nft_tunnel_opts_policy, NULL); 319 if (err < 0) 320 return err; 321 322 if (tb[NFTA_TUNNEL_KEY_OPTS_VXLAN]) { 323 err = nft_tunnel_obj_vxlan_init(tb[NFTA_TUNNEL_KEY_OPTS_VXLAN], 324 opts); 325 } else if (tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN]) { 326 err = nft_tunnel_obj_erspan_init(tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN], 327 opts); 328 } else { 329 return -EOPNOTSUPP; 330 } 331 332 return err; 333 } 334 335 static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = { 336 [NFTA_TUNNEL_KEY_IP] = { .type = NLA_NESTED, }, 337 [NFTA_TUNNEL_KEY_IP6] = { .type = NLA_NESTED, }, 338 [NFTA_TUNNEL_KEY_ID] = { .type = NLA_U32, }, 339 [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, }, 340 [NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, }, 341 [NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, }, 342 [NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, }, 343 }; 344 
/* Instantiate a tunnel object: translate the netlink attributes into an
 * ip_tunnel_info and pre-allocate the metadata dst that the eval path
 * attaches to matching packets.
 *
 * Returns 0 on success or a negative errno (-EINVAL on missing mandatory
 * attributes, -EOPNOTSUPP on unknown flags, -ENOMEM on allocation failure).
 */
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	/* A tunnel id is mandatory. */
	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	/* This object describes an egress (TX) tunnel; checksumming is on
	 * by default and the route cache is bypassed (NOCACHE).
	 */
	info.mode		= IP_TUNNEL_INFO_TX;
	info.key.tun_id		= key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	info.key.tun_flags	= TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

	/* Exactly one address family must be given. */
	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT]) {
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	}
	if (tb[NFTA_TUNNEL_KEY_DPORT]) {
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
	}

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		/* Map user-visible NFT_TUNNEL_F_* flags onto TUNNEL_* bits;
		 * ZERO_CSUM_TX clears the default TUNNEL_CSUM.
		 */
		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			info.key.tun_flags &= ~TUNNEL_CSUM;
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			info.key.tun_flags |= TUNNEL_SEQ;
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	if (tb[NFTA_TUNNEL_KEY_TTL])
		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
	else
		info.key.ttl = U8_MAX;	/* default TTL: 255 */

	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	/* Allocate the metadata dst with room for the encap options parsed
	 * above (priv->opts.len is 0 when no OPTS attribute was given).
	 */
	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
	if (err < 0) {
		metadata_dst_free(md);
		return err;
	}
#endif
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}

/* Eval: replace the packet's dst with this object's tunnel metadata dst,
 * taking an extra reference since the object keeps its own.
 */
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct sk_buff *skb = pkt->skb;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) priv->md);
	skb_dst_set(skb, (struct dst_entry *) priv->md);
}

/* Dump the tunnel endpoints as a nested IP or IP6 attribute, depending on
 * the address family recorded in @info. Returns 0 or -1 on put failure.
 */
static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
	struct nlattr *nest;

	if (info->mode & IP_TUNNEL_INFO_IPV6) {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
		if (!nest)
			return -1;

		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
				     &info->key.u.ipv6.src) < 0 ||
		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
				     &info->key.u.ipv6.dst) < 0 ||
		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
				 info->key.label)) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	} else {
		nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
		if (!nest)
			return -1;

		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
				    info->key.u.ipv4.src) < 0 ||
		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
				    info->key.u.ipv4.dst) < 0) {
			nla_nest_cancel(skb, nest);
			return -1;
		}

		nla_nest_end(skb, nest);
	}

	return 0;
}

/* Dump the encap-specific options (VXLAN GBP or ERSPAN v1/v2 metadata)
 * as a nested OPTS attribute. An empty OPTS nest is emitted when no
 * options were configured. Returns 0 or -1 on put failure.
 */
static int nft_tunnel_opts_dump(struct sk_buff *skb,
				struct nft_tunnel_obj *priv)
{
	struct nft_tunnel_opts *opts = &priv->opts;
	struct nlattr *nest, *inner;

	nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
	if (!nest)
		return -1;

	if (opts->flags & TUNNEL_VXLAN_OPT) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
				 htonl(opts->u.vxlan.gbp)))
			goto inner_failure;
		nla_nest_end(skb, inner);
	} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
		inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
		if (!inner)
			goto failure;
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
				 htonl(opts->u.erspan.version)))
			goto inner_failure;
		switch (opts->u.erspan.version) {
		case ERSPAN_VERSION:
			/* index is stored in network byte order already. */
			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
					 opts->u.erspan.u.index))
				goto inner_failure;
			break;
		case ERSPAN_VERSION2:
			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
				       get_hwid(&opts->u.erspan.u.md2)) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
				       opts->u.erspan.u.md2.dir))
				goto inner_failure;
			break;
		}
		nla_nest_end(skb, inner);
	}
	nla_nest_end(skb, nest);
	return 0;

inner_failure:
	nla_nest_cancel(skb, inner);
failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

/* Dump the UDP source/destination ports (already in network byte order). */
static int nft_tunnel_ports_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
		return -1;

	return 0;
}

/* Translate TUNNEL_* bits back into user-visible NFT_TUNNEL_F_* flags
 * and dump them. Note the inverted mapping for TUNNEL_CSUM: the absence
 * of checksumming is reported as ZERO_CSUM_TX.
 */
static int nft_tunnel_flags_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	u32 flags = 0;

	if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
		flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
	if (!(info->key.tun_flags & TUNNEL_CSUM))
		flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
	if (info->key.tun_flags & TUNNEL_SEQ)
		flags |= NFT_TUNNEL_F_SEQ_NUMBER;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
		return -1;

	return 0;
}

/* Dump the full tunnel object configuration back to userspace. */
static int nft_tunnel_obj_dump(struct sk_buff *skb,
			       struct nft_object *obj, bool reset)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info *info = &priv->md->u.tun_info;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
			 tunnel_id_to_key32(info->key.tun_id)) ||
	    nft_tunnel_ip_dump(skb, info) < 0 ||
	    nft_tunnel_ports_dump(skb, info) < 0 ||
	    nft_tunnel_flags_dump(skb, info) < 0 ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
	    nft_tunnel_opts_dump(skb, priv) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

/* Release the metadata dst allocated in nft_tunnel_obj_init(). */
static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}

static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type		= &nft_tunnel_obj_type,
	.size		= sizeof(struct nft_tunnel_obj),
	.eval		= nft_tunnel_obj_eval,
	.init		= nft_tunnel_obj_init,
	.destroy	= nft_tunnel_obj_destroy,
	.dump		= nft_tunnel_obj_dump,
};

static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type		= NFT_OBJECT_TUNNEL,
	.ops		= &nft_tunnel_obj_ops,
	.maxattr	= NFTA_TUNNEL_KEY_MAX,
	.policy		= nft_tunnel_key_policy,
	.owner		= THIS_MODULE,
};

/* Register the "tunnel" expression first, then the tunnel object type;
 * unwind the expression registration if the object registration fails.
 */
static int __init nft_tunnel_module_init(void)
{
	int err;

	err = nft_register_expr(&nft_tunnel_type);
	if (err < 0)
		return err;

	err = nft_register_obj(&nft_tunnel_obj_type);
	if (err < 0)
		nft_unregister_expr(&nft_tunnel_type);

	return err;
}

/* Unregister in reverse order of registration. */
static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}
/* Module entry points and metadata; the aliases let the nft core
 * autoload this module for the "tunnel" expression and object type.
 */
module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);