xref: /openbmc/linux/net/netfilter/nft_tunnel.c (revision 09bae3b6)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>

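/* Private data of the "tunnel" expression: the tunnel key to fetch and the
 * destination register that receives it.
 */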
struct nft_tunnel {
	enum nft_tunnel_keys	key:8;
	enum nft_registers	dreg:8;
};

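/* Fetch the tunnel metadata attached to the packet and store the requested
 * key in the destination register: a boolean for NFT_TUNNEL_PATH, the tunnel
 * id in host byte order for NFT_TUNNEL_ID. Evaluation breaks with NFT_BREAK
 * when no tunnel metadata is present.
 */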
static void nft_tunnel_get_eval(const struct nft_expr *expr,
				struct nft_regs *regs,
				const struct nft_pktinfo *pkt)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 *dest = &regs->data[priv->dreg];
	struct ip_tunnel_info *tun_info;

	tun_info = skb_tunnel_info(pkt->skb);

	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		nft_reg_store8(dest, !!tun_info);
		break;
	case NFT_TUNNEL_ID:
		if (!tun_info) {
			regs->verdict.code = NFT_BREAK;
			return;
		}
		*dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
		break;
	default:
		WARN_ON(1);
		regs->verdict.code = NFT_BREAK;
	}
}

static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
	[NFTA_TUNNEL_KEY]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_DREG]	= { .type = NLA_U32 },
};

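/* Parse the expression attributes; both NFTA_TUNNEL_KEY and NFTA_TUNNEL_DREG
 * are mandatory. The register store length depends on the selected key.
 */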
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_tunnel *priv = nft_expr_priv(expr);
	u32 len;

	/* Reject the request if either attribute is missing; both are
	 * dereferenced unconditionally below.
	 */
	if (!tb[NFTA_TUNNEL_KEY] ||
	    !tb[NFTA_TUNNEL_DREG])
		return -EINVAL;

	priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
	switch (priv->key) {
	case NFT_TUNNEL_PATH:
		len = sizeof(u8);
		break;
	case NFT_TUNNEL_ID:
		len = sizeof(u32);
		break;
	default:
		return -EOPNOTSUPP;
	}

	priv->dreg = nft_parse_register(tb[NFTA_TUNNEL_DREG]);

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, len);
}

static int nft_tunnel_get_dump(struct sk_buff *skb,
			       const struct nft_expr *expr)
{
	const struct nft_tunnel *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
		goto nla_put_failure;
	if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static struct nft_expr_type nft_tunnel_type;
static const struct nft_expr_ops nft_tunnel_get_ops = {
	.type		= &nft_tunnel_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
	.eval		= nft_tunnel_get_eval,
	.init		= nft_tunnel_get_init,
	.dump		= nft_tunnel_get_dump,
};

static struct nft_expr_type nft_tunnel_type __read_mostly = {
	.name		= "tunnel",
	.ops		= &nft_tunnel_get_ops,
	.policy		= nft_tunnel_policy,
	.maxattr	= NFTA_TUNNEL_MAX,
	.owner		= THIS_MODULE,
};

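/* The remainder of this file implements the "tunnel" object, which builds a
 * metadata dst describing an IP tunnel (endpoint addresses, id, ports, flags
 * and vxlan/erspan options) and attaches it to packets on evaluation.
 */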
struct nft_tunnel_opts {
	union {
		struct vxlan_metadata	vxlan;
		struct erspan_metadata	erspan;
	} u;
	u32	len;
	__be16	flags;
};

struct nft_tunnel_obj {
	struct metadata_dst	*md;
	struct nft_tunnel_opts	opts;
};

static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP_SRC]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_IP_DST]	= { .type = NLA_U32 },
};

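/* Parse the IPv4 endpoint addresses; the destination address is mandatory. */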
static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
				  const struct nlattr *attr,
				  struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
	int err;

	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
			       nft_tunnel_ip_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP_SRC])
		info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
	if (tb[NFTA_TUNNEL_KEY_IP_DST])
		info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

	return 0;
}

static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP6_SRC]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_DST]	= { .len = sizeof(struct in6_addr), },
	[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]	= { .type = NLA_U32, }
};

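/* Parse the IPv6 endpoint addresses and optional flow label; the destination
 * address is mandatory.
 */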
static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
				   const struct nlattr *attr,
				   struct ip_tunnel_info *info)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
	int err;

	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
			       nft_tunnel_ip6_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
		return -EINVAL;

	if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
		memcpy(&info->key.u.ipv6.src,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
		memcpy(&info->key.u.ipv6.dst,
		       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
		       sizeof(struct in6_addr));
	}
	if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
		info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

	info->mode |= IP_TUNNEL_INFO_IPV6;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_VXLAN_GBP]	= { .type = NLA_U32 },
};

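/* Parse the VXLAN group based policy (GBP) option. */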
static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
				     struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
			       nft_tunnel_opts_vxlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
		return -EINVAL;

	opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));

	opts->len	= sizeof(struct vxlan_metadata);
	opts->flags	= TUNNEL_VXLAN_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
	[NFTA_TUNNEL_KEY_ERSPAN_VERSION]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]	= { .type = NLA_U32 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]		= { .type = NLA_U8 },
	[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]	= { .type = NLA_U8 },
};

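/* Parse ERSPAN options: the version attribute selects between the v1 index
 * and the v2 direction/hardware id fields.
 */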
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
				      struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
	uint8_t hwid, dir;
	int err, version;

	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX, attr,
			       nft_tunnel_opts_erspan_policy, NULL);
	if (err < 0)
		return err;

	/* The ERSPAN version attribute is mandatory. */
	if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
		return -EINVAL;

	version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
	switch (version) {
	case ERSPAN_VERSION:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
			return -EINVAL;

		opts->u.erspan.u.index =
			nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
		break;
	case ERSPAN_VERSION2:
		if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
		    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
			return -EINVAL;

		hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
		dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

		set_hwid(&opts->u.erspan.u.md2, hwid);
		opts->u.erspan.u.md2.dir = dir;
		break;
	default:
		return -EOPNOTSUPP;
	}
	opts->u.erspan.version = version;

	opts->len	= sizeof(struct erspan_metadata);
	opts->flags	= TUNNEL_ERSPAN_OPT;

	return 0;
}

static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
	[NFTA_TUNNEL_KEY_OPTS_VXLAN]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_OPTS_ERSPAN]	= { .type = NLA_NESTED, },
};

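/* Parse tunnel options; exactly one of the VXLAN or ERSPAN nests is used. */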
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
				    const struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct nft_tunnel_opts *opts)
{
	struct nlattr *tb[NFTA_TUNNEL_KEY_OPTS_MAX + 1];
	int err;

	err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_OPTS_MAX, attr,
			       nft_tunnel_opts_policy, NULL);
	if (err < 0)
		return err;

	if (tb[NFTA_TUNNEL_KEY_OPTS_VXLAN]) {
		err = nft_tunnel_obj_vxlan_init(tb[NFTA_TUNNEL_KEY_OPTS_VXLAN],
						opts);
	} else if (tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN]) {
		err = nft_tunnel_obj_erspan_init(tb[NFTA_TUNNEL_KEY_OPTS_ERSPAN],
						 opts);
	} else {
		return -EOPNOTSUPP;
	}

	return err;
}

static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
	[NFTA_TUNNEL_KEY_IP]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_IP6]	= { .type = NLA_NESTED, },
	[NFTA_TUNNEL_KEY_ID]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_FLAGS]	= { .type = NLA_U32, },
	[NFTA_TUNNEL_KEY_TOS]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_TTL]	= { .type = NLA_U8, },
	[NFTA_TUNNEL_KEY_SPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_DPORT]	= { .type = NLA_U16, },
	[NFTA_TUNNEL_KEY_OPTS]	= { .type = NLA_NESTED, },
};

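/* Build the tunnel object from netlink attributes: fill an ip_tunnel_info
 * from the id, endpoint addresses, ports, flags, tos/ttl and options, then
 * allocate the metadata dst that evaluation attaches to packets.
 */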
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
			       const struct nlattr * const tb[],
			       struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info info;
	struct metadata_dst *md;
	int err;

	if (!tb[NFTA_TUNNEL_KEY_ID])
		return -EINVAL;

	memset(&info, 0, sizeof(info));
	info.mode		= IP_TUNNEL_INFO_TX;
	info.key.tun_id		= key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
	info.key.tun_flags	= TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

	if (tb[NFTA_TUNNEL_KEY_IP]) {
		err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
		if (err < 0)
			return err;
	} else if (tb[NFTA_TUNNEL_KEY_IP6]) {
		err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
		if (err < 0)
			return err;
	} else {
		return -EINVAL;
	}

	if (tb[NFTA_TUNNEL_KEY_SPORT])
		info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
	if (tb[NFTA_TUNNEL_KEY_DPORT])
		info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);

	if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
		u32 tun_flags;

		tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
		if (tun_flags & ~NFT_TUNNEL_F_MASK)
			return -EOPNOTSUPP;

		if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
			info.key.tun_flags &= ~TUNNEL_CSUM;
		if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
			info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
		if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
			info.key.tun_flags |= TUNNEL_SEQ;
	}
	if (tb[NFTA_TUNNEL_KEY_TOS])
		info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
	if (tb[NFTA_TUNNEL_KEY_TTL])
		info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
	else
		info.key.ttl = U8_MAX;

	if (tb[NFTA_TUNNEL_KEY_OPTS]) {
		err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
					       &info, &priv->opts);
		if (err < 0)
			return err;
	}

	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	memcpy(&md->u.tun_info, &info, sizeof(info));
	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
				priv->opts.flags);
	priv->md = md;

	return 0;
}

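/* Attach the tunnel metadata dst to the packet, replacing its current dst. */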
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
				       struct nft_regs *regs,
				       const struct nft_pktinfo *pkt)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct sk_buff *skb = pkt->skb;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) priv->md);
	skb_dst_set(skb, (struct dst_entry *) priv->md);
}

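/* Netlink dump helpers for the tunnel object. */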
static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
	struct nlattr *nest;

	if (info->mode & IP_TUNNEL_INFO_IPV6) {
		nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_IP6);
		if (!nest)
			return -1;

		if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC, &info->key.u.ipv6.src) < 0 ||
		    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST, &info->key.u.ipv6.dst) < 0 ||
		    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL, info->key.label))
			return -1;

		nla_nest_end(skb, nest);
	} else {
		nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_IP);
		if (!nest)
			return -1;

		if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC, info->key.u.ipv4.src) < 0 ||
		    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST, info->key.u.ipv4.dst) < 0)
			return -1;

		nla_nest_end(skb, nest);
	}

	return 0;
}

static int nft_tunnel_opts_dump(struct sk_buff *skb,
				struct nft_tunnel_obj *priv)
{
	struct nft_tunnel_opts *opts = &priv->opts;
	struct nlattr *nest;

	nest = nla_nest_start(skb, NFTA_TUNNEL_KEY_OPTS);
	if (!nest)
		return -1;

	if (opts->flags & TUNNEL_VXLAN_OPT) {
		if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
				 htonl(opts->u.vxlan.gbp)))
			return -1;
	} else if (opts->flags & TUNNEL_ERSPAN_OPT) {
		switch (opts->u.erspan.version) {
		case ERSPAN_VERSION:
			if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
					 opts->u.erspan.u.index))
				return -1;
			break;
		case ERSPAN_VERSION2:
			if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
				       get_hwid(&opts->u.erspan.u.md2)) ||
			    nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
				       opts->u.erspan.u.md2.dir))
				return -1;
			break;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int nft_tunnel_ports_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	/* tp_src/tp_dst are already big endian, dump them as-is. */
	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
		return -1;

	return 0;
}

static int nft_tunnel_flags_dump(struct sk_buff *skb,
				 struct ip_tunnel_info *info)
{
	u32 flags = 0;

	if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
		flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
	if (!(info->key.tun_flags & TUNNEL_CSUM))
		flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
	if (info->key.tun_flags & TUNNEL_SEQ)
		flags |= NFT_TUNNEL_F_SEQ_NUMBER;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
		return -1;

	return 0;
}

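/* Dump the tunnel object configuration back to userspace. */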
static int nft_tunnel_obj_dump(struct sk_buff *skb,
			       struct nft_object *obj, bool reset)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);
	struct ip_tunnel_info *info = &priv->md->u.tun_info;

	if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
			 tunnel_id_to_key32(info->key.tun_id)) ||
	    nft_tunnel_ip_dump(skb, info) < 0 ||
	    nft_tunnel_ports_dump(skb, info) < 0 ||
	    nft_tunnel_flags_dump(skb, info) < 0 ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
	    nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
	    nft_tunnel_opts_dump(skb, priv) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
				   struct nft_object *obj)
{
	struct nft_tunnel_obj *priv = nft_obj_data(obj);

	metadata_dst_free(priv->md);
}

static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
	.type		= &nft_tunnel_obj_type,
	.size		= sizeof(struct nft_tunnel_obj),
	.eval		= nft_tunnel_obj_eval,
	.init		= nft_tunnel_obj_init,
	.destroy	= nft_tunnel_obj_destroy,
	.dump		= nft_tunnel_obj_dump,
};

static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
	.type		= NFT_OBJECT_TUNNEL,
	.ops		= &nft_tunnel_obj_ops,
	.maxattr	= NFTA_TUNNEL_KEY_MAX,
	.policy		= nft_tunnel_key_policy,
	.owner		= THIS_MODULE,
};

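/* Register the "tunnel" expression and the tunnel object type. */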
static int __init nft_tunnel_module_init(void)
{
	int err;

	err = nft_register_expr(&nft_tunnel_type);
	if (err < 0)
		return err;

	err = nft_register_obj(&nft_tunnel_obj_type);
	if (err < 0)
		nft_unregister_expr(&nft_tunnel_type);

	return err;
}

static void __exit nft_tunnel_module_exit(void)
{
	nft_unregister_obj(&nft_tunnel_obj_type);
	nft_unregister_expr(&nft_tunnel_type);
}

module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);