xref: /openbmc/linux/drivers/net/gtp.c (revision 6cc23ed2)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* GTP according to GSM TS 09.60 / 3GPP TS 29.060
3  *
4  * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
5  * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
6  *
7  * Author: Harald Welte <hwelte@sysmocom.de>
8  *	   Pablo Neira Ayuso <pablo@netfilter.org>
9  *	   Andreas Schultz <aschultz@travelping.com>
10  */
11 
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 
14 #include <linux/module.h>
15 #include <linux/skbuff.h>
16 #include <linux/udp.h>
17 #include <linux/rculist.h>
18 #include <linux/jhash.h>
19 #include <linux/if_tunnel.h>
20 #include <linux/net.h>
21 #include <linux/file.h>
22 #include <linux/gtp.h>
23 
24 #include <net/net_namespace.h>
25 #include <net/protocol.h>
26 #include <net/ip.h>
27 #include <net/udp.h>
28 #include <net/udp_tunnel.h>
29 #include <net/icmp.h>
30 #include <net/xfrm.h>
31 #include <net/genetlink.h>
32 #include <net/netns/generic.h>
33 #include <net/gtp.h>
34 
35 /* An active session for the subscriber. */
36 struct pdp_ctx {
37 	struct hlist_node	hlist_tid;
38 	struct hlist_node	hlist_addr;
39 
40 	union {
41 		u64		tid;
42 		struct {
43 			u64	tid;
44 			u16	flow;
45 		} v0;
46 		struct {
47 			u32	i_tei;
48 			u32	o_tei;
49 		} v1;
50 	} u;
51 	u8			gtp_version;
52 	u16			af;
53 
54 	struct in_addr		ms_addr_ip4;
55 	struct in_addr		peer_addr_ip4;
56 
57 	struct sock		*sk;
58 	struct net_device       *dev;
59 
60 	atomic_t		tx_seq;
61 	struct rcu_head		rcu_head;
62 };
63 
64 /* One instance of the GTP device. */
65 struct gtp_dev {
66 	struct list_head	list;
67 
68 	struct sock		*sk0;
69 	struct sock		*sk1u;
70 
71 	struct net_device	*dev;
72 
73 	unsigned int		role;
74 	unsigned int		hash_size;
75 	struct hlist_head	*tid_hash;
76 	struct hlist_head	*addr_hash;
77 };
78 
79 static unsigned int gtp_net_id __read_mostly;
80 
81 struct gtp_net {
82 	struct list_head gtp_dev_list;
83 };
84 
85 static u32 gtp_h_initval;
86 
87 static void pdp_context_delete(struct pdp_ctx *pctx);
88 
89 static inline u32 gtp0_hashfn(u64 tid)
90 {
91 	u32 *tid32 = (u32 *) &tid;
92 	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
93 }
94 
95 static inline u32 gtp1u_hashfn(u32 tid)
96 {
97 	return jhash_1word(tid, gtp_h_initval);
98 }
99 
100 static inline u32 ipv4_hashfn(__be32 ip)
101 {
102 	return jhash_1word((__force u32)ip, gtp_h_initval);
103 }
104 
105 /* Resolve a PDP context structure based on the 64bit TID. */
106 static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
107 {
108 	struct hlist_head *head;
109 	struct pdp_ctx *pdp;
110 
111 	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
112 
113 	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
114 		if (pdp->gtp_version == GTP_V0 &&
115 		    pdp->u.v0.tid == tid)
116 			return pdp;
117 	}
118 	return NULL;
119 }
120 
121 /* Resolve a PDP context structure based on the 32bit TEI. */
122 static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
123 {
124 	struct hlist_head *head;
125 	struct pdp_ctx *pdp;
126 
127 	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
128 
129 	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
130 		if (pdp->gtp_version == GTP_V1 &&
131 		    pdp->u.v1.i_tei == tid)
132 			return pdp;
133 	}
134 	return NULL;
135 }
136 
137 /* Resolve a PDP context based on IPv4 address of MS. */
138 static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
139 {
140 	struct hlist_head *head;
141 	struct pdp_ctx *pdp;
142 
143 	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];
144 
145 	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
146 		if (pdp->af == AF_INET &&
147 		    pdp->ms_addr_ip4.s_addr == ms_addr)
148 			return pdp;
149 	}
150 
151 	return NULL;
152 }
153 
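/* Compare the address of the encapsulated IPv4 packet against the MS address
 * stored in the PDP context: the MS is the inner source address when acting
 * as GGSN and the inner destination address when acting as SGSN.
 */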
154 static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
155 				  unsigned int hdrlen, unsigned int role)
156 {
157 	struct iphdr *iph;
158 
159 	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
160 		return false;
161 
162 	iph = (struct iphdr *)(skb->data + hdrlen);
163 
164 	if (role == GTP_ROLE_SGSN)
165 		return iph->daddr == pctx->ms_addr_ip4.s_addr;
166 	else
167 		return iph->saddr == pctx->ms_addr_ip4.s_addr;
168 }
169 
170 /* Check if the inner IP address in this packet belongs to the mobile
171  * subscriber that owns this PDP context.
172  */
173 static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
174 			     unsigned int hdrlen, unsigned int role)
175 {
176 	switch (ntohs(skb->protocol)) {
177 	case ETH_P_IP:
178 		return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
179 	}
180 	return false;
181 }
182 
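/* Common decapsulation path for GTPv0 and GTPv1-U: verify that the inner
 * packet really belongs to the subscriber of this PDP context, strip the
 * UDP/GTP encapsulation, account the packet and inject it into the stack
 * through the gtp net_device.
 */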
183 static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
184 			unsigned int hdrlen, unsigned int role)
185 {
186 	struct pcpu_sw_netstats *stats;
187 
188 	if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
189 		netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
190 		return 1;
191 	}
192 
193 	/* Get rid of the GTP + UDP headers. */
194 	if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
195 				 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
196 		return -1;
197 
198 	netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
199 
200 	/* Now that the UDP and the GTP header have been removed, set up the
201 	 * new network header. This is required by the upper layer to
202 	 * calculate the transport header.
203 	 */
204 	skb_reset_network_header(skb);
205 
206 	skb->dev = pctx->dev;
207 
208 	stats = this_cpu_ptr(pctx->dev->tstats);
209 	u64_stats_update_begin(&stats->syncp);
210 	stats->rx_packets++;
211 	stats->rx_bytes += skb->len;
212 	u64_stats_update_end(&stats->syncp);
213 
214 	netif_rx(skb);
215 	return 0;
216 }
217 
218 /* 1 means pass up to the UDP socket, -1 means drop and 0 means decapsulated. */
219 static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
220 {
221 	unsigned int hdrlen = sizeof(struct udphdr) +
222 			      sizeof(struct gtp0_header);
223 	struct gtp0_header *gtp0;
224 	struct pdp_ctx *pctx;
225 
226 	if (!pskb_may_pull(skb, hdrlen))
227 		return -1;
228 
229 	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
230 
231 	if ((gtp0->flags >> 5) != GTP_V0)
232 		return 1;
233 
234 	if (gtp0->type != GTP_TPDU)
235 		return 1;
236 
237 	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
238 	if (!pctx) {
239 		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
240 		return 1;
241 	}
242 
243 	return gtp_rx(pctx, skb, hdrlen, gtp->role);
244 }
245 
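/* GTPv1-U counterpart of gtp0_udp_encap_recv(), using the same return
 * convention. The header length is adjusted below when the optional
 * sequence number/N-PDU/extension field is present.
 */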
246 static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
247 {
248 	unsigned int hdrlen = sizeof(struct udphdr) +
249 			      sizeof(struct gtp1_header);
250 	struct gtp1_header *gtp1;
251 	struct pdp_ctx *pctx;
252 
253 	if (!pskb_may_pull(skb, hdrlen))
254 		return -1;
255 
256 	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
257 
258 	if ((gtp1->flags >> 5) != GTP_V1)
259 		return 1;
260 
261 	if (gtp1->type != GTP_TPDU)
262 		return 1;
263 
264 	/* From 29.060: "This field shall be present if and only if any one or
265 	 * more of the S, PN and E flags are set.".
266 	 *
267 	 * If any one of these flags is set, the whole optional 4-byte field
268 	 * (sequence number, N-PDU number, next extension header) is present.
269 	 */
270 	if (gtp1->flags & GTP1_F_MASK)
271 		hdrlen += 4;
272 
273 	/* Make sure the header is large enough, including extensions. */
274 	if (!pskb_may_pull(skb, hdrlen))
275 		return -1;
276 
277 	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
278 
279 	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
280 	if (!pctx) {
281 		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
282 		return 1;
283 	}
284 
285 	return gtp_rx(pctx, skb, hdrlen, gtp->role);
286 }
287 
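/* Detach a UDP socket from its gtp device: clear the back pointers and the
 * encap type, and drop the socket reference taken when encapsulation was
 * enabled on it.
 */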
288 static void __gtp_encap_destroy(struct sock *sk)
289 {
290 	struct gtp_dev *gtp;
291 
292 	lock_sock(sk);
293 	gtp = sk->sk_user_data;
294 	if (gtp) {
295 		if (gtp->sk0 == sk)
296 			gtp->sk0 = NULL;
297 		else
298 			gtp->sk1u = NULL;
299 		udp_sk(sk)->encap_type = 0;
300 		rcu_assign_sk_user_data(sk, NULL);
301 		sock_put(sk);
302 	}
303 	release_sock(sk);
304 }
305 
306 static void gtp_encap_destroy(struct sock *sk)
307 {
308 	rtnl_lock();
309 	__gtp_encap_destroy(sk);
310 	rtnl_unlock();
311 }
312 
313 static void gtp_encap_disable_sock(struct sock *sk)
314 {
315 	if (!sk)
316 		return;
317 
318 	__gtp_encap_destroy(sk);
319 }
320 
321 static void gtp_encap_disable(struct gtp_dev *gtp)
322 {
323 	gtp_encap_disable_sock(gtp->sk0);
324 	gtp_encap_disable_sock(gtp->sk1u);
325 }
326 
327 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
328  * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
329  */
330 static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
331 {
332 	struct gtp_dev *gtp;
333 	int ret = 0;
334 
335 	gtp = rcu_dereference_sk_user_data(sk);
336 	if (!gtp)
337 		return 1;
338 
339 	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
340 
341 	switch (udp_sk(sk)->encap_type) {
342 	case UDP_ENCAP_GTP0:
343 		netdev_dbg(gtp->dev, "received GTP0 packet\n");
344 		ret = gtp0_udp_encap_recv(gtp, skb);
345 		break;
346 	case UDP_ENCAP_GTP1U:
347 		netdev_dbg(gtp->dev, "received GTP1U packet\n");
348 		ret = gtp1u_udp_encap_recv(gtp, skb);
349 		break;
350 	default:
351 		ret = -1; /* Shouldn't happen. */
352 	}
353 
354 	switch (ret) {
355 	case 1:
356 		netdev_dbg(gtp->dev, "pass up to the process\n");
357 		break;
358 	case 0:
359 		break;
360 	case -1:
361 		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
362 		kfree_skb(skb);
363 		ret = 0;
364 		break;
365 	}
366 
367 	return ret;
368 }
369 
370 static int gtp_dev_init(struct net_device *dev)
371 {
372 	struct gtp_dev *gtp = netdev_priv(dev);
373 
374 	gtp->dev = dev;
375 
376 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
377 	if (!dev->tstats)
378 		return -ENOMEM;
379 
380 	return 0;
381 }
382 
383 static void gtp_dev_uninit(struct net_device *dev)
384 {
385 	struct gtp_dev *gtp = netdev_priv(dev);
386 
387 	gtp_encap_disable(gtp);
388 	free_percpu(dev->tstats);
389 }
390 
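/* Route lookup for the outer IPv4 header, keyed by the tunnel socket and the
 * peer (remote GSN) address.
 */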
391 static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
392 					   const struct sock *sk,
393 					   __be32 daddr)
394 {
395 	memset(fl4, 0, sizeof(*fl4));
396 	fl4->flowi4_oif		= sk->sk_bound_dev_if;
397 	fl4->daddr		= daddr;
398 	fl4->saddr		= inet_sk(sk)->inet_saddr;
399 	fl4->flowi4_tos		= RT_CONN_FLAGS(sk);
400 	fl4->flowi4_proto	= sk->sk_protocol;
401 
402 	return ip_route_output_key(sock_net(sk), fl4);
403 }
404 
405 static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
406 {
407 	int payload_len = skb->len;
408 	struct gtp0_header *gtp0;
409 
410 	gtp0 = skb_push(skb, sizeof(*gtp0));
411 
412 	gtp0->flags	= 0x1e; /* v0, GTP-non-prime. */
413 	gtp0->type	= GTP_TPDU;
414 	gtp0->length	= htons(payload_len);
415 	gtp0->seq	= htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
416 	gtp0->flow	= htons(pctx->u.v0.flow);
417 	gtp0->number	= 0xff;
418 	gtp0->spare[0]	= gtp0->spare[1] = gtp0->spare[2] = 0xff;
419 	gtp0->tid	= cpu_to_be64(pctx->u.v0.tid);
420 }
421 
422 static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
423 {
424 	int payload_len = skb->len;
425 	struct gtp1_header *gtp1;
426 
427 	gtp1 = skb_push(skb, sizeof(*gtp1));
428 
429 	/* Bits    8  7  6  5  4  3  2	1
430 	 *	  +--+--+--+--+--+--+--+--+
431 	 *	  |version |PT| 0| E| S|PN|
432 	 *	  +--+--+--+--+--+--+--+--+
433 	 *	    0  0  1  1	1  0  0  0
434 	 */
435 	gtp1->flags	= 0x30; /* v1, GTP-non-prime. */
436 	gtp1->type	= GTP_TPDU;
437 	gtp1->length	= htons(payload_len);
438 	gtp1->tid	= htonl(pctx->u.v1.o_tei);
439 
440 	/* TODO: Support for extension header, sequence number and N-PDU.
441 	 *	 Update the length field if any of them is available.
442 	 */
443 }
444 
445 struct gtp_pktinfo {
446 	struct sock		*sk;
447 	struct iphdr		*iph;
448 	struct flowi4		fl4;
449 	struct rtable		*rt;
450 	struct pdp_ctx		*pctx;
451 	struct net_device	*dev;
452 	__be16			gtph_port;
453 };
454 
455 static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
456 {
457 	switch (pktinfo->pctx->gtp_version) {
458 	case GTP_V0:
459 		pktinfo->gtph_port = htons(GTP0_PORT);
460 		gtp0_push_header(skb, pktinfo->pctx);
461 		break;
462 	case GTP_V1:
463 		pktinfo->gtph_port = htons(GTP1U_PORT);
464 		gtp1_push_header(skb, pktinfo->pctx);
465 		break;
466 	}
467 }
468 
469 static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
470 					struct sock *sk, struct iphdr *iph,
471 					struct pdp_ctx *pctx, struct rtable *rt,
472 					struct flowi4 *fl4,
473 					struct net_device *dev)
474 {
475 	pktinfo->sk	= sk;
476 	pktinfo->iph	= iph;
477 	pktinfo->pctx	= pctx;
478 	pktinfo->rt	= rt;
479 	pktinfo->fl4	= *fl4;
480 	pktinfo->dev	= dev;
481 }
482 
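/* Encapsulation path for IPv4 payloads: resolve the PDP context from the
 * inner address, route towards the peer, enforce the tunnel MTU and prepend
 * the GTP header. The outer UDP/IP headers are added by the caller.
 */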
483 static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
484 			     struct gtp_pktinfo *pktinfo)
485 {
486 	struct gtp_dev *gtp = netdev_priv(dev);
487 	struct pdp_ctx *pctx;
488 	struct rtable *rt;
489 	struct flowi4 fl4;
490 	struct iphdr *iph;
491 	__be16 df;
492 	int mtu;
493 
494 	/* Read the IP destination address and resolve the PDP context.
495 	 * Prepend PDP header with TEI/TID from PDP ctx.
496 	 */
497 	iph = ip_hdr(skb);
498 	if (gtp->role == GTP_ROLE_SGSN)
499 		pctx = ipv4_pdp_find(gtp, iph->saddr);
500 	else
501 		pctx = ipv4_pdp_find(gtp, iph->daddr);
502 
503 	if (!pctx) {
504 		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
505 			   &iph->daddr);
506 		return -ENOENT;
507 	}
508 	netdev_dbg(dev, "found PDP context %p\n", pctx);
509 
510 	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
511 	if (IS_ERR(rt)) {
512 		netdev_dbg(dev, "no route to SGSN %pI4\n",
513 			   &pctx->peer_addr_ip4.s_addr);
514 		dev->stats.tx_carrier_errors++;
515 		goto err;
516 	}
517 
518 	if (rt->dst.dev == dev) {
519 		netdev_dbg(dev, "circular route to SGSN %pI4\n",
520 			   &pctx->peer_addr_ip4.s_addr);
521 		dev->stats.collisions++;
522 		goto err_rt;
523 	}
524 
525 	skb_dst_drop(skb);
526 
527 	/* This is similar to tnl_update_pmtu(). */
528 	df = iph->frag_off;
529 	if (df) {
530 		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
531 			sizeof(struct iphdr) - sizeof(struct udphdr);
532 		switch (pctx->gtp_version) {
533 		case GTP_V0:
534 			mtu -= sizeof(struct gtp0_header);
535 			break;
536 		case GTP_V1:
537 			mtu -= sizeof(struct gtp1_header);
538 			break;
539 		}
540 	} else {
541 		mtu = dst_mtu(&rt->dst);
542 	}
543 
544 	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
545 
546 	if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
547 	    mtu < ntohs(iph->tot_len)) {
548 		netdev_dbg(dev, "packet too big, fragmentation needed\n");
549 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
550 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
551 			  htonl(mtu));
552 		goto err_rt;
553 	}
554 
555 	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
556 	gtp_push_header(skb, pktinfo);
557 
558 	return 0;
559 err_rt:
560 	ip_rt_put(rt);
561 err:
562 	return -EBADMSG;
563 }
564 
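/* Transmit path of the gtp device: build the GTP encapsulation for IPv4
 * payloads and hand the result to the UDP tunnel transmit helper. Anything
 * else is dropped and counted as a tx error.
 */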
565 static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
566 {
567 	unsigned int proto = ntohs(skb->protocol);
568 	struct gtp_pktinfo pktinfo;
569 	int err;
570 
571 	/* Ensure there is sufficient headroom. */
572 	if (skb_cow_head(skb, dev->needed_headroom))
573 		goto tx_err;
574 
575 	skb_reset_inner_headers(skb);
576 
577 	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
578 	rcu_read_lock();
579 	switch (proto) {
580 	case ETH_P_IP:
581 		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
582 		break;
583 	default:
584 		err = -EOPNOTSUPP;
585 		break;
586 	}
587 	rcu_read_unlock();
588 
589 	if (err < 0)
590 		goto tx_err;
591 
592 	switch (proto) {
593 	case ETH_P_IP:
594 		netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
595 			   &pktinfo.iph->saddr, &pktinfo.iph->daddr);
596 		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
597 				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
598 				    pktinfo.iph->tos,
599 				    ip4_dst_hoplimit(&pktinfo.rt->dst),
600 				    0,
601 				    pktinfo.gtph_port, pktinfo.gtph_port,
602 				    true, false);
603 		break;
604 	}
605 
606 	return NETDEV_TX_OK;
607 tx_err:
608 	dev->stats.tx_errors++;
609 	dev_kfree_skb(skb);
610 	return NETDEV_TX_OK;
611 }
612 
613 static const struct net_device_ops gtp_netdev_ops = {
614 	.ndo_init		= gtp_dev_init,
615 	.ndo_uninit		= gtp_dev_uninit,
616 	.ndo_start_xmit		= gtp_dev_xmit,
617 	.ndo_get_stats64	= ip_tunnel_get_stats64,
618 };
619 
620 static void gtp_link_setup(struct net_device *dev)
621 {
622 	dev->netdev_ops		= &gtp_netdev_ops;
623 	dev->needs_free_netdev	= true;
624 
625 	dev->hard_header_len = 0;
626 	dev->addr_len = 0;
627 
628 	/* Zero header length. */
629 	dev->type = ARPHRD_NONE;
630 	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
631 
632 	dev->priv_flags	|= IFF_NO_QUEUE;
633 	dev->features	|= NETIF_F_LLTX;
634 	netif_keep_dst(dev);
635 
636 	/* Assume largest header, i.e. GTPv0. */
637 	dev->needed_headroom	= LL_MAX_HEADER +
638 				  sizeof(struct iphdr) +
639 				  sizeof(struct udphdr) +
640 				  sizeof(struct gtp0_header);
641 }
642 
643 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
644 static void gtp_hashtable_free(struct gtp_dev *gtp);
645 static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
646 
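/* rtnl newlink handler: attach the UDP socket(s) passed in from userspace
 * via IFLA_GTP_FD0/FD1, allocate the PDP context hash tables and register
 * the new gtp device.
 */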
647 static int gtp_newlink(struct net *src_net, struct net_device *dev,
648 		       struct nlattr *tb[], struct nlattr *data[],
649 		       struct netlink_ext_ack *extack)
650 {
651 	struct gtp_dev *gtp;
652 	struct gtp_net *gn;
653 	int hashsize, err;
654 
655 	if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
656 		return -EINVAL;
657 
658 	gtp = netdev_priv(dev);
659 
660 	err = gtp_encap_enable(gtp, data);
661 	if (err < 0)
662 		return err;
663 
664 	if (!data[IFLA_GTP_PDP_HASHSIZE])
665 		hashsize = 1024;
666 	else
667 		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
668 
669 	err = gtp_hashtable_new(gtp, hashsize);
670 	if (err < 0)
671 		goto out_encap;
672 
673 	err = register_netdevice(dev);
674 	if (err < 0) {
675 		netdev_dbg(dev, "failed to register new netdev %d\n", err);
676 		goto out_hashtable;
677 	}
678 
679 	gn = net_generic(dev_net(dev), gtp_net_id);
680 	list_add_rcu(&gtp->list, &gn->gtp_dev_list);
681 
682 	netdev_dbg(dev, "registered new GTP interface\n");
683 
684 	return 0;
685 
686 out_hashtable:
687 	gtp_hashtable_free(gtp);
688 out_encap:
689 	gtp_encap_disable(gtp);
690 	return err;
691 }
692 
693 static void gtp_dellink(struct net_device *dev, struct list_head *head)
694 {
695 	struct gtp_dev *gtp = netdev_priv(dev);
696 
697 	gtp_hashtable_free(gtp);
698 	list_del_rcu(&gtp->list);
699 	unregister_netdevice_queue(dev, head);
700 }
701 
702 static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
703 	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
704 	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
705 	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
706 	[IFLA_GTP_ROLE]			= { .type = NLA_U32 },
707 };
708 
709 static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
710 			struct netlink_ext_ack *extack)
711 {
712 	if (!data)
713 		return -EINVAL;
714 
715 	return 0;
716 }
717 
718 static size_t gtp_get_size(const struct net_device *dev)
719 {
720 	return nla_total_size(sizeof(__u32));	/* IFLA_GTP_PDP_HASHSIZE */
721 }
722 
723 static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
724 {
725 	struct gtp_dev *gtp = netdev_priv(dev);
726 
727 	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
728 		goto nla_put_failure;
729 
730 	return 0;
731 
732 nla_put_failure:
733 	return -EMSGSIZE;
734 }
735 
736 static struct rtnl_link_ops gtp_link_ops __read_mostly = {
737 	.kind		= "gtp",
738 	.maxtype	= IFLA_GTP_MAX,
739 	.policy		= gtp_policy,
740 	.priv_size	= sizeof(struct gtp_dev),
741 	.setup		= gtp_link_setup,
742 	.validate	= gtp_validate,
743 	.newlink	= gtp_newlink,
744 	.dellink	= gtp_dellink,
745 	.get_size	= gtp_get_size,
746 	.fill_info	= gtp_fill_info,
747 };
748 
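/* Allocate the two PDP context hash tables (keyed by MS address and by
 * TID/TEI) with @hsize buckets each.
 */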
749 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
750 {
751 	int i;
752 
753 	gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
754 				       GFP_KERNEL);
755 	if (gtp->addr_hash == NULL)
756 		return -ENOMEM;
757 
758 	gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
759 				      GFP_KERNEL);
760 	if (gtp->tid_hash == NULL)
761 		goto err1;
762 
763 	gtp->hash_size = hsize;
764 
765 	for (i = 0; i < hsize; i++) {
766 		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
767 		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
768 	}
769 	return 0;
770 err1:
771 	kfree(gtp->addr_hash);
772 	return -ENOMEM;
773 }
774 
775 static void gtp_hashtable_free(struct gtp_dev *gtp)
776 {
777 	struct pdp_ctx *pctx;
778 	int i;
779 
780 	for (i = 0; i < gtp->hash_size; i++)
781 		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
782 			pdp_context_delete(pctx);
783 
784 	synchronize_rcu();
785 	kfree(gtp->addr_hash);
786 	kfree(gtp->tid_hash);
787 }
788 
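/* Look up the UDP socket behind @fd, take a reference on it and turn it into
 * a GTP encapsulation socket of the given type (GTPv0 or GTPv1-U).
 */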
789 static struct sock *gtp_encap_enable_socket(int fd, int type,
790 					    struct gtp_dev *gtp)
791 {
792 	struct udp_tunnel_sock_cfg tuncfg = {NULL};
793 	struct socket *sock;
794 	struct sock *sk;
795 	int err;
796 
797 	pr_debug("enable gtp on %d, %d\n", fd, type);
798 
799 	sock = sockfd_lookup(fd, &err);
800 	if (!sock) {
801 		pr_debug("gtp socket fd=%d not found\n", fd);
802 		return NULL;
803 	}
804 
805 	if (sock->sk->sk_protocol != IPPROTO_UDP) {
806 		pr_debug("socket fd=%d not UDP\n", fd);
807 		sk = ERR_PTR(-EINVAL);
808 		goto out_sock;
809 	}
810 
811 	lock_sock(sock->sk);
812 	if (sock->sk->sk_user_data) {
813 		sk = ERR_PTR(-EBUSY);
814 		goto out_sock;
815 	}
816 
817 	sk = sock->sk;
818 	sock_hold(sk);
819 
820 	tuncfg.sk_user_data = gtp;
821 	tuncfg.encap_type = type;
822 	tuncfg.encap_rcv = gtp_encap_recv;
823 	tuncfg.encap_destroy = gtp_encap_destroy;
824 
825 	setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
826 
827 out_sock:
828 	release_sock(sock->sk);
829 	sockfd_put(sock);
830 	return sk;
831 }
832 
833 static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
834 {
835 	struct sock *sk1u = NULL;
836 	struct sock *sk0 = NULL;
837 	unsigned int role = GTP_ROLE_GGSN;
838 
839 	if (data[IFLA_GTP_FD0]) {
840 		u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
841 
842 		sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
843 		if (IS_ERR(sk0))
844 			return PTR_ERR(sk0);
845 	}
846 
847 	if (data[IFLA_GTP_FD1]) {
848 		u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
849 
850 		sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
851 		if (IS_ERR(sk1u)) {
852 			if (sk0)
853 				gtp_encap_disable_sock(sk0);
854 			return PTR_ERR(sk1u);
855 		}
856 	}
857 
858 	if (data[IFLA_GTP_ROLE]) {
859 		role = nla_get_u32(data[IFLA_GTP_ROLE]);
860 		if (role > GTP_ROLE_SGSN) {
861 			if (sk0)
862 				gtp_encap_disable_sock(sk0);
863 			if (sk1u)
864 				gtp_encap_disable_sock(sk1u);
865 			return -EINVAL;
866 		}
867 	}
868 
869 	gtp->sk0 = sk0;
870 	gtp->sk1u = sk1u;
871 	gtp->role = role;
872 
873 	return 0;
874 }
875 
876 static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
877 {
878 	struct gtp_dev *gtp = NULL;
879 	struct net_device *dev;
880 	struct net *net;
881 
882 	/* Examine the link attributes and figure out which network namespace
883 	 * we are talking about.
884 	 */
885 	if (nla[GTPA_NET_NS_FD])
886 		net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
887 	else
888 		net = get_net(src_net);
889 
890 	if (IS_ERR(net))
891 		return NULL;
892 
893 	/* Check if there's an existing gtpX device to configure */
894 	dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
895 	if (dev && dev->netdev_ops == &gtp_netdev_ops)
896 		gtp = netdev_priv(dev);
897 
898 	put_net(net);
899 	return gtp;
900 }
901 
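/* Fill the PDP context fields from the GTPA_* attributes of a netlink request. */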
902 static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
903 {
904 	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
905 	pctx->af = AF_INET;
906 	pctx->peer_addr_ip4.s_addr =
907 		nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
908 	pctx->ms_addr_ip4.s_addr =
909 		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
910 
911 	switch (pctx->gtp_version) {
912 	case GTP_V0:
913 		/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
914 		 * label needs to be the same for uplink and downlink packets,
915 		 * so let's annotate this.
916 		 */
917 		pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
918 		pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
919 		break;
920 	case GTP_V1:
921 		pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
922 		pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
923 		break;
924 	default:
925 		break;
926 	}
927 }
928 
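/* Add a new PDP context or update an existing one. Contexts are looked up by
 * MS address; with NLM_F_EXCL an existing entry is an error and NLM_F_REPLACE
 * is not supported.
 */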
929 static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
930 			struct genl_info *info)
931 {
932 	struct net_device *dev = gtp->dev;
933 	u32 hash_ms, hash_tid = 0;
934 	struct pdp_ctx *pctx;
935 	bool found = false;
936 	__be32 ms_addr;
937 
938 	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
939 	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
940 
941 	hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
942 		if (pctx->ms_addr_ip4.s_addr == ms_addr) {
943 			found = true;
944 			break;
945 		}
946 	}
947 
948 	if (found) {
949 		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
950 			return -EEXIST;
951 		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
952 			return -EOPNOTSUPP;
953 
954 		ipv4_pdp_fill(pctx, info);
955 
956 		if (pctx->gtp_version == GTP_V0)
957 			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
958 				   pctx->u.v0.tid, pctx);
959 		else if (pctx->gtp_version == GTP_V1)
960 			netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
961 				   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
962 
963 		return 0;
964 
965 	}
966 
967 	pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
968 	if (pctx == NULL)
969 		return -ENOMEM;
970 
971 	sock_hold(sk);
972 	pctx->sk = sk;
973 	pctx->dev = gtp->dev;
974 	ipv4_pdp_fill(pctx, info);
975 	atomic_set(&pctx->tx_seq, 0);
976 
977 	switch (pctx->gtp_version) {
978 	case GTP_V0:
979 		/* TS 09.60: "The flow label identifies unambiguously a GTP
980 		 * flow.". We use the tid for this instead, I cannot find a
981 		 * flow.". We use the tid for this instead; I cannot find a
982 		 * situation in which it does not unambiguously identify the
983 		 */
984 		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
985 		break;
986 	case GTP_V1:
987 		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
988 		break;
989 	}
990 
991 	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
992 	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);
993 
994 	switch (pctx->gtp_version) {
995 	case GTP_V0:
996 		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
997 			   pctx->u.v0.tid, &pctx->peer_addr_ip4,
998 			   &pctx->ms_addr_ip4, pctx);
999 		break;
1000 	case GTP_V1:
1001 		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
1002 			   pctx->u.v1.i_tei, pctx->u.v1.o_tei,
1003 			   &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
1004 		break;
1005 	}
1006 
1007 	return 0;
1008 }
1009 
1010 static void pdp_context_free(struct rcu_head *head)
1011 {
1012 	struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);
1013 
1014 	sock_put(pctx->sk);
1015 	kfree(pctx);
1016 }
1017 
1018 static void pdp_context_delete(struct pdp_ctx *pctx)
1019 {
1020 	hlist_del_rcu(&pctx->hlist_tid);
1021 	hlist_del_rcu(&pctx->hlist_addr);
1022 	call_rcu(&pctx->rcu_head, pdp_context_free);
1023 }
1024 
1025 static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
1026 {
1027 	unsigned int version;
1028 	struct gtp_dev *gtp;
1029 	struct sock *sk;
1030 	int err;
1031 
1032 	if (!info->attrs[GTPA_VERSION] ||
1033 	    !info->attrs[GTPA_LINK] ||
1034 	    !info->attrs[GTPA_PEER_ADDRESS] ||
1035 	    !info->attrs[GTPA_MS_ADDRESS])
1036 		return -EINVAL;
1037 
1038 	version = nla_get_u32(info->attrs[GTPA_VERSION]);
1039 
1040 	switch (version) {
1041 	case GTP_V0:
1042 		if (!info->attrs[GTPA_TID] ||
1043 		    !info->attrs[GTPA_FLOW])
1044 			return -EINVAL;
1045 		break;
1046 	case GTP_V1:
1047 		if (!info->attrs[GTPA_I_TEI] ||
1048 		    !info->attrs[GTPA_O_TEI])
1049 			return -EINVAL;
1050 		break;
1051 
1052 	default:
1053 		return -EINVAL;
1054 	}
1055 
1056 	rtnl_lock();
1057 	rcu_read_lock();
1058 
1059 	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
1060 	if (!gtp) {
1061 		err = -ENODEV;
1062 		goto out_unlock;
1063 	}
1064 
1065 	if (version == GTP_V0)
1066 		sk = gtp->sk0;
1067 	else if (version == GTP_V1)
1068 		sk = gtp->sk1u;
1069 	else
1070 		sk = NULL;
1071 
1072 	if (!sk) {
1073 		err = -ENODEV;
1074 		goto out_unlock;
1075 	}
1076 
1077 	err = ipv4_pdp_add(gtp, sk, info);
1078 
1079 out_unlock:
1080 	rcu_read_unlock();
1081 	rtnl_unlock();
1082 	return err;
1083 }
1084 
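/* Look up a PDP context on the device referenced by GTPA_LINK, either by MS
 * address or by version-specific tunnel id (TID for GTPv0, local TEI for
 * GTPv1-U).
 */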
1085 static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
1086 					    struct nlattr *nla[])
1087 {
1088 	struct gtp_dev *gtp;
1089 
1090 	gtp = gtp_find_dev(net, nla);
1091 	if (!gtp)
1092 		return ERR_PTR(-ENODEV);
1093 
1094 	if (nla[GTPA_MS_ADDRESS]) {
1095 		__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);
1096 
1097 		return ipv4_pdp_find(gtp, ip);
1098 	} else if (nla[GTPA_VERSION]) {
1099 		u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);
1100 
1101 		if (gtp_version == GTP_V0 && nla[GTPA_TID])
1102 			return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
1103 		else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
1104 			return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
1105 	}
1106 
1107 	return ERR_PTR(-EINVAL);
1108 }
1109 
1110 static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
1111 {
1112 	struct pdp_ctx *pctx;
1113 
1114 	if (nla[GTPA_LINK])
1115 		pctx = gtp_find_pdp_by_link(net, nla);
1116 	else
1117 		pctx = ERR_PTR(-EINVAL);
1118 
1119 	if (!pctx)
1120 		pctx = ERR_PTR(-ENOENT);
1121 
1122 	return pctx;
1123 }
1124 
1125 static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
1126 {
1127 	struct pdp_ctx *pctx;
1128 	int err = 0;
1129 
1130 	if (!info->attrs[GTPA_VERSION])
1131 		return -EINVAL;
1132 
1133 	rcu_read_lock();
1134 
1135 	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
1136 	if (IS_ERR(pctx)) {
1137 		err = PTR_ERR(pctx);
1138 		goto out_unlock;
1139 	}
1140 
1141 	if (pctx->gtp_version == GTP_V0)
1142 		netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
1143 			   pctx->u.v0.tid, pctx);
1144 	else if (pctx->gtp_version == GTP_V1)
1145 		netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
1146 			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
1147 
1148 	pdp_context_delete(pctx);
1149 
1150 out_unlock:
1151 	rcu_read_unlock();
1152 	return err;
1153 }
1154 
1155 static struct genl_family gtp_genl_family;
1156 
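/* Encode one PDP context into a netlink message of the given type. */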
1157 static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
1158 			      u32 type, struct pdp_ctx *pctx)
1159 {
1160 	void *genlh;
1161 
1162 	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
1163 			    type);
1164 	if (genlh == NULL)
1165 		goto nlmsg_failure;
1166 
1167 	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
1168 	    nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
1169 	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
1170 		goto nla_put_failure;
1171 
1172 	switch (pctx->gtp_version) {
1173 	case GTP_V0:
1174 		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
1175 		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
1176 			goto nla_put_failure;
1177 		break;
1178 	case GTP_V1:
1179 		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
1180 		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
1181 			goto nla_put_failure;
1182 		break;
1183 	}
1184 	genlmsg_end(skb, genlh);
1185 	return 0;
1186 
1187 nlmsg_failure:
1188 nla_put_failure:
1189 	genlmsg_cancel(skb, genlh);
1190 	return -EMSGSIZE;
1191 }
1192 
1193 static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
1194 {
1195 	struct pdp_ctx *pctx = NULL;
1196 	struct sk_buff *skb2;
1197 	int err;
1198 
1199 	if (!info->attrs[GTPA_VERSION])
1200 		return -EINVAL;
1201 
1202 	rcu_read_lock();
1203 
1204 	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
1205 	if (IS_ERR(pctx)) {
1206 		err = PTR_ERR(pctx);
1207 		goto err_unlock;
1208 	}
1209 
1210 	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
1211 	if (skb2 == NULL) {
1212 		err = -ENOMEM;
1213 		goto err_unlock;
1214 	}
1215 
1216 	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
1217 				 info->snd_seq, info->nlhdr->nlmsg_type, pctx);
1218 	if (err < 0)
1219 		goto err_unlock_free;
1220 
1221 	rcu_read_unlock();
1222 	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);
1223 
1224 err_unlock_free:
1225 	kfree_skb(skb2);
1226 err_unlock:
1227 	rcu_read_unlock();
1228 	return err;
1229 }
1230 
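/* Netlink dump callback. The iteration state is kept in cb->args: args[0] is
 * the hash bucket to resume from, args[1] the last dumped TID, args[2] the
 * gtp device being walked and args[4] flags a completed dump.
 */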
1231 static int gtp_genl_dump_pdp(struct sk_buff *skb,
1232 				struct netlink_callback *cb)
1233 {
1234 	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
1235 	struct net *net = sock_net(skb->sk);
1236 	struct gtp_net *gn = net_generic(net, gtp_net_id);
1237 	unsigned long tid = cb->args[1];
1238 	int i, k = cb->args[0], ret;
1239 	struct pdp_ctx *pctx;
1240 
1241 	if (cb->args[4])
1242 		return 0;
1243 
1244 	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
1245 		if (last_gtp && last_gtp != gtp)
1246 			continue;
1247 		else
1248 			last_gtp = NULL;
1249 
1250 		for (i = k; i < gtp->hash_size; i++) {
1251 			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
1252 				if (tid && tid != pctx->u.tid)
1253 					continue;
1254 				else
1255 					tid = 0;
1256 
1257 				ret = gtp_genl_fill_info(skb,
1258 							 NETLINK_CB(cb->skb).portid,
1259 							 cb->nlh->nlmsg_seq,
1260 							 cb->nlh->nlmsg_type, pctx);
1261 				if (ret < 0) {
1262 					cb->args[0] = i;
1263 					cb->args[1] = pctx->u.tid;
1264 					cb->args[2] = (unsigned long)gtp;
1265 					goto out;
1266 				}
1267 			}
1268 		}
1269 	}
1270 	cb->args[4] = 1;
1271 out:
1272 	return skb->len;
1273 }
1274 
1275 static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
1276 	[GTPA_LINK]		= { .type = NLA_U32, },
1277 	[GTPA_VERSION]		= { .type = NLA_U32, },
1278 	[GTPA_TID]		= { .type = NLA_U64, },
1279 	[GTPA_PEER_ADDRESS]	= { .type = NLA_U32, },
1280 	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
1281 	[GTPA_FLOW]		= { .type = NLA_U16, },
1282 	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
1283 	[GTPA_I_TEI]		= { .type = NLA_U32, },
1284 	[GTPA_O_TEI]		= { .type = NLA_U32, },
1285 };
1286 
1287 static const struct genl_ops gtp_genl_ops[] = {
1288 	{
1289 		.cmd = GTP_CMD_NEWPDP,
1290 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1291 		.doit = gtp_genl_new_pdp,
1292 		.flags = GENL_ADMIN_PERM,
1293 	},
1294 	{
1295 		.cmd = GTP_CMD_DELPDP,
1296 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1297 		.doit = gtp_genl_del_pdp,
1298 		.flags = GENL_ADMIN_PERM,
1299 	},
1300 	{
1301 		.cmd = GTP_CMD_GETPDP,
1302 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
1303 		.doit = gtp_genl_get_pdp,
1304 		.dumpit = gtp_genl_dump_pdp,
1305 		.flags = GENL_ADMIN_PERM,
1306 	},
1307 };
1308 
1309 static struct genl_family gtp_genl_family __ro_after_init = {
1310 	.name		= "gtp",
1311 	.version	= 0,
1312 	.hdrsize	= 0,
1313 	.maxattr	= GTPA_MAX,
1314 	.policy = gtp_genl_policy,
1315 	.netnsok	= true,
1316 	.module		= THIS_MODULE,
1317 	.ops		= gtp_genl_ops,
1318 	.n_ops		= ARRAY_SIZE(gtp_genl_ops),
1319 };
1320 
1321 static int __net_init gtp_net_init(struct net *net)
1322 {
1323 	struct gtp_net *gn = net_generic(net, gtp_net_id);
1324 
1325 	INIT_LIST_HEAD(&gn->gtp_dev_list);
1326 	return 0;
1327 }
1328 
1329 static void __net_exit gtp_net_exit(struct net *net)
1330 {
1331 	struct gtp_net *gn = net_generic(net, gtp_net_id);
1332 	struct gtp_dev *gtp;
1333 	LIST_HEAD(list);
1334 
1335 	rtnl_lock();
1336 	list_for_each_entry(gtp, &gn->gtp_dev_list, list)
1337 		gtp_dellink(gtp->dev, &list);
1338 
1339 	unregister_netdevice_many(&list);
1340 	rtnl_unlock();
1341 }
1342 
1343 static struct pernet_operations gtp_net_ops = {
1344 	.init	= gtp_net_init,
1345 	.exit	= gtp_net_exit,
1346 	.id	= &gtp_net_id,
1347 	.size	= sizeof(struct gtp_net),
1348 };
1349 
1350 static int __init gtp_init(void)
1351 {
1352 	int err;
1353 
1354 	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
1355 
1356 	err = rtnl_link_register(&gtp_link_ops);
1357 	if (err < 0)
1358 		goto error_out;
1359 
1360 	err = genl_register_family(&gtp_genl_family);
1361 	if (err < 0)
1362 		goto unreg_rtnl_link;
1363 
1364 	err = register_pernet_subsys(&gtp_net_ops);
1365 	if (err < 0)
1366 		goto unreg_genl_family;
1367 
1368 	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
1369 		sizeof(struct pdp_ctx));
1370 	return 0;
1371 
1372 unreg_genl_family:
1373 	genl_unregister_family(&gtp_genl_family);
1374 unreg_rtnl_link:
1375 	rtnl_link_unregister(&gtp_link_ops);
1376 error_out:
1377 	pr_err("error loading GTP module\n");
1378 	return err;
1379 }
1380 late_initcall(gtp_init);
1381 
1382 static void __exit gtp_fini(void)
1383 {
1384 	genl_unregister_family(&gtp_genl_family);
1385 	rtnl_link_unregister(&gtp_link_ops);
1386 	unregister_pernet_subsys(&gtp_net_ops);
1387 
1388 	pr_info("GTP module unloaded\n");
1389 }
1390 module_exit(gtp_fini);
1391 
1392 MODULE_LICENSE("GPL");
1393 MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
1394 MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
1395 MODULE_ALIAS_RTNL_LINK("gtp");
1396 MODULE_ALIAS_GENL_FAMILY("gtp");
1397