/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan    :       Routing by source
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.mark = inet->sk.sk_mark;
	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = inet->sk.sk_bound_dev_if;
	ipcm->addr = inet->inet_saddr;
}

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

/* Special input handler for packets caught by the router alert option.
   They are selected only by protocol field, and then processed like
   local ones; but only if someone wants them! Otherwise, a router
   not running rsvpd would kill RSVP.

   What to do with them is a user-level problem.
   I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
   but the receiver should be clever enough, e.g. to forward mtrace requests
   sent to a multicast group so that they reach the destination's designated
   router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/
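
/*
 * Illustrative note (not part of the original header): the 13-bit fragment
 * offset masked by IP_OFFSET is carried in units of 8 bytes, so the byte
 * offset of a fragment within the original datagram is recovered as, e.g.:
 *
 *	unsigned int offset = (ntohs(ip_hdr(skb)->frag_off) & IP_OFFSET) * 8;
 */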

#define IP_FRAG_TIME	(30 * HZ)		/* fragment lifetime	*/

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
	struct sk_buff	*frag;
	struct iphdr	*iph;
	int		offset;
	unsigned int	hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}
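
/*
 * Rough usage sketch (illustrative, not part of the original header): the
 * fast path of ip_do_fragment() drives this iterator over an already-built
 * fragment list, where output() is the caller-supplied transmit callback
 * and hlen is the IP header length:
 *
 *	struct ip_fraglist_iter iter;
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */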

struct ip_frag_state {
	bool		DF;
	unsigned int	hlen;
	unsigned int	ll_rs;
	unsigned int	mtu;
	unsigned int	left;
	int		offset;
	int		ptr;
	__be16		not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
		  unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
			     struct ip_frag_state *state);

void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);

static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
				struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
						mib_statistic, \
						c, stats_list[i].entry); \
	} \
}

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return false;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < net->ipv4.sysctl_ip_prot_sock;
}

#else
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	return false;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing a wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina a _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	return --iph->ttl;
}
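
/*
 * Illustrative note (not part of the original header): this is an
 * incremental checksum update in the spirit of RFC 1141/1624.  Decrementing
 * the TTL lowers the 16-bit header word that holds TTL and protocol by
 * 0x0100, so the stored ones'-complement checksum is patched by adding
 * htons(0x0100) instead of being recomputed with ip_send_check(); the
 * "check >= 0xFFFF" term folds the end-around carry, which is exactly the
 * check == 0xFEFF corner case called out above.
 */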

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return  pmtudisc == IP_PMTUDISC_DO ||
		(pmtudisc == IP_PMTUDISC_WANT &&
		 !ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	struct net *net = dev_net(dst->dev);

	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
	    ip_mtu_locked(dst) ||
	    !forwarding)
		return dst_mtu(dst);

	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* This is only to work around buggy Windows95/2000
		 * VJ compression implementations.  If the ID field
		 * does not change, they drop every other packet in
		 * a TCP stream using header compression.
		 */
		if (sk && inet_sk(sk)->inet_daddr) {
			iph->id = htons(inet_sk(sk)->inet_id);
			inet_sk(sk)->inet_id += segs;
		} else {
			iph->id = 0;
		}
	} else {
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}
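
/*
 * Illustrative note (not part of the original header): the value above is
 * the unfolded checksum of the TCP/UDP pseudo-header (source address,
 * destination address, length, protocol).  A transport receive path is
 * expected to keep accumulating the transport header and payload on top of
 * it, e.g. through skb_checksum_init()-style helpers, before folding and
 * verifying the result.
 */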

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
			      sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
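
/*
 * Illustrative example (not part of the original header): only the low 23
 * bits of the group address survive this mapping (RFC 1112, section 6.4),
 * so e.g. 224.1.2.3 maps to 01:00:5e:01:02:03, and 225.1.2.3 maps to the
 * very same MAC address.
 */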

/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	The P_Key is taken from the device broadcast address.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];		/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr  >>= 8;
	buf[18] = addr & 0xff;
	addr  >>= 8;
	buf[17] = addr & 0xff;
	addr  >>= 8;
	buf[16] = addr & 0x0f;
}
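
/*
 * Illustrative example (not part of the original header): buf[] is the
 * 20-byte IPoIB multicast hardware address, i.e. a reserved byte, the
 * multicast QPN (0xffffff) and then the 16-byte MGID.  Assuming a
 * link-local-scope broadcast GID and group 224.1.2.3, the resulting MGID
 * would read roughly ff12:401b:<P_Key>:0000:0000:0000:0001:0203; only the
 * low 28 bits of the IP address are carried.
 */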

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bound'
 * and 'upper_bound' inclusive.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bound,
					     enum ip_defrag_users upper_bound)
{
	return user >= lower_bound && user <= upper_bound;
}
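
/*
 * Illustrative note (not part of the original header): the USHRT_MAX-wide
 * gaps in enum ip_defrag_users above leave room for a per-zone offset, so a
 * caller such as conntrack can derive its defrag user roughly as
 * IP_DEFRAG_CONNTRACK_IN + zone_id, and ip_defrag_user_in_between() then
 * classifies any value in that range as conntrack input.
 */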

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   unsigned char *data, int optlen);
int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
			     unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen);
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

static inline bool inetdev_valid_mtu(unsigned int mtu)
{
	return likely(mtu >= IPV4_MIN_MTU);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);

#endif	/* _IP_H */