/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan    :       Routing by source
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>
#include <linux/static_key.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)
#define IPSKB_NOPOLICY		BIT(8)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			protocol;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.mark = inet->sk.sk_mark;
	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
	ipcm->addr = inet->inet_saddr;
	ipcm->protocol = inet->inet_num;
}
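
/*
 * Usage sketch (illustrative, not part of this header): a typical
 * sendmsg() path seeds the cookie from the socket and then lets
 * control messages override individual fields; this is a minimal
 * sketch of that pattern, not the exact UDP code.
 *
 *	struct ipcm_cookie ipc;
 *
 *	ipcm_init_sk(&ipc, inet_sk(sk));
 *	if (msg->msg_controllen) {
 *		err = ip_cmsg_send(sk, msg, &ipc, false);
 *		if (unlikely(err))
 *			return err;
 *	}
 */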

#define IPCB(skb) ((struct inet_skb_parm *)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

/* Special input handler for packets caught by the router alert option.
   They are selected only by the protocol field and then processed like
   locally-destined ones; but only if someone wants them! Otherwise, a
   router not running rsvpd would kill RSVP.

   What user level does with them is its own problem. I have no idea
   how it would masquerade or NAT them (it is a joke, joke :-)), but
   the receiver should be clever enough, e.g., to forward mtrace
   requests sent to a multicast group towards the destination's
   designated router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};
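
/*
 * Usage sketch (illustrative, user space): a daemon such as rsvpd
 * requests router-alert packets with IP_ROUTER_ALERT on a raw socket;
 * inside the kernel this reaches ip_ra_control(), which links the
 * socket into the chain above.
 *
 *	int on = 1;
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *
 *	if (fd < 0 ||
 *	    setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on)) < 0)
 *		...handle the error...
 */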

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime	*/
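
/*
 * Worked example (illustrative): these values match the host-order
 * frag_off field, so a header is decoded through ntohs(); the offset
 * is carried in units of 8 bytes.
 *
 *	u16 frag_off = ntohs(ip_hdr(skb)->frag_off);
 *	bool more_frags = frag_off & IP_MF;
 *	unsigned int byte_offset = (frag_off & IP_OFFSET) * 8;
 */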

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
	struct sk_buff	*frag;
	struct iphdr	*iph;
	int		offset;
	unsigned int	hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}
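
/*
 * Usage sketch (illustrative, condensed from the fast path of
 * ip_do_fragment()): walk an already-built fragment list, preparing
 * each successor's header before passing the current skb to the
 * output callback; error unwinding is omitted here.
 *
 *	struct ip_fraglist_iter iter;
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */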

struct ip_frag_state {
	bool		DF;
	unsigned int	hlen;
	unsigned int	ll_rs;
	unsigned int	mtu;
	unsigned int	left;
	int		offset;
	int		ptr;
	__be16		not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
		  unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
			     struct ip_frag_state *state);
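
/*
 * Usage sketch (illustrative, condensed from the slow path of
 * ip_do_fragment()): with no ready-made fragment list, fragments are
 * carved out of the original skb one at a time. ip_frag_next() can
 * return an ERR_PTR() on allocation failure; full handling of that
 * is omitted here.
 *
 *	struct ip_frag_state state;
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu,
 *		     IPCB(skb)->flags & IPSKB_FRAG_PMTU, &state);
 *	while (state.left > 0) {
 *		struct sk_buff *skb2 = ip_frag_next(skb, &state);
 *
 *		if (IS_ERR(skb2))
 *			break;
 *		err = output(net, sk, skb2);
 *		if (err)
 *			break;
 *	}
 */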

void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
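
/*
 * Usage sketch (illustrative): the append/push API corks payload on
 * the socket write queue and later collapses it into a single skb;
 * a minimal sketch of the pattern, not the exact UDP code.
 *
 *	err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, len,
 *			     sizeof(struct udphdr), &ipc, &rt,
 *			     msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, fl4);
 */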

/* Get the route scope that should be used when sending a packet. */
static inline u8 ip_sendmsg_scope(const struct inet_sock *inet,
				  const struct ipcm_cookie *ipc,
				  const struct msghdr *msg)
{
	if (sock_flag(&inet->sk, SOCK_LOCALROUTE) ||
	    msg->msg_flags & MSG_DONTROUTE ||
	    (ipc->opt && ipc->opt->opt.is_strictroute))
		return RT_SCOPE_LINK;

	return RT_SCOPE_UNIVERSE;
}

static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time, u32 txhash);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
{
	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
}

unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG == 32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
						mib_statistic, \
						c, stats_list[i].entry); \
	} \
}
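
/*
 * Usage sketch (illustrative, after the pattern used by the /proc/net
 * snmp code): fold every counter of a sentinel-terminated snmp_mib
 * table across all possible CPUs; the table and buffer names here are
 * assumptions for the example.
 *
 *	static const struct snmp_mib example_mib_list[] = {
 *		SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS),
 *		SNMP_MIB_SENTINEL
 *	};
 *	unsigned long buff[ARRAY_SIZE(example_mib_list)] = { 0 };
 *
 *	snmp_get_cpu_field_batch(buff, example_mib_list,
 *				 net->mib.net_statistics);
 */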

void inet_get_local_port_range(const struct net *net, int *low, int *high);
void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return false;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
}

#else
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	return false;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	(READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina a year ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;

	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	return --iph->ttl;
}
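
/*
 * This is the RFC 1141-style incremental update: decrementing the TTL
 * lowers the big-endian TTL/protocol word by 0x0100, so the stored
 * complement must grow by the same amount; the "+ (check >= 0xFFFF)"
 * term folds the one's-complement end-around carry back in rather
 * than recomputing the whole header sum.
 */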

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}
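
/*
 * Usage sketch (illustrative): this predicate picks the DF bit when a
 * header is being built, as in ip_build_and_send_pkt():
 *
 *	iph->frag_off = ip_dont_fragment(sk, &rt->dst) ? htons(IP_DF) : 0;
 */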

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net *net = dev_net(dst->dev);
	unsigned int mtu;

	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    ip_mtu_locked(dst) ||
	    !forwarding) {
		mtu = rt->rt_pmtu;
		if (mtu && time_before(jiffies, rt->dst.expires))
			goto out;
	}

	/* 'forwarding = true' case should always honour route mtu */
	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

out:
	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	unsigned int mtu;

	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	/* We had many attacks based on IPID, use the private
	 * generator as much as we can.
	 */
	if (sk && inet_sk(sk)->inet_daddr) {
		iph->id = htons(inet_sk(sk)->inet_id);
		inet_sk(sk)->inet_id += segs;
		return;
	}
	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		iph->id = 0;
	} else {
		/* Unfortunately we need the big hammer to get a suitable IPID */
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}
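
/*
 * Usage sketch (illustrative): with CHECKSUM_COMPLETE, a receiver can
 * validate a transport checksum by adding the pseudo-header sum to the
 * accumulated packet sum and folding; zero means the checksum is good.
 *
 *	__wsum pseudo = inet_compute_pseudo(skb, IPPROTO_UDP);
 *
 *	if (!csum_fold(csum_add(pseudo, skb->csum)))
 *		...checksum is valid...
 */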

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
			      sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
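
/*
 * Worked example (illustrative): per RFC 1112, the low 23 bits of the
 * group address are placed behind the fixed 01:00:5e prefix, so
 * 224.1.2.3 (0xe0010203) maps to 01:00:5e:01:02:03.
 */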

/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];		/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr  >>= 8;
	buf[18] = addr & 0xff;
	addr  >>= 8;
	buf[17] = addr & 0xff;
	addr  >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static inline void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bound'
 * and 'upper_bound' inclusive.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bound,
					     enum ip_defrag_users upper_bound)
{
	return user >= lower_bound && user <= upper_bound;
}
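
/*
 * Usage sketch (illustrative): conntrack-style callers test whether a
 * defrag user id falls inside one of the per-zone ranges above:
 *
 *	if (ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
 *				      __IP_DEFRAG_CONNTRACK_IN_END))
 *		...the fragment came from conntrack input...
 */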

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
int do_ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		  unsigned int optlen);
int do_ip_getsockopt(struct sock *sk, int level, int optname,
		     sockptr_t optval, sockptr_t optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

static inline bool inetdev_valid_mtu(unsigned int mtu)
{
	return likely(mtu >= IPV4_MIN_MTU);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);
void __ip_sock_set_tos(struct sock *sk, int val);

#endif	/* _IP_H */