/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan    :       Routing by source
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}
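
/*
 * Note that ihl counts 32-bit words, so an option-less header (ihl == 5)
 * gives the minimum 20 bytes and ihl == 15 the maximum 60. Illustrative
 * check for the presence of IP options:
 *
 *	if (ip_hdrlen(skb) > sizeof(struct iphdr))
 *		...header carries options...
 */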

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			tx_flags;
	__u8			ttl;
	__s16			tos;
	char			priority;
};

#define IPCB(skb) ((struct inet_skb_parm *)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
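
/*
 * Both macros overlay a protocol-private structure on the 48-byte
 * skb->cb[] control buffer; the cast is only meaningful while the IPv4
 * layer owns the skb. An illustrative (hypothetical) use:
 *
 *	IPCB(skb)->flags |= IPSKB_FORWARDED;
 *	if (ipv4_l3mdev_skb(IPCB(skb)->flags))
 *		sdif = IPCB(skb)->iif;
 */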

/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}
/* Special input handler for packets caught by the router alert option.
   They are selected only by the protocol field and are then processed
   like local ones; but only if someone wants them! Otherwise, a router
   not running rsvpd would kill RSVP.

   What to do with them is a user-level problem. I have no idea how it
   will masquerade or NAT them (it is a joke, joke :-)), but the receiver
   should be clever enough, e.g., to forward mtrace requests sent to a
   multicast group on toward the destination's designated router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/
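
/*
 * These mirror the on-wire frag_off field: the top three bits are flags
 * and the low 13 bits give the fragment offset in 8-byte units. A sketch
 * of unpacking it (frag_off is stored in network byte order):
 *
 *	u16 off = ntohs(iph->frag_off);
 *	bool df = off & IP_DF;
 *	unsigned int frag_bytes = (off & IP_OFFSET) * 8;
 */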

#define IP_FRAG_TIME	(30 * HZ)		/* fragment lifetime	*/

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags);

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
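
/*
 * The append/push helpers above implement "corking": ip_append_data()
 * queues payload on sk->sk_write_queue, and ip_push_pending_frames()
 * builds and transmits the pending datagram. A minimal sketch of the
 * sequence, loosely modeled on udp_sendmsg() (locking and most error
 * handling omitted; not a complete implementation):
 *
 *	err = ip_append_data(sk, &fl4, ip_generic_getfrag, msg, len,
 *			     0, &ipc, &rt, msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, &fl4);
 */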

static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie *ipc, struct sock *sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
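
/*
 * The leading-underscore variants use the non-atomic per-cpu ops and are
 * only safe where preemption is already disabled (e.g. softirq context);
 * the plain forms are safe anywhere. Illustrative calls:
 *
 *	__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);	// BH context
 *	IP_INC_STATS(net, IPSTATS_MIB_OUTREQUESTS);	// process context
 */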

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG == 32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offt);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
						mib_statistic, \
						c, stats_list[i].entry); \
	} \
}

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return 0;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return net->ipv4.sysctl_ip_prot_sock;
}

#else
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	return 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}
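
/*
 * True for every fragment, including the first (zero offset but MF set);
 * false only when both MF and the offset are zero, so an unfragmented
 * packet with DF set still returns false. For example:
 *
 *	frag_off == htons(IP_MF)	-> true  (first fragment)
 *	frag_off == htons(0x0010)	-> true  (later fragment)
 *	frag_off == htons(IP_DF)	-> false (atomic datagram)
 */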

#ifdef CONFIG_INET
#include <net/dst.h>

/* The 2.2 version of this function was invalid, producing the wrong result
 * for check=0xFEFF. Arthur Skawina noticed this a year ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	return --iph->ttl;
}
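
/*
 * TTL shares a 16-bit header word with the protocol field, so the
 * decrement lowers that word by 0x0100 and the complemented checksum must
 * rise by the same amount; the (check >= 0xFFFF) term folds the one's-
 * complement end-around carry back in. Worked examples:
 *
 *	check == htons(0x1234) -> htons(0x1334)	(no carry)
 *	check == htons(0xFEFF) -> htons(0x0000)	(carry folded in; the
 *						 case 2.2 got wrong)
 */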

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	struct net *net = dev_net(dst->dev);

	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
	    ip_mtu_locked(dst) ||
	    !forwarding)
		return dst_mtu(dst);

	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* This is only to work around buggy Windows95/2000
		 * VJ compression implementations.  If the ID field
		 * does not change, they drop every other packet in
		 * a TCP stream using header compression.
		 */
		if (sk && inet_sk(sk)->inet_daddr) {
			iph->id = htons(inet_sk(sk)->inet_id);
			inet_sk(sk)->inet_id += segs;
		} else {
			iph->id = 0;
		}
	} else {
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}
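
/*
 * This returns the unfolded pseudo-header sum (saddr, daddr, length,
 * protocol) that seeds TCP/UDP checksum verification. A hedged sketch of
 * a receive-side check, with 'seg_sum' standing in for the one's-
 * complement sum of the transport header plus payload:
 *
 *	__wsum pseudo = inet_compute_pseudo(skb, IPPROTO_UDP);
 *
 *	if (csum_fold(csum_add(pseudo, seg_sum)))
 *		goto csum_error;	// non-zero fold -> corrupt
 */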

/* copy IPv4 saddr & daddr to flow_keys, possibly using a 64-bit load/store.
 * Equivalent to:	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
			      sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto a multicast MAC for Ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
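
/*
 * Only the low 23 bits of the group address survive, so 32 distinct
 * groups share each MAC address. Worked example: 224.1.2.3 (0xE0010203)
 * maps to 01:00:5e:01:02:03, as does 239.129.2.3, which differs only in
 * bits the mapping discards:
 *
 *	char mac[ETH_ALEN];
 *
 *	ip_eth_mc_map(htonl(0xE0010203), mac);
 *	// mac[] == { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 }
 */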

/*
 *	Map a multicast IP onto a multicast MAC for IP-over-InfiniBand.
 *	Leave the P_Key as 0 to be filled in by the driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];	/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr  >>= 8;
	buf[18] = addr & 0xff;
	addr  >>= 8;
	buf[17] = addr & 0xff;
	addr  >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static inline void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bound'
 * and 'upper_bound' inclusive.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bound,
					     enum ip_defrag_users upper_bound)
{
	return user >= lower_bound && user <= upper_bound;
}
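
/*
 * The USHRT_MAX-sized gaps in the enum above leave room to encode a
 * 16-bit conntrack zone in the user value, which this helper can then
 * classify. Roughly as the netfilter defrag hook does it (zone_id being
 * illustrative here):
 *
 *	u32 user = IP_DEFRAG_CONNTRACK_IN + zone_id;
 *
 *	if (ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
 *				      __IP_DEFRAG_CONNTRACK_IN_END))
 *		// fragment queue belongs to conntrack input
 */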

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   unsigned char *data, int optlen);
int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
			     unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen);
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

#endif	/* _IP_H */