/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan    :       Routing by source
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
	ipcm->oif = inet->sk.sk_bound_dev_if;
	ipcm->addr = inet->inet_saddr;
}

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

/* Special input handler for packets caught by the router alert option.
 * They are selected only by the protocol field and are then processed
 * much like local ones; but only if someone wants them!  Otherwise, a
 * router not running rsvpd would kill RSVP.
 *
 * What to do with them is a user-level problem.  I have no idea how it
 * will masquerade or NAT them (it is a joke, joke :-)), but the receiver
 * should be clever enough e.g. to forward mtrace requests sent to a
 * multicast group so that they reach the destination's designated router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};
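/*
 * Editorial note: sockets are added to and removed from this RCU-protected
 * chain with ip_ra_control(), declared later in this header; on input,
 * ip_call_ra_chain() (also declared below) walks the chain and hands a copy
 * of each Router Alert packet to every registered socket.
 */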
/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime		*/

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
	struct sk_buff	*frag;
	struct iphdr	*iph;
	int		offset;
	unsigned int	hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}

struct ip_frag_state {
	struct iphdr	*iph;
	unsigned int	hlen;
	unsigned int	ll_rs;
	unsigned int	mtu;
	unsigned int	left;
	int		offset;
	int		ptr;
	__be16		not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
		  unsigned int mtu, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
			     struct ip_frag_state *state);

void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
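/*
 * Editorial note: a rough sketch of the corked transmit sequence built from
 * the helpers above, loosely following raw_sendmsg(); it is illustrative
 * only, not the authoritative call sequence:
 *
 *	lock_sock(sk);
 *	err = ip_append_data(sk, &fl4, ip_generic_getfrag, msg,
 *			     len, 0, &ipc, &rt, msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, &fl4);
 *	release_sock(sk);
 */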
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);

static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
				struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie *ipc, struct sock *sk)
{
	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
					size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif
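/*
 * Editorial note on the batch helpers below: they accumulate every counter
 * named in stats_list across all possible CPUs.  The caller supplies the
 * buff/buff64 array, sized to the list and zeroed beforehand, since the
 * macros only add to it.
 */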
#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}

void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return 0;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return net->ipv4.sysctl_ip_prot_sock;
}

#else
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
	return 0;
}

static inline int inet_prot_sock(struct net *net)
{
	return PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
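/*
 * Editorial note: ip_is_fragment() below returns true for every fragment of
 * a fragmented datagram, including the first one (zero offset, MF set).  To
 * single out non-first fragments only, test
 * (iph->frag_off & htons(IP_OFFSET)) != 0 instead.
 */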
static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;
	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check>=0xFFFF));
	return --iph->ttl;
}

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return  pmtudisc == IP_PMTUDISC_DO ||
		(pmtudisc == IP_PMTUDISC_WANT &&
		 !ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	struct net *net = dev_net(dst->dev);

	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
	    ip_mtu_locked(dst) ||
	    !forwarding)
		return dst_mtu(dst);

	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}

u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
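/*
 * Editorial note: ip_select_ident_segs() below picks the IP ID for locally
 * generated packets.  When DF is set and ignore_df is clear, no real ID is
 * needed, so it uses the socket's inet_id counter (or 0 for unconnected
 * sockets); otherwise it falls back to __ip_select_ident() above.
 */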
static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		/* This is only to work around buggy Windows95/2000
		 * VJ compression implementations.  If the ID field
		 * does not change, they drop every other packet in
		 * a TCP stream using header compression.
		 */
		if (sk && inet_sk(sk)->inet_daddr) {
			iph->id = htons(inet_sk(sk)->inet_id);
			inet_sk(sk)->inet_id += segs;
		} else {
			iph->id = 0;
		}
	} else {
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
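/*
 * Editorial example for the mapping above: 224.1.2.3 (0xe0010203) becomes
 * 01:00:5e:01:02:03 -- the fixed 01:00:5e prefix followed by the low 23 bits
 * of the group address, per RFC 1112.
 */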
572 */ 573 574 static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf) 575 { 576 __u32 addr; 577 unsigned char scope = broadcast[5] & 0xF; 578 579 buf[0] = 0; /* Reserved */ 580 buf[1] = 0xff; /* Multicast QPN */ 581 buf[2] = 0xff; 582 buf[3] = 0xff; 583 addr = ntohl(naddr); 584 buf[4] = 0xff; 585 buf[5] = 0x10 | scope; /* scope from broadcast address */ 586 buf[6] = 0x40; /* IPv4 signature */ 587 buf[7] = 0x1b; 588 buf[8] = broadcast[8]; /* P_Key */ 589 buf[9] = broadcast[9]; 590 buf[10] = 0; 591 buf[11] = 0; 592 buf[12] = 0; 593 buf[13] = 0; 594 buf[14] = 0; 595 buf[15] = 0; 596 buf[19] = addr & 0xff; 597 addr >>= 8; 598 buf[18] = addr & 0xff; 599 addr >>= 8; 600 buf[17] = addr & 0xff; 601 addr >>= 8; 602 buf[16] = addr & 0x0f; 603 } 604 605 static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf) 606 { 607 if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0) 608 memcpy(buf, broadcast, 4); 609 else 610 memcpy(buf, &naddr, sizeof(naddr)); 611 } 612 613 #if IS_ENABLED(CONFIG_IPV6) 614 #include <linux/ipv6.h> 615 #endif 616 617 static __inline__ void inet_reset_saddr(struct sock *sk) 618 { 619 inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0; 620 #if IS_ENABLED(CONFIG_IPV6) 621 if (sk->sk_family == PF_INET6) { 622 struct ipv6_pinfo *np = inet6_sk(sk); 623 624 memset(&np->saddr, 0, sizeof(np->saddr)); 625 memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr)); 626 } 627 #endif 628 } 629 630 #endif 631 632 static inline unsigned int ipv4_addr_hash(__be32 ip) 633 { 634 return (__force unsigned int) ip; 635 } 636 637 static inline u32 ipv4_portaddr_hash(const struct net *net, 638 __be32 saddr, 639 unsigned int port) 640 { 641 return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; 642 } 643 644 bool ip_call_ra_chain(struct sk_buff *skb); 645 646 /* 647 * Functions provided by ip_fragment.c 648 */ 649 650 enum ip_defrag_users { 651 IP_DEFRAG_LOCAL_DELIVER, 652 IP_DEFRAG_CALL_RA_CHAIN, 653 IP_DEFRAG_CONNTRACK_IN, 654 __IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + USHRT_MAX, 655 IP_DEFRAG_CONNTRACK_OUT, 656 __IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX, 657 IP_DEFRAG_CONNTRACK_BRIDGE_IN, 658 __IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, 659 IP_DEFRAG_VS_IN, 660 IP_DEFRAG_VS_OUT, 661 IP_DEFRAG_VS_FWD, 662 IP_DEFRAG_AF_PACKET, 663 IP_DEFRAG_MACVLAN, 664 }; 665 666 /* Return true if the value of 'user' is between 'lower_bond' 667 * and 'upper_bond' inclusively. 
/* Return true if the value of 'user' is between 'lower_bound'
 * and 'upper_bound', inclusive.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bound,
					     enum ip_defrag_users upper_bound)
{
	return user >= lower_bound && user <= upper_bound;
}

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   unsigned char *data, int optlen);
int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
			     unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen);
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

#endif	/* _IP_H */