/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
        return -(verdict >> NF_VERDICT_QBITS);
}

static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
                                   const union nf_inet_addr *a2)
{
        return a1->all[0] == a2->all[0] &&
               a1->all[1] == a2->all[1] &&
               a1->all[2] == a2->all[2] &&
               a1->all[3] == a2->all[3];
}

static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
                                     union nf_inet_addr *result,
                                     const union nf_inet_addr *mask)
{
        result->all[0] = a1->all[0] & mask->all[0];
        result->all[1] = a1->all[1] & mask->all[1];
        result->all[2] = a1->all[2] & mask->all[2];
        result->all[3] = a1->all[3] & mask->all[3];
}

int netfilter_init(void);

struct sk_buff;

struct nf_hook_ops;

struct sock;

struct nf_hook_state {
        unsigned int hook;
        u_int8_t pf;
        struct net_device *in;
        struct net_device *out;
        struct sock *sk;
        struct net *net;
        int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};

typedef unsigned int nf_hookfn(void *priv,
                               struct sk_buff *skb,
                               const struct nf_hook_state *state);
struct nf_hook_ops {
        /* User fills in from here down. */
        nf_hookfn               *hook;
        struct net_device       *dev;
        void                    *priv;
        u_int8_t                pf;
        bool                    nat_hook;
        unsigned int            hooknum;
        /* Hooks are ordered in ascending priority. */
        int                     priority;
};
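
/*
 * Illustrative sketch, not part of this header: a minimal hook function
 * and the nf_hook_ops a user fills in for it.  The names "my_hook" and
 * "my_ops" are hypothetical, and NF_ACCEPT, NFPROTO_IPV4,
 * NF_INET_PRE_ROUTING and NF_IP_PRI_FIRST come from other netfilter
 * headers.  The hook accepts every packet unchanged; registration is
 * done with nf_register_net_hook(), declared further down in this file.
 *
 *      static unsigned int my_hook(void *priv, struct sk_buff *skb,
 *                                  const struct nf_hook_state *state)
 *      {
 *              return NF_ACCEPT;
 *      }
 *
 *      static const struct nf_hook_ops my_ops = {
 *              .hook           = my_hook,
 *              .pf             = NFPROTO_IPV4,
 *              .hooknum        = NF_INET_PRE_ROUTING,
 *              .priority       = NF_IP_PRI_FIRST,
 *      };
 */
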
struct nf_hook_entry {
        nf_hookfn                       *hook;
        void                            *priv;
};

struct nf_hook_entries_rcu_head {
        struct rcu_head head;
        void    *allocation;
};

struct nf_hook_entries {
        u16                             num_hook_entries;
        /* padding */
        struct nf_hook_entry            hooks[];

        /* trailer: pointers to original orig_ops of each hook,
         * followed by rcu_head and scratch space used for freeing
         * the structure via call_rcu.
         *
         * This is not part of struct nf_hook_entry since it's only
         * needed in the slow path (hook register/unregister):
         * const struct nf_hook_ops     *orig_ops[]
         *
         * For the same reason, we store this at the end -- it's
         * only needed when a hook is deleted, not during
         * packet path processing:
         * struct nf_hook_entries_rcu_head      head
         */
};

static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
{
        unsigned int n = e->num_hook_entries;
        const void *hook_end;

        hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */

        return (struct nf_hook_ops **)hook_end;
}

static inline int
nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
                     struct nf_hook_state *state)
{
        return entry->hook(entry->priv, skb, state);
}

static inline void nf_hook_state_init(struct nf_hook_state *p,
                                      unsigned int hook,
                                      u_int8_t pf,
                                      struct net_device *indev,
                                      struct net_device *outdev,
                                      struct sock *sk,
                                      struct net *net,
                                      int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        p->hook = hook;
        p->pf = pf;
        p->in = indev;
        p->out = outdev;
        p->sk = sk;
        p->net = net;
        p->okfn = okfn;
}

struct nf_sockopt_ops {
        struct list_head list;

        u_int8_t pf;

        /* Non-inclusive ranges: use 0/0/NULL to never get called. */
        int set_optmin;
        int set_optmax;
        int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
#ifdef CONFIG_COMPAT
        int (*compat_set)(struct sock *sk, int optval,
                          void __user *user, unsigned int len);
#endif
        int get_optmin;
        int get_optmax;
        int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
        int (*compat_get)(struct sock *sk, int optval,
                          void __user *user, int *len);
#endif
        /* Use the module struct to lock set/get code in place */
        struct module *owner;
};

/* Functions to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                          unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                             unsigned int n);
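
/*
 * Illustrative sketch, not part of this header: hooks are typically
 * registered per network namespace from a pernet ->init() callback and
 * removed again from ->exit().  "my_hooks" is a hypothetical array of
 * nf_hook_ops; __net_init, __net_exit and ARRAY_SIZE come from other
 * kernel headers.
 *
 *      static int __net_init my_net_init(struct net *net)
 *      {
 *              return nf_register_net_hooks(net, my_hooks,
 *                                           ARRAY_SIZE(my_hooks));
 *      }
 *
 *      static void __net_exit my_net_exit(struct net *net)
 *      {
 *              nf_unregister_net_hooks(net, my_hooks, ARRAY_SIZE(my_hooks));
 *      }
 */
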
/* Functions to register get/setsockopt ranges (non-inclusive).  You
   need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);

#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif

int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
                 const struct nf_hook_entries *e, unsigned int i);

/**
 * nf_hook - call a netfilter hook
 *
 * Returns 1 if the hook has allowed the packet to pass.  The function
 * okfn must be invoked by the caller in this case.  Any other return
 * value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
                          int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        struct nf_hook_entries *hook_head = NULL;
        int ret = 1;

#ifdef HAVE_JUMP_LABEL
        if (__builtin_constant_p(pf) &&
            __builtin_constant_p(hook) &&
            !static_key_false(&nf_hooks_needed[pf][hook]))
                return 1;
#endif

        rcu_read_lock();
        switch (pf) {
        case NFPROTO_IPV4:
                hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
                break;
        case NFPROTO_IPV6:
                hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
                break;
        case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
                hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
                break;
        case NFPROTO_BRIDGE:
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
                hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
                break;
#if IS_ENABLED(CONFIG_DECNET)
        case NFPROTO_DECNET:
                hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
                break;
#endif
        default:
                WARN_ON_ONCE(1);
                break;
        }

        if (hook_head) {
                struct nf_hook_state state;

                nf_hook_state_init(&state, hook, pf, indev, outdev,
                                   sk, net, okfn);

                ret = nf_hook_slow(skb, &state, hook_head, 0);
        }
        rcu_read_unlock();

        return ret;
}

/* Activate hook; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/

static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct sk_buff *skb, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *),
             bool cond)
{
        int ret;

        if (!cond ||
            ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
                ret = okfn(net, sk, skb);
        return ret;
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
        struct net_device *in, struct net_device *out,
        int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
        if (ret == 1)
                ret = okfn(net, sk, skb);
        return ret;
}
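
/*
 * Illustrative sketch, not part of this header: a transmit path hands
 * its packet to the LOCAL_OUT hook and continues in a hypothetical
 * my_output_finish() continuation only if every hook accepts it.
 * NF_INET_LOCAL_OUT and skb_dst() come from other headers.
 *
 *      return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
 *                     net, sk, skb, NULL, skb_dst(skb)->dev,
 *                     my_output_finish);
 *
 * When the packet is accepted, the return value is that of
 * my_output_finish(); otherwise the verdict is reported as described
 * in the comment above NF_HOOK_COND().
 */
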
/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  int *len);
#ifdef CONFIG_COMPAT
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
                         char __user *opt, unsigned int len);
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
                         char __user *opt, int *len);
#endif

/* Call this before modifying an existing packet: ensures it is
   modifiable and linear to the point you care about (writable_len).
   Returns true or false. */
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
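
/*
 * Illustrative sketch, not part of this header: a hook that wants to
 * rewrite the IPv4 header first makes those bytes writable.  struct
 * iphdr, ip_hdr() and NF_DROP come from other headers; "iph" is a
 * hypothetical local variable.
 *
 *      if (!skb_make_writable(skb, sizeof(struct iphdr)))
 *              return NF_DROP;
 *      iph = ip_hdr(skb);
 */
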
struct flowi;
struct nf_queue_entry;

__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
                    unsigned int dataoff, u_int8_t protocol,
                    unsigned short family);
__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
                            unsigned int dataoff, unsigned int len,
                            u_int8_t protocol, unsigned short family);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
             bool strict, unsigned short family);
int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);

#include <net/flow.h>
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);

static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
        void (*decodefn)(struct sk_buff *, struct flowi *);

        rcu_read_lock();
        decodefn = rcu_dereference(nf_nat_decode_session_hook);
        if (decodefn)
                decodefn(skb, fl);
        rcu_read_unlock();
#endif
}

#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct sk_buff *skb, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *),
             bool cond)
{
        return okfn(net, sk, skb);
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
        struct sk_buff *skb, struct net_device *in, struct net_device *out,
        int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return okfn(net, sk, skb);
}

static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
                          int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return 1;
}
struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
#endif /*CONFIG_NETFILTER*/

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <linux/netfilter/nf_conntrack_zones_common.h>

extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif

struct nf_conn;
enum ip_conntrack_info;
struct nlattr;

struct nfnl_ct_hook {
        struct nf_conn *(*get_ct)(const struct sk_buff *skb,
                                  enum ip_conntrack_info *ctinfo);
        size_t (*build_size)(const struct nf_conn *ct);
        int (*build)(struct sk_buff *skb, struct nf_conn *ct,
                     enum ip_conntrack_info ctinfo,
                     u_int16_t ct_attr, u_int16_t ct_info_attr);
        int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
        int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
                             u32 portid, u32 report);
        void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo, s32 off);
};
extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;

/**
 * nf_skb_duplicated - TEE target has sent a packet
 *
 * When an xtables target sends a packet, the OUTPUT and POSTROUTING
 * hooks are traversed again, i.e. nft and xtables are invoked recursively.
 *
 * This is used by the xtables TEE target to prevent the duplicated skb
 * from being duplicated again.
 */
DECLARE_PER_CPU(bool, nf_skb_duplicated);

#endif /*__LINUX_NETFILTER_H*/