#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
	return -(verdict >> NF_VERDICT_QBITS);
}

static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
				   const union nf_inet_addr *a2)
{
	return a1->all[0] == a2->all[0] &&
	       a1->all[1] == a2->all[1] &&
	       a1->all[2] == a2->all[2] &&
	       a1->all[3] == a2->all[3];
}

static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
				     union nf_inet_addr *result,
				     const union nf_inet_addr *mask)
{
	result->all[0] = a1->all[0] & mask->all[0];
	result->all[1] = a1->all[1] & mask->all[1];
	result->all[2] = a1->all[2] & mask->all[2];
	result->all[3] = a1->all[3] & mask->all[3];
}

int netfilter_init(void);

struct sk_buff;

struct nf_hook_ops;

struct sock;

struct nf_hook_state {
	unsigned int hook;
	u_int8_t pf;
	struct net_device *in;
	struct net_device *out;
	struct sock *sk;
	struct net *net;
	int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};

typedef unsigned int nf_hookfn(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state);
struct nf_hook_ops {
	struct list_head list;

	/* User fills in from here down. */
	nf_hookfn *hook;
	struct net_device *dev;
	void *priv;
	u_int8_t pf;
	unsigned int hooknum;
	/* Hooks are ordered in ascending priority. */
	int priority;
};

struct nf_hook_entry {
	struct nf_hook_entry __rcu *next;
	nf_hookfn *hook;
	void *priv;
	const struct nf_hook_ops *orig_ops;
};

static inline void
nf_hook_entry_init(struct nf_hook_entry *entry, const struct nf_hook_ops *ops)
{
	entry->next = NULL;
	entry->hook = ops->hook;
	entry->priv = ops->priv;
	entry->orig_ops = ops;
}

static inline int
nf_hook_entry_priority(const struct nf_hook_entry *entry)
{
	return entry->orig_ops->priority;
}

static inline int
nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
		     struct nf_hook_state *state)
{
	return entry->hook(entry->priv, skb, state);
}

static inline const struct nf_hook_ops *
nf_hook_entry_ops(const struct nf_hook_entry *entry)
{
	return entry->orig_ops;
}

static inline void nf_hook_state_init(struct nf_hook_state *p,
				      unsigned int hook,
				      u_int8_t pf,
				      struct net_device *indev,
				      struct net_device *outdev,
				      struct sock *sk,
				      struct net *net,
				      int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	p->hook = hook;
	p->pf = pf;
	p->in = indev;
	p->out = outdev;
	p->sk = sk;
	p->net = net;
	p->okfn = okfn;
}

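/*
 * Example (editor's illustrative sketch, not part of this header): a module
 * typically fills in the "user" part of struct nf_hook_ops and attaches it
 * to a network namespace with nf_register_net_hook().  The names sample_hook
 * and sample_ops and the use of init_net below are arbitrary; NFPROTO_IPV4,
 * NF_INET_PRE_ROUTING and NF_ACCEPT come from the netfilter uapi headers.
 *
 *	static unsigned int sample_hook(void *priv, struct sk_buff *skb,
 *					const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;	// let every packet continue
 *	}
 *
 *	static struct nf_hook_ops sample_ops = {
 *		.hook		= sample_hook,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_PRE_ROUTING,
 *		.priority	= 0,
 *	};
 *
 *	// module init:	nf_register_net_hook(&init_net, &sample_ops);
 *	// module exit:	nf_unregister_net_hook(&init_net, &sample_ops);
 */
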
struct nf_sockopt_ops {
	struct list_head list;

	u_int8_t pf;

	/* Non-inclusive ranges: use 0/0/NULL to never get called. */
	int set_optmin;
	int set_optmax;
	int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
#ifdef CONFIG_COMPAT
	int (*compat_set)(struct sock *sk, int optval,
			  void __user *user, unsigned int len);
#endif
	int get_optmin;
	int get_optmax;
	int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
	int (*compat_get)(struct sock *sk, int optval,
			  void __user *user, int *len);
#endif
	/* Use the module struct to lock set/get code in place */
	struct module *owner;
};

/* Functions to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int n);

int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);

/* Functions to register get/setsockopt ranges (non-inclusive).  You
   need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);

#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif

int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 struct nf_hook_entry *entry);

/**
 *	nf_hook - call a netfilter hook
 *
 *	Returns 1 if the hook has allowed the packet to pass.  The function
 *	okfn must be invoked by the caller in this case.  Any other return
 *	value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	struct nf_hook_entry *hook_head;
	int ret = 1;

#ifdef HAVE_JUMP_LABEL
	if (__builtin_constant_p(pf) &&
	    __builtin_constant_p(hook) &&
	    !static_key_false(&nf_hooks_needed[pf][hook]))
		return 1;
#endif

	rcu_read_lock();
	hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
	if (hook_head) {
		struct nf_hook_state state;

		nf_hook_state_init(&state, hook, pf, indev, outdev,
				   sk, net, okfn);

		ret = nf_hook_slow(skb, &state, hook_head);
	}
	rcu_read_unlock();

	return ret;
}

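/*
 * Example (editor's illustrative sketch, not part of this header): a caller
 * must only invoke okfn itself when nf_hook() returns 1; any other value
 * means the hooks already consumed the skb.  The names sample_xmit and
 * sample_finish are arbitrary; dst_output() and skb_dst() are assumed to be
 * available in the caller's context.
 *
 *	static int sample_finish(struct net *net, struct sock *sk,
 *				 struct sk_buff *skb)
 *	{
 *		return dst_output(net, sk, skb);
 *	}
 *
 *	static int sample_xmit(struct net *net, struct sock *sk,
 *			       struct sk_buff *skb)
 *	{
 *		int ret;
 *
 *		ret = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb,
 *			      NULL, skb_dst(skb)->dev, sample_finish);
 *		if (ret == 1)	// hooks let the packet pass; caller runs okfn
 *			ret = sample_finish(net, sk, skb);
 *		return ret;
 *	}
 *
 * The NF_HOOK() wrapper below encodes exactly this pattern.
 */
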
/* Activate hook; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/

static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	int ret;

	if (!cond ||
	    ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
		ret = okfn(net, sk, skb);
	return ret;
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
	struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
	if (ret == 1)
		ret = okfn(net, sk, skb);
	return ret;
}

/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
		  unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
		  int *len);
#ifdef CONFIG_COMPAT
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
			 char __user *opt, unsigned int len);
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
			 char __user *opt, int *len);
#endif

/* Call this before modifying an existing packet: ensures it is
   modifiable and linear to the point you care about (writable_len).
   Returns true or false. */
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);

struct flowi;
struct nf_queue_entry;

struct nf_afinfo {
	unsigned short family;
	__sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
			    unsigned int dataoff, u_int8_t protocol);
	__sum16 (*checksum_partial)(struct sk_buff *skb,
				    unsigned int hook,
				    unsigned int dataoff,
				    unsigned int len,
				    u_int8_t protocol);
	int (*route)(struct net *net, struct dst_entry **dst,
		     struct flowi *fl, bool strict);
	void (*saveroute)(const struct sk_buff *skb,
			  struct nf_queue_entry *entry);
	int (*reroute)(struct net *net, struct sk_buff *skb,
		       const struct nf_queue_entry *entry);
	int route_key_size;
};

extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
{
	return rcu_dereference(nf_afinfo[family]);
}

static inline __sum16
nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
	    u_int8_t protocol, unsigned short family)
{
	const struct nf_afinfo *afinfo;
	__sum16 csum = 0;

	rcu_read_lock();
	afinfo = nf_get_afinfo(family);
	if (afinfo)
		csum = afinfo->checksum(skb, hook, dataoff, protocol);
	rcu_read_unlock();
	return csum;
}

static inline __sum16
nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
		    unsigned int dataoff, unsigned int len,
		    u_int8_t protocol, unsigned short family)
{
	const struct nf_afinfo *afinfo;
	__sum16 csum = 0;

	rcu_read_lock();
	afinfo = nf_get_afinfo(family);
	if (afinfo)
		csum = afinfo->checksum_partial(skb, hook, dataoff, len,
						protocol);
	rcu_read_unlock();
	return csum;
}

int nf_register_afinfo(const struct nf_afinfo *afinfo);
void nf_unregister_afinfo(const struct nf_afinfo *afinfo);

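/*
 * Example (editor's illustrative sketch, not part of this header): protocol
 * helpers commonly use nf_checksum() to validate the transport checksum
 * before parsing the header; a zero result means the checksum is good.
 * "dataoff" (the transport header offset) is assumed to have been computed
 * by the caller, e.g. from the IP header length.
 *
 *	if (nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
 *		// bad TCP checksum: typically drop the packet here
 *		return NF_DROP;
 *	}
 */
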
#include <net/flow.h>
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);

static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
	void (*decodefn)(struct sk_buff *, struct flowi *);

	rcu_read_lock();
	decodefn = rcu_dereference(nf_nat_decode_session_hook);
	if (decodefn)
		decodefn(skb, fl);
	rcu_read_unlock();
#endif
}

#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	return okfn(net, sk, skb);
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	struct sk_buff *skb, struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return okfn(net, sk, skb);
}

static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return 1;
}

struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
#endif /* CONFIG_NETFILTER */

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <linux/netfilter/nf_conntrack_zones_common.h>

extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif

struct nf_conn;
enum ip_conntrack_info;
struct nlattr;

struct nfnl_ct_hook {
	struct nf_conn *(*get_ct)(const struct sk_buff *skb,
				  enum ip_conntrack_info *ctinfo);
	size_t (*build_size)(const struct nf_conn *ct);
	int (*build)(struct sk_buff *skb, struct nf_conn *ct,
		     enum ip_conntrack_info ctinfo,
		     u_int16_t ct_attr, u_int16_t ct_info_attr);
	int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
	int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
			     u32 portid, u32 report);
	void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo, s32 off);
};
extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;

/**
 * nf_skb_duplicated - TEE target has sent a packet
 *
 * When an xtables target sends a packet, the OUTPUT and POSTROUTING
 * hooks are traversed again, i.e. nft and xtables are invoked recursively.
 *
 * This is used by the xtables TEE target to prevent the duplicated skb from
 * being duplicated again.
 */
DECLARE_PER_CPU(bool, nf_skb_duplicated);

#endif /* __LINUX_NETFILTER_H */