// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/list.h>
#include <linux/cgroup-defs.h>
#include <net/gso.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/netfilter/nf_queue.h>
#include <net/netns/generic.h>

#include <linux/atomic.h>

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

#define NFQNL_QMAX_DEFAULT 1024

/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
 * includes the header length. Thus, the maximum packet length that we
 * support is 65531 bytes. We send truncated packets if the specified length
 * is larger than that. Userspace can check for presence of NFQA_CAP_LEN
 * attribute to detect truncation.
 */
#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)

struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	struct rcu_head rcu;

	u32 peer_portid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;
	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
	spinlock_t	lock	____cacheline_aligned_in_smp;
	unsigned int	queue_total;
	unsigned int	id_sequence;		/* 'sequence' of pkt ids */
	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static unsigned int nfnl_queue_net_id __read_mostly;

#define INSTANCE_BUCKETS	16
struct nfnl_queue_net {
	spinlock_t instances_lock;
	struct hlist_head instance_table[INSTANCE_BUCKETS];
};

static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
{
	return net_generic(net, nfnl_queue_net_id);
}

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
{
	struct hlist_head *head;
	struct nfqnl_instance *inst;

	head = &q->instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
{
	struct nfqnl_instance *inst;
	unsigned int h;
	int err;

	spin_lock(&q->instances_lock);
	if (instance_lookup(q, queue_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	inst->queue_num = queue_num;
	inst->peer_portid = portid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = NFQNL_MAX_COPY_RANGE;
	inst->copy_mode = NFQNL_COPY_NONE;
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE)) {
		err = -EAGAIN;
		goto out_free;
	}

	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);

	spin_unlock(&q->instances_lock);

	return inst;

out_free:
	kfree(inst);
out_unlock:
	spin_unlock(&q->instances_lock);
	return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
						   rcu);

	nfqnl_flush(inst, NULL, 0);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
	hlist_del_rcu(&inst->hlist);
	call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
{
	spin_lock(&q->instances_lock);
	__instance_destroy(inst);
	spin_unlock(&q->instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_del(&entry->list);
	queue->queue_total--;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry)
		__dequeue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);

	return entry;
}

static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	const struct nf_ct_hook *ct_hook;
	int err;

	if (verdict == NF_ACCEPT ||
	    verdict == NF_REPEAT ||
	    verdict == NF_STOP) {
		rcu_read_lock();
		ct_hook = rcu_dereference(nf_ct_hook);
		if (ct_hook) {
			err = ct_hook->update(entry->state.net, entry->skb);
			if (err < 0)
				verdict = NF_DROP;
		}
		rcu_read_unlock();
	}
	nf_reinject(entry, verdict);
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nfqnl_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

static int
nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
		      bool csum_verify)
{
	__u32 flags = 0;

	if (packet->ip_summed == CHECKSUM_PARTIAL)
		flags = NFQA_SKB_CSUMNOTREADY;
	else if (csum_verify)
		flags = NFQA_SKB_CSUM_NOTVERIFIED;

	if (skb_is_gso(packet))
		flags |= NFQA_SKB_GSO;

	return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
}

static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
{
	const struct cred *cred;

	if (!sk_fullsock(sk))
		return 0;

	read_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_socket && sk->sk_socket->file) {
		cred = sk->sk_socket->file->f_cred;
		if (nla_put_be32(skb, NFQA_UID,
		    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
			goto nla_put_failure;
		if (nla_put_be32(skb, NFQA_GID,
		    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
			goto nla_put_failure;
	}
	read_unlock_bh(&sk->sk_callback_lock);
	return 0;

nla_put_failure:
	read_unlock_bh(&sk->sk_callback_lock);
	return -1;
}

static int nfqnl_put_sk_classid(struct sk_buff *skb, struct sock *sk)
{
#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
	if (sk && sk_fullsock(sk)) {
		u32 classid = sock_cgroup_classid(&sk->sk_cgrp_data);

		if (classid && nla_put_be32(skb, NFQA_CGROUP_CLASSID, htonl(classid)))
			return -1;
	}
#endif
	return 0;
}

static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
{
	u32 seclen = 0;
#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
	if (!skb || !sk_fullsock(skb->sk))
		return 0;

	read_lock_bh(&skb->sk->sk_callback_lock);

	if (skb->secmark)
		security_secid_to_secctx(skb->secmark, secdata, &seclen);

	read_unlock_bh(&skb->sk->sk_callback_lock);
#endif
	return seclen;
}

static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
{
	struct sk_buff *entskb = entry->skb;
	u32 nlalen = 0;

	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
		return 0;

	if (skb_vlan_tag_present(entskb))
		nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
					 nla_total_size(sizeof(__be16)));

	if (entskb->network_header > entskb->mac_header)
		nlalen += nla_total_size((entskb->network_header -
					  entskb->mac_header));

	return nlalen;
}
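
/* Sketch of the bridge metadata emitted below (assuming a VLAN-tagged
 * bridged frame); nfqnl_get_bridge_size() above must account for exactly
 * these attributes:
 *
 *	NFQA_VLAN (nested)
 *		NFQA_VLAN_TCI	(__be16)
 *		NFQA_VLAN_PROTO	(__be16, e.g. the 802.1Q ethertype)
 *	NFQA_L2HDR (raw bytes, mac header up to the network header)
 */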

static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
{
	struct sk_buff *entskb = entry->skb;

	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
		return 0;

	if (skb_vlan_tag_present(entskb)) {
		struct nlattr *nest;

		nest = nla_nest_start(skb, NFQA_VLAN);
		if (!nest)
			goto nla_put_failure;

		if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
		    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
			goto nla_put_failure;

		nla_nest_end(skb, nest);
	}

	if (entskb->mac_header < entskb->network_header) {
		int len = (int)(entskb->network_header - entskb->mac_header);

		if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -1;
}

static struct sk_buff *
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry,
			   __be32 **packet_id_ptr)
{
	size_t size;
	size_t data_len = 0, cap_len = 0;
	unsigned int hlen = 0;
	struct sk_buff *skb;
	struct nlattr *nla;
	struct nfqnl_msg_packet_hdr *pmsg;
	struct nlmsghdr *nlh;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info ctinfo = 0;
	const struct nfnl_ct_hook *nfnl_ct;
	bool csum_verify;
	char *secdata = NULL;
	u32 seclen = 0;
	ktime_t tstamp;

	size = nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* priority */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
		+ nla_total_size(sizeof(u_int32_t))	/* classid */
#endif
		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */

	tstamp = skb_tstamp_cond(entskb, false);
	if (tstamp)
		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	size += nfqnl_get_bridge_size(entry);

	if (entry->state.hook <= NF_INET_FORWARD ||
	    (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
		csum_verify = !skb_csum_unnecessary(entskb);
	else
		csum_verify = false;

	outdev = entry->state.out;

	switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		break;

	case NFQNL_COPY_PACKET:
		if (!(queue->flags & NFQA_CFG_F_GSO) &&
		    entskb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(entskb))
			return NULL;

		data_len = READ_ONCE(queue->copy_range);
		if (data_len > entskb->len)
			data_len = entskb->len;

		hlen = skb_zerocopy_headlen(entskb);
		hlen = min_t(unsigned int, hlen, data_len);
		size += sizeof(struct nlattr) + hlen;
		cap_len = entskb->len;
		break;
	}
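
	/* Note: for NFQNL_COPY_PACKET only the linear head (hlen bytes) is
	 * copied into the netlink skb; the rest of the payload is attached
	 * page by page via skb_zerocopy() further down, so "size" only
	 * reserves tailroom for the attribute header plus the head.
	 */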

	nfnl_ct = rcu_dereference(nfnl_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	if (queue->flags & NFQA_CFG_F_CONNTRACK) {
		if (nfnl_ct != NULL) {
			ct = nf_ct_get(entskb, &ctinfo);
			if (ct != NULL)
				size += nfnl_ct->build_size(ct);
		}
	}
#endif

	if (queue->flags & NFQA_CFG_F_UID_GID) {
		size += (nla_total_size(sizeof(u_int32_t))	/* uid */
			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
	}

	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
		seclen = nfqnl_get_sk_secctx(entskb, &secdata);
		if (seclen)
			size += nla_total_size(seclen);
	}

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		skb_tx_error(entskb);
		goto nlmsg_failure;
	}

	nlh = nfnl_msg_put(skb, 0, 0,
			   nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET),
			   0, entry->state.pf, NFNETLINK_V0,
			   htons(queue->queue_num));
	if (!nlh) {
		skb_tx_error(entskb);
		kfree_skb(skb);
		goto nlmsg_failure;
	}

	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
	pmsg = nla_data(nla);
	pmsg->hw_protocol	= entskb->protocol;
	pmsg->hook		= entry->state.hook;
	*packet_id_ptr		= &pmsg->packet_id;

	indev = entry->state.in;
	if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->state.pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			int physinif;

			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;

			physinif = nf_bridge_get_physinif(entskb);
			if (physinif &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(physinif)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->state.pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			int physoutif;

			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;

			physoutif = nf_bridge_get_physoutif(entskb);
			if (physoutif &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(physoutif)))
				goto nla_put_failure;
		}
#endif
	}
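
	/* The attributes below are conditional: a zero mark or priority and
	 * a missing hardware header or timestamp simply cause the attribute
	 * to be omitted, so userspace must treat absence as
	 * "default/unknown" rather than as an error.
	 */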

	if (entskb->mark &&
	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
		goto nla_put_failure;

	if (entskb->priority &&
	    nla_put_be32(skb, NFQA_PRIORITY, htonl(entskb->priority)))
		goto nla_put_failure;

	if (indev && entskb->dev &&
	    skb_mac_header_was_set(entskb) &&
	    skb_mac_header_len(entskb) != 0) {
		struct nfqnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (nfqnl_put_bridge(entry, skb) < 0)
		goto nla_put_failure;

	if (entry->state.hook <= NF_INET_FORWARD && tstamp) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timespec64 kts = ktime_to_timespec64(tstamp);

		ts.sec = cpu_to_be64(kts.tv_sec);
		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);

		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
		goto nla_put_failure;

	if (nfqnl_put_sk_classid(skb, entskb->sk) < 0)
		goto nla_put_failure;

	if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
		goto nla_put_failure;

	if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
		goto nla_put_failure;

	if (cap_len > data_len &&
	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
		goto nla_put_failure;

	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;

		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
			goto nla_put_failure;

		nla = skb_put(skb, sizeof(*nla));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = nla_attr_size(data_len);

		if (skb_zerocopy(skb, entskb, data_len, hlen))
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb->len;
	if (seclen)
		security_release_secctx(secdata, seclen);
	return skb;

nla_put_failure:
	skb_tx_error(entskb);
	kfree_skb(skb);
	net_err_ratelimited("nf_queue: error creating packet message\n");
nlmsg_failure:
	if (seclen)
		security_release_secctx(secdata, seclen);
	return NULL;
}

static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
	const struct nf_conn *ct = (void *)skb_nfct(entry->skb);

	if (ct && ((ct->status & flags) == IPS_DYING))
		return true;
#endif
	return false;
}

static int
__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
		       struct nf_queue_entry *entry)
{
	struct sk_buff *nskb;
	int err = -ENOBUFS;
	__be32 *packet_id_ptr;
	int failopen = 0;

	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
	if (nskb == NULL) {
		err = -ENOMEM;
		goto err_out;
	}
	spin_lock_bh(&queue->lock);

	if (nf_ct_drop_unconfirmed(entry))
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			failopen = 1;
			err = 0;
		} else {
			queue->queue_dropped++;
			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
					     queue->queue_total);
		}
		goto err_out_free_nskb;
	}
	entry->id = ++queue->id_sequence;
	*packet_id_ptr = htonl(entry->id);

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	err = nfnetlink_unicast(nskb, net, queue->peer_portid);
	if (err < 0) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			failopen = 1;
			err = 0;
		} else {
			queue->queue_user_dropped++;
		}
		goto err_out_unlock;
	}
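
	/* Accounting happens only after userspace accepted the message: the
	 * entry joins queue_list under queue->lock, where it waits for a
	 * verdict addressed to entry->id.
	 */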

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
	if (failopen)
		nfqnl_reinject(entry, NF_ACCEPT);
err_out:
	return err;
}

static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);

	if (!entry)
		return NULL;

	if (nf_queue_entry_get_refs(entry))
		return entry;

	kfree(entry);
	return NULL;
}

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* When called from bridge netfilter, skb->data must point to MAC header
 * before calling skb_gso_segment(). Else, original MAC header is lost
 * and segmented skbs will be sent to wrong destination.
 */
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
	if (nf_bridge_info_get(skb))
		__skb_push(skb, skb->network_header - skb->mac_header);
}

static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
	if (nf_bridge_info_get(skb))
		__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s)		do {} while (0)
#define nf_bridge_adjust_segmented_data(s)	do {} while (0)
#endif

static int
__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
			   struct sk_buff *skb, struct nf_queue_entry *entry)
{
	int ret = -ENOMEM;
	struct nf_queue_entry *entry_seg;

	nf_bridge_adjust_segmented_data(skb);

	if (skb->next == NULL) { /* last packet, no need to copy entry */
		struct sk_buff *gso_skb = entry->skb;

		entry->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry);
		if (ret)
			entry->skb = gso_skb;
		return ret;
	}

	skb_mark_not_on_list(skb);

	entry_seg = nf_queue_entry_dup(entry);
	if (entry_seg) {
		entry_seg->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
		if (ret)
			nf_queue_entry_free(entry_seg);
	}
	return ret;
}

static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	unsigned int queued;
	struct nfqnl_instance *queue;
	struct sk_buff *skb, *segs, *nskb;
	int err = -ENOBUFS;
	struct net *net = entry->state.net;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	/* rcu_read_lock()ed by nf_hook_thresh */
	queue = instance_lookup(q, queuenum);
	if (!queue)
		return -ESRCH;

	if (queue->copy_mode == NFQNL_COPY_NONE)
		return -EINVAL;

	skb = entry->skb;

	switch (entry->state.pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
		return __nfqnl_enqueue_packet(net, queue, entry);

	nf_bridge_adjust_skb_data(skb);
	segs = skb_gso_segment(skb, 0);
	/* Does not use PTR_ERR to limit the number of error codes that can be
	 * returned by nf_queue. For instance, callers rely on -ESRCH to
	 * mean 'ignore this hook'.
	 */
	if (IS_ERR_OR_NULL(segs))
		goto out_err;
	queued = 0;
	err = 0;
	skb_list_walk_safe(segs, segs, nskb) {
		if (err == 0)
			err = __nfqnl_enqueue_packet_gso(net, queue,
							 segs, entry);
		if (err == 0)
			queued++;
		else
			kfree_skb(segs);
	}

	if (queued) {
		if (err) /* some segments are already queued */
			nf_queue_entry_free(entry);
		kfree_skb(skb);
		return 0;
	}
out_err:
	nf_bridge_adjust_segmented_data(skb);
	return err;
}

static int
nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
{
	struct sk_buff *nskb;

	if (diff < 0) {
		unsigned int min_len = skb_transport_offset(e->skb);

		if (data_len < min_len)
			return -EINVAL;

		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb)
				return -ENOMEM;
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (skb_ensure_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status = 0;

	spin_lock_bh(&queue->lock);
	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
			queue->copy_range = NFQNL_MAX_COPY_RANGE;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;

	}
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	int physinif, physoutif;

	physinif = nf_bridge_get_physinif(entry->skb);
	physoutif = nf_bridge_get_physoutif(entry->skb);

	if (physinif == ifindex || physoutif == ifindex)
		return 1;
#endif
	if (entry->state.in)
		if (entry->state.in->ifindex == ifindex)
			return 1;
	if (entry->state.out)
		if (entry->state.out->ifindex == ifindex)
			return 1;

	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(struct net *net, int ifindex)
{
	int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	rcu_read_lock();

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct nfqnl_instance *inst;
		struct hlist_head *head = &q->instance_table[i];

		hlist_for_each_entry_rcu(inst, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	rcu_read_unlock();
}

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};
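
/* ->nf_hook_drop callback of the nf_queue handler (see nfqh below):
 * invoked when the netfilter hooks of @net are torn down, so every
 * pending entry in every instance of that netns is flushed (reinjected
 * with NF_DROP by nfqnl_flush()).
 */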

static void nfqnl_nf_hook_drop(struct net *net)
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	int i;

	/* This function is also called on net namespace error unwind,
	 * when pernet_ops->init() failed and ->exit() functions of the
	 * previous pernet_ops gets called.
	 *
	 * This may result in a call to nfqnl_nf_hook_drop() before
	 * struct nfnl_queue_net was allocated.
	 */
	if (!q)
		return;

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct nfqnl_instance *inst;
		struct hlist_head *head = &q->instance_table[i];

		hlist_for_each_entry_rcu(inst, head, hlist)
			nfqnl_flush(inst, NULL, 0);
	}
}

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;
	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this portid */
		spin_lock(&q->instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &q->instance_table[i];

			hlist_for_each_entry_safe(inst, t2, head, hlist) {
				if (n->portid == inst->peer_portid)
					__instance_destroy(inst);
			}
		}
		spin_unlock(&q->instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
	[NFQA_VLAN_TCI]		= { .type = NLA_U16},
	[NFQA_VLAN_PROTO]	= { .type = NLA_U16},
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
	[NFQA_CT]		= { .type = NLA_UNSPEC },
	[NFQA_EXP]		= { .type = NLA_UNSPEC },
	[NFQA_VLAN]		= { .type = NLA_NESTED },
	[NFQA_PRIORITY]		= { .type = NLA_U32 },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PRIORITY]		= { .type = NLA_U32 },
};

static struct nfqnl_instance *
verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
{
	struct nfqnl_instance *queue;

	queue = instance_lookup(q, queue_num);
	if (!queue)
		return ERR_PTR(-ENODEV);

	if (queue->peer_portid != nlportid)
		return ERR_PTR(-EPERM);

	return queue;
}

static struct nfqnl_msg_verdict_hdr*
verdicthdr_get(const struct nlattr * const nfqa[])
{
	struct nfqnl_msg_verdict_hdr *vhdr;
	unsigned int verdict;

	if (!nfqa[NFQA_VERDICT_HDR])
		return NULL;

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
		return NULL;
	return vhdr;
}

static int nfq_id_after(unsigned int id, unsigned int max)
{
	return (int)(id - max) > 0;
}
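
/* nfq_id_after() compares packet ids with wraparound-safe serial number
 * arithmetic. Worked example: id = 2, max = 0xfffffffe gives
 * (int)(2 - 0xfffffffe) == 4 > 0, so id counts as "after" max even though
 * it is numerically smaller; a plain "id > max" would get this wrong.
 */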

static int nfqnl_recv_verdict_batch(struct sk_buff *skb,
				    const struct nfnl_info *info,
				    const struct nlattr * const nfqa[])
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
	u16 queue_num = ntohs(info->nfmsg->res_id);
	struct nf_queue_entry *entry, *tmp;
	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict, maxid;
	LIST_HEAD(batch_list);

	queue = verdict_instance_lookup(q, queue_num,
					NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);
	maxid = ntohl(vhdr->id);

	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
		if (nfq_id_after(entry->id, maxid))
			break;
		__dequeue_entry(queue, entry);
		list_add_tail(&entry->list, &batch_list);
	}

	spin_unlock_bh(&queue->lock);

	if (list_empty(&batch_list))
		return -ENOENT;

	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
		if (nfqa[NFQA_MARK])
			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

		if (nfqa[NFQA_PRIORITY])
			entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));

		nfqnl_reinject(entry, verdict);
	}
	return 0;
}

static struct nf_conn *nfqnl_ct_parse(const struct nfnl_ct_hook *nfnl_ct,
				      const struct nlmsghdr *nlh,
				      const struct nlattr * const nfqa[],
				      struct nf_queue_entry *entry,
				      enum ip_conntrack_info *ctinfo)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	struct nf_conn *ct;

	ct = nf_ct_get(entry->skb, ctinfo);
	if (ct == NULL)
		return NULL;

	if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
		return NULL;

	if (nfqa[NFQA_EXP])
		nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
				       NETLINK_CB(entry->skb).portid,
				       nlmsg_report(nlh));
	return ct;
#else
	return NULL;
#endif
}

static int nfqa_parse_bridge(struct nf_queue_entry *entry,
			     const struct nlattr * const nfqa[])
{
	if (nfqa[NFQA_VLAN]) {
		struct nlattr *tb[NFQA_VLAN_MAX + 1];
		int err;

		err = nla_parse_nested_deprecated(tb, NFQA_VLAN_MAX,
						  nfqa[NFQA_VLAN],
						  nfqa_vlan_policy, NULL);
		if (err < 0)
			return err;

		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
			return -EINVAL;

		__vlan_hwaccel_put_tag(entry->skb,
				       nla_get_be16(tb[NFQA_VLAN_PROTO]),
				       ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
	}

	if (nfqa[NFQA_L2HDR]) {
		int mac_header_len = entry->skb->network_header -
				     entry->skb->mac_header;

		if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
			return -EINVAL;
		else if (mac_header_len > 0)
			memcpy(skb_mac_header(entry->skb),
			       nla_data(nfqa[NFQA_L2HDR]),
			       mac_header_len);
	}

	return 0;
}
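
/* A single verdict message carries NFQA_VERDICT_HDR plus optional
 * attributes, applied in the order parsed below: conntrack updates
 * (NFQA_CT, NFQA_EXP), bridge metadata (NFQA_VLAN, NFQA_L2HDR), a
 * replacement payload (NFQA_PAYLOAD), then skb mark and priority, before
 * the entry is reinjected with the requested verdict.
 */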

static int nfqnl_recv_verdict(struct sk_buff *skb, const struct nfnl_info *info,
			      const struct nlattr * const nfqa[])
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
	const struct nfnl_ct_hook *nfnl_ct;
	struct nfqnl_msg_verdict_hdr *vhdr;
	enum ip_conntrack_info ctinfo;
	struct nfqnl_instance *queue;
	struct nf_queue_entry *entry;
	struct nf_conn *ct = NULL;
	unsigned int verdict;
	int err;

	queue = verdict_instance_lookup(q, queue_num,
					NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL)
		return -ENOENT;

	/* rcu lock already held from nfnl->call_rcu. */
	nfnl_ct = rcu_dereference(nfnl_ct_hook);

	if (nfqa[NFQA_CT]) {
		if (nfnl_ct != NULL)
			ct = nfqnl_ct_parse(nfnl_ct, info->nlh, nfqa, entry,
					    &ctinfo);
	}

	if (entry->state.pf == PF_BRIDGE) {
		err = nfqa_parse_bridge(entry, nfqa);
		if (err < 0)
			return err;
	}

	if (nfqa[NFQA_PAYLOAD]) {
		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
		int diff = payload_len - entry->skb->len;

		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 payload_len, entry, diff) < 0)
			verdict = NF_DROP;

		if (ct && diff)
			nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

	if (nfqa[NFQA_PRIORITY])
		entry->skb->priority = ntohl(nla_get_be32(nfqa[NFQA_PRIORITY]));

	nfqnl_reinject(entry, verdict);
	return 0;
}

static int nfqnl_recv_unsupp(struct sk_buff *skb, const struct nfnl_info *info,
			     const struct nlattr * const cda[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
	[NFQA_CFG_QUEUE_MAXLEN]	= { .type = NLA_U32 },
	[NFQA_CFG_MASK]		= { .type = NLA_U32 },
	[NFQA_CFG_FLAGS]	= { .type = NLA_U32 },
};

static const struct nf_queue_handler nfqh = {
	.outfn		= nfqnl_enqueue_packet,
	.nf_hook_drop	= nfqnl_nf_hook_drop,
};

static int nfqnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
			     const struct nlattr * const nfqa[])
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
	u_int16_t queue_num = ntohs(info->nfmsg->res_id);
	struct nfqnl_msg_config_cmd *cmd = NULL;
	struct nfqnl_instance *queue;
	__u32 flags = 0, mask = 0;
	int ret = 0;

	if (nfqa[NFQA_CFG_CMD]) {
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);

		/* Obsolete commands without queue context */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND: return 0;
		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
		}
	}
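
	/* Flags are applied read-modify-write under the mask (see further
	 * down: queue->flags = (queue->flags & ~mask) | (flags & mask)).
	 * E.g. enabling fail-open without touching other flags means sending
	 * NFQA_CFG_FLAGS = NFQA_CFG_MASK = NFQA_CFG_F_FAIL_OPEN; bits outside
	 * the mask are left unchanged.
	 */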

	/* Check if we support these flags in the first place; the
	 * dependencies must be present as well so that atomicity isn't
	 * broken.
	 */
	if (nfqa[NFQA_CFG_FLAGS]) {
		if (!nfqa[NFQA_CFG_MASK]) {
			/* A mask is needed to specify which flags are being
			 * changed.
			 */
			return -EINVAL;
		}

		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

		if (flags >= NFQA_CFG_F_MAX)
			return -EOPNOTSUPP;

#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
		if (flags & mask & NFQA_CFG_F_SECCTX)
			return -EOPNOTSUPP;
#endif
		if ((flags & mask & NFQA_CFG_F_CONNTRACK) &&
		    !rcu_access_pointer(nfnl_ct_hook)) {
#ifdef CONFIG_MODULES
			nfnl_unlock(NFNL_SUBSYS_QUEUE);
			request_module("ip_conntrack_netlink");
			nfnl_lock(NFNL_SUBSYS_QUEUE);
			if (rcu_access_pointer(nfnl_ct_hook))
				return -EAGAIN;
#endif
			return -EOPNOTSUPP;
		}
	}

	rcu_read_lock();
	queue = instance_lookup(q, queue_num);
	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
		ret = -EPERM;
		goto err_out_unlock;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto err_out_unlock;
			}
			queue = instance_create(q, queue_num,
						NETLINK_CB(skb).portid);
			if (IS_ERR(queue)) {
				ret = PTR_ERR(queue);
				goto err_out_unlock;
			}
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue) {
				ret = -ENODEV;
				goto err_out_unlock;
			}
			instance_destroy(q, queue);
			goto err_out_unlock;
		case NFQNL_CFG_CMD_PF_BIND:
		case NFQNL_CFG_CMD_PF_UNBIND:
			break;
		default:
			ret = -ENOTSUPP;
			goto err_out_unlock;
		}
	}

	if (!queue) {
		ret = -ENODEV;
		goto err_out_unlock;
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params =
			nla_data(nfqa[NFQA_CFG_PARAMS]);

		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);

		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

	if (nfqa[NFQA_CFG_FLAGS]) {
		spin_lock_bh(&queue->lock);
		queue->flags &= ~mask;
		queue->flags |= flags & mask;
		spin_unlock_bh(&queue->lock);
	}

err_out_unlock:
	rcu_read_unlock();
	return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= {
		.call		= nfqnl_recv_unsupp,
		.type		= NFNL_CB_RCU,
		.attr_count	= NFQA_MAX,
	},
	[NFQNL_MSG_VERDICT]	= {
		.call		= nfqnl_recv_verdict,
		.type		= NFNL_CB_RCU,
		.attr_count	= NFQA_MAX,
		.policy		= nfqa_verdict_policy
	},
	[NFQNL_MSG_CONFIG]	= {
		.call		= nfqnl_recv_config,
		.type		= NFNL_CB_MUTEX,
		.attr_count	= NFQA_CFG_MAX,
		.policy		= nfqa_cfg_policy
	},
	[NFQNL_MSG_VERDICT_BATCH] = {
		.call		= nfqnl_recv_verdict_batch,
		.type		= NFNL_CB_RCU,
		.attr_count	= NFQA_MAX,
		.policy		= nfqa_verdict_batch_policy
	},
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};
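
/* /proc/net/netfilter/nfnetlink_queue: iterate all instances in this
 * netns, bucket by bucket. instances_lock is held across the whole dump
 * (taken in seq_start(), released in seq_stop()), so the walk does not
 * need RCU.
 */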

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;
	struct net *net;
	struct nfnl_queue_net *q;

	if (!st)
		return NULL;

	net = seq_file_net(seq);
	q = nfnl_queue_pernet(net);
	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&q->instance_table[st->bucket]))
			return q->instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	h = h->next;
	while (!h) {
		struct nfnl_queue_net *q;

		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		q = nfnl_queue_pernet(net);
		h = q->instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;

	head = get_first(seq);
	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *s, loff_t *pos)
	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
	return get_idx(s, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
		   inst->queue_num,
		   inst->peer_portid, inst->queue_total,
		   inst->copy_mode, inst->copy_range,
		   inst->queue_dropped, inst->queue_user_dropped,
		   inst->id_sequence, 1);
	return 0;
}

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};
#endif /* PROC_FS */

static int __net_init nfnl_queue_net_init(struct net *net)
{
	unsigned int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&q->instance_table[i]);

	spin_lock_init(&q->instances_lock);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("nfnetlink_queue", 0440, net->nf.proc_netfilter,
			     &nfqnl_seq_ops, sizeof(struct iter_state)))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit nfnl_queue_net_exit(struct net *net)
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	unsigned int i;

#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
	for (i = 0; i < INSTANCE_BUCKETS; i++)
		WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
}

static struct pernet_operations nfnl_queue_net_ops = {
	.init	= nfnl_queue_net_init,
	.exit	= nfnl_queue_net_exit,
	.id	= &nfnl_queue_net_id,
	.size	= sizeof(struct nfnl_queue_net),
};
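
/* Init registers in dependency order: pernet state first, then the
 * netlink release notifier, the nfnetlink subsystem (which makes the
 * queue visible to userspace), the netdevice notifier, and finally the
 * queue handler itself; nfnetlink_queue_fini() unwinds in reverse.
 */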

static int __init nfnetlink_queue_init(void)
{
	int status;

	status = register_pernet_subsys(&nfnl_queue_net_ops);
	if (status < 0) {
		pr_err("failed to register pernet ops\n");
		goto out;
	}

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		pr_err("failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	status = register_netdevice_notifier(&nfqnl_dev_notifier);
	if (status < 0) {
		pr_err("failed to register netdevice notifier\n");
		goto cleanup_netlink_subsys;
	}

	nf_register_queue_handler(&nfqh);

	return status;

cleanup_netlink_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_queue_net_ops);
out:
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handler();
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_queue_net_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);