/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/netfilter/nf_queue.h>
#include <net/netns/generic.h>

#include <linux/atomic.h>

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

/* We're using struct nlattr, which has a 16-bit nla_len. Note that nla_len
 * includes the header length. Thus, the maximum packet length that we
 * support is 65531 bytes. We send truncated packets if the specified length
 * is larger than that. Userspace can check for the presence of the
 * NFQA_CAP_LEN attribute to detect truncation.
 */
#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)

struct nfqnl_instance {
	struct hlist_node hlist;	/* global list of queues */
	struct rcu_head rcu;

	u32 peer_portid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	u_int16_t queue_num;		/* number of this queue */
	u_int8_t copy_mode;
	u_int32_t flags;		/* Set using NFQA_CFG_FLAGS */
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
	spinlock_t lock;
	unsigned int queue_total;
	unsigned int id_sequence;	/* 'sequence' of pkt ids */
	struct list_head queue_list;	/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static int nfnl_queue_net_id __read_mostly;

#define INSTANCE_BUCKETS 16
struct nfnl_queue_net {
	spinlock_t instances_lock;
	struct hlist_head instance_table[INSTANCE_BUCKETS];
};

static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
{
	return net_generic(net, nfnl_queue_net_id);
}

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
{
	struct hlist_head *head;
	struct nfqnl_instance *inst;

	head = &q->instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
{
	struct nfqnl_instance *inst;
	unsigned int h;
	int err;

	spin_lock(&q->instances_lock);
	if (instance_lookup(q, queue_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	inst->queue_num = queue_num;
	inst->peer_portid = portid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = NFQNL_MAX_COPY_RANGE;
	inst->copy_mode = NFQNL_COPY_NONE;
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE)) {
		err = -EAGAIN;
		goto out_free;
	}

	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);

	spin_unlock(&q->instances_lock);

	return inst;

out_free:
	kfree(inst);
out_unlock:
	spin_unlock(&q->instances_lock);
	return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
						   rcu);

	nfqnl_flush(inst, NULL, 0);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
	hlist_del_rcu(&inst->hlist);
	call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
{
	spin_lock(&q->instances_lock);
	__instance_destroy(inst);
	spin_unlock(&q->instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_del(&entry->list);
	queue->queue_total--;
}

/* find the entry with the given packet id and unlink it from the queue */
static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry)
		__dequeue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);

	return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

static int
nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
		      bool csum_verify)
{
	__u32 flags = 0;

	if (packet->ip_summed == CHECKSUM_PARTIAL)
		flags = NFQA_SKB_CSUMNOTREADY;
	else if (csum_verify)
		flags = NFQA_SKB_CSUM_NOTVERIFIED;

	if (skb_is_gso(packet))
		flags |= NFQA_SKB_GSO;

	return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
}

static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
{
	const struct cred *cred;

	if (!sk_fullsock(sk))
		return 0;

	read_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_socket && sk->sk_socket->file) {
		cred = sk->sk_socket->file->f_cred;
		if (nla_put_be32(skb, NFQA_UID,
		    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
			goto nla_put_failure;
		if (nla_put_be32(skb, NFQA_GID,
		    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
			goto nla_put_failure;
	}
	read_unlock_bh(&sk->sk_callback_lock);
	return 0;

nla_put_failure:
	read_unlock_bh(&sk->sk_callback_lock);
	return -1;
}

static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
{
	u32 seclen = 0;
#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
	if (!skb || !sk_fullsock(skb->sk))
		return 0;

	read_lock_bh(&skb->sk->sk_callback_lock);

	if (skb->secmark)
		security_secid_to_secctx(skb->secmark, secdata, &seclen);

	read_unlock_bh(&skb->sk->sk_callback_lock);
#endif
	return seclen;
}

/* Build an NFQNL_MSG_PACKET netlink message describing the queued packet.
 * Returns NULL on allocation failure or if the message cannot be built.
 */
static struct sk_buff *
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry,
			   __be32 **packet_id_ptr)
{
	size_t size;
	size_t data_len = 0, cap_len = 0;
	unsigned int hlen = 0;
	struct sk_buff *skb;
	struct nlattr *nla;
	struct nfqnl_msg_packet_hdr *pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info uninitialized_var(ctinfo);
	struct nfnl_ct_hook *nfnl_ct;
	bool csum_verify;
	char *secdata = NULL;
	u32 seclen = 0;

	size = nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */

	if (entskb->tstamp.tv64)
		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	if (entry->state.hook <= NF_INET_FORWARD ||
	    (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
		csum_verify = !skb_csum_unnecessary(entskb);
	else
		csum_verify = false;

	outdev = entry->state.out;

	switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		break;

	case NFQNL_COPY_PACKET:
		if (!(queue->flags & NFQA_CFG_F_GSO) &&
		    entskb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(entskb))
			return NULL;

		data_len = ACCESS_ONCE(queue->copy_range);
		if (data_len > entskb->len)
			data_len = entskb->len;

		hlen = skb_zerocopy_headlen(entskb);
		hlen = min_t(unsigned int, hlen, data_len);
		size += sizeof(struct nlattr) + hlen;
		cap_len = entskb->len;
		break;
	}

	nfnl_ct = rcu_dereference(nfnl_ct_hook);

	if (queue->flags & NFQA_CFG_F_CONNTRACK) {
		if (nfnl_ct != NULL) {
			ct = nfnl_ct->get_ct(entskb, &ctinfo);
			if (ct != NULL)
				size += nfnl_ct->build_size(ct);
		}
	}

	if (queue->flags & NFQA_CFG_F_UID_GID) {
		size += (nla_total_size(sizeof(u_int32_t))	/* uid */
			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
	}

	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
		seclen = nfqnl_get_sk_secctx(entskb, &secdata);
		if (seclen)
			size += nla_total_size(seclen);
	}

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		skb_tx_error(entskb);
		return NULL;
	}

	nlh = nlmsg_put(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	if (!nlh) {
		skb_tx_error(entskb);
		kfree_skb(skb);
		return NULL;
	}
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = entry->state.pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
	pmsg = nla_data(nla);
	pmsg->hw_protocol = entskb->protocol;
	pmsg->hook = entry->state.hook;
	*packet_id_ptr = &pmsg->packet_id;

	indev = entry->state.in;
	if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->state.pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			    /* this is the bridge group "brX" */
			    /* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			int physinif;

			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;

			physinif = nf_bridge_get_physinif(entskb);
			if (physinif &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(physinif)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->state.pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			    /* this is the bridge group "brX" */
			    /* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			int physoutif;

			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;

			physoutif = nf_bridge_get_physoutif(entskb);
			if (physoutif &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(physoutif)))
				goto nla_put_failure;
		}
#endif
	}

	if (entskb->mark &&
	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
		goto nla_put_failure;

	if (indev && entskb->dev &&
	    entskb->mac_header != entskb->network_header) {
		struct nfqnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		/* the timestamp belongs to the queued packet (entskb), not
		 * to the netlink skb being built here */
		struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);

		ts.sec = cpu_to_be64(kts.tv_sec);
		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);

		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
		goto nla_put_failure;

	if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
		goto nla_put_failure;

	if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
		goto nla_put_failure;

	if (cap_len > data_len &&
	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
		goto nla_put_failure;

	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;

		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
			goto nla_put_failure;

		nla = (struct nlattr *)skb_put(skb, sizeof(*nla));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = nla_attr_size(data_len);

		if (skb_zerocopy(skb, entskb, data_len, hlen))
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb->len;
	return skb;

nla_put_failure:
	skb_tx_error(entskb);
	kfree_skb(skb);
	net_err_ratelimited("nf_queue: error creating packet message\n");
	return NULL;
}

static int
__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
		       struct nf_queue_entry *entry)
{
	struct sk_buff *nskb;
	int err = -ENOBUFS;
	__be32 *packet_id_ptr;
	int failopen = 0;

	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
	if (nskb == NULL) {
		err = -ENOMEM;
		goto err_out;
	}
	spin_lock_bh(&queue->lock);

	if (queue->queue_total >= queue->queue_maxlen) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			failopen = 1;
			err = 0;
		} else {
			queue->queue_dropped++;
			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
					     queue->queue_total);
		}
		goto err_out_free_nskb;
	}
	entry->id = ++queue->id_sequence;
	*packet_id_ptr = htonl(entry->id);

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
	if (err < 0) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			failopen = 1;
			err = 0;
		} else {
			queue->queue_user_dropped++;
		}
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
	if (failopen)
		nf_reinject(entry, NF_ACCEPT);
err_out:
	return err;
}

static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);

	if (entry)
		nf_queue_entry_get_refs(entry);
	return entry;
}

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* When called from bridge netfilter, skb->data must point to MAC header
 * before calling skb_gso_segment(). Else, original MAC header is lost
 * and segmented skbs will be sent to wrong destination.
 */
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
	if (skb->nf_bridge)
		__skb_push(skb, skb->network_header - skb->mac_header);
}

static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
	if (skb->nf_bridge)
		__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s) do {} while (0)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif

static void free_entry(struct nf_queue_entry *entry)
{
	nf_queue_entry_release_refs(entry);
	kfree(entry);
}

static int
__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
			   struct sk_buff *skb, struct nf_queue_entry *entry)
{
	int ret = -ENOMEM;
	struct nf_queue_entry *entry_seg;

	nf_bridge_adjust_segmented_data(skb);

	if (skb->next == NULL) { /* last packet, no need to copy entry */
		struct sk_buff *gso_skb = entry->skb;

		entry->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry);
		if (ret)
			entry->skb = gso_skb;
		return ret;
	}

	skb->next = NULL;

	entry_seg = nf_queue_entry_dup(entry);
	if (entry_seg) {
		entry_seg->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
		if (ret)
			free_entry(entry_seg);
	}
	return ret;
}

static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	unsigned int queued;
	struct nfqnl_instance *queue;
	struct sk_buff *skb, *segs;
	int err = -ENOBUFS;
	struct net *net = entry->state.net;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	/* rcu_read_lock()ed by nf_hook_slow() */
	queue = instance_lookup(q, queuenum);
	if (!queue)
		return -ESRCH;

	if (queue->copy_mode == NFQNL_COPY_NONE)
		return -EINVAL;

	skb = entry->skb;

	switch (entry->state.pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
		return __nfqnl_enqueue_packet(net, queue, entry);

	nf_bridge_adjust_skb_data(skb);
	segs = skb_gso_segment(skb, 0);
	/* Does not use PTR_ERR to limit the number of error codes that can be
	 * returned by nf_queue.  For instance, callers rely on -ESRCH to
	 * mean 'ignore this hook'.
	 */
	if (IS_ERR_OR_NULL(segs))
		goto out_err;
	queued = 0;
	err = 0;
	do {
		struct sk_buff *nskb = segs->next;

		if (err == 0)
			err = __nfqnl_enqueue_packet_gso(net, queue,
							 segs, entry);
		if (err == 0)
			queued++;
		else
			kfree_skb(segs);
		segs = nskb;
	} while (segs);

	if (queued) {
		if (err) /* some segments are already queued */
			free_entry(entry);
		kfree_skb(skb);
		return 0;
	}
out_err:
	nf_bridge_adjust_segmented_data(skb);
	return err;
}

static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
	struct sk_buff *nskb;

	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb) {
				printk(KERN_WARNING "nf_queue: OOM in mangle, dropping packet\n");
				return -ENOMEM;
			}
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status = 0;

	spin_lock_bh(&queue->lock);
	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
			queue->copy_range = NFQNL_MAX_COPY_RANGE;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;
	}
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->state.in)
		if (entry->state.in->ifindex == ifindex)
			return 1;
	if (entry->state.out)
		if (entry->state.out->ifindex == ifindex)
			return 1;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		int physinif, physoutif;

		physinif = nf_bridge_get_physinif(entry->skb);
		physoutif = nf_bridge_get_physoutif(entry->skb);

		if (physinif == ifindex || physoutif == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(struct net *net, int ifindex)
{
	int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	rcu_read_lock();

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct nfqnl_instance *inst;
		struct hlist_head *head = &q->instance_table[i];

		hlist_for_each_entry_rcu(inst, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	rcu_read_unlock();
}

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call = nfqnl_rcv_dev_event,
};

static int
nf_hook_cmp(struct nf_queue_entry *entry, unsigned long ops_ptr)
{
	return entry->elem == (struct nf_hook_ops *)ops_ptr;
}

static void nfqnl_nf_hook_drop(struct net *net, struct nf_hook_ops *hook)
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	int i;

	rcu_read_lock();
	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct nfqnl_instance *inst;
		struct hlist_head *head = &q->instance_table[i];

		hlist_for_each_entry_rcu(inst, head, hlist)
			nfqnl_flush(inst, nf_hook_cmp, (unsigned long)hook);
	}
	rcu_read_unlock();
}

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;
	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this portid */
		spin_lock(&q->instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &q->instance_table[i];

			hlist_for_each_entry_safe(inst, t2, head, hlist) {
				if (n->portid == inst->peer_portid)
					__instance_destroy(inst);
			}
		}
		spin_unlock(&q->instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call = nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
	[NFQA_CT]		= { .type = NLA_UNSPEC },
	[NFQA_EXP]		= { .type = NLA_UNSPEC },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
};

static struct nfqnl_instance *
verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
{
	struct nfqnl_instance *queue;

	queue = instance_lookup(q, queue_num);
	if (!queue)
		return ERR_PTR(-ENODEV);

	if (queue->peer_portid != nlportid)
		return ERR_PTR(-EPERM);

	return queue;
}

static struct nfqnl_msg_verdict_hdr *
verdicthdr_get(const struct nlattr * const nfqa[])
{
	struct nfqnl_msg_verdict_hdr *vhdr;
	unsigned int verdict;

	if (!nfqa[NFQA_VERDICT_HDR])
		return NULL;

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
		return NULL;
	return vhdr;
}

/* wrap-safe comparison: true if 'id' comes after 'max' in sequence space */
static int nfq_id_after(unsigned int id, unsigned int max)
{
	return (int)(id - max) > 0;
}

static int nfqnl_recv_verdict_batch(struct net *net, struct sock *ctnl,
				    struct sk_buff *skb,
				    const struct nlmsghdr *nlh,
				    const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nf_queue_entry *entry, *tmp;
	unsigned int verdict, maxid;
	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	LIST_HEAD(batch_list);
	u16 queue_num = ntohs(nfmsg->res_id);
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	queue = verdict_instance_lookup(q, queue_num,
					NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);
	maxid = ntohl(vhdr->id);

	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
		if (nfq_id_after(entry->id, maxid))
			break;
		__dequeue_entry(queue, entry);
		list_add_tail(&entry->list, &batch_list);
	}

	spin_unlock_bh(&queue->lock);

	if (list_empty(&batch_list))
		return -ENOENT;

	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
		if (nfqa[NFQA_MARK])
			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
		nf_reinject(entry, verdict);
	}
	return 0;
}

static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct,
				      const struct nlmsghdr *nlh,
				      const struct nlattr * const nfqa[],
				      struct nf_queue_entry *entry,
				      enum ip_conntrack_info *ctinfo)
{
	struct nf_conn *ct;

	ct = nfnl_ct->get_ct(entry->skb, ctinfo);
	if (ct == NULL)
		return NULL;

	if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
		return NULL;

	if (nfqa[NFQA_EXP])
		nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
				       NETLINK_CB(entry->skb).portid,
				       nlmsg_report(nlh));
	return ct;
}

static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
			      struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	enum ip_conntrack_info uninitialized_var(ctinfo);
	struct nfnl_ct_hook *nfnl_ct;
	struct nf_conn *ct = NULL;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	queue = instance_lookup(q, queue_num);
	if (!queue)
		queue = verdict_instance_lookup(q, queue_num,
						NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL)
		return -ENOENT;

	/* rcu lock already held from nfnl->call_rcu */
	nfnl_ct = rcu_dereference(nfnl_ct_hook);

	if (nfqa[NFQA_CT]) {
		if (nfnl_ct != NULL)
			ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
	}

	if (nfqa[NFQA_PAYLOAD]) {
		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
		int diff = payload_len - entry->skb->len;

		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 payload_len, entry, diff) < 0)
			verdict = NF_DROP;

		if (ct && diff)
			nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

	nf_reinject(entry, verdict);
	return 0;
}

static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
			     struct sk_buff *skb, const struct nlmsghdr *nlh,
			     const struct nlattr * const nfqa[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
	.outfn		= &nfqnl_enqueue_packet,
	.nf_hook_drop	= &nfqnl_nf_hook_drop,
};

static int nfqnl_recv_config(struct net *net, struct sock *ctnl,
			     struct sk_buff *skb, const struct nlmsghdr *nlh,
			     const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	struct nfqnl_msg_config_cmd *cmd = NULL;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	__u32 flags = 0, mask = 0;
	int ret = 0;

	if (nfqa[NFQA_CFG_CMD]) {
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);

		/* Obsolete commands without queue context */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND: return 0;
		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
		}
	}

	/* Check first whether we support these flags at all; their
	 * dependencies must also be present so we do not break atomicity.
	 */
	if (nfqa[NFQA_CFG_FLAGS]) {
		if (!nfqa[NFQA_CFG_MASK]) {
			/* A mask is needed to specify which flags are being
			 * changed.
			 */
			return -EINVAL;
		}

		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

		if (flags >= NFQA_CFG_F_MAX)
			return -EOPNOTSUPP;

#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
		if (flags & mask & NFQA_CFG_F_SECCTX)
			return -EOPNOTSUPP;
#endif
		if ((flags & mask & NFQA_CFG_F_CONNTRACK) &&
		    !rcu_access_pointer(nfnl_ct_hook)) {
#ifdef CONFIG_MODULES
			nfnl_unlock(NFNL_SUBSYS_QUEUE);
			request_module("ip_conntrack_netlink");
			nfnl_lock(NFNL_SUBSYS_QUEUE);
			if (rcu_access_pointer(nfnl_ct_hook))
				return -EAGAIN;
#endif
			return -EOPNOTSUPP;
		}
	}

	rcu_read_lock();
	queue = instance_lookup(q, queue_num);
	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
		ret = -EPERM;
		goto err_out_unlock;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto err_out_unlock;
			}
			queue = instance_create(q, queue_num,
						NETLINK_CB(skb).portid);
			if (IS_ERR(queue)) {
				ret = PTR_ERR(queue);
				goto err_out_unlock;
			}
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue) {
				ret = -ENODEV;
				goto err_out_unlock;
			}
			instance_destroy(q, queue);
			goto err_out_unlock;
		case NFQNL_CFG_CMD_PF_BIND:
		case NFQNL_CFG_CMD_PF_UNBIND:
			break;
		default:
			ret = -ENOTSUPP;
			goto err_out_unlock;
		}
	}

	if (!queue) {
		ret = -ENODEV;
		goto err_out_unlock;
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params =
			nla_data(nfqa[NFQA_CFG_PARAMS]);

		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);

		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

	if (nfqa[NFQA_CFG_FLAGS]) {
		spin_lock_bh(&queue->lock);
		queue->flags &= ~mask;
		queue->flags |= flags & mask;
		spin_unlock_bh(&queue->lock);
	}

err_out_unlock:
	rcu_read_unlock();
	return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call_rcu = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call_rcu = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
	[NFQNL_MSG_VERDICT_BATCH] = { .call_rcu = nfqnl_recv_verdict_batch,
				      .attr_count = NFQA_MAX,
				      .policy = nfqa_verdict_batch_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;
	struct net *net;
	struct nfnl_queue_net *q;

	if (!st)
		return NULL;

	net = seq_file_net(seq);
	q = nfnl_queue_pernet(net);
	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&q->instance_table[st->bucket]))
			return q->instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	h = h->next;
	while (!h) {
		struct nfnl_queue_net *q;

		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		q = nfnl_queue_pernet(net);
		h = q->instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;

	head = get_first(seq);
	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *s, loff_t *pos)
	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
	return get_idx(s, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
		   inst->queue_num,
		   inst->peer_portid, inst->queue_total,
		   inst->copy_mode, inst->copy_range,
		   inst->queue_dropped, inst->queue_user_dropped,
		   inst->id_sequence, 1);
	return 0;
}

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &nfqnl_seq_ops,
			    sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

#endif /* PROC_FS */

static int __net_init nfnl_queue_net_init(struct net *net)
{
	unsigned int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&q->instance_table[i]);

	spin_lock_init(&q->instances_lock);

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_queue", 0440,
			 net->nf.proc_netfilter, &nfqnl_file_ops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit nfnl_queue_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
}

static struct pernet_operations nfnl_queue_net_ops = {
	.init	= nfnl_queue_net_init,
	.exit	= nfnl_queue_net_exit,
	.id	= &nfnl_queue_net_id,
	.size	= sizeof(struct nfnl_queue_net),
};

static int __init nfnetlink_queue_init(void)
{
	int status;

	status = register_pernet_subsys(&nfnl_queue_net_ops);
	if (status < 0) {
		pr_err("nf_queue: failed to register pernet ops\n");
		goto out;
	}

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		pr_err("nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	register_netdevice_notifier(&nfqnl_dev_notifier);
	nf_register_queue_handler(&nfqh);
	return status;

cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_queue_net_ops);
out:
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handler();
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_queue_net_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);