/* Netfilter messages via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
 * (C) 2005-2017 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial netfilter messages via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/init.h>
#include <linux/sched/signal.h>

#include <net/netlink.h>
#include <net/netns/generic.h>
#include <linux/netfilter/nfnetlink.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
MODULE_DESCRIPTION("Netfilter messages via netlink socket");

#define nfnl_dereference_protected(id) \
	rcu_dereference_protected(table[(id)].subsys, \
				  lockdep_nfnl_is_held((id)))

#define NFNL_MAX_ATTR_COUNT 32

static unsigned int nfnetlink_pernet_id __read_mostly;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static DEFINE_SPINLOCK(nfnl_grp_active_lock);
#endif

struct nfnl_net {
	struct sock *nfnl;
};

static struct {
	struct mutex mutex;
	const struct nfnetlink_subsystem __rcu *subsys;
} table[NFNL_SUBSYS_COUNT];

static struct lock_class_key nfnl_lockdep_keys[NFNL_SUBSYS_COUNT];

static const char *const nfnl_lockdep_names[NFNL_SUBSYS_COUNT] = {
	[NFNL_SUBSYS_NONE] = "nfnl_subsys_none",
	[NFNL_SUBSYS_CTNETLINK] = "nfnl_subsys_ctnetlink",
	[NFNL_SUBSYS_CTNETLINK_EXP] = "nfnl_subsys_ctnetlink_exp",
	[NFNL_SUBSYS_QUEUE] = "nfnl_subsys_queue",
	[NFNL_SUBSYS_ULOG] = "nfnl_subsys_ulog",
	[NFNL_SUBSYS_OSF] = "nfnl_subsys_osf",
	[NFNL_SUBSYS_IPSET] = "nfnl_subsys_ipset",
	[NFNL_SUBSYS_ACCT] = "nfnl_subsys_acct",
	[NFNL_SUBSYS_CTNETLINK_TIMEOUT] = "nfnl_subsys_cttimeout",
	[NFNL_SUBSYS_CTHELPER] = "nfnl_subsys_cthelper",
	[NFNL_SUBSYS_NFTABLES] = "nfnl_subsys_nftables",
	[NFNL_SUBSYS_NFT_COMPAT] = "nfnl_subsys_nftcompat",
	[NFNL_SUBSYS_HOOK] = "nfnl_subsys_hook",
};

static const int nfnl_group2type[NFNLGRP_MAX+1] = {
	[NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_UPDATE] = NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_DESTROY] = NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_NFTABLES] = NFNL_SUBSYS_NFTABLES,
	[NFNLGRP_ACCT_QUOTA] = NFNL_SUBSYS_ACCT,
	[NFNLGRP_NFTRACE] = NFNL_SUBSYS_NFTABLES,
};

static struct nfnl_net *nfnl_pernet(struct net *net)
{
	return net_generic(net, nfnetlink_pernet_id);
}

void nfnl_lock(__u8 subsys_id)
{
	mutex_lock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_lock);

void nfnl_unlock(__u8 subsys_id)
{
	mutex_unlock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_unlock);
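
/*
 * Each subsystem slot in 'table' pairs an RCU-protected subsystem pointer
 * with its own mutex. The receive path dereferences the pointer under RCU
 * and falls back to the mutex for NFNL_CB_MUTEX callbacks; subsystems use
 * the two helpers above to serialize against (un)registration. Illustrative
 * sketch only, not part of this file:
 *
 *	nfnl_lock(NFNL_SUBSYS_CTNETLINK);
 *	... work that must not race with subsystem unregistration ...
 *	nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
 */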

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_nfnl_is_held(u8 subsys_id)
{
	return lockdep_is_held(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif

int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
	u8 cb_id;

	/* Sanity-check attr_count size to avoid stack buffer overflow. */
	for (cb_id = 0; cb_id < n->cb_count; cb_id++)
		if (WARN_ON(n->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT))
			return -EINVAL;

	nfnl_lock(n->subsys_id);
	if (table[n->subsys_id].subsys) {
		nfnl_unlock(n->subsys_id);
		return -EBUSY;
	}
	rcu_assign_pointer(table[n->subsys_id].subsys, n);
	nfnl_unlock(n->subsys_id);

	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);

int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
	nfnl_lock(n->subsys_id);
	table[n->subsys_id].subsys = NULL;
	nfnl_unlock(n->subsys_id);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);

static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type)
{
	u8 subsys_id = NFNL_SUBSYS_ID(type);

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return NULL;

	return rcu_dereference(table[subsys_id].subsys);
}

static inline const struct nfnl_callback *
nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss)
{
	u8 cb_id = NFNL_MSG_TYPE(type);

	if (cb_id >= ss->cb_count)
		return NULL;

	return &ss->cb[cb_id];
}

int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return netlink_has_listeners(nfnlnet->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);

int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
		   unsigned int group, int echo, gfp_t flags)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return nlmsg_notify(nfnlnet->nfnl, skb, portid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);

int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	return netlink_set_err(nfnlnet->nfnl, portid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);

int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);
	int err;

	err = nlmsg_unicast(nfnlnet->nfnl, skb, portid);
	if (err == -EAGAIN)
		err = -ENOBUFS;

	return err;
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);

void nfnetlink_broadcast(struct net *net, struct sk_buff *skb, __u32 portid,
			 __u32 group, gfp_t allocation)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);

	netlink_broadcast(nfnlnet->nfnl, skb, portid, group, allocation);
}
EXPORT_SYMBOL_GPL(nfnetlink_broadcast);
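
/*
 * The helpers above wrap the per-netns NETLINK_NETFILTER socket. A typical
 * event notification from a subsystem looks roughly like this (illustrative
 * sketch only; the skb construction is subsystem specific):
 *
 *	if (!nfnetlink_has_listeners(net, NFNLGRP_CONNTRACK_NEW))
 *		return 0;
 *	skb = ...;	build the subsystem specific message here
 *	return nfnetlink_send(skb, net, 0, NFNLGRP_CONNTRACK_NEW, 0, GFP_ATOMIC);
 */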

/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	const struct nfnl_callback *nc;
	const struct nfnetlink_subsystem *ss;
	int type, err;

	/* All the messages must at least contain nfgenmsg */
	if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
		return 0;

	type = nlh->nlmsg_type;
replay:
	rcu_read_lock();

	ss = nfnetlink_get_subsys(type);
	if (!ss) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
		rcu_read_lock();
		ss = nfnetlink_get_subsys(type);
		if (!ss)
#endif
		{
			rcu_read_unlock();
			return -EINVAL;
		}
	}

	nc = nfnetlink_find_client(type, ss);
	if (!nc) {
		rcu_read_unlock();
		return -EINVAL;
	}

	{
		int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
		struct nfnl_net *nfnlnet = nfnl_pernet(net);
		u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
		struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
		struct nlattr *attr = (void *)nlh + min_len;
		int attrlen = nlh->nlmsg_len - min_len;
		__u8 subsys_id = NFNL_SUBSYS_ID(type);
		struct nfnl_info info = {
			.net = net,
			.sk = nfnlnet->nfnl,
			.nlh = nlh,
			.nfmsg = nlmsg_data(nlh),
			.extack = extack,
		};

		/* Sanity-check NFNL_MAX_ATTR_COUNT */
		if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
			rcu_read_unlock();
			return -ENOMEM;
		}

		err = nla_parse_deprecated(cda, ss->cb[cb_id].attr_count,
					   attr, attrlen,
					   ss->cb[cb_id].policy, extack);
		if (err < 0) {
			rcu_read_unlock();
			return err;
		}

		if (!nc->call) {
			rcu_read_unlock();
			return -EINVAL;
		}

		switch (nc->type) {
		case NFNL_CB_RCU:
			err = nc->call(skb, &info, (const struct nlattr **)cda);
			rcu_read_unlock();
			break;
		case NFNL_CB_MUTEX:
			rcu_read_unlock();
			nfnl_lock(subsys_id);
			if (nfnl_dereference_protected(subsys_id) != ss ||
			    nfnetlink_find_client(type, ss) != nc) {
				nfnl_unlock(subsys_id);
				err = -EAGAIN;
				break;
			}
			err = nc->call(skb, &info, (const struct nlattr **)cda);
			nfnl_unlock(subsys_id);
			break;
		default:
			rcu_read_unlock();
			err = -EINVAL;
			break;
		}
		if (err == -EAGAIN)
			goto replay;
		return err;
	}
}

struct nfnl_err {
	struct list_head head;
	struct nlmsghdr *nlh;
	int err;
	struct netlink_ext_ack extack;
};

static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err,
			const struct netlink_ext_ack *extack)
{
	struct nfnl_err *nfnl_err;

	nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
	if (nfnl_err == NULL)
		return -ENOMEM;

	nfnl_err->nlh = nlh;
	nfnl_err->err = err;
	nfnl_err->extack = *extack;
	list_add_tail(&nfnl_err->head, list);

	return 0;
}

static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
	list_del(&nfnl_err->head);
	kfree(nfnl_err);
}

static void nfnl_err_reset(struct list_head *err_list)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head)
		nfnl_err_del(nfnl_err);
}
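
/* Deliver all errors and acks queued while processing a batch back to
 * userspace and release the list.
 */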
static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head) {
		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err,
			    &nfnl_err->extack);
		nfnl_err_del(nfnl_err);
	}
}

enum {
	NFNL_BATCH_FAILURE = (1 << 0),
	NFNL_BATCH_DONE = (1 << 1),
	NFNL_BATCH_REPLAY = (1 << 2),
};

static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
				u16 subsys_id, u32 genid)
{
	struct sk_buff *oskb = skb;
	struct net *net = sock_net(skb->sk);
	const struct nfnetlink_subsystem *ss;
	const struct nfnl_callback *nc;
	struct netlink_ext_ack extack;
	LIST_HEAD(err_list);
	u32 status;
	int err;

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return netlink_ack(skb, nlh, -EINVAL, NULL);
replay:
	status = 0;
replay_abort:
	skb = netlink_skb_clone(oskb, GFP_KERNEL);
	if (!skb)
		return netlink_ack(oskb, nlh, -ENOMEM, NULL);

	nfnl_lock(subsys_id);
	ss = nfnl_dereference_protected(subsys_id);
	if (!ss) {
#ifdef CONFIG_MODULES
		nfnl_unlock(subsys_id);
		request_module("nfnetlink-subsys-%d", subsys_id);
		nfnl_lock(subsys_id);
		ss = nfnl_dereference_protected(subsys_id);
		if (!ss)
#endif
		{
			nfnl_unlock(subsys_id);
			netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
			return kfree_skb(skb);
		}
	}

	if (!ss->valid_genid || !ss->commit || !ss->abort) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
		return kfree_skb(skb);
	}

	if (!try_module_get(ss->owner)) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
		return kfree_skb(skb);
	}

	if (!ss->valid_genid(net, genid)) {
		module_put(ss->owner);
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -ERESTART, NULL);
		return kfree_skb(skb);
	}

	nfnl_unlock(subsys_id);

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen, type;

		if (fatal_signal_pending(current)) {
			nfnl_err_reset(&err_list);
			err = -EINTR;
			status = NFNL_BATCH_FAILURE;
			goto done;
		}

		memset(&extack, 0, sizeof(extack));
		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN ||
		    skb->len < nlh->nlmsg_len ||
		    nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		}

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
			err = -EINVAL;
			goto ack;
		}

		type = nlh->nlmsg_type;
		if (type == NFNL_MSG_BATCH_BEGIN) {
			/* Malformed: Batch begin twice */
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		} else if (type == NFNL_MSG_BATCH_END) {
			status |= NFNL_BATCH_DONE;
			goto done;
		} else if (type < NLMSG_MIN_TYPE) {
			err = -EINVAL;
			goto ack;
		}

		/* We only accept a batch with messages for the same
		 * subsystem.
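		 * A message for another subsystem is rejected with -EINVAL,
		 * but processing of the remaining batch messages continues.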
		 */
		if (NFNL_SUBSYS_ID(type) != subsys_id) {
			err = -EINVAL;
			goto ack;
		}

		nc = nfnetlink_find_client(type, ss);
		if (!nc) {
			err = -EINVAL;
			goto ack;
		}

		if (nc->type != NFNL_CB_BATCH) {
			err = -EINVAL;
			goto ack;
		}

		{
			int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
			struct nfnl_net *nfnlnet = nfnl_pernet(net);
			struct nlattr *cda[NFNL_MAX_ATTR_COUNT + 1];
			struct nlattr *attr = (void *)nlh + min_len;
			u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
			int attrlen = nlh->nlmsg_len - min_len;
			struct nfnl_info info = {
				.net = net,
				.sk = nfnlnet->nfnl,
				.nlh = nlh,
				.nfmsg = nlmsg_data(nlh),
				.extack = &extack,
			};

			/* Sanity-check NFNL_MAX_ATTR_COUNT */
			if (ss->cb[cb_id].attr_count > NFNL_MAX_ATTR_COUNT) {
				err = -ENOMEM;
				goto ack;
			}

			err = nla_parse_deprecated(cda,
						   ss->cb[cb_id].attr_count,
						   attr, attrlen,
						   ss->cb[cb_id].policy, NULL);
			if (err < 0)
				goto ack;

			err = nc->call(skb, &info, (const struct nlattr **)cda);

			/* The lock was released to autoload some module, we
			 * have to abort and start from scratch using the
			 * original skb.
			 */
			if (err == -EAGAIN) {
				status |= NFNL_BATCH_REPLAY;
				goto done;
			}
		}
ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
			/* Errors are delivered once the full batch has been
			 * processed, this avoids that the same error is
			 * reported several times when replaying the batch.
			 */
			if (nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
				/* We failed to enqueue an error, reset the
				 * list of errors and send OOM to userspace
				 * pointing to the batch header.
				 */
				nfnl_err_reset(&err_list);
				netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM,
					    NULL);
				status |= NFNL_BATCH_FAILURE;
				goto done;
			}
			/* We don't stop processing the batch on errors, thus,
			 * userspace gets all the errors that the batch
			 * triggers.
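			 * An error only marks the batch as failed here; the
			 * whole transaction is aborted once the batch has
			 * been walked.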
			 */
			if (err)
				status |= NFNL_BATCH_FAILURE;
		}

		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}
done:
	if (status & NFNL_BATCH_REPLAY) {
		ss->abort(net, oskb, NFNL_ABORT_AUTOLOAD);
		nfnl_err_reset(&err_list);
		kfree_skb(skb);
		module_put(ss->owner);
		goto replay;
	} else if (status == NFNL_BATCH_DONE) {
		err = ss->commit(net, oskb);
		if (err == -EAGAIN) {
			status |= NFNL_BATCH_REPLAY;
			goto done;
		} else if (err) {
			ss->abort(net, oskb, NFNL_ABORT_NONE);
			netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
		}
	} else {
		enum nfnl_abort_action abort_action;

		if (status & NFNL_BATCH_FAILURE)
			abort_action = NFNL_ABORT_NONE;
		else
			abort_action = NFNL_ABORT_VALIDATE;

		err = ss->abort(net, oskb, abort_action);
		if (err == -EAGAIN) {
			nfnl_err_reset(&err_list);
			kfree_skb(skb);
			module_put(ss->owner);
			status |= NFNL_BATCH_FAILURE;
			goto replay_abort;
		}
	}
	if (ss->cleanup)
		ss->cleanup(net);

	nfnl_err_deliver(&err_list, oskb);
	kfree_skb(skb);
	module_put(ss->owner);
}

static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
	[NFNL_BATCH_GENID] = { .type = NLA_U32 },
};

static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
	struct nlattr *attr = (void *)nlh + min_len;
	struct nlattr *cda[NFNL_BATCH_MAX + 1];
	int attrlen = nlh->nlmsg_len - min_len;
	struct nfgenmsg *nfgenmsg;
	int msglen, err;
	u32 gen_id = 0;
	u16 res_id;

	msglen = NLMSG_ALIGN(nlh->nlmsg_len);
	if (msglen > skb->len)
		msglen = skb->len;

	if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
		return;

	err = nla_parse_deprecated(cda, NFNL_BATCH_MAX, attr, attrlen,
				   nfnl_batch_policy, NULL);
	if (err < 0) {
		netlink_ack(skb, nlh, err, NULL);
		return;
	}
	if (cda[NFNL_BATCH_GENID])
		gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID]));

	nfgenmsg = nlmsg_data(nlh);
	skb_pull(skb, msglen);
	/* Work around old nft using host byte order */
	if (nfgenmsg->res_id == (__force __be16)NFNL_SUBSYS_NFTABLES)
		res_id = NFNL_SUBSYS_NFTABLES;
	else
		res_id = ntohs(nfgenmsg->res_id);

	nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
}

static void nfnetlink_rcv(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);

	if (skb->len < NLMSG_HDRLEN ||
	    nlh->nlmsg_len < NLMSG_HDRLEN ||
	    skb->len < nlh->nlmsg_len)
		return;

	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
		netlink_ack(skb, nlh, -EPERM, NULL);
		return;
	}

	if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
		nfnetlink_rcv_skb_batch(skb, nlh);
	else
		netlink_rcv_skb(skb, nfnetlink_rcv_msg);
}

static void nfnetlink_bind_event(struct net *net, unsigned int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	int type, group_bit;
	u8 v;

	/* All NFNLGRP_CONNTRACK_* group bits fit into u8.
	 * The other groups are not relevant and can be ignored.
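	 * The resulting bitmask is cached in net->ct.ctnetlink_has_listener,
	 * updated under nfnl_grp_active_lock here and in nfnetlink_unbind(),
	 * and read locklessly by the conntrack event path.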
	 */
	if (group >= 8)
		return;

	type = nfnl_group2type[group];

	switch (type) {
	case NFNL_SUBSYS_CTNETLINK:
		break;
	case NFNL_SUBSYS_CTNETLINK_EXP:
		break;
	default:
		return;
	}

	group_bit = (1 << group);

	spin_lock(&nfnl_grp_active_lock);
	v = READ_ONCE(net->ct.ctnetlink_has_listener);
	if ((v & group_bit) == 0) {
		v |= group_bit;

		/* read concurrently without nfnl_grp_active_lock held. */
		WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
	}

	spin_unlock(&nfnl_grp_active_lock);
#endif
}

static int nfnetlink_bind(struct net *net, int group)
{
	const struct nfnetlink_subsystem *ss;
	int type;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return 0;

	type = nfnl_group2type[group];

	rcu_read_lock();
	ss = nfnetlink_get_subsys(type << 8);
	rcu_read_unlock();
	if (!ss)
		request_module_nowait("nfnetlink-subsys-%d", type);

	nfnetlink_bind_event(net, group);
	return 0;
}

static void nfnetlink_unbind(struct net *net, int group)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	int type, group_bit;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return;

	type = nfnl_group2type[group];

	switch (type) {
	case NFNL_SUBSYS_CTNETLINK:
		break;
	case NFNL_SUBSYS_CTNETLINK_EXP:
		break;
	default:
		return;
	}

	/* ctnetlink_has_listener is u8 */
	if (group >= 8)
		return;

	group_bit = (1 << group);

	spin_lock(&nfnl_grp_active_lock);
	if (!nfnetlink_has_listeners(net, group)) {
		u8 v = READ_ONCE(net->ct.ctnetlink_has_listener);

		v &= ~group_bit;

		/* read concurrently without nfnl_grp_active_lock held. */
		WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
	}
	spin_unlock(&nfnl_grp_active_lock);
#endif
}

static int __net_init nfnetlink_net_init(struct net *net)
{
	struct nfnl_net *nfnlnet = nfnl_pernet(net);
	struct netlink_kernel_cfg cfg = {
		.groups = NFNLGRP_MAX,
		.input = nfnetlink_rcv,
		.bind = nfnetlink_bind,
		.unbind = nfnetlink_unbind,
	};

	nfnlnet->nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
	if (!nfnlnet->nfnl)
		return -ENOMEM;
	return 0;
}

static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
	struct nfnl_net *nfnlnet;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		nfnlnet = nfnl_pernet(net);

		netlink_kernel_release(nfnlnet->nfnl);
	}
}

static struct pernet_operations nfnetlink_net_ops = {
	.init = nfnetlink_net_init,
	.exit_batch = nfnetlink_net_exit_batch,
	.id = &nfnetlink_pernet_id,
	.size = sizeof(struct nfnl_net),
};

static int __init nfnetlink_init(void)
{
	int i;

	for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
		BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);

	for (i = 0; i < NFNL_SUBSYS_COUNT; i++)
		__mutex_init(&table[i].mutex, nfnl_lockdep_names[i],
			     &nfnl_lockdep_keys[i]);

	return register_pernet_subsys(&nfnetlink_net_ops);
}

static void __exit nfnetlink_exit(void)
{
	unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);