/* Netfilter messages via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
 * (C) 2005-2017 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial netfilter messages via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/init.h>

#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);

#define nfnl_dereference_protected(id) \
        rcu_dereference_protected(table[(id)].subsys, \
                                  lockdep_nfnl_is_held((id)))

static char __initdata nfversion[] = "0.30";

static struct {
        struct mutex mutex;
        const struct nfnetlink_subsystem __rcu *subsys;
} table[NFNL_SUBSYS_COUNT];

static const int nfnl_group2type[NFNLGRP_MAX+1] = {
        [NFNLGRP_CONNTRACK_NEW]         = NFNL_SUBSYS_CTNETLINK,
        [NFNLGRP_CONNTRACK_UPDATE]      = NFNL_SUBSYS_CTNETLINK,
        [NFNLGRP_CONNTRACK_DESTROY]     = NFNL_SUBSYS_CTNETLINK,
        [NFNLGRP_CONNTRACK_EXP_NEW]     = NFNL_SUBSYS_CTNETLINK_EXP,
        [NFNLGRP_CONNTRACK_EXP_UPDATE]  = NFNL_SUBSYS_CTNETLINK_EXP,
        [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
        [NFNLGRP_NFTABLES]              = NFNL_SUBSYS_NFTABLES,
        [NFNLGRP_ACCT_QUOTA]            = NFNL_SUBSYS_ACCT,
        [NFNLGRP_NFTRACE]               = NFNL_SUBSYS_NFTABLES,
};

void nfnl_lock(__u8 subsys_id)
{
        mutex_lock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_lock);

void nfnl_unlock(__u8 subsys_id)
{
        mutex_unlock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_unlock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_nfnl_is_held(u8 subsys_id)
{
        return lockdep_is_held(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif

int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
        nfnl_lock(n->subsys_id);
        if (table[n->subsys_id].subsys) {
                nfnl_unlock(n->subsys_id);
                return -EBUSY;
        }
        rcu_assign_pointer(table[n->subsys_id].subsys, n);
        nfnl_unlock(n->subsys_id);

        return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);

int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
        nfnl_lock(n->subsys_id);
        table[n->subsys_id].subsys = NULL;
        nfnl_unlock(n->subsys_id);
        synchronize_rcu();
        return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);

static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type)
{
        u8 subsys_id = NFNL_SUBSYS_ID(type);

        if (subsys_id >= NFNL_SUBSYS_COUNT)
                return NULL;

        return rcu_dereference(table[subsys_id].subsys);
}

static inline const struct nfnl_callback *
nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss)
{
        u8 cb_id = NFNL_MSG_TYPE(type);

        if (cb_id >= ss->cb_count)
                return NULL;

        return &ss->cb[cb_id];
}
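/*
 * Illustrative sketch (not part of this file): a subsystem usually fills in
 * a static struct nfnetlink_subsystem and hands it to
 * nfnetlink_subsys_register() at module init time. The "foo"/"FOO" names
 * below are hypothetical; only the fields this dispatcher actually consumes
 * (subsys_id, cb_count, cb[] and the per-callback attr_count/policy/call
 * handlers) are shown.
 *
 *      static const struct nfnl_callback foo_cb[FOO_MSG_MAX] = {
 *              [FOO_MSG_GET] = {
 *                      .call       = foo_get,
 *                      .attr_count = FOO_ATTR_MAX,
 *                      .policy     = foo_policy,
 *              },
 *      };
 *
 *      static const struct nfnetlink_subsystem foo_subsys = {
 *              .subsys_id = NFNL_SUBSYS_FOO,
 *              .cb_count  = FOO_MSG_MAX,
 *              .cb        = foo_cb,
 *      };
 *
 *      err = nfnetlink_subsys_register(&foo_subsys);
 */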
int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
        return netlink_has_listeners(net->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);

int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
                   unsigned int group, int echo, gfp_t flags)
{
        return nlmsg_notify(net->nfnl, skb, portid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);

int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
{
        return netlink_set_err(net->nfnl, portid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);

int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
                      int flags)
{
        return netlink_unicast(net->nfnl, skb, portid, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);

/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        const struct nfnl_callback *nc;
        const struct nfnetlink_subsystem *ss;
        int type, err;

        /* All the messages must at least contain nfgenmsg */
        if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
                return 0;

        type = nlh->nlmsg_type;
replay:
        rcu_read_lock();
        ss = nfnetlink_get_subsys(type);
        if (!ss) {
#ifdef CONFIG_MODULES
                rcu_read_unlock();
                request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
                rcu_read_lock();
                ss = nfnetlink_get_subsys(type);
                if (!ss)
#endif
                {
                        rcu_read_unlock();
                        return -EINVAL;
                }
        }

        nc = nfnetlink_find_client(type, ss);
        if (!nc) {
                rcu_read_unlock();
                return -EINVAL;
        }

        {
                int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
                u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
                struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
                struct nlattr *attr = (void *)nlh + min_len;
                int attrlen = nlh->nlmsg_len - min_len;
                __u8 subsys_id = NFNL_SUBSYS_ID(type);

                err = nla_parse(cda, ss->cb[cb_id].attr_count,
                                attr, attrlen, ss->cb[cb_id].policy);
                if (err < 0) {
                        rcu_read_unlock();
                        return err;
                }

                if (nc->call_rcu) {
                        err = nc->call_rcu(net, net->nfnl, skb, nlh,
                                           (const struct nlattr **)cda);
                        rcu_read_unlock();
                } else {
                        rcu_read_unlock();
                        nfnl_lock(subsys_id);
                        if (nfnl_dereference_protected(subsys_id) != ss ||
                            nfnetlink_find_client(type, ss) != nc)
                                err = -EAGAIN;
                        else if (nc->call)
                                err = nc->call(net, net->nfnl, skb, nlh,
                                               (const struct nlattr **)cda);
                        else
                                err = -EINVAL;
                        nfnl_unlock(subsys_id);
                }
                if (err == -EAGAIN)
                        goto replay;
                return err;
        }
}

struct nfnl_err {
        struct list_head head;
        struct nlmsghdr *nlh;
        int err;
};

static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)
{
        struct nfnl_err *nfnl_err;

        nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
        if (nfnl_err == NULL)
                return -ENOMEM;

        nfnl_err->nlh = nlh;
        nfnl_err->err = err;
        list_add_tail(&nfnl_err->head, list);

        return 0;
}

static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
        list_del(&nfnl_err->head);
        kfree(nfnl_err);
}
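/*
 * Batch processing below queues one nfnl_err entry per message that needs an
 * acknowledgment or carries an error. The queued results are either delivered
 * to userspace in one go once the whole batch has been walked
 * (nfnl_err_deliver) or dropped when the batch is replayed or found to be
 * malformed (nfnl_err_reset).
 */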
static void nfnl_err_reset(struct list_head *err_list)
{
        struct nfnl_err *nfnl_err, *next;

        list_for_each_entry_safe(nfnl_err, next, err_list, head)
                nfnl_err_del(nfnl_err);
}

static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
        struct nfnl_err *nfnl_err, *next;

        list_for_each_entry_safe(nfnl_err, next, err_list, head) {
                netlink_ack(skb, nfnl_err->nlh, nfnl_err->err);
                nfnl_err_del(nfnl_err);
        }
}

enum {
        NFNL_BATCH_FAILURE      = (1 << 0),
        NFNL_BATCH_DONE         = (1 << 1),
        NFNL_BATCH_REPLAY       = (1 << 2),
};

static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
                                u16 subsys_id, u32 genid)
{
        struct sk_buff *oskb = skb;
        struct net *net = sock_net(skb->sk);
        const struct nfnetlink_subsystem *ss;
        const struct nfnl_callback *nc;
        LIST_HEAD(err_list);
        u32 status;
        int err;

        if (subsys_id >= NFNL_SUBSYS_COUNT)
                return netlink_ack(skb, nlh, -EINVAL);
replay:
        status = 0;

        skb = netlink_skb_clone(oskb, GFP_KERNEL);
        if (!skb)
                return netlink_ack(oskb, nlh, -ENOMEM);

        nfnl_lock(subsys_id);
        ss = nfnl_dereference_protected(subsys_id);
        if (!ss) {
#ifdef CONFIG_MODULES
                nfnl_unlock(subsys_id);
                request_module("nfnetlink-subsys-%d", subsys_id);
                nfnl_lock(subsys_id);
                ss = nfnl_dereference_protected(subsys_id);
                if (!ss)
#endif
                {
                        nfnl_unlock(subsys_id);
                        netlink_ack(oskb, nlh, -EOPNOTSUPP);
                        return kfree_skb(skb);
                }
        }

        if (!ss->commit || !ss->abort) {
                nfnl_unlock(subsys_id);
                netlink_ack(oskb, nlh, -EOPNOTSUPP);
                return kfree_skb(skb);
        }

        if (genid && ss->valid_genid && !ss->valid_genid(net, genid)) {
                nfnl_unlock(subsys_id);
                netlink_ack(oskb, nlh, -ERESTART);
                return kfree_skb(skb);
        }

        while (skb->len >= nlmsg_total_size(0)) {
                int msglen, type;

                nlh = nlmsg_hdr(skb);
                err = 0;

                if (nlh->nlmsg_len < NLMSG_HDRLEN ||
                    skb->len < nlh->nlmsg_len ||
                    nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
                        nfnl_err_reset(&err_list);
                        status |= NFNL_BATCH_FAILURE;
                        goto done;
                }

                /* Only requests are handled by the kernel */
                if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
                        err = -EINVAL;
                        goto ack;
                }

                type = nlh->nlmsg_type;
                if (type == NFNL_MSG_BATCH_BEGIN) {
                        /* Malformed: Batch begin twice */
                        nfnl_err_reset(&err_list);
                        status |= NFNL_BATCH_FAILURE;
                        goto done;
                } else if (type == NFNL_MSG_BATCH_END) {
                        status |= NFNL_BATCH_DONE;
                        goto done;
                } else if (type < NLMSG_MIN_TYPE) {
                        err = -EINVAL;
                        goto ack;
                }

                /* We only accept a batch with messages for the same
                 * subsystem.
                 */
                if (NFNL_SUBSYS_ID(type) != subsys_id) {
                        err = -EINVAL;
                        goto ack;
                }

                nc = nfnetlink_find_client(type, ss);
                if (!nc) {
                        err = -EINVAL;
                        goto ack;
                }

                {
                        int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
                        u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
                        struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
                        struct nlattr *attr = (void *)nlh + min_len;
                        int attrlen = nlh->nlmsg_len - min_len;

                        err = nla_parse(cda, ss->cb[cb_id].attr_count,
                                        attr, attrlen, ss->cb[cb_id].policy);
                        if (err < 0)
                                goto ack;

                        if (nc->call_batch) {
                                err = nc->call_batch(net, net->nfnl, skb, nlh,
                                                     (const struct nlattr **)cda);
                        }

                        /* The lock was released to autoload some module, we
                         * have to abort and start from scratch using the
                         * original skb.
                         */
                        if (err == -EAGAIN) {
                                status |= NFNL_BATCH_REPLAY;
                                goto next;
                        }
                }
ack:
                if (nlh->nlmsg_flags & NLM_F_ACK || err) {
                        /* Errors are delivered once the full batch has been
                         * processed, this avoids that the same error is
                         * reported several times when replaying the batch.
                         */
                        if (nfnl_err_add(&err_list, nlh, err) < 0) {
                                /* We failed to enqueue an error, reset the
                                 * list of errors and send OOM to userspace
                                 * pointing to the batch header.
                                 */
                                nfnl_err_reset(&err_list);
                                netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM);
                                status |= NFNL_BATCH_FAILURE;
                                goto done;
                        }
                        /* We don't stop processing the batch on errors, thus,
                         * userspace gets all the errors that the batch
                         * triggers.
                         */
                        if (err)
                                status |= NFNL_BATCH_FAILURE;
                }
next:
                msglen = NLMSG_ALIGN(nlh->nlmsg_len);
                if (msglen > skb->len)
                        msglen = skb->len;
                skb_pull(skb, msglen);
        }
done:
        if (status & NFNL_BATCH_REPLAY) {
                ss->abort(net, oskb);
                nfnl_err_reset(&err_list);
                nfnl_unlock(subsys_id);
                kfree_skb(skb);
                goto replay;
        } else if (status == NFNL_BATCH_DONE) {
                ss->commit(net, oskb);
        } else {
                ss->abort(net, oskb);
        }

        nfnl_err_deliver(&err_list, oskb);
        nfnl_unlock(subsys_id);
        kfree_skb(skb);
}

static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
        [NFNL_BATCH_GENID]      = { .type = NLA_U32 },
};

static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
        struct nlattr *attr = (void *)nlh + min_len;
        struct nlattr *cda[NFNL_BATCH_MAX + 1];
        int attrlen = nlh->nlmsg_len - min_len;
        struct nfgenmsg *nfgenmsg;
        int msglen, err;
        u32 gen_id = 0;
        u16 res_id;

        msglen = NLMSG_ALIGN(nlh->nlmsg_len);
        if (msglen > skb->len)
                msglen = skb->len;

        if (nlh->nlmsg_len < NLMSG_HDRLEN ||
            skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
                return;

        err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy);
        if (err < 0) {
                netlink_ack(skb, nlh, err);
                return;
        }
        if (cda[NFNL_BATCH_GENID])
                gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID]));

        nfgenmsg = nlmsg_data(nlh);
        skb_pull(skb, msglen);
        /* Work around old nft using host byte order */
        if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
                res_id = NFNL_SUBSYS_NFTABLES;
        else
                res_id = ntohs(nfgenmsg->res_id);

        nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
}

static void nfnetlink_rcv(struct sk_buff *skb)
{
        struct nlmsghdr *nlh = nlmsg_hdr(skb);

        if (nlh->nlmsg_len < NLMSG_HDRLEN ||
            skb->len < nlh->nlmsg_len)
                return;

        if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
                netlink_ack(skb, nlh, -EPERM);
                return;
        }

        if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
                nfnetlink_rcv_skb_batch(skb, nlh);
        else
                netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
}

#ifdef CONFIG_MODULES
static int nfnetlink_bind(struct net *net, int group)
{
        const struct nfnetlink_subsystem *ss;
        int type;

        if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
                return 0;

        type = nfnl_group2type[group];

        rcu_read_lock();
        ss = nfnetlink_get_subsys(type << 8);
        rcu_read_unlock();
        if (!ss)
                request_module("nfnetlink-subsys-%d", type);
        return 0;
}
#endif
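/*
 * Per-network-namespace setup: create the NETLINK_NETFILTER kernel socket
 * with NFNLGRP_MAX multicast groups, wire up nfnetlink_rcv() as the input
 * handler and, when module support is enabled, nfnetlink_bind() so that
 * subscribing to a group can autoload the backing subsystem module.
 */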
static int __net_init nfnetlink_net_init(struct net *net)
{
        struct sock *nfnl;
        struct netlink_kernel_cfg cfg = {
                .groups = NFNLGRP_MAX,
                .input  = nfnetlink_rcv,
#ifdef CONFIG_MODULES
                .bind   = nfnetlink_bind,
#endif
        };

        nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
        if (!nfnl)
                return -ENOMEM;
        net->nfnl_stash = nfnl;
        rcu_assign_pointer(net->nfnl, nfnl);
        return 0;
}

static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
        struct net *net;

        list_for_each_entry(net, net_exit_list, exit_list)
                RCU_INIT_POINTER(net->nfnl, NULL);
        synchronize_net();
        list_for_each_entry(net, net_exit_list, exit_list)
                netlink_kernel_release(net->nfnl_stash);
}

static struct pernet_operations nfnetlink_net_ops = {
        .init           = nfnetlink_net_init,
        .exit_batch     = nfnetlink_net_exit_batch,
};

static int __init nfnetlink_init(void)
{
        int i;

        for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
                BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);

        for (i = 0; i < NFNL_SUBSYS_COUNT; i++)
                mutex_init(&table[i].mutex);

        pr_info("Netfilter messages via NETLINK v%s.\n", nfversion);
        return register_pernet_subsys(&nfnetlink_net_ops);
}

static void __exit nfnetlink_exit(void)
{
        pr_info("Removing netfilter NETLINK layer.\n");
        unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);