/* Netfilter messages via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
 * (C) 2005-2017 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial netfilter messages via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/init.h>

#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);

#define nfnl_dereference_protected(id) \
	rcu_dereference_protected(table[(id)].subsys, \
				  lockdep_nfnl_is_held((id)))

static struct {
	struct mutex mutex;
	const struct nfnetlink_subsystem __rcu *subsys;
} table[NFNL_SUBSYS_COUNT];

static const int nfnl_group2type[NFNLGRP_MAX+1] = {
	[NFNLGRP_CONNTRACK_NEW]		= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_UPDATE]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_DESTROY]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_EXP_NEW]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_UPDATE]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_DESTROY]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_NFTABLES]		= NFNL_SUBSYS_NFTABLES,
	[NFNLGRP_ACCT_QUOTA]		= NFNL_SUBSYS_ACCT,
	[NFNLGRP_NFTRACE]		= NFNL_SUBSYS_NFTABLES,
};

void nfnl_lock(__u8 subsys_id)
{
	mutex_lock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_lock);

void nfnl_unlock(__u8 subsys_id)
{
	mutex_unlock(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(nfnl_unlock);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_nfnl_is_held(u8 subsys_id)
{
	return lockdep_is_held(&table[subsys_id].mutex);
}
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif

int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
	nfnl_lock(n->subsys_id);
	if (table[n->subsys_id].subsys) {
		nfnl_unlock(n->subsys_id);
		return -EBUSY;
	}
	rcu_assign_pointer(table[n->subsys_id].subsys, n);
	nfnl_unlock(n->subsys_id);

	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);

int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
	nfnl_lock(n->subsys_id);
	table[n->subsys_id].subsys = NULL;
	nfnl_unlock(n->subsys_id);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);

static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type)
{
	u8 subsys_id = NFNL_SUBSYS_ID(type);

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return NULL;

	return rcu_dereference(table[subsys_id].subsys);
}

static inline const struct nfnl_callback *
nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss)
{
	u8 cb_id = NFNL_MSG_TYPE(type);

	if (cb_id >= ss->cb_count)
		return NULL;

	return &ss->cb[cb_id];
}

int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
	return netlink_has_listeners(net->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);

int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
		   unsigned int group, int echo, gfp_t flags)
{
	return nlmsg_notify(net->nfnl, skb, portid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);

int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
{
	return netlink_set_err(net->nfnl, portid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);

int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
		      int flags)
{
	return netlink_unicast(net->nfnl, skb, portid, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);

/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	const struct nfnl_callback *nc;
	const struct nfnetlink_subsystem *ss;
	int type, err;

	/* All the messages must at least contain nfgenmsg */
	if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
		return 0;

	type = nlh->nlmsg_type;
replay:
	rcu_read_lock();
	ss = nfnetlink_get_subsys(type);
	if (!ss) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
		rcu_read_lock();
		ss = nfnetlink_get_subsys(type);
		if (!ss)
#endif
		{
			rcu_read_unlock();
			return -EINVAL;
		}
	}

	nc = nfnetlink_find_client(type, ss);
	if (!nc) {
		rcu_read_unlock();
		return -EINVAL;
	}

	{
		int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
		u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
		struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
		struct nlattr *attr = (void *)nlh + min_len;
		int attrlen = nlh->nlmsg_len - min_len;
		__u8 subsys_id = NFNL_SUBSYS_ID(type);

		err = nla_parse(cda, ss->cb[cb_id].attr_count, attr, attrlen,
				ss->cb[cb_id].policy, extack);
		if (err < 0) {
			rcu_read_unlock();
			return err;
		}

		if (nc->call_rcu) {
			err = nc->call_rcu(net, net->nfnl, skb, nlh,
					   (const struct nlattr **)cda,
					   extack);
			rcu_read_unlock();
		} else {
			rcu_read_unlock();
			nfnl_lock(subsys_id);
			if (nfnl_dereference_protected(subsys_id) != ss ||
			    nfnetlink_find_client(type, ss) != nc)
				err = -EAGAIN;
			else if (nc->call)
				err = nc->call(net, net->nfnl, skb, nlh,
					       (const struct nlattr **)cda,
					       extack);
			else
				err = -EINVAL;
			nfnl_unlock(subsys_id);
		}
		if (err == -EAGAIN)
			goto replay;
		return err;
	}
}

struct nfnl_err {
	struct list_head head;
	struct nlmsghdr *nlh;
	int err;
	struct netlink_ext_ack extack;
};

static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err,
			const struct netlink_ext_ack *extack)
{
	struct nfnl_err *nfnl_err;

	nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
	if (nfnl_err == NULL)
		return -ENOMEM;

	nfnl_err->nlh = nlh;
	nfnl_err->err = err;
	nfnl_err->extack = *extack;
	list_add_tail(&nfnl_err->head, list);

	return 0;
}

static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
	list_del(&nfnl_err->head);
	kfree(nfnl_err);
}

static void nfnl_err_reset(struct list_head *err_list)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head)
		nfnl_err_del(nfnl_err);
}

static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head) {
		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err,
			    &nfnl_err->extack);
		nfnl_err_del(nfnl_err);
	}
}

enum {
	NFNL_BATCH_FAILURE	= (1 << 0),
	NFNL_BATCH_DONE		= (1 << 1),
	NFNL_BATCH_REPLAY	= (1 << 2),
};

static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
				u16 subsys_id, u32 genid)
{
	struct sk_buff *oskb = skb;
	struct net *net = sock_net(skb->sk);
	const struct nfnetlink_subsystem *ss;
	const struct nfnl_callback *nc;
	struct netlink_ext_ack extack;
	LIST_HEAD(err_list);
	u32 status;
	int err;

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return netlink_ack(skb, nlh, -EINVAL, NULL);
replay:
	status = 0;

	skb = netlink_skb_clone(oskb, GFP_KERNEL);
	if (!skb)
		return netlink_ack(oskb, nlh, -ENOMEM, NULL);

	nfnl_lock(subsys_id);
	ss = nfnl_dereference_protected(subsys_id);
	if (!ss) {
#ifdef CONFIG_MODULES
		nfnl_unlock(subsys_id);
		request_module("nfnetlink-subsys-%d", subsys_id);
		nfnl_lock(subsys_id);
		ss = nfnl_dereference_protected(subsys_id);
		if (!ss)
#endif
		{
			nfnl_unlock(subsys_id);
			netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
			return kfree_skb(skb);
		}
	}

	if (!ss->commit || !ss->abort) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP, NULL);
		return kfree_skb(skb);
	}

	if (genid && ss->valid_genid && !ss->valid_genid(net, genid)) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -ERESTART, NULL);
		return kfree_skb(skb);
	}

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen, type;

		memset(&extack, 0, sizeof(extack));
		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN ||
		    skb->len < nlh->nlmsg_len ||
		    nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		}

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
			err = -EINVAL;
			goto ack;
		}

		type = nlh->nlmsg_type;
		if (type == NFNL_MSG_BATCH_BEGIN) {
			/* Malformed: Batch begin twice */
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		} else if (type == NFNL_MSG_BATCH_END) {
			status |= NFNL_BATCH_DONE;
			goto done;
		} else if (type < NLMSG_MIN_TYPE) {
			err = -EINVAL;
			goto ack;
		}

		/* We only accept a batch with messages for the same
		 * subsystem.
		 */
		if (NFNL_SUBSYS_ID(type) != subsys_id) {
			err = -EINVAL;
			goto ack;
		}

		nc = nfnetlink_find_client(type, ss);
		if (!nc) {
			err = -EINVAL;
			goto ack;
		}

		{
			int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
			u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
			struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
			struct nlattr *attr = (void *)nlh + min_len;
			int attrlen = nlh->nlmsg_len - min_len;

			err = nla_parse(cda, ss->cb[cb_id].attr_count, attr,
					attrlen, ss->cb[cb_id].policy, NULL);
			if (err < 0)
				goto ack;

			if (nc->call_batch) {
				err = nc->call_batch(net, net->nfnl, skb, nlh,
						     (const struct nlattr **)cda,
						     &extack);
			}

			/* The lock was released to autoload some module, we
			 * have to abort and start from scratch using the
			 * original skb.
			 */
			if (err == -EAGAIN) {
				status |= NFNL_BATCH_REPLAY;
				goto next;
			}
		}
ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
			/* Errors are delivered once the full batch has been
			 * processed, this avoids that the same error is
			 * reported several times when replaying the batch.
			 */
			if (nfnl_err_add(&err_list, nlh, err, &extack) < 0) {
				/* We failed to enqueue an error, reset the
				 * list of errors and send OOM to userspace
				 * pointing to the batch header.
				 */
				nfnl_err_reset(&err_list);
				netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM,
					    NULL);
				status |= NFNL_BATCH_FAILURE;
				goto done;
			}
			/* We don't stop processing the batch on errors, thus,
			 * userspace gets all the errors that the batch
			 * triggers.
			 */
			if (err)
				status |= NFNL_BATCH_FAILURE;
		}
next:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}
done:
	if (status & NFNL_BATCH_REPLAY) {
		ss->abort(net, oskb);
		nfnl_err_reset(&err_list);
		nfnl_unlock(subsys_id);
		kfree_skb(skb);
		goto replay;
	} else if (status == NFNL_BATCH_DONE) {
		ss->commit(net, oskb);
	} else {
		ss->abort(net, oskb);
	}

	nfnl_err_deliver(&err_list, oskb);
	nfnl_unlock(subsys_id);
	kfree_skb(skb);
}

static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
	[NFNL_BATCH_GENID]	= { .type = NLA_U32 },
};

static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
	struct nlattr *attr = (void *)nlh + min_len;
	struct nlattr *cda[NFNL_BATCH_MAX + 1];
	int attrlen = nlh->nlmsg_len - min_len;
	struct nfgenmsg *nfgenmsg;
	int msglen, err;
	u32 gen_id = 0;
	u16 res_id;

	msglen = NLMSG_ALIGN(nlh->nlmsg_len);
	if (msglen > skb->len)
		msglen = skb->len;

	if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
		return;

	err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy,
			NULL);
	if (err < 0) {
		netlink_ack(skb, nlh, err, NULL);
		return;
	}
	if (cda[NFNL_BATCH_GENID])
		gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID]));

	nfgenmsg = nlmsg_data(nlh);
	skb_pull(skb, msglen);
	/* Work around old nft using host byte order */
	if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
		res_id = NFNL_SUBSYS_NFTABLES;
	else
		res_id = ntohs(nfgenmsg->res_id);

	nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
}

static void nfnetlink_rcv(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);

	if (skb->len < NLMSG_HDRLEN ||
	    nlh->nlmsg_len < NLMSG_HDRLEN ||
	    skb->len < nlh->nlmsg_len)
		return;

	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
		netlink_ack(skb, nlh, -EPERM, NULL);
		return;
	}

	if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
		nfnetlink_rcv_skb_batch(skb, nlh);
	else
		netlink_rcv_skb(skb, nfnetlink_rcv_msg);
}

#ifdef CONFIG_MODULES
static int nfnetlink_bind(struct net *net, int group)
{
	const struct nfnetlink_subsystem *ss;
	int type;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return 0;

	type = nfnl_group2type[group];

	rcu_read_lock();
	ss = nfnetlink_get_subsys(type << 8);
	rcu_read_unlock();
	if (!ss)
		request_module("nfnetlink-subsys-%d", type);
	return 0;
}
#endif

static int __net_init nfnetlink_net_init(struct net *net)
{
	struct sock *nfnl;
	struct netlink_kernel_cfg cfg = {
		.groups	= NFNLGRP_MAX,
		.input	= nfnetlink_rcv,
#ifdef CONFIG_MODULES
		.bind	= nfnetlink_bind,
#endif
	};

	nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
	if (!nfnl)
		return -ENOMEM;
	net->nfnl_stash = nfnl;
	rcu_assign_pointer(net->nfnl, nfnl);
	return 0;
}

static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		RCU_INIT_POINTER(net->nfnl, NULL);
	synchronize_net();
	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->nfnl_stash);
}

static struct pernet_operations nfnetlink_net_ops = {
	.init		= nfnetlink_net_init,
	.exit_batch	= nfnetlink_net_exit_batch,
};

static int __init nfnetlink_init(void)
{
	int i;

	for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
		BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);

	for (i = 0; i < NFNL_SUBSYS_COUNT; i++)
		mutex_init(&table[i].mutex);

	return register_pernet_subsys(&nfnetlink_net_ops);
}

static void __exit nfnetlink_exit(void)
{
	unregister_pernet_subsys(&nfnetlink_net_ops);
}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);