1 /* 2 * x_tables core - Backend for {ip,ip6,arp}_tables 3 * 4 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org> 5 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net> 6 * 7 * Based on existing ip_tables code which is 8 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 9 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License version 2 as 13 * published by the Free Software Foundation. 14 * 15 */ 16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17 #include <linux/kernel.h> 18 #include <linux/module.h> 19 #include <linux/socket.h> 20 #include <linux/net.h> 21 #include <linux/proc_fs.h> 22 #include <linux/seq_file.h> 23 #include <linux/string.h> 24 #include <linux/vmalloc.h> 25 #include <linux/mutex.h> 26 #include <linux/mm.h> 27 #include <linux/slab.h> 28 #include <linux/audit.h> 29 #include <linux/user_namespace.h> 30 #include <net/net_namespace.h> 31 32 #include <linux/netfilter/x_tables.h> 33 #include <linux/netfilter_arp.h> 34 #include <linux/netfilter_ipv4/ip_tables.h> 35 #include <linux/netfilter_ipv6/ip6_tables.h> 36 #include <linux/netfilter_arp/arp_tables.h> 37 38 MODULE_LICENSE("GPL"); 39 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 40 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); 41 42 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) 43 44 struct compat_delta { 45 unsigned int offset; /* offset in kernel */ 46 int delta; /* delta in 32bit user land */ 47 }; 48 49 struct xt_af { 50 struct mutex mutex; 51 struct list_head match; 52 struct list_head target; 53 #ifdef CONFIG_COMPAT 54 struct mutex compat_mutex; 55 struct compat_delta *compat_tab; 56 unsigned int number; /* number of slots in compat_tab[] */ 57 unsigned int cur; /* number of used slots in compat_tab[] */ 58 #endif 59 }; 60 61 static struct xt_af *xt; 62 63 static const char *const xt_prefix[NFPROTO_NUMPROTO] = { 64 [NFPROTO_UNSPEC] = "x", 65 [NFPROTO_IPV4] = "ip", 66 [NFPROTO_ARP] = "arp", 67 [NFPROTO_BRIDGE] = "eb", 68 [NFPROTO_IPV6] = "ip6", 69 }; 70 71 /* Registration hooks for targets. */ 72 int xt_register_target(struct xt_target *target) 73 { 74 u_int8_t af = target->family; 75 76 mutex_lock(&xt[af].mutex); 77 list_add(&target->list, &xt[af].target); 78 mutex_unlock(&xt[af].mutex); 79 return 0; 80 } 81 EXPORT_SYMBOL(xt_register_target); 82 83 void 84 xt_unregister_target(struct xt_target *target) 85 { 86 u_int8_t af = target->family; 87 88 mutex_lock(&xt[af].mutex); 89 list_del(&target->list); 90 mutex_unlock(&xt[af].mutex); 91 } 92 EXPORT_SYMBOL(xt_unregister_target); 93 94 int 95 xt_register_targets(struct xt_target *target, unsigned int n) 96 { 97 unsigned int i; 98 int err = 0; 99 100 for (i = 0; i < n; i++) { 101 err = xt_register_target(&target[i]); 102 if (err) 103 goto err; 104 } 105 return err; 106 107 err: 108 if (i > 0) 109 xt_unregister_targets(target, i); 110 return err; 111 } 112 EXPORT_SYMBOL(xt_register_targets); 113 114 void 115 xt_unregister_targets(struct xt_target *target, unsigned int n) 116 { 117 while (n-- > 0) 118 xt_unregister_target(&target[n]); 119 } 120 EXPORT_SYMBOL(xt_unregister_targets); 121 122 int xt_register_match(struct xt_match *match) 123 { 124 u_int8_t af = match->family; 125 126 mutex_lock(&xt[af].mutex); 127 list_add(&match->list, &xt[af].match); 128 mutex_unlock(&xt[af].mutex); 129 return 0; 130 } 131 EXPORT_SYMBOL(xt_register_match); 132 133 void 134 xt_unregister_match(struct xt_match *match) 135 { 136 u_int8_t af = match->family; 137 138 mutex_lock(&xt[af].mutex); 139 list_del(&match->list); 140 mutex_unlock(&xt[af].mutex); 141 } 142 EXPORT_SYMBOL(xt_unregister_match); 143 144 int 145 xt_register_matches(struct xt_match *match, unsigned int n) 146 { 147 unsigned int i; 148 int err = 0; 149 150 for (i = 0; i < n; i++) { 151 err = xt_register_match(&match[i]); 152 if (err) 153 goto err; 154 } 155 return err; 156 157 err: 158 if (i > 0) 159 xt_unregister_matches(match, i); 160 return err; 161 } 162 EXPORT_SYMBOL(xt_register_matches); 163 164 void 165 xt_unregister_matches(struct xt_match *match, unsigned int n) 166 { 167 while (n-- > 0) 168 xt_unregister_match(&match[n]); 169 } 170 EXPORT_SYMBOL(xt_unregister_matches); 171 172 173 /* 174 * These are weird, but module loading must not be done with mutex 175 * held (since they will register), and we have to have a single 176 * function to use. 177 */ 178 179 /* Find match, grabs ref. Returns ERR_PTR() on error. */ 180 struct xt_match *xt_find_match(u8 af, const char *name, u8 revision) 181 { 182 struct xt_match *m; 183 int err = -ENOENT; 184 185 mutex_lock(&xt[af].mutex); 186 list_for_each_entry(m, &xt[af].match, list) { 187 if (strcmp(m->name, name) == 0) { 188 if (m->revision == revision) { 189 if (try_module_get(m->me)) { 190 mutex_unlock(&xt[af].mutex); 191 return m; 192 } 193 } else 194 err = -EPROTOTYPE; /* Found something. */ 195 } 196 } 197 mutex_unlock(&xt[af].mutex); 198 199 if (af != NFPROTO_UNSPEC) 200 /* Try searching again in the family-independent list */ 201 return xt_find_match(NFPROTO_UNSPEC, name, revision); 202 203 return ERR_PTR(err); 204 } 205 EXPORT_SYMBOL(xt_find_match); 206 207 struct xt_match * 208 xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) 209 { 210 struct xt_match *match; 211 212 match = xt_find_match(nfproto, name, revision); 213 if (IS_ERR(match)) { 214 request_module("%st_%s", xt_prefix[nfproto], name); 215 match = xt_find_match(nfproto, name, revision); 216 } 217 218 return match; 219 } 220 EXPORT_SYMBOL_GPL(xt_request_find_match); 221 222 /* Find target, grabs ref. Returns ERR_PTR() on error. */ 223 struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) 224 { 225 struct xt_target *t; 226 int err = -ENOENT; 227 228 mutex_lock(&xt[af].mutex); 229 list_for_each_entry(t, &xt[af].target, list) { 230 if (strcmp(t->name, name) == 0) { 231 if (t->revision == revision) { 232 if (try_module_get(t->me)) { 233 mutex_unlock(&xt[af].mutex); 234 return t; 235 } 236 } else 237 err = -EPROTOTYPE; /* Found something. */ 238 } 239 } 240 mutex_unlock(&xt[af].mutex); 241 242 if (af != NFPROTO_UNSPEC) 243 /* Try searching again in the family-independent list */ 244 return xt_find_target(NFPROTO_UNSPEC, name, revision); 245 246 return ERR_PTR(err); 247 } 248 EXPORT_SYMBOL(xt_find_target); 249 250 struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) 251 { 252 struct xt_target *target; 253 254 target = xt_find_target(af, name, revision); 255 if (IS_ERR(target)) { 256 request_module("%st_%s", xt_prefix[af], name); 257 target = xt_find_target(af, name, revision); 258 } 259 260 return target; 261 } 262 EXPORT_SYMBOL_GPL(xt_request_find_target); 263 264 static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) 265 { 266 const struct xt_match *m; 267 int have_rev = 0; 268 269 list_for_each_entry(m, &xt[af].match, list) { 270 if (strcmp(m->name, name) == 0) { 271 if (m->revision > *bestp) 272 *bestp = m->revision; 273 if (m->revision == revision) 274 have_rev = 1; 275 } 276 } 277 278 if (af != NFPROTO_UNSPEC && !have_rev) 279 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); 280 281 return have_rev; 282 } 283 284 static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) 285 { 286 const struct xt_target *t; 287 int have_rev = 0; 288 289 list_for_each_entry(t, &xt[af].target, list) { 290 if (strcmp(t->name, name) == 0) { 291 if (t->revision > *bestp) 292 *bestp = t->revision; 293 if (t->revision == revision) 294 have_rev = 1; 295 } 296 } 297 298 if (af != NFPROTO_UNSPEC && !have_rev) 299 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); 300 301 return have_rev; 302 } 303 304 /* Returns true or false (if no such extension at all) */ 305 int xt_find_revision(u8 af, const char *name, u8 revision, int target, 306 int *err) 307 { 308 int have_rev, best = -1; 309 310 mutex_lock(&xt[af].mutex); 311 if (target == 1) 312 have_rev = target_revfn(af, name, revision, &best); 313 else 314 have_rev = match_revfn(af, name, revision, &best); 315 mutex_unlock(&xt[af].mutex); 316 317 /* Nothing at all? Return 0 to try loading module. */ 318 if (best == -1) { 319 *err = -ENOENT; 320 return 0; 321 } 322 323 *err = best; 324 if (!have_rev) 325 *err = -EPROTONOSUPPORT; 326 return 1; 327 } 328 EXPORT_SYMBOL_GPL(xt_find_revision); 329 330 static char * 331 textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto) 332 { 333 static const char *const inetbr_names[] = { 334 "PREROUTING", "INPUT", "FORWARD", 335 "OUTPUT", "POSTROUTING", "BROUTING", 336 }; 337 static const char *const arp_names[] = { 338 "INPUT", "FORWARD", "OUTPUT", 339 }; 340 const char *const *names; 341 unsigned int i, max; 342 char *p = buf; 343 bool np = false; 344 int res; 345 346 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names; 347 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) : 348 ARRAY_SIZE(inetbr_names); 349 *p = '\0'; 350 for (i = 0; i < max; ++i) { 351 if (!(mask & (1 << i))) 352 continue; 353 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); 354 if (res > 0) { 355 size -= res; 356 p += res; 357 } 358 np = true; 359 } 360 361 return buf; 362 } 363 364 int xt_check_match(struct xt_mtchk_param *par, 365 unsigned int size, u_int8_t proto, bool inv_proto) 366 { 367 int ret; 368 369 if (XT_ALIGN(par->match->matchsize) != size && 370 par->match->matchsize != -1) { 371 /* 372 * ebt_among is exempt from centralized matchsize checking 373 * because it uses a dynamic-size data set. 374 */ 375 pr_err("%s_tables: %s.%u match: invalid size " 376 "%u (kernel) != (user) %u\n", 377 xt_prefix[par->family], par->match->name, 378 par->match->revision, 379 XT_ALIGN(par->match->matchsize), size); 380 return -EINVAL; 381 } 382 if (par->match->table != NULL && 383 strcmp(par->match->table, par->table) != 0) { 384 pr_err("%s_tables: %s match: only valid in %s table, not %s\n", 385 xt_prefix[par->family], par->match->name, 386 par->match->table, par->table); 387 return -EINVAL; 388 } 389 if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { 390 char used[64], allow[64]; 391 392 pr_err("%s_tables: %s match: used from hooks %s, but only " 393 "valid from %s\n", 394 xt_prefix[par->family], par->match->name, 395 textify_hooks(used, sizeof(used), par->hook_mask, 396 par->family), 397 textify_hooks(allow, sizeof(allow), par->match->hooks, 398 par->family)); 399 return -EINVAL; 400 } 401 if (par->match->proto && (par->match->proto != proto || inv_proto)) { 402 pr_err("%s_tables: %s match: only valid for protocol %u\n", 403 xt_prefix[par->family], par->match->name, 404 par->match->proto); 405 return -EINVAL; 406 } 407 if (par->match->checkentry != NULL) { 408 ret = par->match->checkentry(par); 409 if (ret < 0) 410 return ret; 411 else if (ret > 0) 412 /* Flag up potential errors. */ 413 return -EIO; 414 } 415 return 0; 416 } 417 EXPORT_SYMBOL_GPL(xt_check_match); 418 419 /** xt_check_entry_match - check that matches end before start of target 420 * 421 * @match: beginning of xt_entry_match 422 * @target: beginning of this rules target (alleged end of matches) 423 * @alignment: alignment requirement of match structures 424 * 425 * Validates that all matches add up to the beginning of the target, 426 * and that each match covers at least the base structure size. 427 * 428 * Return: 0 on success, negative errno on failure. 429 */ 430 static int xt_check_entry_match(const char *match, const char *target, 431 const size_t alignment) 432 { 433 const struct xt_entry_match *pos; 434 int length = target - match; 435 436 if (length == 0) /* no matches */ 437 return 0; 438 439 pos = (struct xt_entry_match *)match; 440 do { 441 if ((unsigned long)pos % alignment) 442 return -EINVAL; 443 444 if (length < (int)sizeof(struct xt_entry_match)) 445 return -EINVAL; 446 447 if (pos->u.match_size < sizeof(struct xt_entry_match)) 448 return -EINVAL; 449 450 if (pos->u.match_size > length) 451 return -EINVAL; 452 453 length -= pos->u.match_size; 454 pos = ((void *)((char *)(pos) + (pos)->u.match_size)); 455 } while (length > 0); 456 457 return 0; 458 } 459 460 #ifdef CONFIG_COMPAT 461 int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta) 462 { 463 struct xt_af *xp = &xt[af]; 464 465 if (!xp->compat_tab) { 466 if (!xp->number) 467 return -EINVAL; 468 xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number); 469 if (!xp->compat_tab) 470 return -ENOMEM; 471 xp->cur = 0; 472 } 473 474 if (xp->cur >= xp->number) 475 return -EINVAL; 476 477 if (xp->cur) 478 delta += xp->compat_tab[xp->cur - 1].delta; 479 xp->compat_tab[xp->cur].offset = offset; 480 xp->compat_tab[xp->cur].delta = delta; 481 xp->cur++; 482 return 0; 483 } 484 EXPORT_SYMBOL_GPL(xt_compat_add_offset); 485 486 void xt_compat_flush_offsets(u_int8_t af) 487 { 488 if (xt[af].compat_tab) { 489 vfree(xt[af].compat_tab); 490 xt[af].compat_tab = NULL; 491 xt[af].number = 0; 492 xt[af].cur = 0; 493 } 494 } 495 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); 496 497 int xt_compat_calc_jump(u_int8_t af, unsigned int offset) 498 { 499 struct compat_delta *tmp = xt[af].compat_tab; 500 int mid, left = 0, right = xt[af].cur - 1; 501 502 while (left <= right) { 503 mid = (left + right) >> 1; 504 if (offset > tmp[mid].offset) 505 left = mid + 1; 506 else if (offset < tmp[mid].offset) 507 right = mid - 1; 508 else 509 return mid ? tmp[mid - 1].delta : 0; 510 } 511 return left ? tmp[left - 1].delta : 0; 512 } 513 EXPORT_SYMBOL_GPL(xt_compat_calc_jump); 514 515 void xt_compat_init_offsets(u_int8_t af, unsigned int number) 516 { 517 xt[af].number = number; 518 xt[af].cur = 0; 519 } 520 EXPORT_SYMBOL(xt_compat_init_offsets); 521 522 int xt_compat_match_offset(const struct xt_match *match) 523 { 524 u_int16_t csize = match->compatsize ? : match->matchsize; 525 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize); 526 } 527 EXPORT_SYMBOL_GPL(xt_compat_match_offset); 528 529 void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, 530 unsigned int *size) 531 { 532 const struct xt_match *match = m->u.kernel.match; 533 struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; 534 int pad, off = xt_compat_match_offset(match); 535 u_int16_t msize = cm->u.user.match_size; 536 char name[sizeof(m->u.user.name)]; 537 538 m = *dstptr; 539 memcpy(m, cm, sizeof(*cm)); 540 if (match->compat_from_user) 541 match->compat_from_user(m->data, cm->data); 542 else 543 memcpy(m->data, cm->data, msize - sizeof(*cm)); 544 pad = XT_ALIGN(match->matchsize) - match->matchsize; 545 if (pad > 0) 546 memset(m->data + match->matchsize, 0, pad); 547 548 msize += off; 549 m->u.user.match_size = msize; 550 strlcpy(name, match->name, sizeof(name)); 551 module_put(match->me); 552 strncpy(m->u.user.name, name, sizeof(m->u.user.name)); 553 554 *size += off; 555 *dstptr += msize; 556 } 557 EXPORT_SYMBOL_GPL(xt_compat_match_from_user); 558 559 int xt_compat_match_to_user(const struct xt_entry_match *m, 560 void __user **dstptr, unsigned int *size) 561 { 562 const struct xt_match *match = m->u.kernel.match; 563 struct compat_xt_entry_match __user *cm = *dstptr; 564 int off = xt_compat_match_offset(match); 565 u_int16_t msize = m->u.user.match_size - off; 566 567 if (copy_to_user(cm, m, sizeof(*cm)) || 568 put_user(msize, &cm->u.user.match_size) || 569 copy_to_user(cm->u.user.name, m->u.kernel.match->name, 570 strlen(m->u.kernel.match->name) + 1)) 571 return -EFAULT; 572 573 if (match->compat_to_user) { 574 if (match->compat_to_user((void __user *)cm->data, m->data)) 575 return -EFAULT; 576 } else { 577 if (copy_to_user(cm->data, m->data, msize - sizeof(*cm))) 578 return -EFAULT; 579 } 580 581 *size -= off; 582 *dstptr += msize; 583 return 0; 584 } 585 EXPORT_SYMBOL_GPL(xt_compat_match_to_user); 586 587 /* non-compat version may have padding after verdict */ 588 struct compat_xt_standard_target { 589 struct compat_xt_entry_target t; 590 compat_uint_t verdict; 591 }; 592 593 int xt_compat_check_entry_offsets(const void *base, const char *elems, 594 unsigned int target_offset, 595 unsigned int next_offset) 596 { 597 long size_of_base_struct = elems - (const char *)base; 598 const struct compat_xt_entry_target *t; 599 const char *e = base; 600 601 if (target_offset < size_of_base_struct) 602 return -EINVAL; 603 604 if (target_offset + sizeof(*t) > next_offset) 605 return -EINVAL; 606 607 t = (void *)(e + target_offset); 608 if (t->u.target_size < sizeof(*t)) 609 return -EINVAL; 610 611 if (target_offset + t->u.target_size > next_offset) 612 return -EINVAL; 613 614 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && 615 target_offset + sizeof(struct compat_xt_standard_target) != next_offset) 616 return -EINVAL; 617 618 /* compat_xt_entry match has less strict aligment requirements, 619 * otherwise they are identical. In case of padding differences 620 * we need to add compat version of xt_check_entry_match. 621 */ 622 BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match)); 623 624 return xt_check_entry_match(elems, base + target_offset, 625 __alignof__(struct compat_xt_entry_match)); 626 } 627 EXPORT_SYMBOL(xt_compat_check_entry_offsets); 628 #endif /* CONFIG_COMPAT */ 629 630 /** 631 * xt_check_entry_offsets - validate arp/ip/ip6t_entry 632 * 633 * @base: pointer to arp/ip/ip6t_entry 634 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems 635 * @target_offset: the arp/ip/ip6_t->target_offset 636 * @next_offset: the arp/ip/ip6_t->next_offset 637 * 638 * validates that target_offset and next_offset are sane and that all 639 * match sizes (if any) align with the target offset. 640 * 641 * This function does not validate the targets or matches themselves, it 642 * only tests that all the offsets and sizes are correct, that all 643 * match structures are aligned, and that the last structure ends where 644 * the target structure begins. 645 * 646 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version. 647 * 648 * The arp/ip/ip6t_entry structure @base must have passed following tests: 649 * - it must point to a valid memory location 650 * - base to base + next_offset must be accessible, i.e. not exceed allocated 651 * length. 652 * 653 * A well-formed entry looks like this: 654 * 655 * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry 656 * e->elems[]-----' | | 657 * matchsize | | 658 * matchsize | | 659 * | | 660 * target_offset---------------------------------' | 661 * next_offset---------------------------------------------------' 662 * 663 * elems[]: flexible array member at end of ip(6)/arpt_entry struct. 664 * This is where matches (if any) and the target reside. 665 * target_offset: beginning of target. 666 * next_offset: start of the next rule; also: size of this rule. 667 * Since targets have a minimum size, target_offset + minlen <= next_offset. 668 * 669 * Every match stores its size, sum of sizes must not exceed target_offset. 670 * 671 * Return: 0 on success, negative errno on failure. 672 */ 673 int xt_check_entry_offsets(const void *base, 674 const char *elems, 675 unsigned int target_offset, 676 unsigned int next_offset) 677 { 678 long size_of_base_struct = elems - (const char *)base; 679 const struct xt_entry_target *t; 680 const char *e = base; 681 682 /* target start is within the ip/ip6/arpt_entry struct */ 683 if (target_offset < size_of_base_struct) 684 return -EINVAL; 685 686 if (target_offset + sizeof(*t) > next_offset) 687 return -EINVAL; 688 689 t = (void *)(e + target_offset); 690 if (t->u.target_size < sizeof(*t)) 691 return -EINVAL; 692 693 if (target_offset + t->u.target_size > next_offset) 694 return -EINVAL; 695 696 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && 697 target_offset + sizeof(struct xt_standard_target) != next_offset) 698 return -EINVAL; 699 700 return xt_check_entry_match(elems, base + target_offset, 701 __alignof__(struct xt_entry_match)); 702 } 703 EXPORT_SYMBOL(xt_check_entry_offsets); 704 705 int xt_check_target(struct xt_tgchk_param *par, 706 unsigned int size, u_int8_t proto, bool inv_proto) 707 { 708 int ret; 709 710 if (XT_ALIGN(par->target->targetsize) != size) { 711 pr_err("%s_tables: %s.%u target: invalid size " 712 "%u (kernel) != (user) %u\n", 713 xt_prefix[par->family], par->target->name, 714 par->target->revision, 715 XT_ALIGN(par->target->targetsize), size); 716 return -EINVAL; 717 } 718 if (par->target->table != NULL && 719 strcmp(par->target->table, par->table) != 0) { 720 pr_err("%s_tables: %s target: only valid in %s table, not %s\n", 721 xt_prefix[par->family], par->target->name, 722 par->target->table, par->table); 723 return -EINVAL; 724 } 725 if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) { 726 char used[64], allow[64]; 727 728 pr_err("%s_tables: %s target: used from hooks %s, but only " 729 "usable from %s\n", 730 xt_prefix[par->family], par->target->name, 731 textify_hooks(used, sizeof(used), par->hook_mask, 732 par->family), 733 textify_hooks(allow, sizeof(allow), par->target->hooks, 734 par->family)); 735 return -EINVAL; 736 } 737 if (par->target->proto && (par->target->proto != proto || inv_proto)) { 738 pr_err("%s_tables: %s target: only valid for protocol %u\n", 739 xt_prefix[par->family], par->target->name, 740 par->target->proto); 741 return -EINVAL; 742 } 743 if (par->target->checkentry != NULL) { 744 ret = par->target->checkentry(par); 745 if (ret < 0) 746 return ret; 747 else if (ret > 0) 748 /* Flag up potential errors. */ 749 return -EIO; 750 } 751 return 0; 752 } 753 EXPORT_SYMBOL_GPL(xt_check_target); 754 755 /** 756 * xt_copy_counters_from_user - copy counters and metadata from userspace 757 * 758 * @user: src pointer to userspace memory 759 * @len: alleged size of userspace memory 760 * @info: where to store the xt_counters_info metadata 761 * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel 762 * 763 * Copies counter meta data from @user and stores it in @info. 764 * 765 * vmallocs memory to hold the counters, then copies the counter data 766 * from @user to the new memory and returns a pointer to it. 767 * 768 * If @compat is true, @info gets converted automatically to the 64bit 769 * representation. 770 * 771 * The metadata associated with the counters is stored in @info. 772 * 773 * Return: returns pointer that caller has to test via IS_ERR(). 774 * If IS_ERR is false, caller has to vfree the pointer. 775 */ 776 void *xt_copy_counters_from_user(const void __user *user, unsigned int len, 777 struct xt_counters_info *info, bool compat) 778 { 779 void *mem; 780 u64 size; 781 782 #ifdef CONFIG_COMPAT 783 if (compat) { 784 /* structures only differ in size due to alignment */ 785 struct compat_xt_counters_info compat_tmp; 786 787 if (len <= sizeof(compat_tmp)) 788 return ERR_PTR(-EINVAL); 789 790 len -= sizeof(compat_tmp); 791 if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) 792 return ERR_PTR(-EFAULT); 793 794 strlcpy(info->name, compat_tmp.name, sizeof(info->name)); 795 info->num_counters = compat_tmp.num_counters; 796 user += sizeof(compat_tmp); 797 } else 798 #endif 799 { 800 if (len <= sizeof(*info)) 801 return ERR_PTR(-EINVAL); 802 803 len -= sizeof(*info); 804 if (copy_from_user(info, user, sizeof(*info)) != 0) 805 return ERR_PTR(-EFAULT); 806 807 info->name[sizeof(info->name) - 1] = '\0'; 808 user += sizeof(*info); 809 } 810 811 size = sizeof(struct xt_counters); 812 size *= info->num_counters; 813 814 if (size != (u64)len) 815 return ERR_PTR(-EINVAL); 816 817 mem = vmalloc(len); 818 if (!mem) 819 return ERR_PTR(-ENOMEM); 820 821 if (copy_from_user(mem, user, len) == 0) 822 return mem; 823 824 vfree(mem); 825 return ERR_PTR(-EFAULT); 826 } 827 EXPORT_SYMBOL_GPL(xt_copy_counters_from_user); 828 829 #ifdef CONFIG_COMPAT 830 int xt_compat_target_offset(const struct xt_target *target) 831 { 832 u_int16_t csize = target->compatsize ? : target->targetsize; 833 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize); 834 } 835 EXPORT_SYMBOL_GPL(xt_compat_target_offset); 836 837 void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, 838 unsigned int *size) 839 { 840 const struct xt_target *target = t->u.kernel.target; 841 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; 842 int pad, off = xt_compat_target_offset(target); 843 u_int16_t tsize = ct->u.user.target_size; 844 char name[sizeof(t->u.user.name)]; 845 846 t = *dstptr; 847 memcpy(t, ct, sizeof(*ct)); 848 if (target->compat_from_user) 849 target->compat_from_user(t->data, ct->data); 850 else 851 memcpy(t->data, ct->data, tsize - sizeof(*ct)); 852 pad = XT_ALIGN(target->targetsize) - target->targetsize; 853 if (pad > 0) 854 memset(t->data + target->targetsize, 0, pad); 855 856 tsize += off; 857 t->u.user.target_size = tsize; 858 strlcpy(name, target->name, sizeof(name)); 859 module_put(target->me); 860 strncpy(t->u.user.name, name, sizeof(t->u.user.name)); 861 862 *size += off; 863 *dstptr += tsize; 864 } 865 EXPORT_SYMBOL_GPL(xt_compat_target_from_user); 866 867 int xt_compat_target_to_user(const struct xt_entry_target *t, 868 void __user **dstptr, unsigned int *size) 869 { 870 const struct xt_target *target = t->u.kernel.target; 871 struct compat_xt_entry_target __user *ct = *dstptr; 872 int off = xt_compat_target_offset(target); 873 u_int16_t tsize = t->u.user.target_size - off; 874 875 if (copy_to_user(ct, t, sizeof(*ct)) || 876 put_user(tsize, &ct->u.user.target_size) || 877 copy_to_user(ct->u.user.name, t->u.kernel.target->name, 878 strlen(t->u.kernel.target->name) + 1)) 879 return -EFAULT; 880 881 if (target->compat_to_user) { 882 if (target->compat_to_user((void __user *)ct->data, t->data)) 883 return -EFAULT; 884 } else { 885 if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct))) 886 return -EFAULT; 887 } 888 889 *size -= off; 890 *dstptr += tsize; 891 return 0; 892 } 893 EXPORT_SYMBOL_GPL(xt_compat_target_to_user); 894 #endif 895 896 struct xt_table_info *xt_alloc_table_info(unsigned int size) 897 { 898 struct xt_table_info *info = NULL; 899 size_t sz = sizeof(*info) + size; 900 901 if (sz < sizeof(*info)) 902 return NULL; 903 904 /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */ 905 if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages) 906 return NULL; 907 908 if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) 909 info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); 910 if (!info) { 911 info = vmalloc(sz); 912 if (!info) 913 return NULL; 914 } 915 memset(info, 0, sizeof(*info)); 916 info->size = size; 917 return info; 918 } 919 EXPORT_SYMBOL(xt_alloc_table_info); 920 921 void xt_free_table_info(struct xt_table_info *info) 922 { 923 int cpu; 924 925 if (info->jumpstack != NULL) { 926 for_each_possible_cpu(cpu) 927 kvfree(info->jumpstack[cpu]); 928 kvfree(info->jumpstack); 929 } 930 931 kvfree(info); 932 } 933 EXPORT_SYMBOL(xt_free_table_info); 934 935 /* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */ 936 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, 937 const char *name) 938 { 939 struct xt_table *t, *found = NULL; 940 941 mutex_lock(&xt[af].mutex); 942 list_for_each_entry(t, &net->xt.tables[af], list) 943 if (strcmp(t->name, name) == 0 && try_module_get(t->me)) 944 return t; 945 946 if (net == &init_net) 947 goto out; 948 949 /* Table doesn't exist in this netns, re-try init */ 950 list_for_each_entry(t, &init_net.xt.tables[af], list) { 951 if (strcmp(t->name, name)) 952 continue; 953 if (!try_module_get(t->me)) 954 return NULL; 955 956 mutex_unlock(&xt[af].mutex); 957 if (t->table_init(net) != 0) { 958 module_put(t->me); 959 return NULL; 960 } 961 962 found = t; 963 964 mutex_lock(&xt[af].mutex); 965 break; 966 } 967 968 if (!found) 969 goto out; 970 971 /* and once again: */ 972 list_for_each_entry(t, &net->xt.tables[af], list) 973 if (strcmp(t->name, name) == 0) 974 return t; 975 976 module_put(found->me); 977 out: 978 mutex_unlock(&xt[af].mutex); 979 return NULL; 980 } 981 EXPORT_SYMBOL_GPL(xt_find_table_lock); 982 983 void xt_table_unlock(struct xt_table *table) 984 { 985 mutex_unlock(&xt[table->af].mutex); 986 } 987 EXPORT_SYMBOL_GPL(xt_table_unlock); 988 989 #ifdef CONFIG_COMPAT 990 void xt_compat_lock(u_int8_t af) 991 { 992 mutex_lock(&xt[af].compat_mutex); 993 } 994 EXPORT_SYMBOL_GPL(xt_compat_lock); 995 996 void xt_compat_unlock(u_int8_t af) 997 { 998 mutex_unlock(&xt[af].compat_mutex); 999 } 1000 EXPORT_SYMBOL_GPL(xt_compat_unlock); 1001 #endif 1002 1003 DEFINE_PER_CPU(seqcount_t, xt_recseq); 1004 EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq); 1005 1006 struct static_key xt_tee_enabled __read_mostly; 1007 EXPORT_SYMBOL_GPL(xt_tee_enabled); 1008 1009 static int xt_jumpstack_alloc(struct xt_table_info *i) 1010 { 1011 unsigned int size; 1012 int cpu; 1013 1014 size = sizeof(void **) * nr_cpu_ids; 1015 if (size > PAGE_SIZE) 1016 i->jumpstack = vzalloc(size); 1017 else 1018 i->jumpstack = kzalloc(size, GFP_KERNEL); 1019 if (i->jumpstack == NULL) 1020 return -ENOMEM; 1021 1022 /* ruleset without jumps -- no stack needed */ 1023 if (i->stacksize == 0) 1024 return 0; 1025 1026 /* Jumpstack needs to be able to record two full callchains, one 1027 * from the first rule set traversal, plus one table reentrancy 1028 * via -j TEE without clobbering the callchain that brought us to 1029 * TEE target. 1030 * 1031 * This is done by allocating two jumpstacks per cpu, on reentry 1032 * the upper half of the stack is used. 1033 * 1034 * see the jumpstack setup in ipt_do_table() for more details. 1035 */ 1036 size = sizeof(void *) * i->stacksize * 2u; 1037 for_each_possible_cpu(cpu) { 1038 if (size > PAGE_SIZE) 1039 i->jumpstack[cpu] = vmalloc_node(size, 1040 cpu_to_node(cpu)); 1041 else 1042 i->jumpstack[cpu] = kmalloc_node(size, 1043 GFP_KERNEL, cpu_to_node(cpu)); 1044 if (i->jumpstack[cpu] == NULL) 1045 /* 1046 * Freeing will be done later on by the callers. The 1047 * chain is: xt_replace_table -> __do_replace -> 1048 * do_replace -> xt_free_table_info. 1049 */ 1050 return -ENOMEM; 1051 } 1052 1053 return 0; 1054 } 1055 1056 struct xt_table_info * 1057 xt_replace_table(struct xt_table *table, 1058 unsigned int num_counters, 1059 struct xt_table_info *newinfo, 1060 int *error) 1061 { 1062 struct xt_table_info *private; 1063 int ret; 1064 1065 ret = xt_jumpstack_alloc(newinfo); 1066 if (ret < 0) { 1067 *error = ret; 1068 return NULL; 1069 } 1070 1071 /* Do the substitution. */ 1072 local_bh_disable(); 1073 private = table->private; 1074 1075 /* Check inside lock: is the old number correct? */ 1076 if (num_counters != private->number) { 1077 pr_debug("num_counters != table->private->number (%u/%u)\n", 1078 num_counters, private->number); 1079 local_bh_enable(); 1080 *error = -EAGAIN; 1081 return NULL; 1082 } 1083 1084 newinfo->initial_entries = private->initial_entries; 1085 /* 1086 * Ensure contents of newinfo are visible before assigning to 1087 * private. 1088 */ 1089 smp_wmb(); 1090 table->private = newinfo; 1091 1092 /* 1093 * Even though table entries have now been swapped, other CPU's 1094 * may still be using the old entries. This is okay, because 1095 * resynchronization happens because of the locking done 1096 * during the get_counters() routine. 1097 */ 1098 local_bh_enable(); 1099 1100 #ifdef CONFIG_AUDIT 1101 if (audit_enabled) { 1102 struct audit_buffer *ab; 1103 1104 ab = audit_log_start(current->audit_context, GFP_KERNEL, 1105 AUDIT_NETFILTER_CFG); 1106 if (ab) { 1107 audit_log_format(ab, "table=%s family=%u entries=%u", 1108 table->name, table->af, 1109 private->number); 1110 audit_log_end(ab); 1111 } 1112 } 1113 #endif 1114 1115 return private; 1116 } 1117 EXPORT_SYMBOL_GPL(xt_replace_table); 1118 1119 struct xt_table *xt_register_table(struct net *net, 1120 const struct xt_table *input_table, 1121 struct xt_table_info *bootstrap, 1122 struct xt_table_info *newinfo) 1123 { 1124 int ret; 1125 struct xt_table_info *private; 1126 struct xt_table *t, *table; 1127 1128 /* Don't add one object to multiple lists. */ 1129 table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL); 1130 if (!table) { 1131 ret = -ENOMEM; 1132 goto out; 1133 } 1134 1135 mutex_lock(&xt[table->af].mutex); 1136 /* Don't autoload: we'd eat our tail... */ 1137 list_for_each_entry(t, &net->xt.tables[table->af], list) { 1138 if (strcmp(t->name, table->name) == 0) { 1139 ret = -EEXIST; 1140 goto unlock; 1141 } 1142 } 1143 1144 /* Simplifies replace_table code. */ 1145 table->private = bootstrap; 1146 1147 if (!xt_replace_table(table, 0, newinfo, &ret)) 1148 goto unlock; 1149 1150 private = table->private; 1151 pr_debug("table->private->number = %u\n", private->number); 1152 1153 /* save number of initial entries */ 1154 private->initial_entries = private->number; 1155 1156 list_add(&table->list, &net->xt.tables[table->af]); 1157 mutex_unlock(&xt[table->af].mutex); 1158 return table; 1159 1160 unlock: 1161 mutex_unlock(&xt[table->af].mutex); 1162 kfree(table); 1163 out: 1164 return ERR_PTR(ret); 1165 } 1166 EXPORT_SYMBOL_GPL(xt_register_table); 1167 1168 void *xt_unregister_table(struct xt_table *table) 1169 { 1170 struct xt_table_info *private; 1171 1172 mutex_lock(&xt[table->af].mutex); 1173 private = table->private; 1174 list_del(&table->list); 1175 mutex_unlock(&xt[table->af].mutex); 1176 kfree(table); 1177 1178 return private; 1179 } 1180 EXPORT_SYMBOL_GPL(xt_unregister_table); 1181 1182 #ifdef CONFIG_PROC_FS 1183 struct xt_names_priv { 1184 struct seq_net_private p; 1185 u_int8_t af; 1186 }; 1187 static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos) 1188 { 1189 struct xt_names_priv *priv = seq->private; 1190 struct net *net = seq_file_net(seq); 1191 u_int8_t af = priv->af; 1192 1193 mutex_lock(&xt[af].mutex); 1194 return seq_list_start(&net->xt.tables[af], *pos); 1195 } 1196 1197 static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1198 { 1199 struct xt_names_priv *priv = seq->private; 1200 struct net *net = seq_file_net(seq); 1201 u_int8_t af = priv->af; 1202 1203 return seq_list_next(v, &net->xt.tables[af], pos); 1204 } 1205 1206 static void xt_table_seq_stop(struct seq_file *seq, void *v) 1207 { 1208 struct xt_names_priv *priv = seq->private; 1209 u_int8_t af = priv->af; 1210 1211 mutex_unlock(&xt[af].mutex); 1212 } 1213 1214 static int xt_table_seq_show(struct seq_file *seq, void *v) 1215 { 1216 struct xt_table *table = list_entry(v, struct xt_table, list); 1217 1218 if (*table->name) 1219 seq_printf(seq, "%s\n", table->name); 1220 return 0; 1221 } 1222 1223 static const struct seq_operations xt_table_seq_ops = { 1224 .start = xt_table_seq_start, 1225 .next = xt_table_seq_next, 1226 .stop = xt_table_seq_stop, 1227 .show = xt_table_seq_show, 1228 }; 1229 1230 static int xt_table_open(struct inode *inode, struct file *file) 1231 { 1232 int ret; 1233 struct xt_names_priv *priv; 1234 1235 ret = seq_open_net(inode, file, &xt_table_seq_ops, 1236 sizeof(struct xt_names_priv)); 1237 if (!ret) { 1238 priv = ((struct seq_file *)file->private_data)->private; 1239 priv->af = (unsigned long)PDE_DATA(inode); 1240 } 1241 return ret; 1242 } 1243 1244 static const struct file_operations xt_table_ops = { 1245 .owner = THIS_MODULE, 1246 .open = xt_table_open, 1247 .read = seq_read, 1248 .llseek = seq_lseek, 1249 .release = seq_release_net, 1250 }; 1251 1252 /* 1253 * Traverse state for ip{,6}_{tables,matches} for helping crossing 1254 * the multi-AF mutexes. 1255 */ 1256 struct nf_mttg_trav { 1257 struct list_head *head, *curr; 1258 uint8_t class, nfproto; 1259 }; 1260 1261 enum { 1262 MTTG_TRAV_INIT, 1263 MTTG_TRAV_NFP_UNSPEC, 1264 MTTG_TRAV_NFP_SPEC, 1265 MTTG_TRAV_DONE, 1266 }; 1267 1268 static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos, 1269 bool is_target) 1270 { 1271 static const uint8_t next_class[] = { 1272 [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC, 1273 [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE, 1274 }; 1275 struct nf_mttg_trav *trav = seq->private; 1276 1277 switch (trav->class) { 1278 case MTTG_TRAV_INIT: 1279 trav->class = MTTG_TRAV_NFP_UNSPEC; 1280 mutex_lock(&xt[NFPROTO_UNSPEC].mutex); 1281 trav->head = trav->curr = is_target ? 1282 &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match; 1283 break; 1284 case MTTG_TRAV_NFP_UNSPEC: 1285 trav->curr = trav->curr->next; 1286 if (trav->curr != trav->head) 1287 break; 1288 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); 1289 mutex_lock(&xt[trav->nfproto].mutex); 1290 trav->head = trav->curr = is_target ? 1291 &xt[trav->nfproto].target : &xt[trav->nfproto].match; 1292 trav->class = next_class[trav->class]; 1293 break; 1294 case MTTG_TRAV_NFP_SPEC: 1295 trav->curr = trav->curr->next; 1296 if (trav->curr != trav->head) 1297 break; 1298 /* fallthru, _stop will unlock */ 1299 default: 1300 return NULL; 1301 } 1302 1303 if (ppos != NULL) 1304 ++*ppos; 1305 return trav; 1306 } 1307 1308 static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos, 1309 bool is_target) 1310 { 1311 struct nf_mttg_trav *trav = seq->private; 1312 unsigned int j; 1313 1314 trav->class = MTTG_TRAV_INIT; 1315 for (j = 0; j < *pos; ++j) 1316 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL) 1317 return NULL; 1318 return trav; 1319 } 1320 1321 static void xt_mttg_seq_stop(struct seq_file *seq, void *v) 1322 { 1323 struct nf_mttg_trav *trav = seq->private; 1324 1325 switch (trav->class) { 1326 case MTTG_TRAV_NFP_UNSPEC: 1327 mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); 1328 break; 1329 case MTTG_TRAV_NFP_SPEC: 1330 mutex_unlock(&xt[trav->nfproto].mutex); 1331 break; 1332 } 1333 } 1334 1335 static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) 1336 { 1337 return xt_mttg_seq_start(seq, pos, false); 1338 } 1339 1340 static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos) 1341 { 1342 return xt_mttg_seq_next(seq, v, ppos, false); 1343 } 1344 1345 static int xt_match_seq_show(struct seq_file *seq, void *v) 1346 { 1347 const struct nf_mttg_trav *trav = seq->private; 1348 const struct xt_match *match; 1349 1350 switch (trav->class) { 1351 case MTTG_TRAV_NFP_UNSPEC: 1352 case MTTG_TRAV_NFP_SPEC: 1353 if (trav->curr == trav->head) 1354 return 0; 1355 match = list_entry(trav->curr, struct xt_match, list); 1356 if (*match->name) 1357 seq_printf(seq, "%s\n", match->name); 1358 } 1359 return 0; 1360 } 1361 1362 static const struct seq_operations xt_match_seq_ops = { 1363 .start = xt_match_seq_start, 1364 .next = xt_match_seq_next, 1365 .stop = xt_mttg_seq_stop, 1366 .show = xt_match_seq_show, 1367 }; 1368 1369 static int xt_match_open(struct inode *inode, struct file *file) 1370 { 1371 struct nf_mttg_trav *trav; 1372 trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav)); 1373 if (!trav) 1374 return -ENOMEM; 1375 1376 trav->nfproto = (unsigned long)PDE_DATA(inode); 1377 return 0; 1378 } 1379 1380 static const struct file_operations xt_match_ops = { 1381 .owner = THIS_MODULE, 1382 .open = xt_match_open, 1383 .read = seq_read, 1384 .llseek = seq_lseek, 1385 .release = seq_release_private, 1386 }; 1387 1388 static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos) 1389 { 1390 return xt_mttg_seq_start(seq, pos, true); 1391 } 1392 1393 static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos) 1394 { 1395 return xt_mttg_seq_next(seq, v, ppos, true); 1396 } 1397 1398 static int xt_target_seq_show(struct seq_file *seq, void *v) 1399 { 1400 const struct nf_mttg_trav *trav = seq->private; 1401 const struct xt_target *target; 1402 1403 switch (trav->class) { 1404 case MTTG_TRAV_NFP_UNSPEC: 1405 case MTTG_TRAV_NFP_SPEC: 1406 if (trav->curr == trav->head) 1407 return 0; 1408 target = list_entry(trav->curr, struct xt_target, list); 1409 if (*target->name) 1410 seq_printf(seq, "%s\n", target->name); 1411 } 1412 return 0; 1413 } 1414 1415 static const struct seq_operations xt_target_seq_ops = { 1416 .start = xt_target_seq_start, 1417 .next = xt_target_seq_next, 1418 .stop = xt_mttg_seq_stop, 1419 .show = xt_target_seq_show, 1420 }; 1421 1422 static int xt_target_open(struct inode *inode, struct file *file) 1423 { 1424 struct nf_mttg_trav *trav; 1425 trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav)); 1426 if (!trav) 1427 return -ENOMEM; 1428 1429 trav->nfproto = (unsigned long)PDE_DATA(inode); 1430 return 0; 1431 } 1432 1433 static const struct file_operations xt_target_ops = { 1434 .owner = THIS_MODULE, 1435 .open = xt_target_open, 1436 .read = seq_read, 1437 .llseek = seq_lseek, 1438 .release = seq_release_private, 1439 }; 1440 1441 #define FORMAT_TABLES "_tables_names" 1442 #define FORMAT_MATCHES "_tables_matches" 1443 #define FORMAT_TARGETS "_tables_targets" 1444 1445 #endif /* CONFIG_PROC_FS */ 1446 1447 /** 1448 * xt_hook_ops_alloc - set up hooks for a new table 1449 * @table: table with metadata needed to set up hooks 1450 * @fn: Hook function 1451 * 1452 * This function will create the nf_hook_ops that the x_table needs 1453 * to hand to xt_hook_link_net(). 1454 */ 1455 struct nf_hook_ops * 1456 xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn) 1457 { 1458 unsigned int hook_mask = table->valid_hooks; 1459 uint8_t i, num_hooks = hweight32(hook_mask); 1460 uint8_t hooknum; 1461 struct nf_hook_ops *ops; 1462 1463 ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL); 1464 if (ops == NULL) 1465 return ERR_PTR(-ENOMEM); 1466 1467 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0; 1468 hook_mask >>= 1, ++hooknum) { 1469 if (!(hook_mask & 1)) 1470 continue; 1471 ops[i].hook = fn; 1472 ops[i].pf = table->af; 1473 ops[i].hooknum = hooknum; 1474 ops[i].priority = table->priority; 1475 ++i; 1476 } 1477 1478 return ops; 1479 } 1480 EXPORT_SYMBOL_GPL(xt_hook_ops_alloc); 1481 1482 int xt_proto_init(struct net *net, u_int8_t af) 1483 { 1484 #ifdef CONFIG_PROC_FS 1485 char buf[XT_FUNCTION_MAXNAMELEN]; 1486 struct proc_dir_entry *proc; 1487 kuid_t root_uid; 1488 kgid_t root_gid; 1489 #endif 1490 1491 if (af >= ARRAY_SIZE(xt_prefix)) 1492 return -EINVAL; 1493 1494 1495 #ifdef CONFIG_PROC_FS 1496 root_uid = make_kuid(net->user_ns, 0); 1497 root_gid = make_kgid(net->user_ns, 0); 1498 1499 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1500 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1501 proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops, 1502 (void *)(unsigned long)af); 1503 if (!proc) 1504 goto out; 1505 if (uid_valid(root_uid) && gid_valid(root_gid)) 1506 proc_set_user(proc, root_uid, root_gid); 1507 1508 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1509 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1510 proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops, 1511 (void *)(unsigned long)af); 1512 if (!proc) 1513 goto out_remove_tables; 1514 if (uid_valid(root_uid) && gid_valid(root_gid)) 1515 proc_set_user(proc, root_uid, root_gid); 1516 1517 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1518 strlcat(buf, FORMAT_TARGETS, sizeof(buf)); 1519 proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops, 1520 (void *)(unsigned long)af); 1521 if (!proc) 1522 goto out_remove_matches; 1523 if (uid_valid(root_uid) && gid_valid(root_gid)) 1524 proc_set_user(proc, root_uid, root_gid); 1525 #endif 1526 1527 return 0; 1528 1529 #ifdef CONFIG_PROC_FS 1530 out_remove_matches: 1531 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1532 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1533 remove_proc_entry(buf, net->proc_net); 1534 1535 out_remove_tables: 1536 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1537 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1538 remove_proc_entry(buf, net->proc_net); 1539 out: 1540 return -1; 1541 #endif 1542 } 1543 EXPORT_SYMBOL_GPL(xt_proto_init); 1544 1545 void xt_proto_fini(struct net *net, u_int8_t af) 1546 { 1547 #ifdef CONFIG_PROC_FS 1548 char buf[XT_FUNCTION_MAXNAMELEN]; 1549 1550 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1551 strlcat(buf, FORMAT_TABLES, sizeof(buf)); 1552 remove_proc_entry(buf, net->proc_net); 1553 1554 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1555 strlcat(buf, FORMAT_TARGETS, sizeof(buf)); 1556 remove_proc_entry(buf, net->proc_net); 1557 1558 strlcpy(buf, xt_prefix[af], sizeof(buf)); 1559 strlcat(buf, FORMAT_MATCHES, sizeof(buf)); 1560 remove_proc_entry(buf, net->proc_net); 1561 #endif /*CONFIG_PROC_FS*/ 1562 } 1563 EXPORT_SYMBOL_GPL(xt_proto_fini); 1564 1565 static int __net_init xt_net_init(struct net *net) 1566 { 1567 int i; 1568 1569 for (i = 0; i < NFPROTO_NUMPROTO; i++) 1570 INIT_LIST_HEAD(&net->xt.tables[i]); 1571 return 0; 1572 } 1573 1574 static struct pernet_operations xt_net_ops = { 1575 .init = xt_net_init, 1576 }; 1577 1578 static int __init xt_init(void) 1579 { 1580 unsigned int i; 1581 int rv; 1582 1583 for_each_possible_cpu(i) { 1584 seqcount_init(&per_cpu(xt_recseq, i)); 1585 } 1586 1587 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); 1588 if (!xt) 1589 return -ENOMEM; 1590 1591 for (i = 0; i < NFPROTO_NUMPROTO; i++) { 1592 mutex_init(&xt[i].mutex); 1593 #ifdef CONFIG_COMPAT 1594 mutex_init(&xt[i].compat_mutex); 1595 xt[i].compat_tab = NULL; 1596 #endif 1597 INIT_LIST_HEAD(&xt[i].target); 1598 INIT_LIST_HEAD(&xt[i].match); 1599 } 1600 rv = register_pernet_subsys(&xt_net_ops); 1601 if (rv < 0) 1602 kfree(xt); 1603 return rv; 1604 } 1605 1606 static void __exit xt_fini(void) 1607 { 1608 unregister_pernet_subsys(&xt_net_ops); 1609 kfree(xt); 1610 } 1611 1612 module_init(xt_init); 1613 module_exit(xt_fini); 1614 1615