#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference. Some.
 */
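
/*
 * A worked example of the refcounting rules above: a tree referenced by
 * one rule on rules_list and covered by two chunks has tree->count == 3 -
 * alloc_tree() starts the count at 1 for the rule, and each chunk holding
 * a pointer in owners[].owner takes one more via get_tree().  Removing
 * the rule and untagging both chunks drops the count to zero and frees
 * the tree through kfree_rcu().
 */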

static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
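
/*
 * chunk_hash() relies on struct inode being slab-allocated: the low
 * log2(L1_CACHE_BYTES) bits of the pointer are always zero, so dividing
 * by L1_CACHE_BYTES discards them and lets the modulo pick up the
 * "middle bits of watch.inode" mentioned in the comment at the top.
 * E.g. with 64-byte cachelines, an inode at address A lands in bucket
 * (A / 64) % 128.
 */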

/* hash_lock & entry->lock are held by the caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->inode)
		return;
	list = chunk_hash(entry->inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
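
/*
 * The arithmetic in find_chunk() works because owners[] is a flexible
 * array at the end of struct audit_chunk and node->index (modulo the
 * "will prune" MSB) is the element's position in it: for owners[2],
 * index & ~(1U<<31) == 2, so p - 2 points at owners[0] and container_of()
 * recovers the chunk.  Note also that untag_chunk() below is entered with
 * hash_lock held, drops it while allocating and juggling mark locks, and
 * re-acquires it before returning - that is what lets callers such as
 * prune_one() keep looping under a single spin_lock(&hash_lock).
 */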

static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->inode) {
		spin_unlock(&entry->lock);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}
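
/*
 * Chunks never grow or shrink in place.  To add a tree to an already
 * tagged inode, tag_chunk() below allocates a replacement with room for
 * one more owner, copies the surviving owners across, and swaps it into
 * the hash with list_replace_rcu(), so lockless readers always see either
 * the complete old chunk or the complete new one.
 */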

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/*
	 * Even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could NEVER have been grabbed before.
	 */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find_inode_mark */
	return 0;
}
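
/*
 * After the list_replace_rcu() above, a concurrent audit_tree_lookup()
 * may still return the old, now dead chunk; that is harmless, since the
 * reference it takes on ->refs keeps the old chunk (and its owners[])
 * valid until audit_put_chunk() drops the last reference after the RCU
 * grace period.
 */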

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}
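
/*
 * The "will prune" MSB in node->index implements a small mark-and-sweep:
 * audit_trim_trees() marks every chunk of a tree and then clears the bit
 * on the chunks that should survive, while tag_chunk()/create_chunk()
 * leave newly added nodes marked until audit_add_tree_rule() or
 * audit_tag_tree() commits them.  trim_marked() below sweeps whatever is
 * still marked; its reorder pass moves marked nodes to the front of
 * tree->chunks so the sweep can stop at the first unmarked node.
 */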

/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return d_backing_inode(mnt->mnt_root) == arg;
}

void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}
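
/*
 * Untagging a chunk ends in fsnotify_destroy_mark(), which cannot safely
 * be called from the ->freeing_mark() callback that triggers
 * evict_chunk().  Victims are therefore queued on prune_list and reaped
 * by the kthread below (or, when a syscall is in flight, handled
 * synchronously by audit_kill_trees() at syscall exit).
 */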

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}
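
/*
 * audit_tag_tree() walks tree_list while repeatedly dropping
 * audit_filter_mutex, so it keeps its place with two dummy entries:
 * "cursor" marks the walk position across the unlock, and trees that were
 * successfully retagged are moved to the head of the list, in front of
 * "barrier".  The second loop then visits exactly those processed trees
 * and either commits them (clearing the "will prune" bits) or trims them
 * if tagging failed.
 */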

int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);