#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;

struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fanotify_event_info *event;
};

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}

static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can
	 * read even if the watched file was originally opened O_WRONLY.
	 * FMODE_NONOTIFY keeps the listener's own accesses through this
	 * file from generating further fanotify events.
	 */
	/*
	 * This may have been an overflow event, in which case dentry and
	 * mnt are NULL; that's fine, just don't call dentry_open().
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file.
		 * This can happen, say, when the task is gone and we try
		 * to open its /proc files, or when we try to open a
		 * WRONLY file like one in sysfs. We just send the errno
		 * to userspace since there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}
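
/*
 * Fill in the fixed-size metadata record that fanotify_read() hands to
 * userspace. An fd opened against the event's path is created here (or
 * FAN_NOFD for queue-overflow events); the struct file is returned via
 * @file and installed only after the copy to userspace succeeds.
 */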
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid. If it is invalid we do nothing;
	 * either userspace can send a valid response or we will clean the
	 * entry up after the timeout.
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}

static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = FANOTIFY_E(event);
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);

	if (atomic_read(&group->fanotify_data.bypass_perm)) {
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		FANOTIFY_E(event)->response = FAN_ALLOW;
		return 0;
	}

	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}

#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

#endif

static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		goto out;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		FANOTIFY_E(event)->response = FAN_DENY;
		wake_up(&group->fanotify_data.access_waitq);
	}
#endif
	return ret;
}
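
/*
 * A hedged sketch of the userspace read loop this path serves (illustrative
 * only, not part of the kernel build). Each event carries an open fd that
 * the listener must close itself:
 *
 *	char buf[4096];
 *	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md = (void *)buf;
 *
 *	while (FAN_EVENT_OK(md, len)) {
 *		if (md->vers != FANOTIFY_METADATA_VERSION)
 *			break;
 *		handle_event(md->mask, md->pid);
 *		if (md->fd >= 0)
 *			close(md->fd);
 *		md = FAN_EVENT_NEXT(md, len);
 *	}
 *
 * handle_event() is a hypothetical consumer; FAN_EVENT_OK() and
 * FAN_EVENT_NEXT() are the iteration macros from <linux/fanotify.h>.
 */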
/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			/*
			 * Permission events get destroyed after we
			 * receive the response.
			 */
			if (!(kevent->mask & FAN_ALL_PERM_EVENTS))
				fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static ssize_t fanotify_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}
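
/*
 * A hedged sketch of how userspace answers a permission event through the
 * write() handler above (illustrative only, not part of the kernel build):
 *
 *	struct fanotify_response resp = {
 *		.fd = md->fd,
 *		.response = FAN_ALLOW,
 *	};
 *	write(fanotify_fd, &resp, sizeof(resp));
 *
 * md is the event metadata read earlier; FAN_DENY refuses the access
 * instead, and any other value is rejected by process_access_response().
 */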
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response_event *re, *lre;

	mutex_lock(&group->fanotify_data.access_mutex);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;

		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
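
/*
 * Clear @mask bits from a mark's event mask (or from its ignored mask, with
 * FAN_MARK_IGNORED_MASK). Returns the bits that were actually cleared and
 * sets *destroy when nothing remains. For example, removing FAN_OPEN from a
 * mark whose mask is FAN_OPEN | FAN_CLOSE returns FAN_OPEN, leaves FAN_CLOSE
 * set, and does not flag the mark for destruction.
 */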
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	*destroy = !(oldmask & ~mask);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, fanotify_free_mark);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}
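
/*
 * Find the group's mark on @mnt, creating one if none exists, then OR the
 * requested bits into it. Only newly-added bits force a recalculation of
 * the mount-wide event mask.
 */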
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

/* fanotify syscalls */
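/*
 * A hedged sketch of a typical caller of the two syscalls below (illustrative
 * only, not part of the kernel build): create a notification-class group,
 * then watch a whole mount for opens and closes:
 *
 *	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF, O_RDONLY);
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/");
 *
 * Note fanotify_init() requires CAP_SYS_ADMIN, and permission events
 * (FAN_OPEN_PERM etc.) additionally require FAN_CLASS_CONTENT or
 * FAN_CLASS_PRE_CONTENT.
 */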
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}
SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
		__u64, mask, int, dfd,
		const char __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not
	 * allowed to set permission events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode or vfsmount mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

#ifdef CONFIG_COMPAT
/* the 64-bit mask arrives in two 32-bit halves; reassemble in native endianness */
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function. Note that we cannot
 * return an error because we have compiled-in VFS hooks. So an (unlikely)
 * failure here must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);