1 #include <linux/fanotify.h> 2 #include <linux/fcntl.h> 3 #include <linux/file.h> 4 #include <linux/fs.h> 5 #include <linux/anon_inodes.h> 6 #include <linux/fsnotify_backend.h> 7 #include <linux/init.h> 8 #include <linux/mount.h> 9 #include <linux/namei.h> 10 #include <linux/poll.h> 11 #include <linux/security.h> 12 #include <linux/syscalls.h> 13 #include <linux/slab.h> 14 #include <linux/types.h> 15 #include <linux/uaccess.h> 16 17 #include <asm/ioctls.h> 18 19 #include "../../mount.h" 20 #include "../fdinfo.h" 21 22 #define FANOTIFY_DEFAULT_MAX_EVENTS 16384 23 #define FANOTIFY_DEFAULT_MAX_MARKS 8192 24 #define FANOTIFY_DEFAULT_MAX_LISTENERS 128 25 26 extern const struct fsnotify_ops fanotify_fsnotify_ops; 27 28 static struct kmem_cache *fanotify_mark_cache __read_mostly; 29 static struct kmem_cache *fanotify_response_event_cache __read_mostly; 30 31 struct fanotify_response_event { 32 struct list_head list; 33 __s32 fd; 34 struct fsnotify_event *event; 35 }; 36 37 /* 38 * Get an fsnotify notification event if one exists and is small 39 * enough to fit in "count". Return an error pointer if the count 40 * is not large enough. 41 * 42 * Called with the group->notification_mutex held. 43 */ 44 static struct fsnotify_event *get_one_event(struct fsnotify_group *group, 45 size_t count) 46 { 47 BUG_ON(!mutex_is_locked(&group->notification_mutex)); 48 49 pr_debug("%s: group=%p count=%zd\n", __func__, group, count); 50 51 if (fsnotify_notify_queue_is_empty(group)) 52 return NULL; 53 54 if (FAN_EVENT_METADATA_LEN > count) 55 return ERR_PTR(-EINVAL); 56 57 /* held the notification_mutex the whole time, so this is the 58 * same event we peeked above */ 59 return fsnotify_remove_notify_event(group); 60 } 61 62 static int create_fd(struct fsnotify_group *group, 63 struct fsnotify_event *event, 64 struct file **file) 65 { 66 int client_fd; 67 struct file *new_file; 68 69 pr_debug("%s: group=%p event=%p\n", __func__, group, event); 70 71 client_fd = get_unused_fd(); 72 if (client_fd < 0) 73 return client_fd; 74 75 if (event->data_type != FSNOTIFY_EVENT_PATH) { 76 WARN_ON(1); 77 put_unused_fd(client_fd); 78 return -EINVAL; 79 } 80 81 /* 82 * we need a new file handle for the userspace program so it can read even if it was 83 * originally opened O_WRONLY. 84 */ 85 /* it's possible this event was an overflow event. in that case dentry and mnt 86 * are NULL; That's fine, just don't call dentry open */ 87 if (event->path.dentry && event->path.mnt) 88 new_file = dentry_open(&event->path, 89 group->fanotify_data.f_flags | FMODE_NONOTIFY, 90 current_cred()); 91 else 92 new_file = ERR_PTR(-EOVERFLOW); 93 if (IS_ERR(new_file)) { 94 /* 95 * we still send an event even if we can't open the file. this 96 * can happen when say tasks are gone and we try to open their 97 * /proc files or we try to open a WRONLY file like in sysfs 98 * we just send the errno to userspace since there isn't much 99 * else we can do. 100 */ 101 put_unused_fd(client_fd); 102 client_fd = PTR_ERR(new_file); 103 } else { 104 *file = new_file; 105 } 106 107 return client_fd; 108 } 109 110 static int fill_event_metadata(struct fsnotify_group *group, 111 struct fanotify_event_metadata *metadata, 112 struct fsnotify_event *event, 113 struct file **file) 114 { 115 int ret = 0; 116 117 pr_debug("%s: group=%p metadata=%p event=%p\n", __func__, 118 group, metadata, event); 119 120 *file = NULL; 121 metadata->event_len = FAN_EVENT_METADATA_LEN; 122 metadata->metadata_len = FAN_EVENT_METADATA_LEN; 123 metadata->vers = FANOTIFY_METADATA_VERSION; 124 metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS; 125 metadata->pid = pid_vnr(event->tgid); 126 if (unlikely(event->mask & FAN_Q_OVERFLOW)) 127 metadata->fd = FAN_NOFD; 128 else { 129 metadata->fd = create_fd(group, event, file); 130 if (metadata->fd < 0) 131 ret = metadata->fd; 132 } 133 134 return ret; 135 } 136 137 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS 138 static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group, 139 __s32 fd) 140 { 141 struct fanotify_response_event *re, *return_re = NULL; 142 143 mutex_lock(&group->fanotify_data.access_mutex); 144 list_for_each_entry(re, &group->fanotify_data.access_list, list) { 145 if (re->fd != fd) 146 continue; 147 148 list_del_init(&re->list); 149 return_re = re; 150 break; 151 } 152 mutex_unlock(&group->fanotify_data.access_mutex); 153 154 pr_debug("%s: found return_re=%p\n", __func__, return_re); 155 156 return return_re; 157 } 158 159 static int process_access_response(struct fsnotify_group *group, 160 struct fanotify_response *response_struct) 161 { 162 struct fanotify_response_event *re; 163 __s32 fd = response_struct->fd; 164 __u32 response = response_struct->response; 165 166 pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group, 167 fd, response); 168 /* 169 * make sure the response is valid, if invalid we do nothing and either 170 * userspace can send a valid response or we will clean it up after the 171 * timeout 172 */ 173 switch (response) { 174 case FAN_ALLOW: 175 case FAN_DENY: 176 break; 177 default: 178 return -EINVAL; 179 } 180 181 if (fd < 0) 182 return -EINVAL; 183 184 re = dequeue_re(group, fd); 185 if (!re) 186 return -ENOENT; 187 188 re->event->response = response; 189 190 wake_up(&group->fanotify_data.access_waitq); 191 192 kmem_cache_free(fanotify_response_event_cache, re); 193 194 return 0; 195 } 196 197 static int prepare_for_access_response(struct fsnotify_group *group, 198 struct fsnotify_event *event, 199 __s32 fd) 200 { 201 struct fanotify_response_event *re; 202 203 if (!(event->mask & FAN_ALL_PERM_EVENTS)) 204 return 0; 205 206 re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL); 207 if (!re) 208 return -ENOMEM; 209 210 re->event = event; 211 re->fd = fd; 212 213 mutex_lock(&group->fanotify_data.access_mutex); 214 215 if (atomic_read(&group->fanotify_data.bypass_perm)) { 216 mutex_unlock(&group->fanotify_data.access_mutex); 217 kmem_cache_free(fanotify_response_event_cache, re); 218 event->response = FAN_ALLOW; 219 return 0; 220 } 221 222 list_add_tail(&re->list, &group->fanotify_data.access_list); 223 mutex_unlock(&group->fanotify_data.access_mutex); 224 225 return 0; 226 } 227 228 #else 229 static int prepare_for_access_response(struct fsnotify_group *group, 230 struct fsnotify_event *event, 231 __s32 fd) 232 { 233 return 0; 234 } 235 236 #endif 237 238 static ssize_t copy_event_to_user(struct fsnotify_group *group, 239 struct fsnotify_event *event, 240 char __user *buf) 241 { 242 struct fanotify_event_metadata fanotify_event_metadata; 243 struct file *f; 244 int fd, ret; 245 246 pr_debug("%s: group=%p event=%p\n", __func__, group, event); 247 248 ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f); 249 if (ret < 0) 250 goto out; 251 252 fd = fanotify_event_metadata.fd; 253 ret = -EFAULT; 254 if (copy_to_user(buf, &fanotify_event_metadata, 255 fanotify_event_metadata.event_len)) 256 goto out_close_fd; 257 258 ret = prepare_for_access_response(group, event, fd); 259 if (ret) 260 goto out_close_fd; 261 262 if (fd != FAN_NOFD) 263 fd_install(fd, f); 264 return fanotify_event_metadata.event_len; 265 266 out_close_fd: 267 if (fd != FAN_NOFD) { 268 put_unused_fd(fd); 269 fput(f); 270 } 271 out: 272 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS 273 if (event->mask & FAN_ALL_PERM_EVENTS) { 274 event->response = FAN_DENY; 275 wake_up(&group->fanotify_data.access_waitq); 276 } 277 #endif 278 return ret; 279 } 280 281 /* intofiy userspace file descriptor functions */ 282 static unsigned int fanotify_poll(struct file *file, poll_table *wait) 283 { 284 struct fsnotify_group *group = file->private_data; 285 int ret = 0; 286 287 poll_wait(file, &group->notification_waitq, wait); 288 mutex_lock(&group->notification_mutex); 289 if (!fsnotify_notify_queue_is_empty(group)) 290 ret = POLLIN | POLLRDNORM; 291 mutex_unlock(&group->notification_mutex); 292 293 return ret; 294 } 295 296 static ssize_t fanotify_read(struct file *file, char __user *buf, 297 size_t count, loff_t *pos) 298 { 299 struct fsnotify_group *group; 300 struct fsnotify_event *kevent; 301 char __user *start; 302 int ret; 303 DEFINE_WAIT(wait); 304 305 start = buf; 306 group = file->private_data; 307 308 pr_debug("%s: group=%p\n", __func__, group); 309 310 while (1) { 311 prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE); 312 313 mutex_lock(&group->notification_mutex); 314 kevent = get_one_event(group, count); 315 mutex_unlock(&group->notification_mutex); 316 317 if (kevent) { 318 ret = PTR_ERR(kevent); 319 if (IS_ERR(kevent)) 320 break; 321 ret = copy_event_to_user(group, kevent, buf); 322 fsnotify_put_event(kevent); 323 if (ret < 0) 324 break; 325 buf += ret; 326 count -= ret; 327 continue; 328 } 329 330 ret = -EAGAIN; 331 if (file->f_flags & O_NONBLOCK) 332 break; 333 ret = -ERESTARTSYS; 334 if (signal_pending(current)) 335 break; 336 337 if (start != buf) 338 break; 339 340 schedule(); 341 } 342 343 finish_wait(&group->notification_waitq, &wait); 344 if (start != buf && ret != -EFAULT) 345 ret = buf - start; 346 return ret; 347 } 348 349 static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) 350 { 351 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS 352 struct fanotify_response response = { .fd = -1, .response = -1 }; 353 struct fsnotify_group *group; 354 int ret; 355 356 group = file->private_data; 357 358 if (count > sizeof(response)) 359 count = sizeof(response); 360 361 pr_debug("%s: group=%p count=%zu\n", __func__, group, count); 362 363 if (copy_from_user(&response, buf, count)) 364 return -EFAULT; 365 366 ret = process_access_response(group, &response); 367 if (ret < 0) 368 count = ret; 369 370 return count; 371 #else 372 return -EINVAL; 373 #endif 374 } 375 376 static int fanotify_release(struct inode *ignored, struct file *file) 377 { 378 struct fsnotify_group *group = file->private_data; 379 380 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS 381 struct fanotify_response_event *re, *lre; 382 383 mutex_lock(&group->fanotify_data.access_mutex); 384 385 atomic_inc(&group->fanotify_data.bypass_perm); 386 387 list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) { 388 pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group, 389 re, re->event); 390 391 list_del_init(&re->list); 392 re->event->response = FAN_ALLOW; 393 394 kmem_cache_free(fanotify_response_event_cache, re); 395 } 396 mutex_unlock(&group->fanotify_data.access_mutex); 397 398 wake_up(&group->fanotify_data.access_waitq); 399 #endif 400 401 if (file->f_flags & FASYNC) 402 fsnotify_fasync(-1, file, 0); 403 404 /* matches the fanotify_init->fsnotify_alloc_group */ 405 fsnotify_destroy_group(group); 406 407 return 0; 408 } 409 410 static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 411 { 412 struct fsnotify_group *group; 413 struct fsnotify_event_holder *holder; 414 void __user *p; 415 int ret = -ENOTTY; 416 size_t send_len = 0; 417 418 group = file->private_data; 419 420 p = (void __user *) arg; 421 422 switch (cmd) { 423 case FIONREAD: 424 mutex_lock(&group->notification_mutex); 425 list_for_each_entry(holder, &group->notification_list, event_list) 426 send_len += FAN_EVENT_METADATA_LEN; 427 mutex_unlock(&group->notification_mutex); 428 ret = put_user(send_len, (int __user *) p); 429 break; 430 } 431 432 return ret; 433 } 434 435 static const struct file_operations fanotify_fops = { 436 .show_fdinfo = fanotify_show_fdinfo, 437 .poll = fanotify_poll, 438 .read = fanotify_read, 439 .write = fanotify_write, 440 .fasync = NULL, 441 .release = fanotify_release, 442 .unlocked_ioctl = fanotify_ioctl, 443 .compat_ioctl = fanotify_ioctl, 444 .llseek = noop_llseek, 445 }; 446 447 static void fanotify_free_mark(struct fsnotify_mark *fsn_mark) 448 { 449 kmem_cache_free(fanotify_mark_cache, fsn_mark); 450 } 451 452 static int fanotify_find_path(int dfd, const char __user *filename, 453 struct path *path, unsigned int flags) 454 { 455 int ret; 456 457 pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__, 458 dfd, filename, flags); 459 460 if (filename == NULL) { 461 struct fd f = fdget(dfd); 462 463 ret = -EBADF; 464 if (!f.file) 465 goto out; 466 467 ret = -ENOTDIR; 468 if ((flags & FAN_MARK_ONLYDIR) && 469 !(S_ISDIR(f.file->f_path.dentry->d_inode->i_mode))) { 470 fdput(f); 471 goto out; 472 } 473 474 *path = f.file->f_path; 475 path_get(path); 476 fdput(f); 477 } else { 478 unsigned int lookup_flags = 0; 479 480 if (!(flags & FAN_MARK_DONT_FOLLOW)) 481 lookup_flags |= LOOKUP_FOLLOW; 482 if (flags & FAN_MARK_ONLYDIR) 483 lookup_flags |= LOOKUP_DIRECTORY; 484 485 ret = user_path_at(dfd, filename, lookup_flags, path); 486 if (ret) 487 goto out; 488 } 489 490 /* you can only watch an inode if you have read permissions on it */ 491 ret = inode_permission(path->dentry->d_inode, MAY_READ); 492 if (ret) 493 path_put(path); 494 out: 495 return ret; 496 } 497 498 static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, 499 __u32 mask, 500 unsigned int flags, 501 int *destroy) 502 { 503 __u32 oldmask; 504 505 spin_lock(&fsn_mark->lock); 506 if (!(flags & FAN_MARK_IGNORED_MASK)) { 507 oldmask = fsn_mark->mask; 508 fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask)); 509 } else { 510 oldmask = fsn_mark->ignored_mask; 511 fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask)); 512 } 513 spin_unlock(&fsn_mark->lock); 514 515 *destroy = !(oldmask & ~mask); 516 517 return mask & oldmask; 518 } 519 520 static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group, 521 struct vfsmount *mnt, __u32 mask, 522 unsigned int flags) 523 { 524 struct fsnotify_mark *fsn_mark = NULL; 525 __u32 removed; 526 int destroy_mark; 527 528 fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); 529 if (!fsn_mark) 530 return -ENOENT; 531 532 removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, 533 &destroy_mark); 534 if (destroy_mark) 535 fsnotify_destroy_mark(fsn_mark, group); 536 537 fsnotify_put_mark(fsn_mark); 538 if (removed & real_mount(mnt)->mnt_fsnotify_mask) 539 fsnotify_recalc_vfsmount_mask(mnt); 540 541 return 0; 542 } 543 544 static int fanotify_remove_inode_mark(struct fsnotify_group *group, 545 struct inode *inode, __u32 mask, 546 unsigned int flags) 547 { 548 struct fsnotify_mark *fsn_mark = NULL; 549 __u32 removed; 550 int destroy_mark; 551 552 fsn_mark = fsnotify_find_inode_mark(group, inode); 553 if (!fsn_mark) 554 return -ENOENT; 555 556 removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, 557 &destroy_mark); 558 if (destroy_mark) 559 fsnotify_destroy_mark(fsn_mark, group); 560 /* matches the fsnotify_find_inode_mark() */ 561 fsnotify_put_mark(fsn_mark); 562 if (removed & inode->i_fsnotify_mask) 563 fsnotify_recalc_inode_mask(inode); 564 565 return 0; 566 } 567 568 static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, 569 __u32 mask, 570 unsigned int flags) 571 { 572 __u32 oldmask = -1; 573 574 spin_lock(&fsn_mark->lock); 575 if (!(flags & FAN_MARK_IGNORED_MASK)) { 576 oldmask = fsn_mark->mask; 577 fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask)); 578 } else { 579 __u32 tmask = fsn_mark->ignored_mask | mask; 580 fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask); 581 if (flags & FAN_MARK_IGNORED_SURV_MODIFY) 582 fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY; 583 } 584 585 if (!(flags & FAN_MARK_ONDIR)) { 586 __u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR; 587 fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask); 588 } 589 590 spin_unlock(&fsn_mark->lock); 591 592 return mask & ~oldmask; 593 } 594 595 static int fanotify_add_vfsmount_mark(struct fsnotify_group *group, 596 struct vfsmount *mnt, __u32 mask, 597 unsigned int flags) 598 { 599 struct fsnotify_mark *fsn_mark; 600 __u32 added; 601 int ret = 0; 602 603 fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); 604 if (!fsn_mark) { 605 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) 606 return -ENOSPC; 607 608 fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); 609 if (!fsn_mark) 610 return -ENOMEM; 611 612 fsnotify_init_mark(fsn_mark, fanotify_free_mark); 613 ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0); 614 if (ret) 615 goto err; 616 } 617 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); 618 619 if (added & ~real_mount(mnt)->mnt_fsnotify_mask) 620 fsnotify_recalc_vfsmount_mask(mnt); 621 err: 622 fsnotify_put_mark(fsn_mark); 623 return ret; 624 } 625 626 static int fanotify_add_inode_mark(struct fsnotify_group *group, 627 struct inode *inode, __u32 mask, 628 unsigned int flags) 629 { 630 struct fsnotify_mark *fsn_mark; 631 __u32 added; 632 int ret = 0; 633 634 pr_debug("%s: group=%p inode=%p\n", __func__, group, inode); 635 636 /* 637 * If some other task has this inode open for write we should not add 638 * an ignored mark, unless that ignored mark is supposed to survive 639 * modification changes anyway. 640 */ 641 if ((flags & FAN_MARK_IGNORED_MASK) && 642 !(flags & FAN_MARK_IGNORED_SURV_MODIFY) && 643 (atomic_read(&inode->i_writecount) > 0)) 644 return 0; 645 646 fsn_mark = fsnotify_find_inode_mark(group, inode); 647 if (!fsn_mark) { 648 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) 649 return -ENOSPC; 650 651 fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); 652 if (!fsn_mark) 653 return -ENOMEM; 654 655 fsnotify_init_mark(fsn_mark, fanotify_free_mark); 656 ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0); 657 if (ret) 658 goto err; 659 } 660 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); 661 662 if (added & ~inode->i_fsnotify_mask) 663 fsnotify_recalc_inode_mask(inode); 664 err: 665 fsnotify_put_mark(fsn_mark); 666 return ret; 667 } 668 669 /* fanotify syscalls */ 670 SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) 671 { 672 struct fsnotify_group *group; 673 int f_flags, fd; 674 struct user_struct *user; 675 676 pr_debug("%s: flags=%d event_f_flags=%d\n", 677 __func__, flags, event_f_flags); 678 679 if (!capable(CAP_SYS_ADMIN)) 680 return -EPERM; 681 682 if (flags & ~FAN_ALL_INIT_FLAGS) 683 return -EINVAL; 684 685 user = get_current_user(); 686 if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) { 687 free_uid(user); 688 return -EMFILE; 689 } 690 691 f_flags = O_RDWR | FMODE_NONOTIFY; 692 if (flags & FAN_CLOEXEC) 693 f_flags |= O_CLOEXEC; 694 if (flags & FAN_NONBLOCK) 695 f_flags |= O_NONBLOCK; 696 697 /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */ 698 group = fsnotify_alloc_group(&fanotify_fsnotify_ops); 699 if (IS_ERR(group)) { 700 free_uid(user); 701 return PTR_ERR(group); 702 } 703 704 group->fanotify_data.user = user; 705 atomic_inc(&user->fanotify_listeners); 706 707 group->fanotify_data.f_flags = event_f_flags; 708 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS 709 mutex_init(&group->fanotify_data.access_mutex); 710 init_waitqueue_head(&group->fanotify_data.access_waitq); 711 INIT_LIST_HEAD(&group->fanotify_data.access_list); 712 atomic_set(&group->fanotify_data.bypass_perm, 0); 713 #endif 714 switch (flags & FAN_ALL_CLASS_BITS) { 715 case FAN_CLASS_NOTIF: 716 group->priority = FS_PRIO_0; 717 break; 718 case FAN_CLASS_CONTENT: 719 group->priority = FS_PRIO_1; 720 break; 721 case FAN_CLASS_PRE_CONTENT: 722 group->priority = FS_PRIO_2; 723 break; 724 default: 725 fd = -EINVAL; 726 goto out_destroy_group; 727 } 728 729 if (flags & FAN_UNLIMITED_QUEUE) { 730 fd = -EPERM; 731 if (!capable(CAP_SYS_ADMIN)) 732 goto out_destroy_group; 733 group->max_events = UINT_MAX; 734 } else { 735 group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS; 736 } 737 738 if (flags & FAN_UNLIMITED_MARKS) { 739 fd = -EPERM; 740 if (!capable(CAP_SYS_ADMIN)) 741 goto out_destroy_group; 742 group->fanotify_data.max_marks = UINT_MAX; 743 } else { 744 group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS; 745 } 746 747 fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags); 748 if (fd < 0) 749 goto out_destroy_group; 750 751 return fd; 752 753 out_destroy_group: 754 fsnotify_destroy_group(group); 755 return fd; 756 } 757 758 SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags, 759 __u64 mask, int dfd, 760 const char __user * pathname) 761 { 762 struct inode *inode = NULL; 763 struct vfsmount *mnt = NULL; 764 struct fsnotify_group *group; 765 struct fd f; 766 struct path path; 767 int ret; 768 769 pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n", 770 __func__, fanotify_fd, flags, dfd, pathname, mask); 771 772 /* we only use the lower 32 bits as of right now. */ 773 if (mask & ((__u64)0xffffffff << 32)) 774 return -EINVAL; 775 776 if (flags & ~FAN_ALL_MARK_FLAGS) 777 return -EINVAL; 778 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { 779 case FAN_MARK_ADD: /* fallthrough */ 780 case FAN_MARK_REMOVE: 781 if (!mask) 782 return -EINVAL; 783 case FAN_MARK_FLUSH: 784 break; 785 default: 786 return -EINVAL; 787 } 788 789 if (mask & FAN_ONDIR) { 790 flags |= FAN_MARK_ONDIR; 791 mask &= ~FAN_ONDIR; 792 } 793 794 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS 795 if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD)) 796 #else 797 if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD)) 798 #endif 799 return -EINVAL; 800 801 f = fdget(fanotify_fd); 802 if (unlikely(!f.file)) 803 return -EBADF; 804 805 /* verify that this is indeed an fanotify instance */ 806 ret = -EINVAL; 807 if (unlikely(f.file->f_op != &fanotify_fops)) 808 goto fput_and_out; 809 group = f.file->private_data; 810 811 /* 812 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not 813 * allowed to set permissions events. 814 */ 815 ret = -EINVAL; 816 if (mask & FAN_ALL_PERM_EVENTS && 817 group->priority == FS_PRIO_0) 818 goto fput_and_out; 819 820 ret = fanotify_find_path(dfd, pathname, &path, flags); 821 if (ret) 822 goto fput_and_out; 823 824 /* inode held in place by reference to path; group by fget on fd */ 825 if (!(flags & FAN_MARK_MOUNT)) 826 inode = path.dentry->d_inode; 827 else 828 mnt = path.mnt; 829 830 /* create/update an inode mark */ 831 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { 832 case FAN_MARK_ADD: 833 if (flags & FAN_MARK_MOUNT) 834 ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags); 835 else 836 ret = fanotify_add_inode_mark(group, inode, mask, flags); 837 break; 838 case FAN_MARK_REMOVE: 839 if (flags & FAN_MARK_MOUNT) 840 ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags); 841 else 842 ret = fanotify_remove_inode_mark(group, inode, mask, flags); 843 break; 844 case FAN_MARK_FLUSH: 845 if (flags & FAN_MARK_MOUNT) 846 fsnotify_clear_vfsmount_marks_by_group(group); 847 else 848 fsnotify_clear_inode_marks_by_group(group); 849 break; 850 default: 851 ret = -EINVAL; 852 } 853 854 path_put(&path); 855 fput_and_out: 856 fdput(f); 857 return ret; 858 } 859 860 #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS 861 asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask, 862 long dfd, long pathname) 863 { 864 return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags, 865 mask, (int) dfd, 866 (const char __user *) pathname); 867 } 868 SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark); 869 #endif 870 871 /* 872 * fanotify_user_setup - Our initialization function. Note that we cannot return 873 * error because we have compiled-in VFS hooks. So an (unlikely) failure here 874 * must result in panic(). 875 */ 876 static int __init fanotify_user_setup(void) 877 { 878 fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC); 879 fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event, 880 SLAB_PANIC); 881 882 return 0; 883 } 884 device_initcall(fanotify_user_setup); 885