// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/memcontrol.h>
#include <linux/statfs.h>
#include <linux/exportfs.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_OLD_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_GROUPS	128

/*
 * The legacy fanotify marks limit (8192) is per group and we introduced a
 * tunable limit of marks per user, similar to inotify. Effectively, the
 * legacy limit of fanotify marks per user is
 * <max marks per group> * <max groups per user>.
 * This default limit (1M) also happens to match the increased limit of
 * inotify max_user_watches since v5.10.
 */
#define FANOTIFY_DEFAULT_MAX_USER_MARKS	\
	(FANOTIFY_OLD_DEFAULT_MAX_MARKS * FANOTIFY_DEFAULT_MAX_GROUPS)

/*
 * Most of the memory cost of adding an inode mark is pinning the marked inode.
 * The size of the filesystem inode struct is not uniform across filesystems,
 * so double the size of a VFS inode is used as a conservative approximation.
 */
#define INODE_MARK_COST	(2 * sizeof(struct inode))

/* configurable via /proc/sys/fs/fanotify/ */
static int fanotify_max_queued_events __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

struct ctl_table fanotify_table[] = {
	{
		.procname	= "max_user_groups",
		.data	= &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_user_marks",
		.data	= &init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_queued_events",
		.data		= &fanotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

/*
 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 *
 * Internal and external open flags are stored together in field f_flags of
 * struct file. Only external open flags shall be allowed in event_f_flags.
 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
 * excluded.
 */
#define FANOTIFY_INIT_ALL_EVENT_F_BITS	( \
		O_ACCMODE	| O_APPEND	| O_NONBLOCK	| \
		__O_SYNC	| O_DSYNC	| O_CLOEXEC	| \
		O_LARGEFILE	| O_NOATIME	)
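
/*
 * Example (illustrative userspace sketch, not kernel code): event_f_flags
 * passed to fanotify_init() may only combine the external open flags
 * accepted above, e.g.:
 *
 *	int fd = fanotify_init(FAN_CLASS_NOTIF,
 *			       O_RDONLY | O_CLOEXEC | O_LARGEFILE);
 *
 * Any bit outside FANOTIFY_INIT_ALL_EVENT_F_BITS, including internal flags
 * such as FMODE_NONOTIFY, fails the check in fanotify_init() with -EINVAL.
 */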

extern const struct fsnotify_ops fanotify_fsnotify_ops;

struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_fid_event_cachep __read_mostly;
struct kmem_cache *fanotify_path_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

#define FANOTIFY_EVENT_ALIGN 4
#define FANOTIFY_INFO_HDR_LEN \
	(sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle))

static int fanotify_fid_info_len(int fh_len, int name_len)
{
	int info_len = fh_len;

	if (name_len)
		info_len += name_len + 1;

	return roundup(FANOTIFY_INFO_HDR_LEN + info_len, FANOTIFY_EVENT_ALIGN);
}

static int fanotify_event_info_len(unsigned int fid_mode,
				   struct fanotify_event *event)
{
	struct fanotify_info *info = fanotify_event_info(event);
	int dir_fh_len = fanotify_event_dir_fh_len(event);
	int fh_len = fanotify_event_object_fh_len(event);
	int info_len = 0;
	int dot_len = 0;

	if (dir_fh_len) {
		info_len += fanotify_fid_info_len(dir_fh_len, info->name_len);
	} else if ((fid_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) {
		/*
		 * With group flag FAN_REPORT_NAME, if name was not recorded in
		 * event on a directory, we will report the name ".".
		 */
		dot_len = 1;
	}

	if (fh_len)
		info_len += fanotify_fid_info_len(fh_len, dot_len);

	return info_len;
}

/*
 * Remove a hashed event from merge hash table.
 */
static void fanotify_unhash_event(struct fsnotify_group *group,
				  struct fanotify_event *event)
{
	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p event=%p bucket=%u\n", __func__,
		 group, event, fanotify_event_hash_bucket(group, event));

	if (WARN_ON_ONCE(hlist_unhashed(&event->merge_list)))
		return;

	hlist_del_init(&event->merge_list);
}

/*
 * Get a fanotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough. When permission event is dequeued, its state is
 * updated accordingly.
 */
static struct fanotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = FAN_EVENT_METADATA_LEN;
	struct fanotify_event *event = NULL;
	struct fsnotify_event *fsn_event;
	unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	spin_lock(&group->notification_lock);
	fsn_event = fsnotify_peek_first_event(group);
	if (!fsn_event)
		goto out;

	event = FANOTIFY_E(fsn_event);
	if (fid_mode)
		event_size += fanotify_event_info_len(fid_mode, event);

	if (event_size > count) {
		event = ERR_PTR(-EINVAL);
		goto out;
	}

	/*
	 * Held the notification_lock the whole time, so this is the
	 * same event we peeked above.
	 */
	fsnotify_remove_first_event(group);
	if (fanotify_is_perm_event(event->mask))
		FANOTIFY_PERM(event)->state = FAN_EVENT_REPORTED;
	if (fanotify_is_hashed_event(event->mask))
		fanotify_unhash_event(group, event);
out:
	spin_unlock(&group->notification_lock);
	return event;
}
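
/*
 * Example (illustrative userspace sketch, not kernel code): get_one_event()
 * above rejects a read() whose buffer cannot hold the next full event, so a
 * listener should size its buffer for at least one maximal event, e.g.:
 *
 *	char buf[4096];		// assumption: comfortably above one event
 *	ssize_t len = read(fan_fd, buf, sizeof(buf));
 *	// len < 0 with errno == EINVAL indicates a too-small buffer
 *
 * The 4096 figure is an illustrative assumption, not an ABI constant.
 */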

static int create_fd(struct fsnotify_group *group, struct path *path,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can read
	 * even if it was originally opened O_WRONLY.
	 */
	new_file = dentry_open(path,
			       group->fanotify_data.f_flags | FMODE_NONOTIFY,
			       current_cred());
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file. This
		 * can happen, e.g., when a task is gone and we try to open its
		 * /proc files, or when we try to open a WRONLY file like in
		 * sysfs. We just send the errno to userspace since there isn't
		 * much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}

/*
 * Finish processing of permission event by setting it to ANSWERED state and
 * drop group->notification_lock.
 */
static void finish_permission_event(struct fsnotify_group *group,
				    struct fanotify_perm_event *event,
				    unsigned int response)
				    __releases(&group->notification_lock)
{
	bool destroy = false;

	assert_spin_locked(&group->notification_lock);
	event->response = response;
	if (event->state == FAN_EVENT_CANCELED)
		destroy = true;
	else
		event->state = FAN_EVENT_ANSWERED;
	spin_unlock(&group->notification_lock);
	if (destroy)
		fsnotify_destroy_event(group, &event->fae.fse);
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_perm_event *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid. If it is invalid, we do nothing;
	 * either userspace can send a valid response or we will clean the
	 * event up after the timeout.
	 */
	switch (response & ~FAN_AUDIT) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	if ((response & FAN_AUDIT) && !FAN_GROUP_FLAG(group, FAN_ENABLE_AUDIT))
		return -EINVAL;

	spin_lock(&group->notification_lock);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
			continue;

		list_del_init(&event->fae.fse.list);
		finish_permission_event(group, event, response);
		wake_up(&group->fanotify_data.access_waitq);
		return 0;
	}
	spin_unlock(&group->notification_lock);

	return -ENOENT;
}
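
/*
 * Example (illustrative userspace sketch, not kernel code): a listener
 * answers a permission event by writing a struct fanotify_response whose fd
 * matches the fd it received with the event, which is exactly what
 * process_access_response() looks up on access_list:
 *
 *	struct fanotify_response resp = {
 *		.fd = metadata->fd,
 *		.response = FAN_ALLOW,	// or FAN_DENY, optionally | FAN_AUDIT
 *	};
 *	write(fan_fd, &resp, sizeof(resp));
 *
 * FAN_AUDIT is only accepted if the group was created with FAN_ENABLE_AUDIT.
 */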

static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
			     int info_type, const char *name, size_t name_len,
			     char __user *buf, size_t count)
{
	struct fanotify_event_info_fid info = { };
	struct file_handle handle = { };
	unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh_buf;
	size_t fh_len = fh ? fh->len : 0;
	size_t info_len = fanotify_fid_info_len(fh_len, name_len);
	size_t len = info_len;

	pr_debug("%s: fh_len=%zu name_len=%zu, info_len=%zu, count=%zu\n",
		 __func__, fh_len, name_len, info_len, count);

	if (!fh_len)
		return 0;

	if (WARN_ON_ONCE(len < sizeof(info) || len > count))
		return -EFAULT;

	/*
	 * Copy event info fid header followed by variable sized file handle
	 * and optionally followed by variable sized filename.
	 */
	switch (info_type) {
	case FAN_EVENT_INFO_TYPE_FID:
	case FAN_EVENT_INFO_TYPE_DFID:
		if (WARN_ON_ONCE(name_len))
			return -EFAULT;
		break;
	case FAN_EVENT_INFO_TYPE_DFID_NAME:
		if (WARN_ON_ONCE(!name || !name_len))
			return -EFAULT;
		break;
	default:
		return -EFAULT;
	}

	info.hdr.info_type = info_type;
	info.hdr.len = len;
	info.fsid = *fsid;
	if (copy_to_user(buf, &info, sizeof(info)))
		return -EFAULT;

	buf += sizeof(info);
	len -= sizeof(info);
	if (WARN_ON_ONCE(len < sizeof(handle)))
		return -EFAULT;

	handle.handle_type = fh->type;
	handle.handle_bytes = fh_len;
	if (copy_to_user(buf, &handle, sizeof(handle)))
		return -EFAULT;

	buf += sizeof(handle);
	len -= sizeof(handle);
	if (WARN_ON_ONCE(len < fh_len))
		return -EFAULT;

	/*
	 * For an inline fh and inline file name, copy through stack to exclude
	 * the copy from usercopy hardening protections.
	 */
	fh_buf = fanotify_fh_buf(fh);
	if (fh_len <= FANOTIFY_INLINE_FH_LEN) {
		memcpy(bounce, fh_buf, fh_len);
		fh_buf = bounce;
	}
	if (copy_to_user(buf, fh_buf, fh_len))
		return -EFAULT;

	buf += fh_len;
	len -= fh_len;

	if (name_len) {
		/* Copy the filename with terminating null */
		name_len++;
		if (WARN_ON_ONCE(len < name_len))
			return -EFAULT;

		if (copy_to_user(buf, name, name_len))
			return -EFAULT;

		buf += name_len;
		len -= name_len;
	}

	/* Pad with 0's */
	WARN_ON_ONCE(len < 0 || len >= FANOTIFY_EVENT_ALIGN);
	if (len > 0 && clear_user(buf, len))
		return -EFAULT;

	return info_len;
}
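
/*
 * Example (illustrative userspace sketch, not kernel code): the record
 * emitted above is a fanotify_event_info_fid header, then a file_handle,
 * then the handle bytes, then an optional null-terminated name, padded to a
 * 4-byte boundary. A listener can walk it like this:
 *
 *	struct fanotify_event_info_fid *fid = (void *)(metadata + 1);
 *	struct file_handle *handle = (struct file_handle *)fid->handle;
 *
 *	if (fid->hdr.info_type == FAN_EVENT_INFO_TYPE_DFID_NAME) {
 *		const char *name = (char *)handle->f_handle +
 *				   handle->handle_bytes;
 *		// name is null-terminated
 *	}
 *
 * (fid->handle is the flexible array member holding the file_handle.)
 */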

static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fanotify_event *event,
				  char __user *buf, size_t count)
{
	struct fanotify_event_metadata metadata;
	struct path *path = fanotify_event_path(event);
	struct fanotify_info *info = fanotify_event_info(event);
	unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
	struct file *f = NULL;
	int ret, fd = FAN_NOFD;
	int info_type = 0;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	metadata.event_len = FAN_EVENT_METADATA_LEN +
				fanotify_event_info_len(fid_mode, event);
	metadata.metadata_len = FAN_EVENT_METADATA_LEN;
	metadata.vers = FANOTIFY_METADATA_VERSION;
	metadata.reserved = 0;
	metadata.mask = event->mask & FANOTIFY_OUTGOING_EVENTS;
	metadata.pid = pid_vnr(event->pid);
	/*
	 * For an unprivileged listener, event->pid can be used to identify the
	 * events generated by the listener process itself, without disclosing
	 * the pids of other processes.
	 */
	if (!capable(CAP_SYS_ADMIN) &&
	    task_tgid(current) != event->pid)
		metadata.pid = 0;

	if (path && path->mnt && path->dentry) {
		fd = create_fd(group, path, &f);
		if (fd < 0)
			return fd;
	}
	metadata.fd = fd;

	ret = -EFAULT;
	/*
	 * Sanity check copy size in case get_one_event() and
	 * event_len sizes ever get out of sync.
	 */
	if (WARN_ON_ONCE(metadata.event_len > count))
		goto out_close_fd;

	if (copy_to_user(buf, &metadata, FAN_EVENT_METADATA_LEN))
		goto out_close_fd;

	buf += FAN_EVENT_METADATA_LEN;
	count -= FAN_EVENT_METADATA_LEN;

	if (fanotify_is_perm_event(event->mask))
		FANOTIFY_PERM(event)->fd = fd;

	if (f)
		fd_install(fd, f);

	/* Event info records order is: dir fid + name, child fid */
	if (fanotify_event_dir_fh_len(event)) {
		info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME :
					     FAN_EVENT_INFO_TYPE_DFID;
		ret = copy_info_to_user(fanotify_event_fsid(event),
					fanotify_info_dir_fh(info),
					info_type, fanotify_info_name(info),
					info->name_len, buf, count);
		if (ret < 0)
			return ret;

		buf += ret;
		count -= ret;
	}

	if (fanotify_event_object_fh_len(event)) {
		const char *dot = NULL;
		int dot_len = 0;

		if (fid_mode == FAN_REPORT_FID || info_type) {
			/*
			 * With only group flag FAN_REPORT_FID only type FID is
			 * reported. Second info record type is always FID.
			 */
			info_type = FAN_EVENT_INFO_TYPE_FID;
		} else if ((fid_mode & FAN_REPORT_NAME) &&
			   (event->mask & FAN_ONDIR)) {
			/*
			 * With group flag FAN_REPORT_NAME, if name was not
			 * recorded in an event on a directory, report the
			 * name "." with info type DFID_NAME.
			 */
			info_type = FAN_EVENT_INFO_TYPE_DFID_NAME;
			dot = ".";
			dot_len = 1;
		} else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) ||
			   (event->mask & FAN_ONDIR)) {
			/*
			 * With group flag FAN_REPORT_DIR_FID, a single info
			 * record has type DFID for directory entry
			 * modification event and for event on a directory.
			 */
			info_type = FAN_EVENT_INFO_TYPE_DFID;
		} else {
			/*
			 * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID,
			 * a single info record has type FID for event on a
			 * non-directory, when there is no directory to report.
			 * For example, on FAN_DELETE_SELF event.
			 */
			info_type = FAN_EVENT_INFO_TYPE_FID;
		}

		ret = copy_info_to_user(fanotify_event_fsid(event),
					fanotify_event_object_fh(event),
					info_type, dot, dot_len, buf, count);
		if (ret < 0)
			return ret;

		buf += ret;
		count -= ret;
	}

	return metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
	return ret;
}

/* fanotify userspace file descriptor functions */
static __poll_t fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}
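
/*
 * Example (illustrative userspace sketch, not kernel code): since
 * fanotify_poll() reports EPOLLIN whenever the notification queue is
 * non-empty, a fanotify fd can be driven by poll()/epoll like any other fd:
 *
 *	struct pollfd pfd = { .fd = fan_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		// read() will not block now
 *	}
 */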

static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fanotify_event *event;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		/*
		 * User can supply arbitrarily large buffer. Avoid softlockups
		 * in case there are lots of available events.
		 */
		cond_resched();
		event = get_one_event(group, count);
		if (IS_ERR(event)) {
			ret = PTR_ERR(event);
			break;
		}

		if (!event) {
			ret = -EAGAIN;
			if (file->f_flags & O_NONBLOCK)
				break;

			ret = -ERESTARTSYS;
			if (signal_pending(current))
				break;

			if (start != buf)
				break;

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = copy_event_to_user(group, event, buf, count);
		if (unlikely(ret == -EOPENSTALE)) {
			/*
			 * We cannot report events with stale fd so drop it.
			 * Setting ret to 0 will continue the event loop and
			 * do the right thing if there are no more events to
			 * read (i.e. return bytes read, -EAGAIN or wait).
			 */
			ret = 0;
		}

		/*
		 * Permission events get queued to wait for response. Other
		 * events can be destroyed now.
		 */
		if (!fanotify_is_perm_event(event->mask)) {
			fsnotify_destroy_event(group, &event->fse);
		} else {
			if (ret <= 0) {
				spin_lock(&group->notification_lock);
				finish_permission_event(group,
					FANOTIFY_PERM(event), FAN_DENY);
				wake_up(&group->fanotify_data.access_waitq);
			} else {
				spin_lock(&group->notification_lock);
				list_add_tail(&event->fse.list,
					&group->fanotify_data.access_list);
				spin_unlock(&group->notification_lock);
			}
		}
		if (ret < 0)
			break;
		buf += ret;
		count -= ret;
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static ssize_t fanotify_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
		return -EINVAL;

	group = file->private_data;

	if (count < sizeof(response))
		return -EINVAL;

	count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
}

static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct fsnotify_event *fsn_event;

	/*
	 * Stop new events from arriving in the notification queue. Since
	 * userspace cannot use the fanotify fd anymore, no event can enter or
	 * leave access_list by now either.
	 */
	fsnotify_group_stop_queueing(group);

	/*
	 * Process all permission events on access_list and notification queue
	 * and simulate reply from userspace.
	 */
	spin_lock(&group->notification_lock);
	while (!list_empty(&group->fanotify_data.access_list)) {
		struct fanotify_perm_event *event;

		event = list_first_entry(&group->fanotify_data.access_list,
				struct fanotify_perm_event, fae.fse.list);
		list_del_init(&event->fae.fse.list);
		finish_permission_event(group, event, FAN_ALLOW);
		spin_lock(&group->notification_lock);
	}

	/*
	 * Destroy all non-permission events. For permission events just
	 * dequeue them and set the response. They will be freed once the
	 * response is consumed and fanotify_get_response() returns.
	 */
	while ((fsn_event = fsnotify_remove_first_event(group))) {
		struct fanotify_event *event = FANOTIFY_E(fsn_event);

		if (!(event->mask & FANOTIFY_PERM_EVENTS)) {
			spin_unlock(&group->notification_lock);
			fsnotify_destroy_event(group, fsn_event);
		} else {
			finish_permission_event(group, FANOTIFY_PERM(event),
						FAN_ALLOW);
		}
		spin_lock(&group->notification_lock);
	}
	spin_unlock(&group->notification_lock);

	/* Response for all permission events is set, wakeup waiters */
	wake_up(&group->fanotify_data.access_waitq);

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}
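
/*
 * Example (illustrative userspace sketch, not kernel code): a typical loop
 * that drains fanotify_read() output, iterating the variable-length events
 * with the FAN_EVENT_OK()/FAN_EVENT_NEXT() helpers from the uapi header:
 *
 *	char buf[4096];
 *	ssize_t len = read(fan_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md = (void *)buf;
 *
 *	for (; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
 *		if (md->vers != FANOTIFY_METADATA_VERSION)
 *			break;		// ABI mismatch
 *		// handle md->mask / md->fd / md->pid ...
 *		if (md->fd >= 0)
 *			close(md->fd);	// each event fd must be closed
 *	}
 */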

static long fanotify_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.llseek		= noop_llseek,
};

static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags,
			      __u64 mask, unsigned int obj_type)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = path_permission(path, MAY_READ);
	if (ret) {
		path_put(path);
		goto out;
	}

	ret = security_path_notify(path, mask, obj_type);
	if (ret)
		path_put(path);

out:
	return ret;
}
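
/*
 * Example (illustrative userspace sketch, not kernel code): the lookup flags
 * resolved above map directly to fanotify_mark() flags, e.g. marking a
 * directory only and refusing to follow a trailing symlink:
 *
 *	fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_ONLYDIR |
 *		      FAN_MARK_DONT_FOLLOW, FAN_OPEN | FAN_EVENT_ON_CHILD,
 *		      AT_FDCWD, "/var/log");
 *
 * If /var/log were a regular file, this would fail with -ENOTDIR.
 */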

static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask, unsigned int flags,
					    __u32 umask, int *destroy)
{
	__u32 oldmask = 0;

	/* umask bits cannot be removed by user */
	mask &= ~umask;
	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsn_mark->mask &= ~mask;
	} else {
		fsn_mark->ignored_mask &= ~mask;
	}
	/*
	 * We need to keep the mark around even if remaining mask cannot
	 * result in any events (e.g. mask == FAN_ONDIR) to support incremental
	 * changes to the mask.
	 * Destroy mark when only umask bits remain.
	 */
	*destroy = !((fsn_mark->mask | fsn_mark->ignored_mask) & ~umask);
	spin_unlock(&fsn_mark->lock);

	return mask & oldmask;
}

static int fanotify_remove_mark(struct fsnotify_group *group,
				fsnotify_connp_t *connp, __u32 mask,
				unsigned int flags, __u32 umask)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(connp, group);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 umask, &destroy_mark);
	if (removed & fsnotify_conn_mask(fsn_mark->connector))
		fsnotify_recalc_mask(fsn_mark->connector);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	/* matches the fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags, __u32 umask)
{
	return fanotify_remove_mark(group, &real_mount(mnt)->mnt_fsnotify_marks,
				    mask, flags, umask);
}

static int fanotify_remove_sb_mark(struct fsnotify_group *group,
				   struct super_block *sb, __u32 mask,
				   unsigned int flags, __u32 umask)
{
	return fanotify_remove_mark(group, &sb->s_fsnotify_marks, mask,
				    flags, umask);
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags, __u32 umask)
{
	return fanotify_remove_mark(group, &inode->i_fsnotify_marks, mask,
				    flags, umask);
}

static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsn_mark->mask |= mask;
	} else {
		fsn_mark->ignored_mask |= mask;
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}
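
/*
 * Example (illustrative userspace sketch, not kernel code): the
 * FAN_MARK_IGNORED_MASK path above is what suppresses events for an object,
 * e.g. muting modify events on one file even across modifications of the
 * file itself:
 *
 *	fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_IGNORED_MASK |
 *		      FAN_MARK_IGNORED_SURV_MODIFY, FAN_MODIFY,
 *		      AT_FDCWD, "/var/log/noisy.log");
 *
 * Without FAN_MARK_IGNORED_SURV_MODIFY, the ignore mask is dropped when the
 * file is modified.
 */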

static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   fsnotify_connp_t *connp,
						   unsigned int type,
						   __kernel_fsid_t *fsid)
{
	struct ucounts *ucounts = group->fanotify_data.ucounts;
	struct fsnotify_mark *mark;
	int ret;

	/*
	 * Enforce the per-user marks limit in all containing user namespaces.
	 * A group with FAN_UNLIMITED_MARKS does not contribute to the mark
	 * count in the limited groups account.
	 */
	if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS) &&
	    !inc_ucount(ucounts->ns, ucounts->uid, UCOUNT_FANOTIFY_MARKS))
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark) {
		ret = -ENOMEM;
		goto out_dec_ucounts;
	}

	fsnotify_init_mark(mark, group);
	ret = fsnotify_add_mark_locked(mark, connp, type, 0, fsid);
	if (ret) {
		fsnotify_put_mark(mark);
		goto out_dec_ucounts;
	}

	return mark;

out_dec_ucounts:
	if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS))
		dec_ucount(ucounts, UCOUNT_FANOTIFY_MARKS);
	return ERR_PTR(ret);
}

static int fanotify_add_mark(struct fsnotify_group *group,
			     fsnotify_connp_t *connp, unsigned int type,
			     __u32 mask, unsigned int flags,
			     __kernel_fsid_t *fsid)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(connp, group);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, connp, type, fsid);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	if (added & ~fsnotify_conn_mask(fsn_mark->connector))
		fsnotify_recalc_mask(fsn_mark->connector);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags, __kernel_fsid_t *fsid)
{
	return fanotify_add_mark(group, &real_mount(mnt)->mnt_fsnotify_marks,
				 FSNOTIFY_OBJ_TYPE_VFSMOUNT, mask, flags, fsid);
}

static int fanotify_add_sb_mark(struct fsnotify_group *group,
				struct super_block *sb, __u32 mask,
				unsigned int flags, __kernel_fsid_t *fsid)
{
	return fanotify_add_mark(group, &sb->s_fsnotify_marks,
				 FSNOTIFY_OBJ_TYPE_SB, mask, flags, fsid);
}
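
/*
 * Example (illustrative userspace sketch, not kernel code): the vfsmount and
 * sb variants above back the FAN_MARK_MOUNT and FAN_MARK_FILESYSTEM mark
 * types, which watch everything below a mount or on a whole filesystem (both
 * require CAP_SYS_ADMIN, see do_fanotify_mark()). E.g., on a group created
 * with a fid reporting mode (dirent events require it):
 *
 *	fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
 *		      FAN_CREATE | FAN_DELETE, AT_FDCWD, "/home");
 */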

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags, __kernel_fsid_t *fsid)
{
	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    inode_is_open_for_write(inode))
		return 0;

	return fanotify_add_mark(group, &inode->i_fsnotify_marks,
				 FSNOTIFY_OBJ_TYPE_INODE, mask, flags, fsid);
}

static struct fsnotify_event *fanotify_alloc_overflow_event(void)
{
	struct fanotify_event *oevent;

	oevent = kmalloc(sizeof(*oevent), GFP_KERNEL_ACCOUNT);
	if (!oevent)
		return NULL;

	fanotify_init_event(oevent, 0, FS_Q_OVERFLOW);
	oevent->type = FANOTIFY_EVENT_TYPE_OVERFLOW;

	return &oevent->fse;
}

static struct hlist_head *fanotify_alloc_merge_hash(void)
{
	struct hlist_head *hash;

	hash = kmalloc(sizeof(struct hlist_head) << FANOTIFY_HTABLE_BITS,
		       GFP_KERNEL_ACCOUNT);
	if (!hash)
		return NULL;

	__hash_init(hash, FANOTIFY_HTABLE_SIZE);

	return hash;
}
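
/*
 * Example (illustrative userspace sketch, not kernel code): the overflow
 * event allocated above is what a listener sees as FAN_Q_OVERFLOW when the
 * queue limit is hit; it carries no fd, so check for it before using
 * metadata->fd:
 *
 *	if (md->mask & FAN_Q_OVERFLOW) {
 *		// events were dropped; md->fd is FAN_NOFD here
 *		rescan_state();	// hypothetical recovery helper
 *	}
 */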

/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	unsigned int fid_mode = flags & FANOTIFY_FID_BITS;
	unsigned int class = flags & FANOTIFY_CLASS_BITS;

	pr_debug("%s: flags=%x event_f_flags=%x\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN)) {
		/*
		 * An unprivileged user can setup an fanotify group with
		 * limited functionality - an unprivileged group is limited to
		 * notification events with file handles and it cannot use
		 * unlimited queue/marks.
		 */
		if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode)
			return -EPERM;
	}

#ifdef CONFIG_AUDITSYSCALL
	if (flags & ~(FANOTIFY_INIT_FLAGS | FAN_ENABLE_AUDIT))
#else
	if (flags & ~FANOTIFY_INIT_FLAGS)
#endif
		return -EINVAL;

	if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
		return -EINVAL;

	switch (event_f_flags & O_ACCMODE) {
	case O_RDONLY:
	case O_RDWR:
	case O_WRONLY:
		break;
	default:
		return -EINVAL;
	}

	if (fid_mode && class != FAN_CLASS_NOTIF)
		return -EINVAL;

	/*
	 * Child name is reported with parent fid so requires dir fid.
	 * We can report both child fid and dir fid with or without name.
	 */
	if ((fid_mode & FAN_REPORT_NAME) && !(fid_mode & FAN_REPORT_DIR_FID))
		return -EINVAL;

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
	group = fsnotify_alloc_user_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Enforce groups limits per user in all containing user ns */
	group->fanotify_data.ucounts = inc_ucount(current_user_ns(),
						  current_euid(),
						  UCOUNT_FANOTIFY_GROUPS);
	if (!group->fanotify_data.ucounts) {
		fd = -EMFILE;
		goto out_destroy_group;
	}

	group->fanotify_data.flags = flags;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	group->fanotify_data.merge_hash = fanotify_alloc_merge_hash();
	if (!group->fanotify_data.merge_hash) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}

	group->overflow_event = fanotify_alloc_overflow_event();
	if (unlikely(!group->overflow_event)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}

	if (force_o_largefile())
		event_f_flags |= O_LARGEFILE;
	group->fanotify_data.f_flags = event_f_flags;
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	switch (class) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = fanotify_max_queued_events;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
	}

	if (flags & FAN_ENABLE_AUDIT) {
		fd = -EPERM;
		if (!capable(CAP_AUDIT_WRITE))
			goto out_destroy_group;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}
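
/*
 * Example (illustrative userspace sketch, not kernel code): two
 * fanotify_init() calls that exercise the checks above - a privileged
 * content-class group that may receive permission events, and an
 * unprivileged group, which must request a fid reporting mode and the
 * (implied, all-zero) FAN_CLASS_NOTIF class:
 *
 *	int perm_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT,
 *				    O_RDONLY | O_LARGEFILE);
 *
 *	int fid_fd = fanotify_init(FAN_CLOEXEC | FAN_REPORT_FID, O_RDONLY);
 */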

/* Check if filesystem can encode a unique fid */
static int fanotify_test_fid(struct path *path, __kernel_fsid_t *fsid)
{
	__kernel_fsid_t root_fsid;
	int err;

	/*
	 * Make sure path is not in filesystem with zero fsid (e.g. tmpfs).
	 */
	err = vfs_get_fsid(path->dentry, fsid);
	if (err)
		return err;

	if (!fsid->val[0] && !fsid->val[1])
		return -ENODEV;

	/*
	 * Make sure path is not inside a filesystem subvolume (e.g. btrfs)
	 * which uses a different fsid than sb root.
	 */
	err = vfs_get_fsid(path->dentry->d_sb->s_root, &root_fsid);
	if (err)
		return err;

	if (root_fsid.val[0] != fsid->val[0] ||
	    root_fsid.val[1] != fsid->val[1])
		return -EXDEV;

	/*
	 * We need to make sure that the file system supports at least
	 * encoding a file handle so user can use name_to_handle_at() to
	 * compare fid returned with event to the file handle of watched
	 * objects. However, name_to_handle_at() requires that the
	 * filesystem also supports decoding file handles.
	 */
	if (!path->dentry->d_sb->s_export_op ||
	    !path->dentry->d_sb->s_export_op->fh_to_dentry)
		return -EOPNOTSUPP;

	return 0;
}

static int fanotify_events_supported(struct path *path, __u64 mask)
{
	/*
	 * Some filesystems such as 'proc' acquire unusual locks when opening
	 * files. For them fanotify permission events have high chances of
	 * deadlocking the system - open done when reporting fanotify event
	 * blocks on this "unusual" lock while another process holding the lock
	 * waits for fanotify permission event to be answered. Just disallow
	 * permission events for such filesystems.
	 */
	if (mask & FANOTIFY_PERM_EVENTS &&
	    path->mnt->mnt_sb->s_type->fs_flags & FS_DISALLOW_NOTIFY_PERM)
		return -EINVAL;
	return 0;
}
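
/*
 * Example (illustrative userspace sketch, not kernel code): the comment in
 * fanotify_test_fid() refers to comparing a reported fid against a handle
 * obtained with name_to_handle_at(), roughly:
 *
 *	struct { struct file_handle fh; unsigned char buf[128]; } h = { };
 *	int mount_id;
 *
 *	h.fh.handle_bytes = sizeof(h.buf);
 *	name_to_handle_at(AT_FDCWD, "/watched/file", &h.fh, &mount_id, 0);
 *	// match against the file_handle embedded in the event's
 *	// fanotify_event_info_fid record: same handle_type, handle_bytes
 *	// and f_handle contents
 *
 * The 128-byte buffer size is an arbitrary illustrative choice.
 */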

static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
			    int dfd, const char __user *pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	__kernel_fsid_t __fsid, *fsid = NULL;
	u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS;
	unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
	bool ignored = flags & FAN_MARK_IGNORED_MASK;
	unsigned int obj_type, fid_mode;
	u32 umask = 0;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (upper_32_bits(mask))
		return -EINVAL;

	if (flags & ~FANOTIFY_MARK_FLAGS)
		return -EINVAL;

	switch (mark_type) {
	case FAN_MARK_INODE:
		obj_type = FSNOTIFY_OBJ_TYPE_INODE;
		break;
	case FAN_MARK_MOUNT:
		obj_type = FSNOTIFY_OBJ_TYPE_VFSMOUNT;
		break;
	case FAN_MARK_FILESYSTEM:
		obj_type = FSNOTIFY_OBJ_TYPE_SB;
		break;
	default:
		return -EINVAL;
	}

	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
		break;
	case FAN_MARK_FLUSH:
		if (flags & ~(FANOTIFY_MARK_TYPE_BITS | FAN_MARK_FLUSH))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
		valid_mask |= FANOTIFY_PERM_EVENTS;

	if (mask & ~valid_mask)
		return -EINVAL;

	/* Event flags (ONDIR, ON_CHILD) are meaningless in ignored mask */
	if (ignored)
		mask &= ~FANOTIFY_EVENT_FLAGS;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * An unprivileged user is not allowed to watch a mount point nor
	 * a filesystem.
	 */
	ret = -EPERM;
	if (!capable(CAP_SYS_ADMIN) &&
	    mark_type != FAN_MARK_INODE)
		goto fput_and_out;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FANOTIFY_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	/*
	 * Events with data type inode do not carry enough information to
	 * report event->fd, so we do not allow setting a mask for inode
	 * events unless group supports reporting fid.
	 * Inode events are not supported on a mount mark, because they do not
	 * carry enough information (i.e. path) to be filtered by mount point.
	 */
	fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
	if (mask & FANOTIFY_INODE_EVENTS &&
	    (!fid_mode || mark_type == FAN_MARK_MOUNT))
		goto fput_and_out;

	if (flags & FAN_MARK_FLUSH) {
		ret = 0;
		if (mark_type == FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else if (mark_type == FAN_MARK_FILESYSTEM)
			fsnotify_clear_sb_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		goto fput_and_out;
	}

	ret = fanotify_find_path(dfd, pathname, &path, flags,
				 (mask & ALL_FSNOTIFY_EVENTS), obj_type);
	if (ret)
		goto fput_and_out;

	if (flags & FAN_MARK_ADD) {
		ret = fanotify_events_supported(&path, mask);
		if (ret)
			goto path_put_and_out;
	}

	if (fid_mode) {
		ret = fanotify_test_fid(&path, &__fsid);
		if (ret)
			goto path_put_and_out;

		fsid = &__fsid;
	}

	/* inode held in place by reference to path; group by fget on fd */
	if (mark_type == FAN_MARK_INODE)
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */
	if (mnt || !S_ISDIR(inode->i_mode)) {
		mask &= ~FAN_EVENT_ON_CHILD;
		umask = FAN_EVENT_ON_CHILD;
		/*
		 * If group needs to report parent fid, register for getting
		 * events with parent/name info for non-directory.
		 */
		if ((fid_mode & FAN_REPORT_DIR_FID) &&
		    (flags & FAN_MARK_ADD) && !ignored)
			mask |= FAN_EVENT_ON_CHILD;
	}

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
	case FAN_MARK_ADD:
		if (mark_type == FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask,
							 flags, fsid);
		else if (mark_type == FAN_MARK_FILESYSTEM)
			ret = fanotify_add_sb_mark(group, mnt->mnt_sb, mask,
						   flags, fsid);
		else
			ret = fanotify_add_inode_mark(group, inode, mask,
						      flags, fsid);
		break;
	case FAN_MARK_REMOVE:
		if (mark_type == FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask,
							    flags, umask);
		else if (mark_type == FAN_MARK_FILESYSTEM)
			ret = fanotify_remove_sb_mark(group, mnt->mnt_sb, mask,
						      flags, umask);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask,
							 flags, umask);
		break;
	default:
		ret = -EINVAL;
	}

path_put_and_out:
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

#ifndef CONFIG_ARCH_SPLIT_ARG64
SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
		__u64, mask, int, dfd,
		const char __user *, pathname)
{
	return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
}
#endif

#if defined(CONFIG_ARCH_SPLIT_ARG64) || defined(CONFIG_COMPAT)
SYSCALL32_DEFINE6(fanotify_mark,
		  int, fanotify_fd, unsigned int, flags,
		  SC_ARG64(mask), int, dfd,
		  const char __user *, pathname)
{
	return do_fanotify_mark(fanotify_fd, flags, SC_VAL64(__u64, mask),
				dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function. Note that we cannot
 * return error because we have compiled-in VFS hooks. So an (unlikely)
 * failure here must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	struct sysinfo si;
	int max_marks;

	si_meminfo(&si);
	/*
	 * Allow up to 1% of addressable memory to be accounted for per user
	 * marks limited to the range [8192, 1048576]. mount and sb marks are
	 * a lot cheaper than inode marks, but there is no reason for a user
	 * to have many of those, so calculate by the cost of inode marks.
	 */
	max_marks = (((si.totalram - si.totalhigh) / 100) << PAGE_SHIFT) /
		    INODE_MARK_COST;
	max_marks = clamp(max_marks, FANOTIFY_OLD_DEFAULT_MAX_MARKS,
			  FANOTIFY_DEFAULT_MAX_USER_MARKS);

	BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10);
	BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);

	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
					 SLAB_PANIC|SLAB_ACCOUNT);
	fanotify_fid_event_cachep = KMEM_CACHE(fanotify_fid_event,
					       SLAB_PANIC);
	fanotify_path_event_cachep = KMEM_CACHE(fanotify_path_event,
						SLAB_PANIC);
	if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) {
		fanotify_perm_event_cachep =
			KMEM_CACHE(fanotify_perm_event, SLAB_PANIC);
	}

	fanotify_max_queued_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS] =
		FANOTIFY_DEFAULT_MAX_GROUPS;
	init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS] = max_marks;

	return 0;
}
device_initcall(fanotify_user_setup);
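
/*
 * Worked example (illustrative, under assumed numbers): the max_marks sizing
 * in fanotify_user_setup() above, with 4 GiB of lowmem, 4 KiB pages and a
 * hypothetical sizeof(struct inode) of 600 bytes (so INODE_MARK_COST = 1200):
 *
 *	1% of memory = 4 GiB / 100          ~= 42.9 MB
 *	max_marks    = 42.9 MB / 1200 bytes ~= 35800 marks
 *
 * which lands inside the clamp range [8192, 1048576]; only machines with
 * very little or very much memory hit the clamp bounds.
 */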