// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>

#include "fanotify.h"

/* Two paths are equal iff they refer to the same mount and dentry */
static bool fanotify_path_equal(struct path *p1, struct path *p2)
{
	return p1->mnt == p2->mnt && p1->dentry == p2->dentry;
}

/* Compare both words of a __kernel_fsid_t */
static inline bool fanotify_fsid_equal(__kernel_fsid_t *fsid1,
				       __kernel_fsid_t *fsid2)
{
	return fsid1->val[0] == fsid2->val[0] && fsid1->val[1] == fsid2->val[1];
}

/* File handles are equal iff type, length and buffer contents all match */
static bool fanotify_fh_equal(struct fanotify_fh *fh1,
			      struct fanotify_fh *fh2)
{
	if (fh1->type != fh2->type || fh1->len != fh2->len)
		return false;

	/* Zero-length fh with matching type/len compares equal */
	return !fh1->len ||
		!memcmp(fanotify_fh_buf(fh1), fanotify_fh_buf(fh2), fh1->len);
}

/* Equality test used when deciding whether two FID events may be merged */
static bool fanotify_fid_event_equal(struct fanotify_fid_event *ffe1,
				     struct fanotify_fid_event *ffe2)
{
	/* Do not merge fid events without object fh */
	if (!ffe1->object_fh.len)
		return false;

	return fanotify_fsid_equal(&ffe1->fsid, &ffe2->fsid) &&
		fanotify_fh_equal(&ffe1->object_fh, &ffe2->object_fh);
}

/* Compare the dir fh, optional file fh and optional name of two events */
static bool fanotify_info_equal(struct fanotify_info *info1,
				struct fanotify_info *info2)
{
	if (info1->dir_fh_totlen != info2->dir_fh_totlen ||
	    info1->file_fh_totlen != info2->file_fh_totlen ||
	    info1->name_len != info2->name_len)
		return false;

	if (info1->dir_fh_totlen &&
	    !fanotify_fh_equal(fanotify_info_dir_fh(info1),
			       fanotify_info_dir_fh(info2)))
		return false;

	if (info1->file_fh_totlen &&
	    !fanotify_fh_equal(fanotify_info_file_fh(info1),
			       fanotify_info_file_fh(info2)))
		return false;

	return !info1->name_len ||
		!memcmp(fanotify_info_name(info1), fanotify_info_name(info2),
			info1->name_len);
}

/* Equality test used when deciding whether two FID_NAME events may be merged */
static bool fanotify_name_event_equal(struct fanotify_name_event *fne1,
				      struct fanotify_name_event *fne2)
{
	struct fanotify_info *info1 = &fne1->info;
	struct fanotify_info *info2 = &fne2->info;

	/* Do not merge name events without dir fh */
	if (!info1->dir_fh_totlen)
		return false;

	if (!fanotify_fsid_equal(&fne1->fsid, &fne2->fsid))
		return false;

	return fanotify_info_equal(info1, info2);
}

/*
 * Decide whether new_fsn may be merged into old_fsn (i.e. they describe the
 * same object from the same task), delegating the type-specific comparison to
 * the helpers above.
 */
static bool fanotify_should_merge(struct fsnotify_event *old_fsn,
				  struct fsnotify_event *new_fsn)
{
	struct fanotify_event *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->objectid != new_fsn->objectid ||
	    old->type != new->type || old->pid != new->pid)
		return false;

	/*
	 * We want to merge many dirent events in the same dir (i.e.
	 * creates/unlinks/renames), but we do not want to merge dirent
	 * events referring to subdirs with dirent events referring to
	 * non subdirs, otherwise, user won't be able to tell from a
	 * mask FAN_CREATE|FAN_DELETE|FAN_ONDIR if it describes mkdir+
	 * unlink pair or rmdir+create pair of events.
	 */
	if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR))
		return false;

	switch (old->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		return fanotify_path_equal(fanotify_event_path(old),
					   fanotify_event_path(new));
	case FANOTIFY_EVENT_TYPE_FID:
		return fanotify_fid_event_equal(FANOTIFY_FE(old),
						FANOTIFY_FE(new));
	case FANOTIFY_EVENT_TYPE_FID_NAME:
		return fanotify_name_event_equal(FANOTIFY_NE(old),
						 FANOTIFY_NE(new));
	default:
		WARN_ON_ONCE(1);
	}

	return false;
}

/*
 * Try to merge the new event into an already queued one; scanned newest first.
 * Returns 1 if the event was merged (its mask OR-ed into an existing event),
 * 0 otherwise.
 *
 * and the list better be locked by something too!
 */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;
	struct fanotify_event *new;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);
	new = FANOTIFY_E(event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (fanotify_is_perm_event(new->mask))
		return 0;

	list_for_each_entry_reverse(test_event, list, list) {
		if (fanotify_should_merge(test_event, event)) {
			FANOTIFY_E(test_event)->mask |= new->mask;
			return 1;
		}
	}

	return 0;
}

/*
 * Wait for response to permission event. The function also takes care of
 * freeing the permission event (or offloads that in case the wait is canceled
 * by a signal). The function returns 0 in case access got allowed by userspace,
 * -EPERM in case userspace disallowed the access, and -ERESTARTSYS in case
 * the wait got interrupted by a signal.
 */
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = wait_event_killable(group->fanotify_data.access_waitq,
				  event->state == FAN_EVENT_ANSWERED);
	/* Signal pending? */
	if (ret < 0) {
		spin_lock(&group->notification_lock);
		/* Event reported to userspace and no answer yet? */
		if (event->state == FAN_EVENT_REPORTED) {
			/* Event will get freed once userspace answers to it */
			event->state = FAN_EVENT_CANCELED;
			spin_unlock(&group->notification_lock);
			return ret;
		}
		/* Event not yet reported? Just remove it. */
		if (event->state == FAN_EVENT_INIT)
			fsnotify_remove_queued_event(group, &event->fae.fse);
		/*
		 * Event may be also answered in case signal delivery raced
		 * with wakeup. In that case we have nothing to do besides
		 * freeing the event and reporting error.
		 */
		spin_unlock(&group->notification_lock);
		goto out;
	}

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);
out:
	fsnotify_destroy_event(group, &event->fae.fse);

	return ret;
}

/*
 * This function returns a mask for an event that only contains the flags
 * that have been specifically requested by the user. Flags that may have
 * been included within the event mask, but have not been explicitly
 * requested by the user, will not be present in the returned mask.
 */
static u32 fanotify_group_event_mask(struct fsnotify_group *group,
				     struct fsnotify_iter_info *iter_info,
				     u32 event_mask, const void *data,
				     int data_type, struct inode *dir)
{
	__u32 marks_mask = 0, marks_ignored_mask = 0;
	__u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS |
				     FANOTIFY_EVENT_FLAGS;
	const struct path *path = fsnotify_data_path(data, data_type);
	unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
	struct fsnotify_mark *mark;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	if (!fid_mode) {
		/* Do we have path to open a file descriptor? */
		if (!path)
			return 0;
		/* Path type events are only relevant for files and dirs */
		if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry))
			return 0;
	} else if (!(fid_mode & FAN_REPORT_FID)) {
		/* Do we have a directory inode to report? */
		if (!dir && !(event_mask & FS_ISDIR))
			return 0;
	}

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];

		/* Apply ignore mask regardless of ISDIR and ON_CHILD flags */
		marks_ignored_mask |= mark->ignored_mask;

		/*
		 * If the event is on dir and this mark doesn't care about
		 * events on dir, don't send it!
		 */
		if (event_mask & FS_ISDIR && !(mark->mask & FS_ISDIR))
			continue;

		/*
		 * If the event is for a child and this mark is on a parent not
		 * watching children, don't send it!
		 */
		if (event_mask & FS_EVENT_ON_CHILD &&
		    type == FSNOTIFY_OBJ_TYPE_INODE &&
		    !(mark->mask & FS_EVENT_ON_CHILD))
			continue;

		marks_mask |= mark->mask;
	}

	test_mask = event_mask & marks_mask & ~marks_ignored_mask;

	/*
	 * For dirent modification events (create/delete/move) that do not carry
	 * the child entry name information, we report FAN_ONDIR for mkdir/rmdir
	 * so user can differentiate them from creat/unlink.
	 *
	 * For backward compatibility and consistency, do not report FAN_ONDIR
	 * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR
	 * to user in fid mode for all event types.
	 *
	 * We never report FAN_EVENT_ON_CHILD to user, but we do pass it in to
	 * fanotify_alloc_event() when group is reporting fid as indication
	 * that event happened on child.
	 */
	if (fid_mode) {
		/* Do not report event flags without any event */
		if (!(test_mask & ~FANOTIFY_EVENT_FLAGS))
			return 0;
	} else {
		user_mask &= ~FANOTIFY_EVENT_FLAGS;
	}

	return test_mask & user_mask;
}

/*
 * Check size needed to encode fanotify_fh.
 *
 * Return size of encoded fh without fanotify_fh header.
 * Return 0 on failure to encode.
 */
static int fanotify_encode_fh_len(struct inode *inode)
{
	int dwords = 0;

	/* NULL inode encodes to nothing */
	if (!inode)
		return 0;

	/* With a NULL buffer, exportfs only reports the required dword count */
	exportfs_encode_inode_fh(inode, NULL, &dwords, NULL);

	return dwords << 2;
}

/*
 * Encode fanotify_fh.
 *
 * Return total size of encoded fh including fanotify_fh header.
 * Return 0 on failure to encode.
 */
static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
			      unsigned int fh_len, gfp_t gfp)
{
	int dwords, type = 0;
	char *ext_buf = NULL;
	void *buf = fh->buf;
	int err;

	fh->type = FILEID_ROOT;
	fh->len = 0;
	fh->flags = 0;
	if (!inode)
		return 0;

	/*
	 * !gfp means preallocated variable size fh, but fh_len could
	 * be zero in that case if encoding fh len failed.
	 */
	err = -ENOENT;
	if (fh_len < 4 || WARN_ON_ONCE(fh_len % 4))
		goto out_err;

	/* No external buffer in a variable size allocated fh */
	if (gfp && fh_len > FANOTIFY_INLINE_FH_LEN) {
		/* Treat failure to allocate fh as failure to encode fh */
		err = -ENOMEM;
		ext_buf = kmalloc(fh_len, gfp);
		if (!ext_buf)
			goto out_err;

		*fanotify_fh_ext_buf_ptr(fh) = ext_buf;
		buf = ext_buf;
		fh->flags |= FANOTIFY_FH_FLAG_EXT_BUF;
	}

	dwords = fh_len >> 2;
	type = exportfs_encode_inode_fh(inode, buf, &dwords, NULL);
	err = -EINVAL;
	if (!type || type == FILEID_INVALID || fh_len != dwords << 2)
		goto out_err;

	fh->type = type;
	fh->len = fh_len;

	return FANOTIFY_FH_HDR_LEN + fh_len;

out_err:
	pr_warn_ratelimited("fanotify: failed to encode fid (type=%d, len=%d, err=%i)\n",
			    type, fh_len, err);
	kfree(ext_buf);
	*fanotify_fh_ext_buf_ptr(fh) = NULL;
	/* Report the event without a file identifier on encode error */
	fh->type = FILEID_INVALID;
	fh->len = 0;
	return 0;
}

/*
 * The inode to use as identifier when reporting fid depends on the event.
 * Report the modified directory inode on dirent modification events.
 * Report the "victim" inode otherwise.
 * For example:
 * FS_ATTRIB reports the child inode even if reported on a watched parent.
 * FS_CREATE reports the modified dir inode and not the created inode.
 */
static struct inode *fanotify_fid_inode(u32 event_mask, const void *data,
					int data_type, struct inode *dir)
{
	if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
		return dir;

	return fsnotify_data_inode(data, data_type);
}

/*
 * The inode to use as identifier when reporting dir fid depends on the event.
 * Report the modified directory inode on dirent modification events.
 * Report the "victim" inode if "victim" is a directory.
 * Report the parent inode if "victim" is not a directory and event is
 * reported to parent.
 * Otherwise, do not report dir fid.
 */
static struct inode *fanotify_dfid_inode(u32 event_mask, const void *data,
					 int data_type, struct inode *dir)
{
	struct inode *inode = fsnotify_data_inode(data, data_type);

	if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
		return dir;

	if (S_ISDIR(inode->i_mode))
		return inode;

	return dir;
}

/* Allocate a path event; takes an extra reference on the path */
static struct fanotify_event *fanotify_alloc_path_event(const struct path *path,
							gfp_t gfp)
{
	struct fanotify_path_event *pevent;

	pevent = kmem_cache_alloc(fanotify_path_event_cachep, gfp);
	if (!pevent)
		return NULL;

	pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH;
	pevent->path = *path;
	path_get(path);

	return &pevent->fae;
}

/*
 * Allocate a permission event in FAN_EVENT_INIT state; takes an extra
 * reference on the path.
 */
static struct fanotify_event *fanotify_alloc_perm_event(const struct path *path,
							gfp_t gfp)
{
	struct fanotify_perm_event *pevent;

	pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
	if (!pevent)
		return NULL;

	pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH_PERM;
	pevent->response = 0;
	pevent->state = FAN_EVENT_INIT;
	pevent->path = *path;
	path_get(path);

	return &pevent->fae;
}

/* Allocate a fid event and encode the object fh of inode @id into it */
static struct fanotify_event *fanotify_alloc_fid_event(struct inode *id,
						       __kernel_fsid_t *fsid,
						       gfp_t gfp)
{
	struct fanotify_fid_event *ffe;

	ffe = kmem_cache_alloc(fanotify_fid_event_cachep, gfp);
	if (!ffe)
		return NULL;

	ffe->fae.type = FANOTIFY_EVENT_TYPE_FID;
	ffe->fsid = *fsid;
	fanotify_encode_fh(&ffe->object_fh, id, fanotify_encode_fh_len(id),
			   gfp);

	return &ffe->fae;
}

/*
 * Allocate a variable size name event with room for the dir fh, an optional
 * child fh and an optional file name. The fhs are encoded into the
 * preallocated buffer (gfp=0 in fanotify_encode_fh() means no external
 * buffer allocation).
 */
static struct fanotify_event *fanotify_alloc_name_event(struct inode *id,
							__kernel_fsid_t *fsid,
							const struct qstr *file_name,
							struct inode *child,
							gfp_t gfp)
{
	struct fanotify_name_event *fne;
	struct fanotify_info *info;
	struct fanotify_fh *dfh, *ffh;
	unsigned int dir_fh_len = fanotify_encode_fh_len(id);
	unsigned int child_fh_len = fanotify_encode_fh_len(child);
	unsigned int size;

	size = sizeof(*fne) + FANOTIFY_FH_HDR_LEN + dir_fh_len;
	if (child_fh_len)
		size += FANOTIFY_FH_HDR_LEN + child_fh_len;
	if (file_name)
		size += file_name->len + 1;
	fne = kmalloc(size, gfp);
	if (!fne)
		return NULL;

	fne->fae.type = FANOTIFY_EVENT_TYPE_FID_NAME;
	fne->fsid = *fsid;
	info = &fne->info;
	fanotify_info_init(info);
	dfh = fanotify_info_dir_fh(info);
	info->dir_fh_totlen = fanotify_encode_fh(dfh, id, dir_fh_len, 0);
	if (child_fh_len) {
		ffh = fanotify_info_file_fh(info);
		info->file_fh_totlen = fanotify_encode_fh(ffh, child, child_fh_len, 0);
	}
	if (file_name)
		fanotify_info_copy_name(info, file_name);

	pr_debug("%s: ino=%lu size=%u dir_fh_len=%u child_fh_len=%u name_len=%u name='%.*s'\n",
		 __func__, id->i_ino, size, dir_fh_len, child_fh_len,
		 info->name_len, info->name_len, fanotify_info_name(info));

	return &fne->fae;
}

/*
 * Allocate the event variant matching the event mask and the group's fid
 * reporting mode, charging the allocation to the group's memcg.
 */
static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
						   u32 mask, const void *data,
						   int data_type, struct inode *dir,
						   const struct qstr *file_name,
						   __kernel_fsid_t *fsid)
{
	struct fanotify_event *event = NULL;
	gfp_t gfp = GFP_KERNEL_ACCOUNT;
	struct inode *id = fanotify_fid_inode(mask, data, data_type, dir);
	struct inode *dirid = fanotify_dfid_inode(mask, data, data_type, dir);
	const struct path *path = fsnotify_data_path(data, data_type);
	unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
	struct mem_cgroup *old_memcg;
	struct inode *child = NULL;
	bool name_event = false;

	if ((fid_mode & FAN_REPORT_DIR_FID) && dirid) {
		/*
		 * With both flags FAN_REPORT_DIR_FID and FAN_REPORT_FID, we
		 * report the child fid for events reported on a non-dir child
		 * in addition to reporting the parent fid and maybe child name.
		 */
		if ((fid_mode & FAN_REPORT_FID) &&
		    id != dirid && !(mask & FAN_ONDIR))
			child = id;

		id = dirid;

		/*
		 * We record file name only in a group with FAN_REPORT_NAME
		 * and when we have a directory inode to report.
		 *
		 * For directory entry modification event, we record the fid of
		 * the directory and the name of the modified entry.
		 *
		 * For event on non-directory that is reported to parent, we
		 * record the fid of the parent and the name of the child.
		 *
		 * Even if not reporting name, we need a variable length
		 * fanotify_name_event if reporting both parent and child fids.
		 */
		if (!(fid_mode & FAN_REPORT_NAME)) {
			name_event = !!child;
			file_name = NULL;
		} else if ((mask & ALL_FSNOTIFY_DIRENT_EVENTS) ||
			   !(mask & FAN_ONDIR)) {
			name_event = true;
		}
	}

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short. For the limited size queues, avoid OOM killer in the
	 * target monitoring memcg as it may have security repercussion.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;
	else
		gfp |= __GFP_RETRY_MAYFAIL;

	/* Whoever is interested in the event, pays for the allocation. */
	old_memcg = set_active_memcg(group->memcg);

	if (fanotify_is_perm_event(mask)) {
		event = fanotify_alloc_perm_event(path, gfp);
	} else if (name_event && (file_name || child)) {
		event = fanotify_alloc_name_event(id, fsid, file_name, child,
						  gfp);
	} else if (fid_mode) {
		event = fanotify_alloc_fid_event(id, fsid, gfp);
	} else {
		event = fanotify_alloc_path_event(path, gfp);
	}

	if (!event)
		goto out;

	/*
	 * Use the victim inode instead of the watching inode as the id for
	 * event queue, so event reported on parent is merged with event
	 * reported on child when both directory and child watches exist.
	 */
	fanotify_init_event(event, (unsigned long)id, mask);
	if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
		event->pid = get_pid(task_pid(current));
	else
		event->pid = get_pid(task_tgid(current));

out:
	set_active_memcg(old_memcg);
	return event;
}

/*
 * Get cached fsid of the filesystem containing the object from any connector.
 * All connectors are supposed to have the same fsid, but we do not verify that
 * here.
 */
static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
{
	int type;
	__kernel_fsid_t fsid = {};

	fsnotify_foreach_obj_type(type) {
		struct fsnotify_mark_connector *conn;

		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;

		conn = READ_ONCE(iter_info->marks[type]->connector);
		/* Mark is just getting destroyed or created? */
		if (!conn)
			continue;
		if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID))
			continue;
		/* Pairs with smp_wmb() in fsnotify_add_mark_list() */
		smp_rmb();
		fsid = conn->fsid;
		if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
			continue;
		return fsid;
	}

	return fsid;
}

/*
 * fsnotify ->handle_event() callback: filter the event against the group's
 * marks, allocate and queue (or merge) the event, and for permission events
 * wait for the userspace response.
 */
static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
				 const void *data, int data_type,
				 struct inode *dir,
				 const struct qstr *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event *event;
	struct fsnotify_event *fsn_event;
	__kernel_fsid_t fsid = {};

	/* fanotify ABI bits must stay in sync with the fsnotify bits */
	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(FAN_CREATE != FS_CREATE);
	BUILD_BUG_ON(FAN_DELETE != FS_DELETE);
	BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
	BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
	BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);

	BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19);

	mask = fanotify_group_event_mask(group, iter_info, mask, data,
					 data_type, dir);
	if (!mask)
		return 0;

	pr_debug("%s: group=%p mask=%x\n", __func__, group, mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion. Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	if (FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS)) {
		fsid = fanotify_get_fsid(iter_info);
		/* Racing with mark destruction or creation? */
		if (!fsid.val[0] && !fsid.val[1])
			return 0;
	}

	event = fanotify_alloc_event(group, mask, data, data_type, dir,
				     file_name, &fsid);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PERM(event),
					    iter_info);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}

/* Drop the group's reference/accounting on its owning user */
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

/* Release the path reference taken in fanotify_alloc_path_event() */
static void fanotify_free_path_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_path_event_cachep, FANOTIFY_PE(event));
}

/* Release the path reference taken in fanotify_alloc_perm_event() */
static void fanotify_free_perm_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_perm_event_cachep, FANOTIFY_PERM(event));
}

/* Free a fid event, including its external fh buffer if one was allocated */
static void fanotify_free_fid_event(struct fanotify_event *event)
{
	struct fanotify_fid_event *ffe = FANOTIFY_FE(event);

	if (fanotify_fh_has_ext_buf(&ffe->object_fh))
		kfree(fanotify_fh_ext_buf(&ffe->object_fh));
	kmem_cache_free(fanotify_fid_event_cachep, ffe);
}

/* Name events are a single variable size kmalloc() allocation */
static void fanotify_free_name_event(struct fanotify_event *event)
{
	kfree(FANOTIFY_NE(event));
}

/*
 * Registered as ->free_event(): dispatch to the type-specific destructor
 * after dropping the pid reference taken at allocation time.
 */
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event *event;

	event = FANOTIFY_E(fsn_event);
	put_pid(event->pid);
	switch (event->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		fanotify_free_path_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_PATH_PERM:
		fanotify_free_perm_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_FID:
		fanotify_free_fid_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_FID_NAME:
		fanotify_free_name_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_OVERFLOW:
		kfree(event);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

/* Registered as ->free_mark(): return mark to its slab cache */
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};