// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>

#include "fanotify.h"

/* Two paths are equal iff they name the same dentry on the same mount. */
static bool fanotify_path_equal(struct path *p1, struct path *p2)
{
	return p1->mnt == p2->mnt && p1->dentry == p2->dentry;
}

/* Compare the two 32-bit halves of a __kernel_fsid_t. */
static inline bool fanotify_fsid_equal(__kernel_fsid_t *fsid1,
				       __kernel_fsid_t *fsid2)
{
	return fsid1->val[0] == fsid2->val[0] && fsid1->val[1] == fsid2->val[1];
}

/*
 * Compare two encoded file handles: same handle type, same length and
 * byte-identical handle payload.
 */
static bool fanotify_fh_equal(struct fanotify_fh *fh1,
			      struct fanotify_fh *fh2)
{
	if (fh1->type != fh2->type || fh1->len != fh2->len)
		return false;

	/* Do not merge events if we failed to encode fh */
	if (fh1->type == FILEID_INVALID)
		return false;

	/* Zero-length handles with matching type/len compare equal. */
	return !fh1->len ||
		!memcmp(fanotify_fh_buf(fh1), fanotify_fh_buf(fh2), fh1->len);
}

/* FID events are equal iff both fsid and the object file handle match. */
static bool fanotify_fid_event_equal(struct fanotify_fid_event *ffe1,
				     struct fanotify_fid_event *ffe2)
{
	/* Do not merge fid events without object fh */
	if (!ffe1->object_fh.len)
		return false;

	return fanotify_fsid_equal(&ffe1->fsid, &ffe2->fsid) &&
		fanotify_fh_equal(&ffe1->object_fh, &ffe2->object_fh);
}

/*
 * Name events are equal iff the directory file handles match and the
 * entry names are byte-identical.
 */
static bool fanotify_name_event_equal(struct fanotify_name_event *fne1,
				      struct fanotify_name_event *fne2)
{
	/*
	 * Do not merge name events without dir fh.
	 * FAN_DIR_MODIFY does not encode object fh, so it may be empty.
	 */
	if (!fne1->dir_fh.len)
		return false;

	if (fne1->name_len != fne2->name_len ||
	    !fanotify_fh_equal(&fne1->dir_fh, &fne2->dir_fh))
		return false;

	return !memcmp(fne1->name, fne2->name, fne1->name_len);
}

/*
 * Decide whether a newly queued event may be merged (OR-ed) into an
 * already queued one.  Events must share the queue objectid, the event
 * type and the reporting pid; beyond that the comparison is delegated
 * to the per-type equality helper above.
 */
static bool fanotify_should_merge(struct fsnotify_event *old_fsn,
				  struct fsnotify_event *new_fsn)
{
	struct fanotify_event *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->objectid != new_fsn->objectid ||
	    old->type != new->type || old->pid != new->pid)
		return false;

	switch (old->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		return fanotify_path_equal(fanotify_event_path(old),
					   fanotify_event_path(new));
	case FANOTIFY_EVENT_TYPE_FID:
		/*
		 * We want to merge many dirent events in the same dir (i.e.
		 * creates/unlinks/renames), but we do not want to merge dirent
		 * events referring to subdirs with dirent events referring to
		 * non subdirs, otherwise, user won't be able to tell from a
		 * mask FAN_CREATE|FAN_DELETE|FAN_ONDIR if it describes mkdir+
		 * unlink pair or rmdir+create pair of events.
		 */
		if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR))
			return false;

		return fanotify_fid_event_equal(FANOTIFY_FE(old),
						FANOTIFY_FE(new));
	case FANOTIFY_EVENT_TYPE_FID_NAME:
		return fanotify_name_event_equal(FANOTIFY_NE(old),
						 FANOTIFY_NE(new));
	default:
		WARN_ON_ONCE(1);
	}

	return false;
}

/*
 * Try to merge @event into an event already on @list, scanning from the
 * tail.  Returns 1 if the event was merged (its mask OR-ed into the
 * existing event), 0 if the caller should queue it as a new event.
 *
 * and the list better be locked by something too!
 */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;
	struct fanotify_event *new;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);
	new = FANOTIFY_E(event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (fanotify_is_perm_event(new->mask))
		return 0;

	list_for_each_entry_reverse(test_event, list, list) {
		if (fanotify_should_merge(test_event, event)) {
			FANOTIFY_E(test_event)->mask |= new->mask;
			return 1;
		}
	}

	return 0;
}

/*
 * Wait for response to permission event. The function also takes care of
 * freeing the permission event (or offloads that in case the wait is canceled
 * by a signal). The function returns 0 in case access got allowed by userspace,
 * -EPERM in case userspace disallowed the access, and -ERESTARTSYS in case
 * the wait got interrupted by a signal.
 */
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = wait_event_killable(group->fanotify_data.access_waitq,
				  event->state == FAN_EVENT_ANSWERED);
	/* Signal pending? */
	if (ret < 0) {
		spin_lock(&group->notification_lock);
		/* Event reported to userspace and no answer yet? */
		if (event->state == FAN_EVENT_REPORTED) {
			/* Event will get freed once userspace answers to it */
			event->state = FAN_EVENT_CANCELED;
			spin_unlock(&group->notification_lock);
			return ret;
		}
		/* Event not yet reported? Just remove it. */
		if (event->state == FAN_EVENT_INIT)
			fsnotify_remove_queued_event(group, &event->fae.fse);
		/*
		 * Event may be also answered in case signal delivery raced
		 * with wakeup. In that case we have nothing to do besides
		 * freeing the event and reporting error.
		 */
		spin_unlock(&group->notification_lock);
		goto out;
	}

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);
out:
	fsnotify_destroy_event(group, &event->fae.fse);

	return ret;
}

/*
 * This function returns a mask for an event that only contains the flags
 * that have been specifically requested by the user. Flags that may have
 * been included within the event mask, but have not been explicitly
 * requested by the user, will not be present in the returned mask.
 */
static u32 fanotify_group_event_mask(struct fsnotify_group *group,
				     struct fsnotify_iter_info *iter_info,
				     u32 event_mask, const void *data,
				     int data_type)
{
	__u32 marks_mask = 0, marks_ignored_mask = 0;
	__u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS;
	const struct path *path = fsnotify_data_path(data, data_type);
	struct fsnotify_mark *mark;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	if (!FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		/* Do we have path to open a file descriptor? */
		if (!path)
			return 0;
		/* Path type events are only relevant for files and dirs */
		if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry))
			return 0;
	}

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];

		/* Apply ignore mask regardless of ISDIR and ON_CHILD flags */
		marks_ignored_mask |= mark->ignored_mask;

		/*
		 * If the event is on dir and this mark doesn't care about
		 * events on dir, don't send it!
		 */
		if (event_mask & FS_ISDIR && !(mark->mask & FS_ISDIR))
			continue;

		/*
		 * If the event is for a child and this mark doesn't care about
		 * events on a child, don't send it!
		 */
		if (event_mask & FS_EVENT_ON_CHILD &&
		    (type != FSNOTIFY_OBJ_TYPE_INODE ||
		     !(mark->mask & FS_EVENT_ON_CHILD)))
			continue;

		marks_mask |= mark->mask;
	}

	test_mask = event_mask & marks_mask & ~marks_ignored_mask;

	/*
	 * For dirent modification events (create/delete/move) that do not carry
	 * the child entry name information, we report FAN_ONDIR for mkdir/rmdir
	 * so user can differentiate them from creat/unlink.
	 *
	 * For backward compatibility and consistency, do not report FAN_ONDIR
	 * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR
	 * to user in FAN_REPORT_FID mode for all event types.
	 */
	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		/* Do not report FAN_ONDIR without any event */
		if (!(test_mask & ~FAN_ONDIR))
			return 0;
	} else {
		user_mask &= ~FAN_ONDIR;
	}

	return test_mask & user_mask;
}

/*
 * Encode a file handle for @inode into @fh.  Handles larger than
 * FANOTIFY_INLINE_FH_LEN are placed in a kmalloc'ed external buffer
 * (allocated with @gfp) referenced via fanotify_fh_ext_buf_ptr().
 * On any failure the event is still reported, just without an
 * identifier: fh->type = FILEID_INVALID, fh->len = 0.
 */
static void fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
			       gfp_t gfp)
{
	int dwords, type, bytes = 0;
	char *ext_buf = NULL;
	void *buf = fh->buf;
	int err;

	if (!inode)
		goto out;

	/* First call with NULL buf only queries the required size (dwords). */
	dwords = 0;
	err = -ENOENT;
	type = exportfs_encode_inode_fh(inode, NULL, &dwords, NULL);
	if (!dwords)
		goto out_err;

	bytes = dwords << 2;
	if (bytes > FANOTIFY_INLINE_FH_LEN) {
		/* Treat failure to allocate fh as failure to allocate event */
		err = -ENOMEM;
		ext_buf = kmalloc(bytes, gfp);
		if (!ext_buf)
			goto out_err;

		*fanotify_fh_ext_buf_ptr(fh) = ext_buf;
		buf = ext_buf;
	}

	type = exportfs_encode_inode_fh(inode, buf, &dwords, NULL);
	err = -EINVAL;
	if (!type || type == FILEID_INVALID || bytes != dwords << 2)
		goto out_err;

	fh->type = type;
	fh->len = bytes;

	return;

out_err:
	pr_warn_ratelimited("fanotify: failed to encode fid (type=%d, len=%d, err=%i)\n",
			    type, bytes, err);
	/* kfree(NULL) is a no-op when no external buffer was allocated. */
	kfree(ext_buf);
	*fanotify_fh_ext_buf_ptr(fh) = NULL;
out:
	/* Report the event without a file identifier on encode error */
	fh->type = FILEID_INVALID;
	fh->len = 0;
}

/*
 * The inode to use as identifier when reporting fid depends on the event.
 * Report the modified directory inode on dirent modification events.
 * Report the "victim" inode otherwise.
 * For example:
 * FS_ATTRIB reports the child inode even if reported on a watched parent.
 * FS_CREATE reports the modified dir inode and not the created inode.
 */
static struct inode *fanotify_fid_inode(struct inode *to_tell, u32 event_mask,
					const void *data, int data_type)
{
	if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
		return to_tell;

	return (struct inode *)fsnotify_data_inode(data, data_type);
}

/*
 * Allocate and initialize the event type appropriate for @mask and the
 * group's flags: a permission event, a name event (FAN_DIR_MODIFY), a
 * fid event (FAN_REPORT_FID) or a plain path event.  Returns NULL on
 * allocation failure.  Allocation is charged to the group's memcg.
 */
struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
					    struct inode *inode, u32 mask,
					    const void *data, int data_type,
					    const struct qstr *file_name,
					    __kernel_fsid_t *fsid)
{
	struct fanotify_event *event = NULL;
	struct fanotify_fid_event *ffe = NULL;
	struct fanotify_name_event *fne = NULL;
	gfp_t gfp = GFP_KERNEL_ACCOUNT;
	struct inode *id = fanotify_fid_inode(inode, mask, data, data_type);
	const struct path *path = fsnotify_data_path(data, data_type);

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short. For the limited size queues, avoid OOM killer in the
	 * target monitoring memcg as it may have security repercussion.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;
	else
		gfp |= __GFP_RETRY_MAYFAIL;

	/* Whoever is interested in the event, pays for the allocation. */
	memalloc_use_memcg(group->memcg);

	if (fanotify_is_perm_event(mask)) {
		struct fanotify_perm_event *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
		if (!pevent)
			goto out;

		event = &pevent->fae;
		event->type = FANOTIFY_EVENT_TYPE_PATH_PERM;
		pevent->response = 0;
		pevent->state = FAN_EVENT_INIT;
		goto init;
	}

	/*
	 * For FAN_DIR_MODIFY event, we report the fid of the directory and
	 * the name of the modified entry.
	 * Allocate an fanotify_name_event struct and copy the name.
	 */
	if (mask & FAN_DIR_MODIFY && !(WARN_ON_ONCE(!file_name))) {
		fne = kmalloc(sizeof(*fne) + file_name->len + 1, gfp);
		if (!fne)
			goto out;

		event = &fne->fae;
		event->type = FANOTIFY_EVENT_TYPE_FID_NAME;
		fne->name_len = file_name->len;
		strcpy(fne->name, file_name->name);
		goto init;
	}

	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		ffe = kmem_cache_alloc(fanotify_fid_event_cachep, gfp);
		if (!ffe)
			goto out;

		event = &ffe->fae;
		event->type = FANOTIFY_EVENT_TYPE_FID;
	} else {
		struct fanotify_path_event *pevent;

		pevent = kmem_cache_alloc(fanotify_path_event_cachep, gfp);
		if (!pevent)
			goto out;

		event = &pevent->fae;
		event->type = FANOTIFY_EVENT_TYPE_PATH;
	}

init:
	/*
	 * Use the victim inode instead of the watching inode as the id for
	 * event queue, so event reported on parent is merged with event
	 * reported on child when both directory and child watches exist.
	 */
	fsnotify_init_event(&event->fse, (unsigned long)id);
	event->mask = mask;
	if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
		event->pid = get_pid(task_pid(current));
	else
		event->pid = get_pid(task_tgid(current));

	/* fanotify_event_fsid()/_object_fh()/_dir_fh() return NULL for
	 * event types that do not carry the corresponding info. */
	if (fsid && fanotify_event_fsid(event))
		*fanotify_event_fsid(event) = *fsid;

	if (fanotify_event_object_fh(event))
		fanotify_encode_fh(fanotify_event_object_fh(event), id, gfp);

	if (fanotify_event_dir_fh(event))
		fanotify_encode_fh(fanotify_event_dir_fh(event), id, gfp);

	if (fanotify_event_has_path(event)) {
		struct path *p = fanotify_event_path(event);

		if (path) {
			*p = *path;
			path_get(path);
		} else {
			p->mnt = NULL;
			p->dentry = NULL;
		}
	}
out:
	memalloc_unuse_memcg();
	return event;
}

/*
 * Get cached fsid of the filesystem containing the object from any connector.
 * All connectors are supposed to have the same fsid, but we do not verify that
 * here.
 */
static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
{
	int type;
	__kernel_fsid_t fsid = {};

	fsnotify_foreach_obj_type(type) {
		struct fsnotify_mark_connector *conn;

		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;

		conn = READ_ONCE(iter_info->marks[type]->connector);
		/* Mark is just getting destroyed or created? */
		if (!conn)
			continue;
		if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID))
			continue;
		/* Pairs with smp_wmb() in fsnotify_add_mark_list() */
		smp_rmb();
		fsid = conn->fsid;
		if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
			continue;
		return fsid;
	}

	/* No connector had a usable fsid; return the zero fsid. */
	return fsid;
}

/*
 * fsnotify backend entry point: filter the event against the marks,
 * allocate and queue (or merge) the fanotify event, and for permission
 * events wait for the userspace verdict.
 */
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 u32 mask, const void *data, int data_type,
				 const struct qstr *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event *event;
	struct fsnotify_event *fsn_event;
	__kernel_fsid_t fsid = {};

	/* fanotify UAPI bits must stay numerically equal to the fsnotify
	 * internal bits, because masks are passed through unconverted. */
	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(FAN_CREATE != FS_CREATE);
	BUILD_BUG_ON(FAN_DELETE != FS_DELETE);
	BUILD_BUG_ON(FAN_DIR_MODIFY != FS_DIR_MODIFY);
	BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
	BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
	BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);

	BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19);

	mask = fanotify_group_event_mask(group, iter_info, mask, data,
					 data_type);
	if (!mask)
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion. Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) {
		fsid = fanotify_get_fsid(iter_info);
		/* Racing with mark destruction or creation? */
		if (!fsid.val[0] && !fsid.val[1])
			return 0;
	}

	event = fanotify_alloc_event(group, inode, mask, data, data_type,
				     file_name, &fsid);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PERM(event),
					    iter_info);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}

/* Drop the group's reference on its owning user. */
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}

/* Release the held path reference and free a path event. */
static void fanotify_free_path_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_path_event_cachep, FANOTIFY_PE(event));
}

/* Release the held path reference and free a permission event. */
static void fanotify_free_perm_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_perm_event_cachep, FANOTIFY_PERM(event));
}

/* Free a fid event, including any external file handle buffer. */
static void fanotify_free_fid_event(struct fanotify_event *event)
{
	struct fanotify_fid_event *ffe = FANOTIFY_FE(event);

	if (fanotify_fh_has_ext_buf(&ffe->object_fh))
		kfree(fanotify_fh_ext_buf(&ffe->object_fh));
	kmem_cache_free(fanotify_fid_event_cachep, ffe);
}

/* Free a name event, including any external file handle buffer. */
static void fanotify_free_name_event(struct fanotify_event *event)
{
	struct fanotify_name_event *fne = FANOTIFY_NE(event);

	if (fanotify_fh_has_ext_buf(&fne->dir_fh))
		kfree(fanotify_fh_ext_buf(&fne->dir_fh));
	kfree(fne);
}

/* Dispatch to the per-type destructor and drop the pid reference. */
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event *event;

	event = FANOTIFY_E(fsn_event);
	put_pid(event->pid);
	switch (event->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		fanotify_free_path_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_PATH_PERM:
		fanotify_free_perm_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_FID:
		fanotify_free_fid_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_FID_NAME:
		fanotify_free_name_event(event);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

/* Return a mark to the fanotify mark cache. */
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};