xref: /openbmc/linux/fs/notify/group.c (revision e1cd7b80)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#include <linux/atomic.h>

/*
 * Final freeing of a group
 */
static void fsnotify_final_destroy_group(struct fsnotify_group *group)
{
	if (group->ops->free_group_priv)
		group->ops->free_group_priv(group);

	mem_cgroup_put(group->memcg);

	kfree(group);
}

/*
 * Stop queueing new events. Once this function returns,
 * fsnotify_add_event() will not add any new events to the group's queue.
 */
void fsnotify_group_stop_queueing(struct fsnotify_group *group)
{
	spin_lock(&group->notification_lock);
	group->shutdown = true;
	spin_unlock(&group->notification_lock);
}
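
/*
 * For context (a rough sketch, not code from this file): the queueing side
 * in fs/notify/notification.c checks this flag under the same lock before
 * adding an event, along the lines of:
 *
 *	spin_lock(&group->notification_lock);
 *	if (group->shutdown) {
 *		spin_unlock(&group->notification_lock);
 *		return ...;	(event is dropped, never queued)
 *	}
 *
 * Taking and releasing notification_lock above is therefore enough to
 * guarantee that nothing new is queued once this function has returned.
 */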

/*
 * Trying to get rid of a group. Remove all marks, flush all events and release
 * the group reference.
 * Note that another thread calling fsnotify_clear_marks_by_group() may still
 * hold a ref to the group.
 */
void fsnotify_destroy_group(struct fsnotify_group *group)
{
	/*
	 * Stop queueing new events. The code below is careful enough to not
	 * require this, but fanotify needs to stop queueing events even before
	 * fsnotify_destroy_group() is called, and this makes the other callers
	 * of fsnotify_destroy_group() see the same behavior.
	 */
	fsnotify_group_stop_queueing(group);

	/* Clear all marks for this group and queue them for destruction */
	fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_ALL_TYPES_MASK);

	/*
	 * Some marks can still be pinned when waiting for a response from
	 * userspace. Wait for those now. fsnotify_prepare_user_wait() will
	 * not succeed now, so this wait is race-free.
	 */
	wait_event(group->notification_waitq, !atomic_read(&group->user_waits));

	/*
	 * Wait until all marks get really destroyed. We could actually destroy
	 * them ourselves instead of waiting for the worker to do it, however
	 * that would be racy as the worker can already be processing some marks
	 * before we even entered fsnotify_destroy_group().
	 */
	fsnotify_wait_marks_destroyed();

	/*
	 * Since we have waited for fsnotify_mark_srcu in
	 * fsnotify_mark_destroy_list(), there can be no outstanding event
	 * notification against this group. So clearing the notification queue
	 * of all events is reliable now.
	 */
	fsnotify_flush_notify(group);

	/*
	 * Destroy the overflow event (we cannot use fsnotify_destroy_event()
	 * as that deliberately ignores overflow events).
	 */
	if (group->overflow_event)
		group->ops->free_event(group->overflow_event);

	fsnotify_put_group(group);
}
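
/*
 * Teardown sketch (illustrative only, not part of this file): a backend's
 * ->release() path typically stops queueing first, deals with whatever is
 * still sitting on its queue, and only then calls fsnotify_destroy_group().
 * "example_release" is a hypothetical function, loosely modeled on how
 * fanotify tears its group down.
 */
static int example_release(struct inode *inode, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	/* After this returns, no new events can be queued to the group. */
	fsnotify_group_stop_queueing(group);

	/* ... backend-specific handling of already-queued events ... */

	/* Drops the reference taken by fsnotify_alloc_group(). */
	fsnotify_destroy_group(group);
	return 0;
}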

/*
 * Get a reference to a group.
 */
void fsnotify_get_group(struct fsnotify_group *group)
{
	refcount_inc(&group->refcnt);
}

/*
 * Drop a reference to a group. Free it once the last reference is gone.
 */
void fsnotify_put_group(struct fsnotify_group *group)
{
	if (refcount_dec_and_test(&group->refcnt))
		fsnotify_final_destroy_group(group);
}
EXPORT_SYMBOL_GPL(fsnotify_put_group);
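
/*
 * Pairing sketch (illustrative only, not part of this file): code that must
 * keep the group alive across a window where no other reference is
 * guaranteed takes a temporary reference and drops it when done.
 * "example_use_group" is a hypothetical helper, not a real kernel symbol.
 */
static void example_use_group(struct fsnotify_group *group)
{
	fsnotify_get_group(group);
	/* ... group cannot be freed anywhere in this window ... */
	fsnotify_put_group(group);
}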

/*
 * Create a new fsnotify_group and hold a reference for the group returned.
 */
struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
{
	struct fsnotify_group *group;

	group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	/* set to 0 when there are no external references to this group */
	refcount_set(&group->refcnt, 1);
	atomic_set(&group->num_marks, 0);
	atomic_set(&group->user_waits, 0);

	spin_lock_init(&group->notification_lock);
	INIT_LIST_HEAD(&group->notification_list);
	init_waitqueue_head(&group->notification_waitq);
	group->max_events = UINT_MAX;

	mutex_init(&group->mark_mutex);
	INIT_LIST_HEAD(&group->marks_list);

	group->ops = ops;

	return group;
}
EXPORT_SYMBOL_GPL(fsnotify_alloc_group);
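
/*
 * Allocation sketch (illustrative only, not part of this file): a backend
 * passes its fsnotify_ops and checks the ERR_PTR-encoded return value.
 * "example_ops" and "example_create_group" are hypothetical names; a real
 * backend would at least provide a ->handle_event() callback in its ops.
 */
static struct fsnotify_group *example_create_group(void)
{
	static const struct fsnotify_ops example_ops = {
		/* ->handle_event() and the other callbacks elided here */
	};
	struct fsnotify_group *group;

	group = fsnotify_alloc_group(&example_ops);
	if (IS_ERR(group))
		return group;	/* ERR_PTR(-ENOMEM) on allocation failure */

	/* The caller now owns the single reference taken above. */
	return group;
}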

int fsnotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->fsn_fa) >= 0 ? 0 : -EIO;
}
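
/*
 * Wiring sketch (illustrative only, not part of this file): backends expose
 * this through their file_operations so userspace can request SIGIO-style
 * notification on the group's file descriptor; inotify and fanotify hook it
 * up the same way. "example_notify_fops" is a hypothetical name.
 */
static const struct file_operations example_notify_fops = {
	.fasync		= fsnotify_fasync,
	/* .read, .poll, .release and friends elided */
};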