#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H

#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>

#define MMAP_LOCK_INITIALIZER(name) \
	.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),

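/*
 * Usage note (hedged, not part of the original header): MMAP_LOCK_INITIALIZER()
 * supplies a designated initializer for the mmap_lock field of a statically
 * allocated mm_struct, as done for init_mm in mm/init-mm.c. A minimal sketch,
 * where example_mm and any other fields are hypothetical:
 *
 *	static struct mm_struct example_mm = {
 *		.mm_users = ATOMIC_INIT(2),
 *		MMAP_LOCK_INITIALIZER(example_mm)
 *	};
 */
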
/*
 * DECLARE_TRACEPOINT() (from <linux/tracepoint-defs.h>) exposes only the
 * tracepoint's static key, so this header can test tracepoint_enabled()
 * below without pulling in the full tracepoint machinery.
 */
DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);

#ifdef CONFIG_TRACING

void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);

/*
 * These wrappers keep the lock fast paths down to a static-branch test:
 * the out-of-line __mmap_lock_do_trace_*() helpers are only called while
 * the corresponding tracepoint is enabled.
 */
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
	if (tracepoint_enabled(mmap_lock_start_locking))
		__mmap_lock_do_trace_start_locking(mm, write);
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
	if (tracepoint_enabled(mmap_lock_acquire_returned))
		__mmap_lock_do_trace_acquire_returned(mm, write, success);
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
	if (tracepoint_enabled(mmap_lock_released))
		__mmap_lock_do_trace_released(mm, write);
}

#else /* !CONFIG_TRACING */

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}

#endif /* CONFIG_TRACING */

/* Assert that the current task holds mmap_lock in either read or write mode. */
static inline void mmap_assert_locked(struct mm_struct *mm)
{
	lockdep_assert_held(&mm->mmap_lock);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}

/* Assert that the current task holds mmap_lock for writing. */
static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
	lockdep_assert_held_write(&mm->mmap_lock);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}

#ifdef CONFIG_PER_VMA_LOCK
/*
 * Drop all currently-held per-VMA locks.
 * This is called from the mmap_lock implementation directly before releasing
 * a write-locked mmap_lock (or downgrading it to read-locked).
 * This should normally NOT be called manually from other places.
 * If you want to call this manually anyway, keep in mind that this will release
 * *all* VMA write locks, including ones from further up the stack.
 */
static inline void vma_end_write_all(struct mm_struct *mm)
{
	mmap_assert_write_locked(mm);
	/*
	 * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
	 * mmap_lock being held.
	 * We need RELEASE semantics here to ensure that preceding stores into
	 * the VMA take effect before we unlock it with this store.
	 * Pairs with ACQUIRE semantics in vma_start_read().
	 */
	smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
}
#else
static inline void vma_end_write_all(struct mm_struct *mm) {}
#endif

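/*
 * For context (hedged, not part of the original header): the pairing ACQUIRE
 * lives in vma_start_read() in <linux/mm.h>. A simplified sketch of that
 * check, eliding the per-VMA lock acquisition and the re-validation the real
 * code performs:
 *
 *	if (vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))
 *		return false;	// VMA is write-locked under mmap_lock
 */
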
/* Called when an mm is first set up, e.g. from mm_init() during fork. */
static inline void mmap_init_lock(struct mm_struct *mm)
{
	init_rwsem(&mm->mmap_lock);
}

static inline void mmap_write_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}

/* Lockdep-annotated variant for the rare cases where two mmap_locks nest. */
static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write_nested(&mm->mmap_lock, subclass);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}

/*
 * Like mmap_write_lock(), but a fatal signal aborts the wait; returns 0 on
 * success or -EINTR if interrupted.
 */
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	int ret;

	__mmap_lock_trace_start_locking(mm, true);
	ret = down_write_killable(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, ret == 0);
	return ret;
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, true);
	/* Release all per-VMA write locks before dropping mmap_lock itself. */
	vma_end_write_all(mm);
	up_write(&mm->mmap_lock);
}

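/*
 * Typical write-side usage (hedged sketch; example_update_vmas() is a
 * hypothetical stand-in for code that modifies the VMA tree):
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	example_update_vmas(mm);
 *	mmap_write_unlock(mm);
 */
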
/*
 * Downgrading traces as a successful read acquisition, since the caller
 * continues to hold the lock for reading afterwards.
 */
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
	__mmap_lock_trace_acquire_returned(mm, false, true);
	vma_end_write_all(mm);
	downgrade_write(&mm->mmap_lock);
}

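/*
 * The downgrade pattern publishes VMA updates while keeping the mm stable
 * for follow-up read-only work (hedged sketch):
 *
 *	mmap_write_lock(mm);
 *	// ...modify VMAs...
 *	mmap_write_downgrade(mm);
 *	// ...read-side work; other readers may now proceed...
 *	mmap_read_unlock(mm);
 */
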
static inline void mmap_read_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, false);
	down_read(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, true);
}

/* Like mmap_read_lock(), but a fatal signal aborts the wait with -EINTR. */
static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
	int ret;

	__mmap_lock_trace_start_locking(mm, false);
	ret = down_read_killable(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, ret == 0);
	return ret;
}

/* Non-blocking; returns true if the read lock was acquired. */
static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	bool ret;

	__mmap_lock_trace_start_locking(mm, false);
	ret = down_read_trylock(&mm->mmap_lock) != 0;
	__mmap_lock_trace_acquire_returned(mm, false, ret);
	return ret;
}

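/*
 * mmap_read_trylock() suits paths that must not sleep (hedged sketch;
 * the -EAGAIN fallback is hypothetical):
 *
 *	if (!mmap_read_trylock(mm))
 *		return -EAGAIN;
 *	// ...short, non-sleeping inspection of the VMA tree...
 *	mmap_read_unlock(mm);
 */
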
static inline void mmap_read_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read(&mm->mmap_lock);
}

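/*
 * Typical read-side usage (hedged sketch): hold the lock across any VMA
 * lookup and for as long as the returned VMA is dereferenced:
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, addr);
 *	// ...vma, if non-NULL, cannot be freed while the lock is held...
 *	mmap_read_unlock(mm);
 */
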
/*
 * For the rare cases where the lock is released by a different context than
 * the one that acquired it, bypassing lockdep's owner tracking.
 */
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read_non_owner(&mm->mmap_lock);
}

static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
	return rwsem_is_contended(&mm->mmap_lock);
}

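/*
 * mmap_lock_is_contended() lets long-running holders yield (hedged sketch of
 * the common drop-and-reacquire pattern; example_batch() is hypothetical):
 *
 *	mmap_read_lock(mm);
 *	while (example_batch(mm)) {
 *		if (mmap_lock_is_contended(mm)) {
 *			mmap_read_unlock(mm);
 *			cond_resched();
 *			mmap_read_lock(mm);
 *		}
 *	}
 *	mmap_read_unlock(mm);
 */
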
#endif /* _LINUX_MMAP_LOCK_H */