/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>

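/*
 * A task may lock pages into memory if it has the CAP_IPC_LOCK
 * capability, or if its RLIMIT_MEMLOCK soft limit is non-zero.
 */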
int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);

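/*
 * mlock_fixup() applies @newflags to the range [start, end) within
 * @vma: it first tries to merge the range into a neighbouring vma,
 * otherwise it splits @vma so that exactly this range changes flags.
 * It then updates mm->locked_vm and, when locking, pre-faults the
 * pages with make_pages_present().
 */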
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct * mm = vma->vm_mm;
	pgoff_t pgoff;
	int pages;
	int ret = 0;

	if (newflags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED; make_pages_present below will bring it back.
	 */
	vma->vm_flags = newflags;

	/*
	 * Keep track of amount of locked VM.
	 */
	pages = (end - start) >> PAGE_SHIFT;
	if (newflags & VM_LOCKED) {
		/* Locking: negate so the subtraction below adds to locked_vm */
		pages = -pages;
		if (!(newflags & VM_IO))
			ret = make_pages_present(start, end);
	}

	mm->locked_vm -= pages;
out:
	if (ret == -ENOMEM)
		ret = -EAGAIN;
	return ret;
}

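/*
 * do_mlock() walks the vma list covering [start, start+len) and sets
 * or clears VM_LOCKED on each piece via mlock_fixup().  A hole or
 * unmapped gap in the range yields -ENOMEM.
 */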
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

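/*
 * mlock(2): lock the pages spanning [start, start+len) into memory.
 * The range is rounded out to whole pages, and the resulting total of
 * locked pages is checked against the RLIMIT_MEMLOCK soft limit unless
 * the caller has CAP_IPC_LOCK.
 *
 * A minimal userspace sketch of what this serves (illustrative only,
 * using the libc wrappers from <sys/mman.h>):
 *
 *	if (mlock(buf, buflen) == 0) {
 *		... buf is now resident and will not be paged out ...
 *		munlock(buf, buflen);
 *	}
 */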
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}

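/*
 * munlock(2): clear VM_LOCKED on [start, start+len).  Unlocking needs
 * no capability or rlimit check, so can_do_mlock() is not consulted.
 */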
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

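/*
 * do_mlockall() implements the body of mlockall()/munlockall().
 * MCL_FUTURE sets VM_LOCKED in mm->def_flags so that future mappings
 * are created locked; MCL_CURRENT walks every existing vma.  Errors
 * from mlock_fixup() are deliberately ignored here.
 */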
static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

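/*
 * mlockall(2): lock the caller's whole address space.  At least one
 * of MCL_CURRENT and MCL_FUTURE must be given; with MCL_CURRENT, the
 * total VM size is checked against RLIMIT_MEMLOCK unless the caller
 * has CAP_IPC_LOCK.
 *
 * Illustrative userspace use (libc wrapper from <sys/mman.h>):
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);
 */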
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	down_write(&current->mm->mmap_sem);

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

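/*
 * munlockall(2): unlock the whole address space and stop locking
 * future mappings; implemented as do_mlockall(0).
 */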
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with a lifetime different from that of processes (SHM_LOCK and
 * SHM_HUGETLB shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

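/*
 * user_shm_lock() charges @size bytes, rounded up to whole pages, of
 * locked shm to @user and takes a reference on it.  Returns 1 if the
 * charge fits under RLIMIT_MEMLOCK or the caller has CAP_IPC_LOCK,
 * 0 otherwise.
 */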
int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

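/*
 * user_shm_unlock() reverses user_shm_lock(): it uncharges the pages
 * and drops the reference on @user.
 */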
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}