/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>

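/*
 * mlock_fixup - change VM_LOCKED on the range [start, end) of @vma.
 *
 * Tries to merge the modified region with its neighbours; failing that,
 * splits @vma so the flag change covers exactly the requested range.
 * Updates mm->locked_vm accounting and, when locking a non-VM_IO vma,
 * faults the pages in via make_pages_present().  *prev is updated for
 * the caller's vma walk.  Must be called with mmap_sem held for write.
 */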
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct * mm = vma->vm_mm;
	pgoff_t pgoff;
	int pages;
	int ret = 0;

	if (newflags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED; make_pages_present below will bring it back.
	 */
	vma->vm_flags = newflags;

	/*
	 * Keep track of amount of locked VM: pages is negated when
	 * locking, so the subtraction below actually grows locked_vm.
	 */
	pages = (end - start) >> PAGE_SHIFT;
	if (newflags & VM_LOCKED) {
		pages = -pages;
		if (!(newflags & VM_IO))
			ret = make_pages_present(start, end);
	}

	vma->vm_mm->locked_vm -= pages;
out:
	if (ret == -ENOMEM)
		ret = -EAGAIN;
	return ret;
}

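/*
 * do_mlock - set or clear VM_LOCKED on every vma overlapping
 * [start, start + len).  The range must be fully mapped: any gap
 * between vmas aborts the walk with -ENOMEM.  Must be called with
 * mmap_sem held for write.
 */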
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

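/*
 * sys_mlock - lock a user address range into memory.
 *
 * The range is rounded out to page boundaries and checked against
 * RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK.  A rough sketch
 * of the userspace call this services (illustrative only):
 *
 *	void *buf = malloc(len);
 *	if (mlock(buf, len) != 0)
 *		perror("mlock");
 *
 * Returns 0 on success, -EPERM if the caller may not lock memory,
 * or -ENOMEM/-EAGAIN on failure.
 */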
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	down_write(&current->mm->mmap_sem);
	/* round the range out to full pages */
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}

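/*
 * sys_munlock - unlock a user address range.  The range is rounded
 * out to page boundaries; no privilege or rlimit check is needed to
 * unlock memory.
 */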
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

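/*
 * do_mlockall - apply MCL_CURRENT/MCL_FUTURE to the whole address
 * space.  MCL_FUTURE is recorded in mm->def_flags so that new
 * mappings inherit VM_LOCKED; MCL_CURRENT walks the existing vmas.
 * Called with flags == 0 from sys_munlockall() to undo both.
 * Must be called with mmap_sem held for write.
 */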
static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	/* MCL_FUTURE alone only affects new mappings, via def_flags */
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

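/*
 * sys_mlockall - lock the caller's whole address space (MCL_CURRENT)
 * and/or all future mappings (MCL_FUTURE), subject to RLIMIT_MEMLOCK
 * unless the caller has CAP_IPC_LOCK.
 */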
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	down_write(&current->mm->mmap_sem);

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

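/*
 * sys_munlockall - undo mlockall(): clear VM_LOCKED on all current
 * vmas and drop VM_LOCKED from mm->def_flags.
 */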
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with a lifetime different from that of processes (SHM_LOCK
 * and SHM_HUGETLB shm segments) get accounted against the user_struct
 * instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

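/*
 * user_shm_lock - charge a locked shm segment of @size bytes to @user.
 * Returns 1 and takes a uid reference if the charge fits within
 * RLIMIT_MEMLOCK (or the caller has CAP_IPC_LOCK), 0 otherwise.
 */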
int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

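/*
 * user_shm_unlock - undo user_shm_lock(): uncharge the pages and drop
 * the uid reference.
 */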
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}