xref: /openbmc/linux/ipc/shm.c (revision c997aabb)
1 /*
2  * linux/ipc/shm.c
3  * Copyright (C) 1992, 1993 Krishna Balasubramanian
4  *	 Many improvements/fixes by Bruno Haible.
5  * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
6  * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
7  *
8  * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
9  * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
10  * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
11  * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
12  * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
13  * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
14  * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
15  *
16  * support for audit of ipc object properties and permission changes
17  * Dustin Kirkland <dustin.kirkland@us.ibm.com>
18  *
19  * namespaces support
20  * OpenVZ, SWsoft Inc.
21  * Pavel Emelianov <xemul@openvz.org>
22  *
23  * Better ipc lock (kern_ipc_perm.lock) handling
24  * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
25  */
26 
27 #include <linux/slab.h>
28 #include <linux/mm.h>
29 #include <linux/hugetlb.h>
30 #include <linux/shm.h>
31 #include <linux/init.h>
32 #include <linux/file.h>
33 #include <linux/mman.h>
34 #include <linux/shmem_fs.h>
35 #include <linux/security.h>
36 #include <linux/syscalls.h>
37 #include <linux/audit.h>
38 #include <linux/capability.h>
39 #include <linux/ptrace.h>
40 #include <linux/seq_file.h>
41 #include <linux/rwsem.h>
42 #include <linux/nsproxy.h>
43 #include <linux/mount.h>
44 #include <linux/ipc_namespace.h>
45 
46 #include <linux/uaccess.h>
47 
48 #include "util.h"
49 
50 struct shm_file_data {
51 	int id;
52 	struct ipc_namespace *ns;
53 	struct file *file;
54 	const struct vm_operations_struct *vm_ops;
55 };
56 
57 #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
58 
59 static const struct file_operations shm_file_operations;
60 static const struct vm_operations_struct shm_vm_ops;
61 
62 #define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])
63 
64 #define shm_unlock(shp)			\
65 	ipc_unlock(&(shp)->shm_perm)
66 
67 static int newseg(struct ipc_namespace *, struct ipc_params *);
68 static void shm_open(struct vm_area_struct *vma);
69 static void shm_close(struct vm_area_struct *vma);
70 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
71 #ifdef CONFIG_PROC_FS
72 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
73 #endif
74 
75 void shm_init_ns(struct ipc_namespace *ns)
76 {
77 	ns->shm_ctlmax = SHMMAX;
78 	ns->shm_ctlall = SHMALL;
79 	ns->shm_ctlmni = SHMMNI;
80 	ns->shm_rmid_forced = 0;
81 	ns->shm_tot = 0;
82 	ipc_init_ids(&shm_ids(ns));
83 }
84 
85 /*
86  * Called with shm_ids.rwsem (writer) and the shp structure locked.
87  * Only shm_ids.rwsem remains locked on exit.
88  */
89 static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
90 {
91 	struct shmid_kernel *shp;
92 
93 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
94 
95 	if (shp->shm_nattch) {
96 		shp->shm_perm.mode |= SHM_DEST;
97 		/* Do not find it any more */
98 		shp->shm_perm.key = IPC_PRIVATE;
99 		shm_unlock(shp);
100 	} else
101 		shm_destroy(ns, shp);
102 }
103 
104 #ifdef CONFIG_IPC_NS
105 void shm_exit_ns(struct ipc_namespace *ns)
106 {
107 	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
108 	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
109 }
110 #endif
111 
112 static int __init ipc_ns_init(void)
113 {
114 	shm_init_ns(&init_ipc_ns);
115 	return 0;
116 }
117 
118 pure_initcall(ipc_ns_init);
119 
120 void __init shm_init(void)
121 {
122 	ipc_init_proc_interface("sysvipc/shm",
123 #if BITS_PER_LONG <= 32
124 				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
125 #else
126 				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
127 #endif
128 				IPC_SHM_IDS, sysvipc_shm_proc_show);
129 }
130 
131 static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
132 {
133 	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
134 
135 	if (IS_ERR(ipcp))
136 		return ERR_CAST(ipcp);
137 
138 	return container_of(ipcp, struct shmid_kernel, shm_perm);
139 }
140 
141 static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
142 {
143 	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
144 
145 	if (IS_ERR(ipcp))
146 		return ERR_CAST(ipcp);
147 
148 	return container_of(ipcp, struct shmid_kernel, shm_perm);
149 }
150 
151 /*
152  * shm_lock_(check_) routines are called in the paths where the rwsem
153  * is not necessarily held.
154  */
155 static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
156 {
157 	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
158 
159 	/*
160 	 * Callers of shm_lock() must validate the status of the returned ipc
161 	 * object pointer (as returned by ipc_lock()), and error out as
162 	 * appropriate.
163 	 */
164 	if (IS_ERR(ipcp))
165 		return (void *)ipcp;
166 	return container_of(ipcp, struct shmid_kernel, shm_perm);
167 }
168 
169 static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
170 {
171 	rcu_read_lock();
172 	ipc_lock_object(&ipcp->shm_perm);
173 }
174 
175 static void shm_rcu_free(struct rcu_head *head)
176 {
177 	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
178 							rcu);
179 	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
180 							shm_perm);
181 	security_shm_free(shp);
182 	kvfree(shp);
183 }
184 
185 static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
186 {
187 	list_del(&s->shm_clist);
188 	ipc_rmid(&shm_ids(ns), &s->shm_perm);
189 }
190 
191 
192 static int __shm_open(struct vm_area_struct *vma)
193 {
194 	struct file *file = vma->vm_file;
195 	struct shm_file_data *sfd = shm_file_data(file);
196 	struct shmid_kernel *shp;
197 
198 	shp = shm_lock(sfd->ns, sfd->id);
199 
200 	if (IS_ERR(shp))
201 		return PTR_ERR(shp);
202 
203 	shp->shm_atim = get_seconds();
204 	shp->shm_lprid = task_tgid_vnr(current);
205 	shp->shm_nattch++;
206 	shm_unlock(shp);
207 	return 0;
208 }
209 
210 /* This is called by fork, once for every shm attach. */
211 static void shm_open(struct vm_area_struct *vma)
212 {
213 	int err = __shm_open(vma);
214 	/*
215 	 * We raced in the idr lookup or with shm_destroy().
216 	 * Either way, the ID is busted.
217 	 */
218 	WARN_ON_ONCE(err);
219 }
220 
221 /*
222  * shm_destroy - free the struct shmid_kernel
223  *
224  * @ns: namespace
225  * @shp: struct to free
226  *
227  * It has to be called with shp and shm_ids.rwsem (writer) locked,
228  * but returns with shp unlocked and freed.
229  */
230 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
231 {
232 	struct file *shm_file;
233 
234 	shm_file = shp->shm_file;
235 	shp->shm_file = NULL;
236 	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
237 	shm_rmid(ns, shp);
238 	shm_unlock(shp);
239 	if (!is_file_hugepages(shm_file))
240 		shmem_lock(shm_file, 0, shp->mlock_user);
241 	else if (shp->mlock_user)
242 		user_shm_unlock(i_size_read(file_inode(shm_file)),
243 				shp->mlock_user);
244 	fput(shm_file);
245 	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
246 }
247 
248 /*
249  * shm_may_destroy - identifies whether shm segment should be destroyed now
250  *
251  * Returns true if and only if there are no active users of the segment and
252  * one of the following is true:
253  *
254  * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
255  *
256  * 2) sysctl kernel.shm_rmid_forced is set to 1.
257  */
258 static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
259 {
260 	return (shp->shm_nattch == 0) &&
261 	       (ns->shm_rmid_forced ||
262 		(shp->shm_perm.mode & SHM_DEST));
263 }
264 
265 /*
266  * remove the attach descriptor vma.
267  * free memory for segment if it is marked destroyed.
268  * The descriptor has already been removed from the current->mm->mmap list
269  * and will later be kfree()d.
270  */
271 static void shm_close(struct vm_area_struct *vma)
272 {
273 	struct file *file = vma->vm_file;
274 	struct shm_file_data *sfd = shm_file_data(file);
275 	struct shmid_kernel *shp;
276 	struct ipc_namespace *ns = sfd->ns;
277 
278 	down_write(&shm_ids(ns).rwsem);
279 	/* remove from the list of attaches of the shm segment */
280 	shp = shm_lock(ns, sfd->id);
281 
282 	/*
283 	 * We raced in the idr lookup or with shm_destroy().
284 	 * Either way, the ID is busted.
285 	 */
286 	if (WARN_ON_ONCE(IS_ERR(shp)))
287 		goto done; /* no-op */
288 
289 	shp->shm_lprid = task_tgid_vnr(current);
290 	shp->shm_dtim = get_seconds();
291 	shp->shm_nattch--;
292 	if (shm_may_destroy(ns, shp))
293 		shm_destroy(ns, shp);
294 	else
295 		shm_unlock(shp);
296 done:
297 	up_write(&shm_ids(ns).rwsem);
298 }
299 
300 /* Called with ns->shm_ids(ns).rwsem locked */
301 static int shm_try_destroy_orphaned(int id, void *p, void *data)
302 {
303 	struct ipc_namespace *ns = data;
304 	struct kern_ipc_perm *ipcp = p;
305 	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
306 
307 	/*
308 	 * We want to destroy segments that have no users and whose
309 	 * originating process has already exited.
310 	 *
311 	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
312 	 */
313 	if (shp->shm_creator != NULL)
314 		return 0;
315 
316 	if (shm_may_destroy(ns, shp)) {
317 		shm_lock_by_ptr(shp);
318 		shm_destroy(ns, shp);
319 	}
320 	return 0;
321 }
322 
323 void shm_destroy_orphaned(struct ipc_namespace *ns)
324 {
325 	down_write(&shm_ids(ns).rwsem);
326 	if (shm_ids(ns).in_use)
327 		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
328 	up_write(&shm_ids(ns).rwsem);
329 }
330 
331 /* Locking assumes this will only be called with task == current */
332 void exit_shm(struct task_struct *task)
333 {
334 	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
335 	struct shmid_kernel *shp, *n;
336 
337 	if (list_empty(&task->sysvshm.shm_clist))
338 		return;
339 
340 	/*
341 	 * If kernel.shm_rmid_forced is not set then only keep track of
342 	 * which shmids are orphaned, so that they can be cleaned up if
343 	 * the sysctl is set later.
344 	 */
345 	if (!ns->shm_rmid_forced) {
346 		down_read(&shm_ids(ns).rwsem);
347 		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
348 			shp->shm_creator = NULL;
349 		/*
350 		 * We only hold the read lock, but we are only called on
351 		 * current, so no entry on the list will be touched by others.
352 		 */
353 		list_del(&task->sysvshm.shm_clist);
354 		up_read(&shm_ids(ns).rwsem);
355 		return;
356 	}
357 
358 	/*
359 	 * Destroy all already-created segments that are not yet mapped, and
360 	 * mark any mapped segments as orphaned to cover the sysctl toggling.
361 	 * Destruction is skipped if shm_may_destroy() returns false.
362 	 */
363 	down_write(&shm_ids(ns).rwsem);
364 	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
365 		shp->shm_creator = NULL;
366 
367 		if (shm_may_destroy(ns, shp)) {
368 			shm_lock_by_ptr(shp);
369 			shm_destroy(ns, shp);
370 		}
371 	}
372 
373 	/* Remove the list head from any segments still attached. */
374 	list_del(&task->sysvshm.shm_clist);
375 	up_write(&shm_ids(ns).rwsem);
376 }
377 
378 static int shm_fault(struct vm_fault *vmf)
379 {
380 	struct file *file = vmf->vma->vm_file;
381 	struct shm_file_data *sfd = shm_file_data(file);
382 
383 	return sfd->vm_ops->fault(vmf);
384 }
385 
386 #ifdef CONFIG_NUMA
387 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
388 {
389 	struct file *file = vma->vm_file;
390 	struct shm_file_data *sfd = shm_file_data(file);
391 	int err = 0;
392 
393 	if (sfd->vm_ops->set_policy)
394 		err = sfd->vm_ops->set_policy(vma, new);
395 	return err;
396 }
397 
398 static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
399 					unsigned long addr)
400 {
401 	struct file *file = vma->vm_file;
402 	struct shm_file_data *sfd = shm_file_data(file);
403 	struct mempolicy *pol = NULL;
404 
405 	if (sfd->vm_ops->get_policy)
406 		pol = sfd->vm_ops->get_policy(vma, addr);
407 	else if (vma->vm_policy)
408 		pol = vma->vm_policy;
409 
410 	return pol;
411 }
412 #endif
413 
414 static int shm_mmap(struct file *file, struct vm_area_struct *vma)
415 {
416 	struct shm_file_data *sfd = shm_file_data(file);
417 	int ret;
418 
419 	/*
420 	 * In case of remap_file_pages() emulation, the file can represent a
421 	 * removed IPC ID: propagate the shm_lock() error to the caller.
422 	 */
423 	ret = __shm_open(vma);
424 	if (ret)
425 		return ret;
426 
427 	ret = call_mmap(sfd->file, vma);
428 	if (ret) {
429 		shm_close(vma);
430 		return ret;
431 	}
432 	sfd->vm_ops = vma->vm_ops;
433 #ifdef CONFIG_MMU
434 	WARN_ON(!sfd->vm_ops->fault);
435 #endif
436 	vma->vm_ops = &shm_vm_ops;
437 	return 0;
438 }
439 
440 static int shm_release(struct inode *ino, struct file *file)
441 {
442 	struct shm_file_data *sfd = shm_file_data(file);
443 
444 	put_ipc_ns(sfd->ns);
445 	shm_file_data(file) = NULL;
446 	kfree(sfd);
447 	return 0;
448 }
449 
450 static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
451 {
452 	struct shm_file_data *sfd = shm_file_data(file);
453 
454 	if (!sfd->file->f_op->fsync)
455 		return -EINVAL;
456 	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
457 }
458 
459 static long shm_fallocate(struct file *file, int mode, loff_t offset,
460 			  loff_t len)
461 {
462 	struct shm_file_data *sfd = shm_file_data(file);
463 
464 	if (!sfd->file->f_op->fallocate)
465 		return -EOPNOTSUPP;
466 	return sfd->file->f_op->fallocate(file, mode, offset, len);
467 }
468 
469 static unsigned long shm_get_unmapped_area(struct file *file,
470 	unsigned long addr, unsigned long len, unsigned long pgoff,
471 	unsigned long flags)
472 {
473 	struct shm_file_data *sfd = shm_file_data(file);
474 
475 	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
476 						pgoff, flags);
477 }
478 
479 static const struct file_operations shm_file_operations = {
480 	.mmap		= shm_mmap,
481 	.fsync		= shm_fsync,
482 	.release	= shm_release,
483 	.get_unmapped_area	= shm_get_unmapped_area,
484 	.llseek		= noop_llseek,
485 	.fallocate	= shm_fallocate,
486 };
487 
488 /*
489  * shm_file_operations_huge is now identical to shm_file_operations,
490  * but we keep it distinct for the sake of is_file_shm_hugepages().
491  */
492 static const struct file_operations shm_file_operations_huge = {
493 	.mmap		= shm_mmap,
494 	.fsync		= shm_fsync,
495 	.release	= shm_release,
496 	.get_unmapped_area	= shm_get_unmapped_area,
497 	.llseek		= noop_llseek,
498 	.fallocate	= shm_fallocate,
499 };
500 
501 bool is_file_shm_hugepages(struct file *file)
502 {
503 	return file->f_op == &shm_file_operations_huge;
504 }
505 
506 static const struct vm_operations_struct shm_vm_ops = {
507 	.open	= shm_open,	/* callback for a new vm-area open */
508 	.close	= shm_close,	/* callback for when the vm-area is released */
509 	.fault	= shm_fault,
510 #if defined(CONFIG_NUMA)
511 	.set_policy = shm_set_policy,
512 	.get_policy = shm_get_policy,
513 #endif
514 };
515 
516 /**
517  * newseg - Create a new shared memory segment
518  * @ns: namespace
519  * @params: ptr to the structure that contains key, size and shmflg
520  *
521  * Called with shm_ids.rwsem held as a writer.
522  */
523 static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
524 {
525 	key_t key = params->key;
526 	int shmflg = params->flg;
527 	size_t size = params->u.size;
528 	int error;
529 	struct shmid_kernel *shp;
530 	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
531 	struct file *file;
532 	char name[13];
533 	vm_flags_t acctflag = 0;
534 
535 	if (size < SHMMIN || size > ns->shm_ctlmax)
536 		return -EINVAL;
537 
538 	if (numpages << PAGE_SHIFT < size)
539 		return -ENOSPC;
540 
541 	if (ns->shm_tot + numpages < ns->shm_tot ||
542 			ns->shm_tot + numpages > ns->shm_ctlall)
543 		return -ENOSPC;
544 
545 	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
546 	if (unlikely(!shp))
547 		return -ENOMEM;
548 
549 	shp->shm_perm.key = key;
550 	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
551 	shp->mlock_user = NULL;
552 
553 	shp->shm_perm.security = NULL;
554 	error = security_shm_alloc(shp);
555 	if (error) {
556 		kvfree(shp);
557 		return error;
558 	}
559 
560 	sprintf(name, "SYSV%08x", key);
561 	if (shmflg & SHM_HUGETLB) {
562 		struct hstate *hs;
563 		size_t hugesize;
564 
565 		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
566 		if (!hs) {
567 			error = -EINVAL;
568 			goto no_file;
569 		}
570 		hugesize = ALIGN(size, huge_page_size(hs));
571 
572 		/* hugetlb_file_setup applies strict accounting */
573 		if (shmflg & SHM_NORESERVE)
574 			acctflag = VM_NORESERVE;
575 		file = hugetlb_file_setup(name, hugesize, acctflag,
576 				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
577 				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
578 	} else {
579 		/*
580 		 * Do not allow unaccounted (VM_NORESERVE) mappings when
581 		 * OVERCOMMIT_NEVER is set, even if the caller asks for it.
582 		 */
583 		if  ((shmflg & SHM_NORESERVE) &&
584 				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
585 			acctflag = VM_NORESERVE;
586 		file = shmem_kernel_file_setup(name, size, acctflag);
587 	}
588 	error = PTR_ERR(file);
589 	if (IS_ERR(file))
590 		goto no_file;
591 
592 	shp->shm_cprid = task_tgid_vnr(current);
593 	shp->shm_lprid = 0;
594 	shp->shm_atim = shp->shm_dtim = 0;
595 	shp->shm_ctim = get_seconds();
596 	shp->shm_segsz = size;
597 	shp->shm_nattch = 0;
598 	shp->shm_file = file;
599 	shp->shm_creator = current;
600 
601 	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
602 	if (error < 0)
603 		goto no_id;
604 
605 	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
606 
607 	/*
608 	 * shmid gets reported as "inode#" in /proc/pid/maps.
609 	 * proc-ps tools use this. Changing this will break them.
610 	 */
611 	file_inode(file)->i_ino = shp->shm_perm.id;
612 
613 	ns->shm_tot += numpages;
614 	error = shp->shm_perm.id;
615 
616 	ipc_unlock_object(&shp->shm_perm);
617 	rcu_read_unlock();
618 	return error;
619 
620 no_id:
621 	if (is_file_hugepages(file) && shp->mlock_user)
622 		user_shm_unlock(size, shp->mlock_user);
623 	fput(file);
624 no_file:
625 	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
626 	return error;
627 }
628 
629 /*
630  * Called with shm_ids.rwsem and ipcp locked.
631  */
632 static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
633 {
634 	struct shmid_kernel *shp;
635 
636 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
637 	return security_shm_associate(shp, shmflg);
638 }
639 
640 /*
641  * Called with shm_ids.rwsem and ipcp locked.
642  */
643 static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
644 				struct ipc_params *params)
645 {
646 	struct shmid_kernel *shp;
647 
648 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
649 	if (shp->shm_segsz < params->u.size)
650 		return -EINVAL;
651 
652 	return 0;
653 }
654 
655 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
656 {
657 	struct ipc_namespace *ns;
658 	static const struct ipc_ops shm_ops = {
659 		.getnew = newseg,
660 		.associate = shm_security,
661 		.more_checks = shm_more_checks,
662 	};
663 	struct ipc_params shm_params;
664 
665 	ns = current->nsproxy->ipc_ns;
666 
667 	shm_params.key = key;
668 	shm_params.flg = shmflg;
669 	shm_params.u.size = size;
670 
671 	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
672 }
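
/*
 * Editor's note -- an illustrative userspace sketch, not part of this file:
 * the SYSCALL_DEFINE3(shmget, ...) above is what the libc shmget() wrapper
 * ends up in, with newseg() doing the actual creation. A minimal caller
 * (key, size and mode are arbitrary examples) might look like:
 *
 *	#include <stdio.h>
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int main(void)
 *	{
 *		// Create a fresh 1 MiB segment, read/write for the owner.
 *		// Or'ing in SHM_HUGETLB would take the hugetlb branch above.
 *		int id = shmget(IPC_PRIVATE, 1 << 20, 0600);
 *
 *		if (id < 0) {
 *			perror("shmget");
 *			return 1;
 *		}
 *		printf("shmid = %d\n", id);
 *		return 0;
 *	}
 */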
673 
674 static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
675 {
676 	switch (version) {
677 	case IPC_64:
678 		return copy_to_user(buf, in, sizeof(*in));
679 	case IPC_OLD:
680 	    {
681 		struct shmid_ds out;
682 
683 		memset(&out, 0, sizeof(out));
684 		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
685 		out.shm_segsz	= in->shm_segsz;
686 		out.shm_atime	= in->shm_atime;
687 		out.shm_dtime	= in->shm_dtime;
688 		out.shm_ctime	= in->shm_ctime;
689 		out.shm_cpid	= in->shm_cpid;
690 		out.shm_lpid	= in->shm_lpid;
691 		out.shm_nattch	= in->shm_nattch;
692 
693 		return copy_to_user(buf, &out, sizeof(out));
694 	    }
695 	default:
696 		return -EINVAL;
697 	}
698 }
699 
700 static inline unsigned long
701 copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
702 {
703 	switch (version) {
704 	case IPC_64:
705 		if (copy_from_user(out, buf, sizeof(*out)))
706 			return -EFAULT;
707 		return 0;
708 	case IPC_OLD:
709 	    {
710 		struct shmid_ds tbuf_old;
711 
712 		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
713 			return -EFAULT;
714 
715 		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
716 		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
717 		out->shm_perm.mode	= tbuf_old.shm_perm.mode;
718 
719 		return 0;
720 	    }
721 	default:
722 		return -EINVAL;
723 	}
724 }
725 
726 static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
727 {
728 	switch (version) {
729 	case IPC_64:
730 		return copy_to_user(buf, in, sizeof(*in));
731 	case IPC_OLD:
732 	    {
733 		struct shminfo out;
734 
735 		if (in->shmmax > INT_MAX)
736 			out.shmmax = INT_MAX;
737 		else
738 			out.shmmax = (int)in->shmmax;
739 
740 		out.shmmin	= in->shmmin;
741 		out.shmmni	= in->shmmni;
742 		out.shmseg	= in->shmseg;
743 		out.shmall	= in->shmall;
744 
745 		return copy_to_user(buf, &out, sizeof(out));
746 	    }
747 	default:
748 		return -EINVAL;
749 	}
750 }
751 
752 /*
753  * Calculate and add used RSS and swap pages of a shm.
754  * Called with shm_ids.rwsem held as a reader
755  */
756 static void shm_add_rss_swap(struct shmid_kernel *shp,
757 	unsigned long *rss_add, unsigned long *swp_add)
758 {
759 	struct inode *inode;
760 
761 	inode = file_inode(shp->shm_file);
762 
763 	if (is_file_hugepages(shp->shm_file)) {
764 		struct address_space *mapping = inode->i_mapping;
765 		struct hstate *h = hstate_file(shp->shm_file);
766 		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
767 	} else {
768 #ifdef CONFIG_SHMEM
769 		struct shmem_inode_info *info = SHMEM_I(inode);
770 
771 		spin_lock_irq(&info->lock);
772 		*rss_add += inode->i_mapping->nrpages;
773 		*swp_add += info->swapped;
774 		spin_unlock_irq(&info->lock);
775 #else
776 		*rss_add += inode->i_mapping->nrpages;
777 #endif
778 	}
779 }
780 
781 /*
782  * Called with shm_ids.rwsem held as a reader
783  */
784 static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
785 		unsigned long *swp)
786 {
787 	int next_id;
788 	int total, in_use;
789 
790 	*rss = 0;
791 	*swp = 0;
792 
793 	in_use = shm_ids(ns).in_use;
794 
795 	for (total = 0, next_id = 0; total < in_use; next_id++) {
796 		struct kern_ipc_perm *ipc;
797 		struct shmid_kernel *shp;
798 
799 		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
800 		if (ipc == NULL)
801 			continue;
802 		shp = container_of(ipc, struct shmid_kernel, shm_perm);
803 
804 		shm_add_rss_swap(shp, rss, swp);
805 
806 		total++;
807 	}
808 }
809 
810 /*
811  * This function handles some shmctl commands which require the rwsem
812  * to be held in write mode.
813  * NOTE: no locks must be held, the rwsem is taken inside this function.
814  */
815 static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
816 		       struct shmid_ds __user *buf, int version)
817 {
818 	struct kern_ipc_perm *ipcp;
819 	struct shmid64_ds shmid64;
820 	struct shmid_kernel *shp;
821 	int err;
822 
823 	if (cmd == IPC_SET) {
824 		if (copy_shmid_from_user(&shmid64, buf, version))
825 			return -EFAULT;
826 	}
827 
828 	down_write(&shm_ids(ns).rwsem);
829 	rcu_read_lock();
830 
831 	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
832 				      &shmid64.shm_perm, 0);
833 	if (IS_ERR(ipcp)) {
834 		err = PTR_ERR(ipcp);
835 		goto out_unlock1;
836 	}
837 
838 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
839 
840 	err = security_shm_shmctl(shp, cmd);
841 	if (err)
842 		goto out_unlock1;
843 
844 	switch (cmd) {
845 	case IPC_RMID:
846 		ipc_lock_object(&shp->shm_perm);
847 		/* do_shm_rmid unlocks the ipc object and rcu */
848 		do_shm_rmid(ns, ipcp);
849 		goto out_up;
850 	case IPC_SET:
851 		ipc_lock_object(&shp->shm_perm);
852 		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
853 		if (err)
854 			goto out_unlock0;
855 		shp->shm_ctim = get_seconds();
856 		break;
857 	default:
858 		err = -EINVAL;
859 		goto out_unlock1;
860 	}
861 
862 out_unlock0:
863 	ipc_unlock_object(&shp->shm_perm);
864 out_unlock1:
865 	rcu_read_unlock();
866 out_up:
867 	up_write(&shm_ids(ns).rwsem);
868 	return err;
869 }
870 
871 static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
872 			 int cmd, int version, void __user *buf)
873 {
874 	int err;
875 	struct shmid_kernel *shp;
876 
877 	/* preliminary security checks for *_INFO */
878 	if (cmd == IPC_INFO || cmd == SHM_INFO) {
879 		err = security_shm_shmctl(NULL, cmd);
880 		if (err)
881 			return err;
882 	}
883 
884 	switch (cmd) {
885 	case IPC_INFO:
886 	{
887 		struct shminfo64 shminfo;
888 
889 		memset(&shminfo, 0, sizeof(shminfo));
890 		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
891 		shminfo.shmmax = ns->shm_ctlmax;
892 		shminfo.shmall = ns->shm_ctlall;
893 
894 		shminfo.shmmin = SHMMIN;
895 		if (copy_shminfo_to_user(buf, &shminfo, version))
896 			return -EFAULT;
897 
898 		down_read(&shm_ids(ns).rwsem);
899 		err = ipc_get_maxid(&shm_ids(ns));
900 		up_read(&shm_ids(ns).rwsem);
901 
902 		if (err < 0)
903 			err = 0;
904 		goto out;
905 	}
906 	case SHM_INFO:
907 	{
908 		struct shm_info shm_info;
909 
910 		memset(&shm_info, 0, sizeof(shm_info));
911 		down_read(&shm_ids(ns).rwsem);
912 		shm_info.used_ids = shm_ids(ns).in_use;
913 		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
914 		shm_info.shm_tot = ns->shm_tot;
915 		shm_info.swap_attempts = 0;
916 		shm_info.swap_successes = 0;
917 		err = ipc_get_maxid(&shm_ids(ns));
918 		up_read(&shm_ids(ns).rwsem);
919 		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
920 			err = -EFAULT;
921 			goto out;
922 		}
923 
924 		err = err < 0 ? 0 : err;
925 		goto out;
926 	}
927 	case SHM_STAT:
928 	case IPC_STAT:
929 	{
930 		struct shmid64_ds tbuf;
931 		int result;
932 
933 		rcu_read_lock();
934 		if (cmd == SHM_STAT) {
935 			shp = shm_obtain_object(ns, shmid);
936 			if (IS_ERR(shp)) {
937 				err = PTR_ERR(shp);
938 				goto out_unlock;
939 			}
940 			result = shp->shm_perm.id;
941 		} else {
942 			shp = shm_obtain_object_check(ns, shmid);
943 			if (IS_ERR(shp)) {
944 				err = PTR_ERR(shp);
945 				goto out_unlock;
946 			}
947 			result = 0;
948 		}
949 
950 		err = -EACCES;
951 		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
952 			goto out_unlock;
953 
954 		err = security_shm_shmctl(shp, cmd);
955 		if (err)
956 			goto out_unlock;
957 
958 		memset(&tbuf, 0, sizeof(tbuf));
959 		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
960 		tbuf.shm_segsz	= shp->shm_segsz;
961 		tbuf.shm_atime	= shp->shm_atim;
962 		tbuf.shm_dtime	= shp->shm_dtim;
963 		tbuf.shm_ctime	= shp->shm_ctim;
964 		tbuf.shm_cpid	= shp->shm_cprid;
965 		tbuf.shm_lpid	= shp->shm_lprid;
966 		tbuf.shm_nattch	= shp->shm_nattch;
967 		rcu_read_unlock();
968 
969 		if (copy_shmid_to_user(buf, &tbuf, version))
970 			err = -EFAULT;
971 		else
972 			err = result;
973 		goto out;
974 	}
975 	default:
976 		return -EINVAL;
977 	}
978 
979 out_unlock:
980 	rcu_read_unlock();
981 out:
982 	return err;
983 }
984 
985 SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
986 {
987 	struct shmid_kernel *shp;
988 	int err, version;
989 	struct ipc_namespace *ns;
990 
991 	if (cmd < 0 || shmid < 0)
992 		return -EINVAL;
993 
994 	version = ipc_parse_version(&cmd);
995 	ns = current->nsproxy->ipc_ns;
996 
997 	switch (cmd) {
998 	case IPC_INFO:
999 	case SHM_INFO:
1000 	case SHM_STAT:
1001 	case IPC_STAT:
1002 		return shmctl_nolock(ns, shmid, cmd, version, buf);
1003 	case IPC_RMID:
1004 	case IPC_SET:
1005 		return shmctl_down(ns, shmid, cmd, buf, version);
1006 	case SHM_LOCK:
1007 	case SHM_UNLOCK:
1008 	{
1009 		struct file *shm_file;
1010 
1011 		rcu_read_lock();
1012 		shp = shm_obtain_object_check(ns, shmid);
1013 		if (IS_ERR(shp)) {
1014 			err = PTR_ERR(shp);
1015 			goto out_unlock1;
1016 		}
1017 
1018 		audit_ipc_obj(&(shp->shm_perm));
1019 		err = security_shm_shmctl(shp, cmd);
1020 		if (err)
1021 			goto out_unlock1;
1022 
1023 		ipc_lock_object(&shp->shm_perm);
1024 
1025 		/* check if shm_destroy() is tearing down shp */
1026 		if (!ipc_valid_object(&shp->shm_perm)) {
1027 			err = -EIDRM;
1028 			goto out_unlock0;
1029 		}
1030 
1031 		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1032 			kuid_t euid = current_euid();
1033 
1034 			if (!uid_eq(euid, shp->shm_perm.uid) &&
1035 			    !uid_eq(euid, shp->shm_perm.cuid)) {
1036 				err = -EPERM;
1037 				goto out_unlock0;
1038 			}
1039 			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1040 				err = -EPERM;
1041 				goto out_unlock0;
1042 			}
1043 		}
1044 
1045 		shm_file = shp->shm_file;
1046 		if (is_file_hugepages(shm_file))
1047 			goto out_unlock0;
1048 
1049 		if (cmd == SHM_LOCK) {
1050 			struct user_struct *user = current_user();
1051 
1052 			err = shmem_lock(shm_file, 1, user);
1053 			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1054 				shp->shm_perm.mode |= SHM_LOCKED;
1055 				shp->mlock_user = user;
1056 			}
1057 			goto out_unlock0;
1058 		}
1059 
1060 		/* SHM_UNLOCK */
1061 		if (!(shp->shm_perm.mode & SHM_LOCKED))
1062 			goto out_unlock0;
1063 		shmem_lock(shm_file, 0, shp->mlock_user);
1064 		shp->shm_perm.mode &= ~SHM_LOCKED;
1065 		shp->mlock_user = NULL;
1066 		get_file(shm_file);
1067 		ipc_unlock_object(&shp->shm_perm);
1068 		rcu_read_unlock();
1069 		shmem_unlock_mapping(shm_file->f_mapping);
1070 
1071 		fput(shm_file);
1072 		return err;
1073 	}
1074 	default:
1075 		return -EINVAL;
1076 	}
1077 
1078 out_unlock0:
1079 	ipc_unlock_object(&shp->shm_perm);
1080 out_unlock1:
1081 	rcu_read_unlock();
1082 	return err;
1083 }
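
/*
 * Editor's note -- an illustrative userspace sketch, not part of this file:
 * the two common shmctl() paths dispatched above, IPC_STAT (handled by
 * shmctl_nolock()) and IPC_RMID (handled by shmctl_down()). The shmid is
 * assumed to come from an earlier shmget() call.
 *
 *	#include <stdio.h>
 *	#include <sys/shm.h>
 *
 *	static int stat_and_remove(int shmid)
 *	{
 *		struct shmid_ds ds;
 *
 *		// IPC_STAT copies the segment's metadata to the caller.
 *		if (shmctl(shmid, IPC_STAT, &ds) < 0) {
 *			perror("shmctl(IPC_STAT)");
 *			return -1;
 *		}
 *		printf("size=%zu nattch=%lu\n",
 *		       ds.shm_segsz, (unsigned long)ds.shm_nattch);
 *
 *		// IPC_RMID marks the segment for destruction; it is freed
 *		// once the last attach is gone (see do_shm_rmid() above).
 *		return shmctl(shmid, IPC_RMID, NULL);
 *	}
 */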
1084 
1085 /*
1086  * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1087  *
1088  * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1089  * "raddr" pointer points to kernel space, and there has to be a wrapper around
1090  * this.
1091  */
1092 long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1093 	      ulong *raddr, unsigned long shmlba)
1094 {
1095 	struct shmid_kernel *shp;
1096 	unsigned long addr = (unsigned long)shmaddr;
1097 	unsigned long size;
1098 	struct file *file;
1099 	int    err;
1100 	unsigned long flags = MAP_SHARED;
1101 	unsigned long prot;
1102 	int acc_mode;
1103 	struct ipc_namespace *ns;
1104 	struct shm_file_data *sfd;
1105 	struct path path;
1106 	fmode_t f_mode;
1107 	unsigned long populate = 0;
1108 
1109 	err = -EINVAL;
1110 	if (shmid < 0)
1111 		goto out;
1112 
1113 	if (addr) {
1114 		if (addr & (shmlba - 1)) {
1115 			/*
1116 			 * Round down to the nearest multiple of shmlba.
1117 			 * For sane do_mmap_pgoff() parameters, avoid round-downs
1118 			 * that hit the nil page while MAP_FIXED is in effect.
1119 			 */
1120 			if ((shmflg & SHM_RND) && addr >= shmlba)
1121 				addr &= ~(shmlba - 1);
1122 			else
1123 #ifndef __ARCH_FORCE_SHMLBA
1124 				if (addr & ~PAGE_MASK)
1125 #endif
1126 					goto out;
1127 		}
1128 
1129 		flags |= MAP_FIXED;
1130 	} else if ((shmflg & SHM_REMAP))
1131 		goto out;
1132 
1133 	if (shmflg & SHM_RDONLY) {
1134 		prot = PROT_READ;
1135 		acc_mode = S_IRUGO;
1136 		f_mode = FMODE_READ;
1137 	} else {
1138 		prot = PROT_READ | PROT_WRITE;
1139 		acc_mode = S_IRUGO | S_IWUGO;
1140 		f_mode = FMODE_READ | FMODE_WRITE;
1141 	}
1142 	if (shmflg & SHM_EXEC) {
1143 		prot |= PROT_EXEC;
1144 		acc_mode |= S_IXUGO;
1145 	}
1146 
1147 	/*
1148 	 * We cannot rely on the fs check since SYSV IPC does have an
1149 	 * additional creator id...
1150 	 */
1151 	ns = current->nsproxy->ipc_ns;
1152 	rcu_read_lock();
1153 	shp = shm_obtain_object_check(ns, shmid);
1154 	if (IS_ERR(shp)) {
1155 		err = PTR_ERR(shp);
1156 		goto out_unlock;
1157 	}
1158 
1159 	err = -EACCES;
1160 	if (ipcperms(ns, &shp->shm_perm, acc_mode))
1161 		goto out_unlock;
1162 
1163 	err = security_shm_shmat(shp, shmaddr, shmflg);
1164 	if (err)
1165 		goto out_unlock;
1166 
1167 	ipc_lock_object(&shp->shm_perm);
1168 
1169 	/* check if shm_destroy() is tearing down shp */
1170 	if (!ipc_valid_object(&shp->shm_perm)) {
1171 		ipc_unlock_object(&shp->shm_perm);
1172 		err = -EIDRM;
1173 		goto out_unlock;
1174 	}
1175 
1176 	path = shp->shm_file->f_path;
1177 	path_get(&path);
1178 	shp->shm_nattch++;
1179 	size = i_size_read(d_inode(path.dentry));
1180 	ipc_unlock_object(&shp->shm_perm);
1181 	rcu_read_unlock();
1182 
1183 	err = -ENOMEM;
1184 	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1185 	if (!sfd) {
1186 		path_put(&path);
1187 		goto out_nattch;
1188 	}
1189 
1190 	file = alloc_file(&path, f_mode,
1191 			  is_file_hugepages(shp->shm_file) ?
1192 				&shm_file_operations_huge :
1193 				&shm_file_operations);
1194 	err = PTR_ERR(file);
1195 	if (IS_ERR(file)) {
1196 		kfree(sfd);
1197 		path_put(&path);
1198 		goto out_nattch;
1199 	}
1200 
1201 	file->private_data = sfd;
1202 	file->f_mapping = shp->shm_file->f_mapping;
1203 	sfd->id = shp->shm_perm.id;
1204 	sfd->ns = get_ipc_ns(ns);
1205 	sfd->file = shp->shm_file;
1206 	sfd->vm_ops = NULL;
1207 
1208 	err = security_mmap_file(file, prot, flags);
1209 	if (err)
1210 		goto out_fput;
1211 
1212 	if (down_write_killable(&current->mm->mmap_sem)) {
1213 		err = -EINTR;
1214 		goto out_fput;
1215 	}
1216 
1217 	if (addr && !(shmflg & SHM_REMAP)) {
1218 		err = -EINVAL;
1219 		if (addr + size < addr)
1220 			goto invalid;
1221 
1222 		if (find_vma_intersection(current->mm, addr, addr + size))
1223 			goto invalid;
1224 	}
1225 
1226 	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
1227 	*raddr = addr;
1228 	err = 0;
1229 	if (IS_ERR_VALUE(addr))
1230 		err = (long)addr;
1231 invalid:
1232 	up_write(&current->mm->mmap_sem);
1233 	if (populate)
1234 		mm_populate(addr, populate);
1235 
1236 out_fput:
1237 	fput(file);
1238 
1239 out_nattch:
1240 	down_write(&shm_ids(ns).rwsem);
1241 	shp = shm_lock(ns, shmid);
1242 	shp->shm_nattch--;
1243 	if (shm_may_destroy(ns, shp))
1244 		shm_destroy(ns, shp);
1245 	else
1246 		shm_unlock(shp);
1247 	up_write(&shm_ids(ns).rwsem);
1248 	return err;
1249 
1250 out_unlock:
1251 	rcu_read_unlock();
1252 out:
1253 	return err;
1254 }
1255 
1256 SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1257 {
1258 	unsigned long ret;
1259 	long err;
1260 
1261 	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1262 	if (err)
1263 		return err;
1264 	force_successful_syscall_return();
1265 	return (long)ret;
1266 }
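
/*
 * Editor's note -- an illustrative userspace sketch, not part of this file:
 * attaching and detaching a segment, which exercises do_shmat() above and
 * the shmdt()/shm_close() teardown below. The shmid is assumed to come from
 * an earlier shmget() call.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/shm.h>
 *
 *	static int attach_and_use(int shmid)
 *	{
 *		// NULL address: the kernel picks a suitably aligned mapping.
 *		char *p = shmat(shmid, NULL, 0);
 *
 *		if (p == (void *)-1) {
 *			perror("shmat");
 *			return -1;
 *		}
 *		strcpy(p, "hello");	// the segment is ordinary memory now
 *		return shmdt(p);	// drops shm_nattch, see shm_close()
 *	}
 */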
1267 
1268 /*
1269  * detach and kill segment if marked destroyed.
1270  * The work is done in shm_close.
1271  */
1272 SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1273 {
1274 	struct mm_struct *mm = current->mm;
1275 	struct vm_area_struct *vma;
1276 	unsigned long addr = (unsigned long)shmaddr;
1277 	int retval = -EINVAL;
1278 #ifdef CONFIG_MMU
1279 	loff_t size = 0;
1280 	struct file *file;
1281 	struct vm_area_struct *next;
1282 #endif
1283 
1284 	if (addr & ~PAGE_MASK)
1285 		return retval;
1286 
1287 	if (down_write_killable(&mm->mmap_sem))
1288 		return -EINTR;
1289 
1290 	/*
1291 	 * This function tries to be smart and unmap shm segments that
1292 	 * were modified by partial mlock or munmap calls:
1293 	 * - It first determines the size of the shm segment that should be
1294 	 *   unmapped: It searches for a vma that is backed by shm and that
1295 	 *   started at address shmaddr. It records its size and then unmaps
1296 	 *   it.
1297 	 * - Then it unmaps all shm vmas that started at shmaddr and that
1298 	 *   are within the initially determined size and that are from the
1299 	 *   same shm segment from which we determined the size.
1300 	 * Errors from do_munmap are ignored: the function only fails if
1301 	 * it's called with invalid parameters or if it's called to unmap
1302 	 * a part of a vma. Both calls in this function are for full vmas,
1303 	 * the parameters are directly copied from the vma itself and always
1304 	 * valid - therefore do_munmap cannot fail. (famous last words?)
1305 	 */
1306 	/*
1307 	 * If it had been mremap()'d, the starting address would not
1308 	 * match the usual checks anyway. So assume all vma's are
1309 	 * above the starting address given.
1310 	 */
1311 	vma = find_vma(mm, addr);
1312 
1313 #ifdef CONFIG_MMU
1314 	while (vma) {
1315 		next = vma->vm_next;
1316 
1317 		/*
1318 		 * Check if the starting address would match, i.e. it's
1319 		 * a fragment created by mprotect() and/or munmap(), or
1320 		 * otherwise it starts at this address with no hassles.
1321 		 */
1322 		if ((vma->vm_ops == &shm_vm_ops) &&
1323 			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1324 
1325 			/*
1326 			 * Record the file of the shm segment being
1327 			 * unmapped.  With mremap(), someone could place
1328 			 * pages from another segment but with equal offsets
1329 			 * in the range we are unmapping.
1330 			 */
1331 			file = vma->vm_file;
1332 			size = i_size_read(file_inode(vma->vm_file));
1333 			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1334 			/*
1335 			 * We discovered the size of the shm segment, so
1336 			 * break out of here and fall through to the next
1337 			 * loop that uses the size information to stop
1338 			 * searching for matching vma's.
1339 			 */
1340 			retval = 0;
1341 			vma = next;
1342 			break;
1343 		}
1344 		vma = next;
1345 	}
1346 
1347 	/*
1348 	 * We need look no further than the maximum address a fragment
1349 	 * could possibly have landed at. Also cast things to loff_t to
1350 	 * prevent overflows and make comparisons vs. equal-width types.
1351 	 */
1352 	size = PAGE_ALIGN(size);
1353 	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1354 		next = vma->vm_next;
1355 
1356 		/* finding a matching vma now does not alter retval */
1357 		if ((vma->vm_ops == &shm_vm_ops) &&
1358 		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1359 		    (vma->vm_file == file))
1360 			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1361 		vma = next;
1362 	}
1363 
1364 #else	/* CONFIG_MMU */
1365 	/* Under NOMMU conditions, the exact address to be destroyed must be
1366 	 * given.
1367 	 */
1368 	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1369 		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1370 		retval = 0;
1371 	}
1372 
1373 #endif
1374 
1375 	up_write(&mm->mmap_sem);
1376 	return retval;
1377 }
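
/*
 * Editor's note -- an illustrative userspace sketch, not part of this file:
 * the fragment handling above matters once an attach has been split, e.g.
 * by munmap()ing a page in the middle of it. A single shmdt() on the
 * original attach address still tears down every remaining fragment
 * (assuming a segment of at least a few pages):
 *
 *	#include <sys/mman.h>
 *	#include <sys/shm.h>
 *	#include <unistd.h>
 *
 *	static int split_then_detach(int shmid)
 *	{
 *		long pg = sysconf(_SC_PAGESIZE);
 *		char *p = shmat(shmid, NULL, 0);
 *
 *		if (p == (void *)-1)
 *			return -1;
 *		// Punch a one-page hole, leaving two shm-backed fragments.
 *		munmap(p + pg, pg);
 *		// shmdt() walks the vma list and unmaps both fragments.
 *		return shmdt(p);
 *	}
 */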
1378 
1379 #ifdef CONFIG_PROC_FS
1380 static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1381 {
1382 	struct user_namespace *user_ns = seq_user_ns(s);
1383 	struct shmid_kernel *shp = it;
1384 	unsigned long rss = 0, swp = 0;
1385 
1386 	shm_add_rss_swap(shp, &rss, &swp);
1387 
1388 #if BITS_PER_LONG <= 32
1389 #define SIZE_SPEC "%10lu"
1390 #else
1391 #define SIZE_SPEC "%21lu"
1392 #endif
1393 
1394 	seq_printf(s,
1395 		   "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
1396 		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
1397 		   SIZE_SPEC " " SIZE_SPEC "\n",
1398 		   shp->shm_perm.key,
1399 		   shp->shm_perm.id,
1400 		   shp->shm_perm.mode,
1401 		   shp->shm_segsz,
1402 		   shp->shm_cprid,
1403 		   shp->shm_lprid,
1404 		   shp->shm_nattch,
1405 		   from_kuid_munged(user_ns, shp->shm_perm.uid),
1406 		   from_kgid_munged(user_ns, shp->shm_perm.gid),
1407 		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
1408 		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
1409 		   shp->shm_atim,
1410 		   shp->shm_dtim,
1411 		   shp->shm_ctim,
1412 		   rss * PAGE_SIZE,
1413 		   swp * PAGE_SIZE);
1414 
1415 	return 0;
1416 }
1417 #endif
1418