1 /* 2 * linux/ipc/namespace.c 3 * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc. 4 */ 5 6 #include <linux/ipc.h> 7 #include <linux/msg.h> 8 #include <linux/ipc_namespace.h> 9 #include <linux/rcupdate.h> 10 #include <linux/nsproxy.h> 11 #include <linux/slab.h> 12 #include <linux/fs.h> 13 #include <linux/mount.h> 14 #include <linux/user_namespace.h> 15 16 #include "util.h" 17 18 static struct ipc_namespace *create_ipc_ns(struct task_struct *tsk, 19 struct ipc_namespace *old_ns) 20 { 21 struct ipc_namespace *ns; 22 int err; 23 24 ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL); 25 if (ns == NULL) 26 return ERR_PTR(-ENOMEM); 27 28 atomic_set(&ns->count, 1); 29 err = mq_init_ns(ns); 30 if (err) { 31 kfree(ns); 32 return ERR_PTR(err); 33 } 34 atomic_inc(&nr_ipc_ns); 35 36 sem_init_ns(ns); 37 msg_init_ns(ns); 38 shm_init_ns(ns); 39 40 /* 41 * msgmni has already been computed for the new ipc ns. 42 * Thus, do the ipcns creation notification before registering that 43 * new ipcns in the chain. 44 */ 45 ipcns_notify(IPCNS_CREATED); 46 register_ipcns_notifier(ns); 47 48 ns->user_ns = get_user_ns(task_cred_xxx(tsk, user)->user_ns); 49 50 return ns; 51 } 52 53 struct ipc_namespace *copy_ipcs(unsigned long flags, 54 struct task_struct *tsk) 55 { 56 struct ipc_namespace *ns = tsk->nsproxy->ipc_ns; 57 58 if (!(flags & CLONE_NEWIPC)) 59 return get_ipc_ns(ns); 60 return create_ipc_ns(tsk, ns); 61 } 62 63 /* 64 * free_ipcs - free all ipcs of one type 65 * @ns: the namespace to remove the ipcs from 66 * @ids: the table of ipcs to free 67 * @free: the function called to free each individual ipc 68 * 69 * Called for each kind of ipc when an ipc_namespace exits. 
 */
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
	       void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
{
	struct kern_ipc_perm *perm;
	int next_id;
	int total, in_use;

	down_write(&ids->rw_mutex);

	/* Snapshot the count; we stop once that many live ipcs were freed. */
	in_use = ids->in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		/* The idr can be sparse: skip ids with no object attached. */
		perm = idr_find(&ids->ipcs_idr, next_id);
		if (perm == NULL)
			continue;
		ipc_lock_by_ptr(perm);
		/*
		 * NOTE(review): @free is handed the ipc locked; presumably it
		 * is responsible for removing it and dropping the lock —
		 * confirm against the sem/msg/shm freeary callbacks.
		 */
		free(ns, perm);
		total++;
	}
	up_write(&ids->rw_mutex);
}

/*
 * free_ipc_ns - tear down and free an ipc namespace.
 * @ns: the namespace to destroy; its refcount has already reached zero.
 *
 * The teardown order below is significant — see the comments.
 */
static void free_ipc_ns(struct ipc_namespace *ns)
{
	/*
	 * Unregistering the hotplug notifier at the beginning guarantees
	 * that the ipc namespace won't be freed while we are inside the
	 * callback routine. Since the blocking_notifier_chain_XXX routines
	 * hold a rw lock on the notifier list, unregister_ipcns_notifier()
	 * won't take the rw lock before blocking_notifier_call_chain() has
	 * released the rd lock.
	 */
	unregister_ipcns_notifier(ns);
	sem_exit_ns(ns);
	msg_exit_ns(ns);
	shm_exit_ns(ns);
	atomic_dec(&nr_ipc_ns);

	/*
	 * Do the ipcns removal notification after decrementing nr_ipc_ns in
	 * order to have a correct value when recomputing msgmni.
	 */
	ipcns_notify(IPCNS_REMOVED);
	/* Drop the reference taken on the owning user_ns at creation. */
	put_user_ns(ns->user_ns);
	kfree(ns);
}

/*
 * put_ipc_ns - drop a reference to an ipc namespace.
 * @ns: the namespace to put
 *
 * If this is the last task in the namespace exiting, and
 * it is dropping the refcount to 0, then it can race with
 * a task in another ipc namespace but in a mounts namespace
 * which has this ipcns's mqueuefs mounted, doing some action
 * with one of the mqueuefs files. That can raise the refcount.
 * So dropping the refcount, and raising the refcount when
 * accessing it through the VFS, are protected with mq_lock.
 *
 * (Clearly, a task raising the refcount on its own ipc_ns
 * needn't take mq_lock since it can't race with the last task
 * in the ipcns exiting).
 */
void put_ipc_ns(struct ipc_namespace *ns)
{
	/*
	 * atomic_dec_and_lock() takes mq_lock only when the count hits
	 * zero, closing the race described above: once we hold mq_lock
	 * with count == 0, no mqueuefs user can resurrect the namespace.
	 */
	if (atomic_dec_and_lock(&ns->count, &mq_lock)) {
		/* Detach the mqueue superblock info while still locked. */
		mq_clear_sbinfo(ns);
		spin_unlock(&mq_lock);
		/* Safe to drop the mount and free outside mq_lock now. */
		mq_put_mnt(ns);
		free_ipc_ns(ns);
	}
}