/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>

#define BITS_PER_PAGE		(PAGE_SIZE * 8)

struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}

static struct pid_namespace *create_pid_namespace(unsigned int level)
{
	struct pid_namespace *ns;
	int i;

	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out;

	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	kref_init(&ns->kref);
	ns->level = level;

	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++)
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

	return ns;

out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kmem_cache_free(pid_ns_cachep, ns);
out:
	return ERR_PTR(-ENOMEM);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	kmem_cache_free(pid_ns_cachep, ns);
}

struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
	struct pid_namespace *new_ns;

	BUG_ON(!old_ns);
	new_ns = get_pid_ns(old_ns);
	if (!(flags & CLONE_NEWPID))
		goto out;

	new_ns = ERR_PTR(-EINVAL);
	if (flags & CLONE_THREAD)
		goto out_put;

	new_ns = create_pid_namespace(old_ns->level + 1);
	if (!IS_ERR(new_ns))
		new_ns->parent = get_pid_ns(old_ns);

out_put:
	put_pid_ns(old_ns);
out:
	return new_ns;
}

void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns, *parent;

	ns = container_of(kref, struct pid_namespace, kref);

	parent = ns->parent;
	destroy_pid_namespace(ns);

	if (parent != NULL)
		put_pid_ns(parent);
}
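/*
 * Signal every remaining task in @pid_ns and reap the resulting zombies,
 * so that the namespace can be torn down once its init task exits.
 */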
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;

	/*
	 * The last thread in the namespace's init thread group is
	 * terminating. Find the remaining pids in the namespace, signal
	 * them and wait for them to exit.
	 *
	 * Note: this signals each thread in the namespace - even those that
	 *       belong to the same thread group. To avoid that, we would have
	 *       to walk the entire tasklist looking for processes in this
	 *       namespace, but that could be unnecessarily expensive if the
	 *       pid namespace has just a few processes. Or we would need to
	 *       maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr);
		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	acct_exit_ns(pid_ns);
	return;
}

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
	return 0;
}

__initcall(pid_namespaces_init);