/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/reboot.h>
#include <linux/export.h>

#define BITS_PER_PAGE		(PAGE_SIZE * 8)

struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;

/*
 * Creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}

static void proc_cleanup_work(struct work_struct *work)
{
	struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
	pid_ns_release_proc(ns);
}

/* MAX_PID_NS_LEVEL is needed for limiting the size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32

static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
	struct pid_namespace *parent_pid_ns)
{
	struct pid_namespace *ns;
	unsigned int level = parent_pid_ns->level + 1;
	int i;
	int err;

	if (level > MAX_PID_NS_LEVEL) {
		err = -EINVAL;
		goto out;
	}

	err = -ENOMEM;
	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out;

	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	err = proc_alloc_inum(&ns->proc_inum);
	if (err)
		goto out_free_map;

	kref_init(&ns->kref);
	ns->level = level;
	ns->parent = get_pid_ns(parent_pid_ns);
	ns->user_ns = get_user_ns(user_ns);
	ns->nr_hashed = PIDNS_HASH_ADDING;
	INIT_WORK(&ns->proc_work, proc_cleanup_work);

	/* Reserve pid 0: it is never handed out */
	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++)
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

	return ns;

out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kmem_cache_free(pid_ns_cachep, ns);
out:
	return ERR_PTR(err);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	proc_free_inum(ns->proc_inum);
	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	put_user_ns(ns->user_ns);
	kmem_cache_free(pid_ns_cachep, ns);
}
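
/*
 * Illustrative usage sketch (not part of this file): the create/destroy
 * pair above is normally reached from userspace via clone(2) or
 * unshare(2) with CLONE_NEWPID, which funnels into copy_pid_ns() below.
 * A minimal userspace example, assuming a hypothetical child_fn() and
 * STACK_SIZE:
 *
 *	pid_t pid = clone(child_fn, stack + STACK_SIZE,
 *			  CLONE_NEWPID | SIGCHLD, NULL);
 *	// Inside child_fn(), getpid() returns 1: the first task in a
 *	// new pid namespace becomes that namespace's init/child reaper.
 */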
struct pid_namespace *copy_pid_ns(unsigned long flags,
	struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
	if (!(flags & CLONE_NEWPID))
		return get_pid_ns(old_ns);
	if (task_active_pid_ns(current) != old_ns)
		return ERR_PTR(-EINVAL);
	return create_pid_namespace(user_ns, old_ns);
}

static void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns;

	ns = container_of(kref, struct pid_namespace, kref);
	destroy_pid_namespace(ns);
}

void put_pid_ns(struct pid_namespace *ns)
{
	struct pid_namespace *parent;

	while (ns != &init_pid_ns) {
		parent = ns->parent;
		if (!kref_put(&ns->kref, free_pid_ns))
			break;
		ns = parent;
	}
}
EXPORT_SYMBOL_GPL(put_pid_ns);

void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;
	struct task_struct *task, *me = current;

	/* Don't allow any more processes into the pid namespace */
	disable_pid_allocation(pid_ns);

	/* Ignore SIGCHLD so that any terminated children autoreap */
	spin_lock_irq(&me->sighand->siglock);
	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
	spin_unlock_irq(&me->sighand->siglock);

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal them and wait
	 * for them to exit.
	 *
	 * Note: this signals each thread in the namespace - even those that
	 *	 belong to the same thread group. To avoid this, we would have
	 *	 to walk the entire tasklist looking for processes in this
	 *	 namespace, but that could be unnecessarily expensive if the
	 *	 pid namespace has just a few processes. Or we would need to
	 *	 maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		rcu_read_lock();

		task = pid_task(find_vpid(nr), PIDTYPE_PID);
		if (task && !__fatal_signal_pending(task))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, task);

		rcu_read_unlock();

		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	/* First, reap the EXIT_ZOMBIE children we may have. */
	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/*
	 * sys_wait4() above can't reap the TASK_DEAD children.
	 * Make sure they all go away, see free_pid().
	 */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (pid_ns->nr_hashed == 1)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (pid_ns->reboot)
		current->signal->group_exit_code = pid_ns->reboot;

	acct_exit_ns(pid_ns);
}
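
/*
 * The CONFIG_CHECKPOINT_RESTORE block below exposes a namespace's
 * last_pid as /proc/sys/kernel/ns_last_pid, so a restorer (e.g. CRIU)
 * can steer which pid the next fork() in that namespace receives. A
 * hedged shell sketch, assuming the sysctl is compiled in and the
 * writer has CAP_SYS_ADMIN in the owning user namespace:
 *
 *	# echo 9999 > /proc/sys/kernel/ns_last_pid
 *	# sh -c 'echo $$'		# prints 10000 if that pid is free
 */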
#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	struct ctl_table tmp = *table;

	if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Writing directly to the ns' last_pid field is OK, since this
	 * field is volatile in a living namespace anyway, and any code
	 * writing to it should synchronize its usage by external means.
	 */
	tmp.data = &pid_ns->last_pid;
	return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}

extern int pid_max;
static int zero = 0;
static struct ctl_table pid_ns_ctl_table[] = {
	{
		.procname = "ns_last_pid",
		.maxlen = sizeof(int),
		.mode = 0666, /* permissions are checked in the handler */
		.proc_handler = pid_ns_ctl_handler,
		.extra1 = &zero,
		.extra2 = &pid_max,
	},
	{ }
};
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
#endif	/* CONFIG_CHECKPOINT_RESTORE */

int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
	if (pid_ns == &init_pid_ns)
		return 0;

	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART2:
	case LINUX_REBOOT_CMD_RESTART:
		pid_ns->reboot = SIGHUP;
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
	case LINUX_REBOOT_CMD_HALT:
		pid_ns->reboot = SIGINT;
		break;
	default:
		return -EINVAL;
	}

	read_lock(&tasklist_lock);
	force_sig(SIGKILL, pid_ns->child_reaper);
	read_unlock(&tasklist_lock);

	do_exit(0);

	/* Not reached */
	return 0;
}

static void *pidns_get(struct task_struct *task)
{
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = get_pid_ns(task_active_pid_ns(task));
	rcu_read_unlock();

	return ns;
}

static void pidns_put(void *ns)
{
	put_pid_ns(ns);
}

static int pidns_install(struct nsproxy *nsproxy, void *ns)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *ancestor, *new = ns;

	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
	    !nsown_capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Only allow entering the current active pid namespace
	 * or a child of the current active pid namespace.
	 *
	 * This is required for fork to return a usable pid value, and
	 * it maintains the property that processes and their
	 * children cannot escape their current pid namespace.
	 */
	if (new->level < active->level)
		return -EINVAL;

	ancestor = new;
	while (ancestor->level > active->level)
		ancestor = ancestor->parent;
	if (ancestor != active)
		return -EINVAL;

	put_pid_ns(nsproxy->pid_ns);
	nsproxy->pid_ns = get_pid_ns(new);
	return 0;
}

static unsigned int pidns_inum(void *ns)
{
	struct pid_namespace *pid_ns = ns;
	return pid_ns->proc_inum;
}

const struct proc_ns_operations pidns_operations = {
	.name		= "pid",
	.type		= CLONE_NEWPID,
	.get		= pidns_get,
	.put		= pidns_put,
	.install	= pidns_install,
	.inum		= pidns_inum,
};

static __init int pid_namespaces_init(void)
{
	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);

#ifdef CONFIG_CHECKPOINT_RESTORE
	register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif
	return 0;
}

__initcall(pid_namespaces_init);
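
/*
 * Illustrative setns(2) sketch for pidns_operations above (hedged, not
 * part of this file): entering a pid namespace only takes effect for
 * children forked afterwards, because an existing task's pid cannot
 * change. The path below uses a hypothetical target pid 1234:
 *
 *	int fd = open("/proc/1234/ns/pid", O_RDONLY);
 *	if (fd >= 0 && setns(fd, CLONE_NEWPID) == 0) {
 *		// fork(): the child is created in the target namespace,
 *		// subject to the ancestor check in pidns_install().
 *	}
 */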