fork.c (06059d5090118b047c5b6a621c8b57a068f7ce2c) vs. fork.c (12c641ab8270f787dfcce08b5f20ce8b65008096)
1/*
2 * linux/kernel/fork.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7/*
8 * 'fork.c' contains the help-routines for the 'fork' system call

--- 273 unchanged lines hidden ---

282 (u64) THREAD_SIZE * 8UL);
283
284 if (threads > max_threads_suggested)
285 threads = max_threads_suggested;
286
287 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
288}
289
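Note: the set_max_threads() tail shown above first caps the computed thread count at max_threads_suggested and then clamps it into [MIN_THREADS, MAX_THREADS]; the result is stored in max_threads, which backs the kernel.threads-max sysctl. A minimal userspace sketch of that clamping, with placeholder bounds since MIN_THREADS and MAX_THREADS are defined outside this excerpt:

/*
 * Userspace illustration of the clamp in set_max_threads(); the real
 * MIN_THREADS/MAX_THREADS values live elsewhere in fork.c, so the
 * bounds below are placeholders, not the kernel's numbers.
 */
#include <stdint.h>
#include <stdio.h>

#define MIN_THREADS_GUESS 20ULL          /* placeholder lower bound */
#define MAX_THREADS_GUESS 0x3fffffffULL  /* placeholder upper bound */

static uint64_t clamp_u64(uint64_t val, uint64_t lo, uint64_t hi)
{
        return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
        uint64_t threads = 123456;   /* stand-in for the memory-based estimate */
        uint64_t suggested = 65536;  /* stand-in for max_threads_suggested */

        if (threads > suggested)
                threads = suggested;
        printf("max_threads = %llu\n", (unsigned long long)
               clamp_u64(threads, MIN_THREADS_GUESS, MAX_THREADS_GUESS));
        return 0;
}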
fork.c (06059d509011):

290#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
291/* Initialized by the architecture: */
292int arch_task_struct_size __read_mostly;
293#endif
294
295void __init fork_init(void)
296{
297#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
298#ifndef ARCH_MIN_TASKALIGN
299#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
300#endif
301 /* create a slab on which task_structs can be allocated */
302 task_struct_cachep =
303 kmem_cache_create("task_struct", arch_task_struct_size,
304 ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
305#endif
306
307 /* do the arch specific task caches init */
308 arch_task_cache_init();
309
310 set_max_threads(MAX_THREADS);
311

fork.c (12c641ab8270):

290void __init fork_init(void)
291{
292#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
293#ifndef ARCH_MIN_TASKALIGN
294#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
295#endif
296 /* create a slab on which task_structs can be allocated */
297 task_struct_cachep =
298 kmem_cache_create("task_struct", sizeof(struct task_struct),
299 ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
300#endif
301
302 /* do the arch specific task caches init */
303 arch_task_cache_init();
304
305 set_max_threads(MAX_THREADS);
306

--- 1554 unchanged lines hidden ---
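Note: both fork_init() variants above create the task_struct slab cache with kmem_cache_create(); the only differences in this hunk are the object size argument (arch_task_struct_size vs. sizeof(struct task_struct)) and the CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT declaration block. As a reference for the call pattern, a minimal module sketch using the same kmem_cache_create()/kmem_cache_destroy() API; the structure and names below are illustrative, not taken from fork.c:

#include <linux/module.h>
#include <linux/slab.h>

/* Illustrative payload; fork_init() caches struct task_struct instead. */
struct demo_obj {
        int id;
        char name[32];
};

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
        /* Same argument order as in fork_init(): name, object size,
         * alignment, flags, constructor. SLAB_PANIC makes failure fatal. */
        demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
                                        0, SLAB_PANIC, NULL);
        return 0;
}

static void __exit demo_cache_exit(void)
{
        kmem_cache_destroy(demo_cachep);
}

module_init(demo_cache_init);
module_exit(demo_cache_exit);
MODULE_LICENSE("GPL");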

fork.c (06059d509011):

1866static int check_unshare_flags(unsigned long unshare_flags)
1867{
1868 if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1869 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1870 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
1871 CLONE_NEWUSER|CLONE_NEWPID))
1872 return -EINVAL;
1873 /*
1874 * Not implemented, but pretend it works if there is nothing to
1875 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
1876 * needs to unshare vm.
1877 */
1878 if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
1879 /* FIXME: get_task_mm() increments ->mm_users */
1880 if (atomic_read(&current->mm->mm_users) > 1)
1881 return -EINVAL;
1882 }
1883
1884 return 0;
1885}
1886
1887/*
1888 * Unshare the filesystem structure if it is being shared
1889 */
1890static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)

fork.c (12c641ab8270):

1861static int check_unshare_flags(unsigned long unshare_flags)
1862{
1863 if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1864 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1865 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
1866 CLONE_NEWUSER|CLONE_NEWPID))
1867 return -EINVAL;
1868 /*
1869 * Not implemented, but pretend it works if there is nothing
1870 * to unshare. Note that unsharing the address space or the
1871 * signal handlers also need to unshare the signal queues (aka
1872 * CLONE_THREAD).
1873 */
1874 if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
1875 if (!thread_group_empty(current))
1876 return -EINVAL;
1877 }
1878 if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
1879 if (atomic_read(&current->sighand->count) > 1)
1880 return -EINVAL;
1881 }
1882 if (unshare_flags & CLONE_VM) {
1883 if (!current_is_single_threaded())
1884 return -EINVAL;
1885 }
1886
1887 return 0;
1888}
1889
1890/*
1891 * Unshare the filesystem structure if it is being shared
1892 */
1893static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)

--- 50 unchanged lines hidden ---
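Note: check_unshare_flags() is the validation step behind the unshare(2) system call, and the two versions above differ in how requests involving CLONE_THREAD, CLONE_SIGHAND and CLONE_VM are rejected. A small userspace sketch for exercising that path; the flag choices are illustrative, and whether the second call succeeds depends on the kernel version and on the caller being single-threaded:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
        /* CLONE_FS | CLONE_FILES: plain unshare of the fs info and the fd
         * table, normally allowed for unprivileged callers. */
        if (unshare(CLONE_FS | CLONE_FILES) == -1)
                perror("unshare(CLONE_FS | CLONE_FILES)");
        else
                puts("unshared fs and files");

        /* CLONE_SIGHAND is routed through check_unshare_flags(); whether it
         * succeeds depends on the checks shown above. */
        if (unshare(CLONE_SIGHAND) == -1)
                printf("unshare(CLONE_SIGHAND): %s\n", strerror(errno));
        else
                puts("unshared signal handlers");
        return 0;
}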

fork.c (06059d509011):

1941 int err;
1942
1943 /*
1944 * If unsharing a user namespace must also unshare the thread.
1945 */
1946 if (unshare_flags & CLONE_NEWUSER)
1947 unshare_flags |= CLONE_THREAD | CLONE_FS;
1948 /*
1949 * If unsharing a thread from a thread group, must also unshare vm.
1950 */
1951 if (unshare_flags & CLONE_THREAD)
1952 unshare_flags |= CLONE_VM;
1953 /*
1954 * If unsharing vm, must also unshare signal handlers.
1955 */
1956 if (unshare_flags & CLONE_VM)
1957 unshare_flags |= CLONE_SIGHAND;
1958 /*
1959 * If unsharing namespace, must also unshare filesystem information.
1960 */
1961 if (unshare_flags & CLONE_NEWNS)
1962 unshare_flags |= CLONE_FS;
1963
1964 err = check_unshare_flags(unshare_flags);
1965 if (err)
1966 goto bad_unshare_out;

--- 126 unchanged lines hidden ---

fork.c (12c641ab8270):

1944 int err;
1945
1946 /*
1947 * If unsharing a user namespace must also unshare the thread.
1948 */
1949 if (unshare_flags & CLONE_NEWUSER)
1950 unshare_flags |= CLONE_THREAD | CLONE_FS;
1951 /*
1952 * If unsharing vm, must also unshare signal handlers.
1953 */
1954 if (unshare_flags & CLONE_VM)
1955 unshare_flags |= CLONE_SIGHAND;
1956 /*
1957 * If unsharing a signal handlers, must also unshare the signal queues.
1958 */
1959 if (unshare_flags & CLONE_SIGHAND)
1960 unshare_flags |= CLONE_THREAD;
1961 /*
1962 * If unsharing namespace, must also unshare filesystem information.
1963 */
1964 if (unshare_flags & CLONE_NEWNS)
1965 unshare_flags |= CLONE_FS;
1966
1967 err = check_unshare_flags(unshare_flags);
1968 if (err)
1969 goto bad_unshare_out;

--- 126 unchanged lines hidden ---
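Note: the two sys_unshare() excerpts above apply different implied-flag chains before calling check_unshare_flags(): the first forces CLONE_THREAD to pull in CLONE_VM and CLONE_VM to pull in CLONE_SIGHAND, while the second keeps CLONE_VM implying CLONE_SIGHAND but instead makes CLONE_SIGHAND imply CLONE_THREAD. A condensed restatement of the two chains; the helper names are illustrative and do not exist in fork.c:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Flag implications as in the first excerpt above (illustrative helper). */
static unsigned long imply_flags_first(unsigned long flags)
{
        if (flags & CLONE_NEWUSER)
                flags |= CLONE_THREAD | CLONE_FS;
        if (flags & CLONE_THREAD)       /* thread unshare pulls in the vm */
                flags |= CLONE_VM;
        if (flags & CLONE_VM)           /* vm unshare pulls in signal handlers */
                flags |= CLONE_SIGHAND;
        if (flags & CLONE_NEWNS)
                flags |= CLONE_FS;
        return flags;
}

/* Flag implications as in the second excerpt above (illustrative helper). */
static unsigned long imply_flags_second(unsigned long flags)
{
        if (flags & CLONE_NEWUSER)
                flags |= CLONE_THREAD | CLONE_FS;
        if (flags & CLONE_VM)           /* vm unshare still pulls in signal handlers */
                flags |= CLONE_SIGHAND;
        if (flags & CLONE_SIGHAND)      /* signal handlers now pull in the signal queues */
                flags |= CLONE_THREAD;
        if (flags & CLONE_NEWNS)
                flags |= CLONE_FS;
        return flags;
}

int main(void)
{
        printf("first:  %#lx\n", imply_flags_first(CLONE_SIGHAND));
        printf("second: %#lx\n", imply_flags_second(CLONE_SIGHAND));
        return 0;
}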