kernel/fork.c: diff between commits 472475000979a156bc32cd75caa59737f5a1caa5 (before) and 9b6f7e163cd0f468d1b9696b785659d3c27c8667 (after)
 /*
  * linux/kernel/fork.c
  *
  * Copyright (C) 1991, 1992 Linus Torvalds
  */

 /*
  * 'fork.c' contains the help-routines for the 'fork' system call

--- 209 unchanged lines hidden ---

        /* Clear stale pointers from reused stack. */
        memset(s->addr, 0, THREAD_SIZE);

        tsk->stack_vm_area = s;
        return s->addr;
    }

+   /*
+    * Allocated stacks are cached and later reused by new threads,
+    * so memcg accounting is performed manually on assigning/releasing
+    * stacks to tasks. Drop __GFP_ACCOUNT.
+    */
    stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
                                 VMALLOC_START, VMALLOC_END,
-                                THREADINFO_GFP,
+                                THREADINFO_GFP & ~__GFP_ACCOUNT,
                                 PAGE_KERNEL,
                                 0, node, __builtin_return_address(0));

    /*
     * We can't call find_vm_area() in interrupt context, and
     * free_thread_stack() can be called in interrupt context,
     * so cache the vm_struct.
     */
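This hunk is the heart of the change: stacks parked in the per-cpu cache can be handed to a task in a different cgroup than the one they were vmalloc'ed under, so alloc-time charging via __GFP_ACCOUNT would bill the memory to the wrong memcg. Below is a minimal userspace model of the assign-time accounting the diff switches to; struct owner, stack_alloc() and the 16 KB stack size are illustrative stand-ins, not kernel API.

#include <stdio.h>
#include <stdlib.h>

#define STACK_KB 16

struct owner { const char *name; long charged_kb; };    /* models a memcg */

static void *cache_slot;                                /* one-slot stack cache */

static void charge(struct owner *o)   { o->charged_kb += STACK_KB; }
static void uncharge(struct owner *o) { o->charged_kb -= STACK_KB; }

/* The allocation itself is unaccounted: the "& ~__GFP_ACCOUNT" part. */
static void *stack_alloc(void)
{
    void *s = cache_slot;

    if (s)
        cache_slot = NULL;
    else
        s = malloc(STACK_KB * 1024);
    return s;
}

static void stack_free(void *s)
{
    if (!cache_slot)
        cache_slot = s;         /* park for reuse by the next task */
    else
        free(s);
}

int main(void)
{
    struct owner a = { "A", 0 }, b = { "B", 0 };
    void *s;

    s = stack_alloc(); charge(&a);  /* charged to A on assignment */
    uncharge(&a); stack_free(s);    /* uncharged on release, then parked */

    s = stack_alloc(); charge(&b);  /* same memory, now charged to B */
    printf("A=%ldkB B=%ldkB\n", a.charged_kb, b.charged_kb); /* A=0kB B=16kB */

    uncharge(&b); stack_free(s);
    free(cache_slot);
    return 0;
}

Had the charge been taken at allocation time instead, the reused stack would still be billed to A here, which is exactly the mis-accounting that dropping __GFP_ACCOUNT avoids.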

--- 6 unchanged lines hidden ---

    return page ? page_address(page) : NULL;
 #endif
 }

 static inline void free_thread_stack(struct task_struct *tsk)
 {
 #ifdef CONFIG_VMAP_STACK
-   if (task_stack_vm_area(tsk)) {
+   struct vm_struct *vm = task_stack_vm_area(tsk);
+
+   if (vm) {
        int i;

+       for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+           mod_memcg_page_state(vm->pages[i],
+                                MEMCG_KERNEL_STACK_KB,
+                                -(int)(PAGE_SIZE / 1024));
+
+           memcg_kmem_uncharge(vm->pages[i], 0);
+       }
+
        for (i = 0; i < NR_CACHED_STACKS; i++) {
            if (this_cpu_cmpxchg(cached_stacks[i],
                    NULL, tsk->stack_vm_area) != NULL)
                continue;

            return;
        }
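Worth noting in the hunk above: the pages are uncharged before the stack is offered to cached_stacks[], so a parked stack never stays charged to the exiting task's memcg. The slot claiming itself is the usual "install into the first empty slot with compare-and-swap" pattern; here is a self-contained userspace analogue, with C11 atomics standing in for this_cpu_cmpxchg() (cache_stack() and free_stack() are illustrative names):

#include <stdatomic.h>
#include <stdlib.h>

#define NR_CACHED_STACKS 2

static _Atomic(void *) cached_stacks[NR_CACHED_STACKS];

/* Try to park a freed stack; returns 1 on success, 0 if the cache is full. */
static int cache_stack(void *stack)
{
    for (int i = 0; i < NR_CACHED_STACKS; i++) {
        void *expected = NULL;

        /* Succeeds only if the slot is still NULL. */
        if (atomic_compare_exchange_strong(&cached_stacks[i],
                                           &expected, stack))
            return 1;
    }
    return 0;
}

static void free_stack(void *stack)
{
    if (!cache_stack(stack))
        free(stack);            /* cache full: really free it */
}

int main(void)
{
    free_stack(malloc(64));     /* parked in slot 0 */
    free_stack(malloc(64));     /* parked in slot 1 */
    free_stack(malloc(64));     /* cache full, freed */
    return 0;                   /* parked blocks intentionally leak here */
}

The kernel variant can be simpler than a general lock-free structure because the array is per-CPU, so this_cpu_cmpxchg() only has to be safe against interrupts on the local CPU.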

--- 84 unchanged lines hidden ---

        BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

        for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
            mod_zone_page_state(page_zone(vm->pages[i]),
                                NR_KERNEL_STACK_KB,
                                PAGE_SIZE / 1024 * account);
        }
-
-       /* All stack pages belong to the same memcg. */
-       mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB,
-                            account * (THREAD_SIZE / 1024));
    } else {
        /*
         * All stack pages are in the same zone and belong to the
         * same memcg.
         */
        struct page *first_page = virt_to_page(stack);

        mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
                            THREAD_SIZE / 1024 * account);

        mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
                             account * (THREAD_SIZE / 1024));
    }
 }

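A quick units check on account_kernel_stack(), which both versions keep in kilobytes with account = +1 on assignment and -1 on release. Assuming 4 KiB pages and THREAD_SIZE = 16 KiB (four pages); the figures are illustrative, not universal:

/*
 * vmap branch:     four iterations of
 *                    mod_zone_page_state(..., NR_KERNEL_STACK_KB,
 *                                        4096 / 1024 * account)
 *                  -> 4 * 4 KB = 16 KB per stack, spread over the pages' zones.
 *
 * non-vmap branch: one call with 16384 / 1024 * account = 16 KB * account,
 *                  valid because a contiguous stack sits in a single zone.
 *
 * Over a task's lifetime the +1 and -1 calls cancel, so the counters
 * track only live stacks.
 */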
+static int memcg_charge_kernel_stack(struct task_struct *tsk)
+{
+#ifdef CONFIG_VMAP_STACK
+   struct vm_struct *vm = task_stack_vm_area(tsk);
+   int ret;
+
+   if (vm) {
+       int i;
+
+       for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+           /*
+            * If memcg_kmem_charge() fails, page->mem_cgroup
+            * pointer is NULL, and both memcg_kmem_uncharge()
+            * and mod_memcg_page_state() in free_thread_stack()
+            * will ignore this page. So it's safe.
+            */
+           ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0);
+           if (ret)
+               return ret;
+
+           mod_memcg_page_state(vm->pages[i],
+                                MEMCG_KERNEL_STACK_KB,
+                                PAGE_SIZE / 1024);
+       }
+   }
+#endif
+   return 0;
+}
+
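The new memcg_charge_kernel_stack() may fail partway through the page loop, and the comment's safety argument is that a never-charged page carries no mem_cgroup pointer, so the release path can walk every page and simply skip the uncharged ones. A userspace model of that invariant; struct page_model, the 4 KB page size and the limit are illustrative:

#include <assert.h>
#include <stddef.h>

struct memcg { long used_kb, limit_kb; };

struct page_model {
    struct memcg *memcg;        /* NULL until successfully charged */
};

static int charge_one(struct page_model *p, struct memcg *cg)
{
    if (cg->used_kb + 4 > cg->limit_kb)
        return -1;              /* over limit: p->memcg stays NULL */
    cg->used_kb += 4;
    p->memcg = cg;
    return 0;
}

static int charge_stack(struct page_model *pages, int n, struct memcg *cg)
{
    for (int i = 0; i < n; i++)
        if (charge_one(&pages[i], cg))
            return -1;          /* pages[i..n-1] remain uncharged */
    return 0;
}

static void uncharge_stack(struct page_model *pages, int n)
{
    for (int i = 0; i < n; i++) {
        if (!pages[i].memcg)    /* never charged: skipped, mirroring */
            continue;           /* the memcg_kmem_uncharge() behavior */
        pages[i].memcg->used_kb -= 4;
        pages[i].memcg = NULL;
    }
}

int main(void)
{
    struct memcg cg = { 0, 8 };                 /* room for 2 of 4 pages */
    struct page_model pages[4] = { {NULL}, {NULL}, {NULL}, {NULL} };

    assert(charge_stack(pages, 4, &cg) != 0);   /* fails at page 2 */
    uncharge_stack(pages, 4);                   /* skips pages 2..3 */
    assert(cg.used_kb == 0);                    /* balanced anyway */
    return 0;
}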
 static void release_task_stack(struct task_struct *tsk)
 {
    if (WARN_ON(tsk->state != TASK_DEAD))
        return;  /* Better to leak the stack than to free prematurely */

    account_kernel_stack(tsk, -1);
    arch_release_thread_stack(tsk->stack);
    free_thread_stack(tsk);

--- 421 unchanged lines hidden ---

    tsk = alloc_task_struct_node(node);
    if (!tsk)
        return NULL;

    stack = alloc_thread_stack_node(tsk, node);
    if (!stack)
        goto free_tsk;

+   if (memcg_charge_kernel_stack(tsk))
+       goto free_stack;
+
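The call site relies on the goto-unwinding idiom dup_task_struct() already uses: each failure jumps to a label that releases exactly what has been acquired so far, in reverse order, so the new charge attempt only needs a label between the two existing steps. A compileable sketch of that shape; dup_example() and the stubs are illustrative, with label names mirroring the free_stack/free_tsk in the diff:

#include <stdlib.h>

struct task { void *stack; };

static int charge_stack_to_memcg(struct task *tsk)  /* stub, may fail */
{
    (void)tsk;
    return 0;
}

static struct task *dup_example(void)
{
    struct task *tsk = malloc(sizeof(*tsk));
    void *stack;

    if (!tsk)
        return NULL;

    stack = malloc(16384);
    if (!stack)
        goto free_tsk;

    if (charge_stack_to_memcg(tsk))
        goto free_stack;        /* stack exists but was never charged */

    tsk->stack = stack;
    return tsk;

free_stack:                     /* undo in reverse order of setup */
    free(stack);
free_tsk:
    free(tsk);
    return NULL;
}

int main(void)
{
    struct task *t = dup_example();

    if (t) {
        free(t->stack);
        free(t);
    }
    return !t;
}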
    stack_vm_area = task_stack_vm_area(tsk);

    err = arch_dup_task_struct(tsk, orig);

    /*
     * arch_dup_task_struct() clobbers the stack-related fields. Make
     * sure they're properly initialized before using any stack-related
     * functions again.

--- 1788 unchanged lines hidden ---