memcontrol.c (b3bbcc5d1da1b654091dad15980b3d58fdae0fc6) | memcontrol.c (ec1c86b25f4bdd9dce6436c0539d2a6ae676e1c4) |
---|---|
1// SPDX-License-Identifier: GPL-2.0-or-later 2/* memcontrol.c - Memory Controller 3 * 4 * Copyright IBM Corporation, 2007 5 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 6 * 7 * Copyright 2007 OpenVZ SWsoft Inc 8 * Author: Pavel Emelianov <xemul@openvz.org> --- 1129 unchanged lines hidden (view full) --- 1138 struct mem_cgroup *last; 1139 1140 do { 1141 __invalidate_reclaim_iterators(memcg, dead_memcg); 1142 last = memcg; 1143 } while ((memcg = parent_mem_cgroup(memcg))); 1144 1145 /* | 1// SPDX-License-Identifier: GPL-2.0-or-later 2/* memcontrol.c - Memory Controller 3 * 4 * Copyright IBM Corporation, 2007 5 * Author Balbir Singh <balbir@linux.vnet.ibm.com> 6 * 7 * Copyright 2007 OpenVZ SWsoft Inc 8 * Author: Pavel Emelianov <xemul@openvz.org> --- 1129 unchanged lines hidden (view full) --- 1138 struct mem_cgroup *last; 1139 1140 do { 1141 __invalidate_reclaim_iterators(memcg, dead_memcg); 1142 last = memcg; 1143 } while ((memcg = parent_mem_cgroup(memcg))); 1144 1145 /* |
1146 * When cgruop1 non-hierarchy mode is used, | 1146 * When cgroup1 non-hierarchy mode is used, |
1147 * parent_mem_cgroup() does not walk all the way up to the 1148 * cgroup root (root_mem_cgroup). So we have to handle 1149 * dead_memcg from cgroup root separately. 1150 */ 1151 if (last != root_mem_cgroup) 1152 __invalidate_reclaim_iterators(root_mem_cgroup, 1153 dead_memcg); 1154} --- 2815 unchanged lines hidden (view full) --- 3970 NR_ANON_MAPPED, 3971#ifdef CONFIG_TRANSPARENT_HUGEPAGE 3972 NR_ANON_THPS, 3973#endif 3974 NR_SHMEM, 3975 NR_FILE_MAPPED, 3976 NR_FILE_DIRTY, 3977 NR_WRITEBACK, | 1147 * parent_mem_cgroup() does not walk all the way up to the 1148 * cgroup root (root_mem_cgroup). So we have to handle 1149 * dead_memcg from cgroup root separately. 1150 */ 1151 if (last != root_mem_cgroup) 1152 __invalidate_reclaim_iterators(root_mem_cgroup, 1153 dead_memcg); 1154} --- 2815 unchanged lines hidden (view full) --- 3970 NR_ANON_MAPPED, 3971#ifdef CONFIG_TRANSPARENT_HUGEPAGE 3972 NR_ANON_THPS, 3973#endif 3974 NR_SHMEM, 3975 NR_FILE_MAPPED, 3976 NR_FILE_DIRTY, 3977 NR_WRITEBACK, |
| 3978 WORKINGSET_REFAULT_ANON, 3979 WORKINGSET_REFAULT_FILE, |
3978 MEMCG_SWAP, 3979}; 3980 3981static const char *const memcg1_stat_names[] = { 3982 "cache", 3983 "rss", 3984#ifdef CONFIG_TRANSPARENT_HUGEPAGE 3985 "rss_huge", 3986#endif 3987 "shmem", 3988 "mapped_file", 3989 "dirty", 3990 "writeback", | 3980 MEMCG_SWAP, 3981}; 3982 3983static const char *const memcg1_stat_names[] = { 3984 "cache", 3985 "rss", 3986#ifdef CONFIG_TRANSPARENT_HUGEPAGE 3987 "rss_huge", 3988#endif 3989 "shmem", 3990 "mapped_file", 3991 "dirty", 3992 "writeback", |
3993 "workingset_refault_anon", 3994 "workingset_refault_file", |
|
3991 "swap", 3992}; 3993 3994/* Universal VM events cgroup1 shows, original sort order */ 3995static const unsigned int memcg1_events[] = { 3996 PGPGIN, 3997 PGPGOUT, 3998 PGFAULT, --- 12 unchanged lines hidden (view full) --- 4011 mem_cgroup_flush_stats(); 4012 4013 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4014 unsigned long nr; 4015 4016 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4017 continue; 4018 nr = memcg_page_state_local(memcg, memcg1_stats[i]); | 3995 "swap", 3996}; 3997 3998/* Universal VM events cgroup1 shows, original sort order */ 3999static const unsigned int memcg1_events[] = { 4000 PGPGIN, 4001 PGPGOUT, 4002 PGFAULT, --- 12 unchanged lines hidden (view full) --- 4015 mem_cgroup_flush_stats(); 4016 4017 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4018 unsigned long nr; 4019 4020 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4021 continue; 4022 nr = memcg_page_state_local(memcg, memcg1_stats[i]); |
4019 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE); | 4023 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], 4024 nr * memcg_page_state_unit(memcg1_stats[i])); |
4020 } 4021 4022 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4023 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 4024 memcg_events_local(memcg, memcg1_events[i])); 4025 4026 for (i = 0; i < NR_LRU_LISTS; i++) 4027 seq_printf(m, "%s %lu\n", lru_list_name(i), --- 14 unchanged lines hidden (view full) --- 4042 4043 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4044 unsigned long nr; 4045 4046 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4047 continue; 4048 nr = memcg_page_state(memcg, memcg1_stats[i]); 4049 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], | 4025 } 4026 4027 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4028 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), 4029 memcg_events_local(memcg, memcg1_events[i])); 4030 4031 for (i = 0; i < NR_LRU_LISTS; i++) 4032 seq_printf(m, "%s %lu\n", lru_list_name(i), --- 14 unchanged lines hidden (view full) --- 4047 4048 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { 4049 unsigned long nr; 4050 4051 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) 4052 continue; 4053 nr = memcg_page_state(memcg, memcg1_stats[i]); 4054 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], |
4050 (u64)nr * PAGE_SIZE); | 4055 (u64)nr * memcg_page_state_unit(memcg1_stats[i])); |
4051 } 4052 4053 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4054 seq_printf(m, "total_%s %llu\n", 4055 vm_event_name(memcg1_events[i]), 4056 (u64)memcg_events(memcg, memcg1_events[i])); 4057 4058 for (i = 0; i < NR_LRU_LISTS; i++) --- 1106 unchanged lines hidden (view full) --- 5165 for_each_node(node) 5166 free_mem_cgroup_per_node_info(memcg, node); 5167 free_percpu(memcg->vmstats_percpu); 5168 kfree(memcg); 5169} 5170 5171static void mem_cgroup_free(struct mem_cgroup *memcg) 5172{ | 4056 } 4057 4058 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) 4059 seq_printf(m, "total_%s %llu\n", 4060 vm_event_name(memcg1_events[i]), 4061 (u64)memcg_events(memcg, memcg1_events[i])); 4062 4063 for (i = 0; i < NR_LRU_LISTS; i++) --- 1106 unchanged lines hidden (view full) --- 5170 for_each_node(node) 5171 free_mem_cgroup_per_node_info(memcg, node); 5172 free_percpu(memcg->vmstats_percpu); 5173 kfree(memcg); 5174} 5175 5176static void mem_cgroup_free(struct mem_cgroup *memcg) 5177{ |
| 5178 lru_gen_exit_memcg(memcg); |
5173 memcg_wb_domain_exit(memcg); 5174 __mem_cgroup_free(memcg); 5175} 5176 5177static struct mem_cgroup *mem_cgroup_alloc(void) 5178{ 5179 struct mem_cgroup *memcg; 5180 int node; --- 42 unchanged lines hidden (view full) --- 5223 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5224#endif 5225#ifdef CONFIG_TRANSPARENT_HUGEPAGE 5226 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5227 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5228 memcg->deferred_split_queue.split_queue_len = 0; 5229#endif 5230 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); | 5179 memcg_wb_domain_exit(memcg); 5180 __mem_cgroup_free(memcg); 5181} 5182 5183static struct mem_cgroup *mem_cgroup_alloc(void) 5184{ 5185 struct mem_cgroup *memcg; 5186 int node; --- 42 unchanged lines hidden (view full) --- 5229 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq); 5230#endif 5231#ifdef CONFIG_TRANSPARENT_HUGEPAGE 5232 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); 5233 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); 5234 memcg->deferred_split_queue.split_queue_len = 0; 5235#endif 5236 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); |
| 5237 lru_gen_init_memcg(memcg); |
5231 return memcg; 5232fail: 5233 mem_cgroup_id_remove(memcg); 5234 __mem_cgroup_free(memcg); 5235 return ERR_PTR(error); 5236} 5237 5238static struct cgroup_subsys_state * __ref --- 2466 unchanged lines hidden --- | 5238 return memcg; 5239fail: 5240 mem_cgroup_id_remove(memcg); 5241 __mem_cgroup_free(memcg); 5242 return ERR_PTR(error); 5243} 5244 5245static struct cgroup_subsys_state * __ref --- 2466 unchanged lines hidden --- |
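/*
 * Why the right-hand version stops scaling every cgroup1 memory.stat entry
 * by PAGE_SIZE: WORKINGSET_REFAULT_ANON and WORKINGSET_REFAULT_FILE, newly
 * added to memcg1_stats above, are event counts rather than page counts, so
 * multiplying them by the page size would misreport them. Both show paths
 * therefore ask memcg_page_state_unit() for a per-item unit. Below is a
 * minimal sketch of that idea, assuming only the two refault items need a
 * unit of 1; the upstream helper covers additional byte- and event-based
 * items, so treat this as illustrative rather than the exact implementation.
 */
static int memcg_page_state_unit(int item)
{
	switch (item) {
	case WORKINGSET_REFAULT_ANON:
	case WORKINGSET_REFAULT_FILE:
		/* Raw event counts: report them unscaled. */
		return 1;
	default:
		/* Page-based counters: report them in bytes. */
		return PAGE_SIZE;
	}
}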
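/*
 * Why the right-hand version brackets the memcg lifecycle with multi-gen
 * LRU hooks: lru_gen_init_memcg() runs at the end of mem_cgroup_alloc(),
 * after idr_replace() has published the fully constructed memcg, and
 * lru_gen_exit_memcg() runs first in mem_cgroup_free(), before
 * memcg_wb_domain_exit() and __mem_cgroup_free() release the per-node info
 * and per-cpu stats. The pair below is a hedged sketch of what such hooks
 * typically do; the mm_list and lrugen field names are assumptions about
 * the MGLRU layout, not necessarily the exact upstream definitions.
 */
void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
	/* Per-memcg list of mm_structs for the MGLRU page table walkers. */
	INIT_LIST_HEAD(&memcg->mm_list.fifo);
	spin_lock_init(&memcg->mm_list.lock);
}

void lru_gen_exit_memcg(struct mem_cgroup *memcg)
{
	int nid;

	for_each_node(nid) {
		struct lruvec *lruvec = get_mem_cgroup_lruvec(memcg, nid);

		/* Every generation should be empty before the memcg is freed. */
		VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
					   sizeof(lruvec->lrugen.nr_pages)));
	}
}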