--- vmscan.c (32d32ef140de3cc3f6817999415a72f7b0cb52f5)
+++ vmscan.c (42c9db39704839eeb77b27db4c1d57bfa2a54a5b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Swap reorganised 29.12.95, Stephen Tweedie.
 * kswapd added: 7.1.96 sct
 * Removed kswapd_ctl limits, and swap out as many pages as needed
 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.

--- 212 unchanged lines hidden ---

					 int nid)
{
	return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
					 lockdep_is_held(&shrinker_rwsem));
}

static int expand_one_shrinker_info(struct mem_cgroup *memcg,
				    int map_size, int defer_size,
-				    int old_map_size, int old_defer_size)
+				    int old_map_size, int old_defer_size,
+				    int new_nr_max)
{
	struct shrinker_info *new, *old;
	struct mem_cgroup_per_node *pn;
	int nid;
	int size = map_size + defer_size;

	for_each_node(nid) {
		pn = memcg->nodeinfo[nid];
		old = shrinker_info_protected(memcg, nid);
		/* Not yet online memcg */
		if (!old)
			return 0;

+		/* Already expanded this shrinker_info */
+		if (new_nr_max <= old->map_nr_max)
+			continue;
+
		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
		if (!new)
			return -ENOMEM;

		new->nr_deferred = (atomic_long_t *)(new + 1);
		new->map = (void *)new->nr_deferred + defer_size;
+		new->map_nr_max = new_nr_max;

		/* map: set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_map_size);
		memset((void *)new->map + old_map_size, 0, map_size - old_map_size);
		/* nr_deferred: copy old values, clear all new values */
		memcpy(new->nr_deferred, old->nr_deferred, old_defer_size);
		memset((void *)new->nr_deferred + old_defer_size, 0,
		       defer_size - old_defer_size);
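
For orientation, the structure these hunks extend looks roughly like the sketch below. It is reconstructed from the accesses in the diff (the rcu member and field order are assumptions, not copied from the tree); nr_deferred and map point into the tail of the single kvmalloc_node() allocation made above.

/*
 * Sketch of the per-memcg, per-node shrinker_info (reconstructed, see
 * note above). map_nr_max is the field this diff introduces: the number
 * of valid bits in @map for this particular instance.
 */
struct shrinker_info {
	struct rcu_head rcu;		/* assumed: RCU-delayed freeing */
	atomic_long_t *nr_deferred;	/* per-shrinker deferred counts */
	unsigned long *map;		/* bitmap of registered shrinkers */
	int map_nr_max;			/* new: number of valid bits in map */
};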

--- 33 unchanged lines hidden ---

		info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
		if (!info) {
			free_shrinker_info(memcg);
			ret = -ENOMEM;
			break;
		}
		info->nr_deferred = (atomic_long_t *)(info + 1);
		info->map = (void *)info->nr_deferred + defer_size;
+		info->map_nr_max = shrinker_nr_max;
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
	}
	up_write(&shrinker_rwsem);

	return ret;
}

-static inline bool need_expand(int nr_max)
-{
-	return round_up(nr_max, BITS_PER_LONG) >
-	       round_up(shrinker_nr_max, BITS_PER_LONG);
-}
-
static int expand_shrinker_info(int new_id)
{
	int ret = 0;
-	int new_nr_max = new_id + 1;
+	int new_nr_max = round_up(new_id + 1, BITS_PER_LONG);
	int map_size, defer_size = 0;
	int old_map_size, old_defer_size = 0;
	struct mem_cgroup *memcg;

-	if (!need_expand(new_nr_max))
-		goto out;
-
	if (!root_mem_cgroup)
		goto out;

	lockdep_assert_held(&shrinker_rwsem);

	map_size = shrinker_map_size(new_nr_max);
	defer_size = shrinker_defer_size(new_nr_max);
	old_map_size = shrinker_map_size(shrinker_nr_max);
	old_defer_size = shrinker_defer_size(shrinker_nr_max);

	memcg = mem_cgroup_iter(NULL, NULL, NULL);
	do {
		ret = expand_one_shrinker_info(memcg, map_size, defer_size,
-					       old_map_size, old_defer_size);
+					       old_map_size, old_defer_size,
+					       new_nr_max);
		if (ret) {
			mem_cgroup_iter_break(NULL, memcg);
			goto out;
		}
	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
out:
	if (!ret)
		shrinker_nr_max = new_nr_max;

	return ret;
}

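Rounding new_nr_max up to a whole word is what lets need_expand() go away: every expansion lands on a BITS_PER_LONG boundary, and expand_one_shrinker_info() now skips any memcg whose map_nr_max already covers the requested size. Below is a userspace re-statement of the sizing arithmetic, assuming 64-bit longs and the usual power-of-two round_up() idiom (a sketch, not the kernel's headers):

#include <stdio.h>

#define BITS_PER_LONG	64	/* assumed; 8 * sizeof(long) on the target */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)	/* y must be a power of two */

int main(void)
{
	/* shrinker ids 0..63 all yield new_nr_max == 64, so per-memcg
	 * maps are reallocated once per word of ids, not once per id */
	for (int new_id = 0; new_id <= 130; new_id += 65)
		printf("new_id=%3d -> new_nr_max=%3d\n",
		       new_id, round_up(new_id + 1, BITS_PER_LONG));
	return 0;	/* prints 64, 128, 192 */
}
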
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct shrinker_info *info;

		rcu_read_lock();
		info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
-		/* Pairs with smp mb in shrink_slab() */
-		smp_mb__before_atomic();
-		set_bit(shrinker_id, info->map);
+		if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) {
+			/* Pairs with smp mb in shrink_slab() */
+			smp_mb__before_atomic();
+			set_bit(shrinker_id, info->map);
+		}
		rcu_read_unlock();
	}
}

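The new bounds check makes the bitmap write safe against a shrinker_id that this memcg's RCU-visible map does not cover yet; without it, set_bit() would write past the allocation. WARN_ON_ONCE flags that case as unexpected rather than papering over it. For context, a hedged sketch of a typical caller (the my_cache_* names are illustrative, not from the tree):

/* Illustrative only: a memcg-aware cache flags itself so the next
 * shrink_slab() pass consults it for this memcg and NUMA node. */
static struct shrinker my_cache_shrinker;	/* ->id assigned at register time */

static void my_cache_add_object(struct mem_cgroup *memcg, int nid)
{
	/* ... queue the object on the per-memcg, per-node list ... */
	set_shrinker_bit(memcg, nid, my_cache_shrinker.id);
}
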
static DEFINE_IDR(shrinker_idr);

static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{

--- 61 unchanged lines hidden ---

	if (!parent)
		parent = root_mem_cgroup;

	/* Prevent from concurrent shrinker_info expand */
	down_read(&shrinker_rwsem);
	for_each_node(nid) {
		child_info = shrinker_info_protected(memcg, nid);
		parent_info = shrinker_info_protected(parent, nid);
-		for (i = 0; i < shrinker_nr_max; i++) {
+		for (i = 0; i < child_info->map_nr_max; i++) {
			nr = atomic_long_read(&child_info->nr_deferred[i]);
			atomic_long_add(nr, &parent_info->nr_deferred[i]);
		}
	}
	up_read(&shrinker_rwsem);
}

static bool cgroup_reclaim(struct scan_control *sc)

--- 450 unchanged lines hidden ---


	if (!down_read_trylock(&shrinker_rwsem))
		return 0;

	info = shrinker_info_protected(memcg, nid);
	if (unlikely(!info))
		goto unlock;

-	for_each_set_bit(i, info->map, shrinker_nr_max) {
+	for_each_set_bit(i, info->map, info->map_nr_max) {
		struct shrink_control sc = {
			.gfp_mask = gfp_mask,
			.nid = nid,
			.memcg = memcg,
		};
		struct shrinker *shrinker;

		shrinker = idr_find(&shrinker_idr, i);

--- 7182 unchanged lines hidden ---
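
Bounding the shrink_slab() walk by info->map_nr_max rather than the global shrinker_nr_max keeps the loop inside the RCU snapshot it actually dereferenced; the global counter may already have grown past this memcg's bitmap. A plain-C stand-in for the bounded walk (find_next_bit below is a simplified local helper, not the kernel's; assumes 64-bit longs):

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

/* simplified local stand-in for the kernel's find_next_bit() */
static int find_next_bit(const unsigned long *map, int size, int start)
{
	for (int i = start; i < size; i++)
		if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;
}

int main(void)
{
	unsigned long map[2] = { 0x5UL, 0x1UL };  /* ids 0, 2 and 64 set */
	int map_nr_max = 64;	/* this memcg's bound; global max may be 128 */
	int i;

	/* equivalent of for_each_set_bit(i, map, map_nr_max) */
	for (i = find_next_bit(map, map_nr_max, 0); i < map_nr_max;
	     i = find_next_bit(map, map_nr_max, i + 1))
		printf("would run shrinker id %d\n", i);  /* 0 and 2; never 64 */
	return 0;
}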