// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */
#include "internal.h"

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/**
 * pcpu_get_pages - get temp pages array
 *
 * Returns pointer to array of pointers to struct page which can be indexed
 * with pcpu_page_idx(). Note that there is only one array and accesses
 * should be serialized by pcpu_alloc_mutex.
 *
 * RETURNS:
 * Pointer to temp pages array on success.
 */
static struct page **pcpu_get_pages(void)
{
	static struct page **pages;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

	lockdep_assert_held(&pcpu_alloc_mutex);

	if (!pages)
		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
	return pages;
}

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}

/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 * @gfp: allocation flags passed to the underlying allocator
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk. Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end,
			    gfp_t gfp)
{
	unsigned int cpu, tcpu;
	int i;

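	/*
	 * Chunk pages are accessed through their vmalloc mapping, so highmem
	 * pages work fine; no permanent lowmem mapping of their own is needed.
	 */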
	gfp |= __GFP_HIGHMEM;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep)
				goto err;
		}
	}
	return 0;

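	/*
	 * Unwind in two steps: free the pages allocated so far for the
	 * failing @cpu, then the whole [@page_start,@page_end) range of
	 * every cpu that had already completed.
	 */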
err:
	while (--i >= page_start)
		__free_page(pages[pcpu_page_idx(cpu, i)]);

	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		for (i = page_start; i < page_end; i++)
			__free_page(pages[pcpu_page_idx(tcpu, i)]);
	}
	return -ENOMEM;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped. Flush cache. As each flush can be very expensive, issue
 * the flush on the whole region at once rather than once per cpu.
 * This could be overkill but is more scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
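	/* tear down the mapping only; callers do the cache and TLB flushes */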
	vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT));
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished. The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush
 * TLB for the regions. This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
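	/*
	 * Map at base page granularity (page_shift == PAGE_SHIFT); the
	 * _noflush variant leaves the cache flush to pcpu_post_map_flush().
	 */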
	return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT),
					PAGE_KERNEL, pages, PAGE_SHIFT);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk. The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting up whatever is necessary for
 * reverse lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;

		for (i = page_start; i < page_end; i++)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
	}
	return 0;
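	/*
	 * Unmap the cpus that were fully mapped before the failure and flush
	 * the TLB since the torn-down addresses may be mapped again soon.
	 */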
err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
	return err;
}

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped. Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), cache flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: the start page
 * @page_end: the end page
 * @gfp: allocation flags passed to the underlying memory allocator
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	struct page **pages;

	pages = pcpu_get_pages();
	if (!pages)
		return -ENOMEM;

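	/* pcpu_alloc_pages() frees any partially allocated pages on failure */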
	if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
		return -ENOMEM;

	if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
		pcpu_free_pages(chunk, pages, page_start, page_end);
		return -ENOMEM;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	return 0;
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.
 *
 * The caller is required to call pcpu_post_unmap_tlb_flush() unless the
 * region is being returned to vmalloc(), which will flush the TLB lazily.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	struct page **pages;

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages();
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_unmap_pages(chunk, pages, page_start, page_end);

	pcpu_free_pages(chunk, pages, page_start, page_end);
}

static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	struct vm_struct **vms;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size);
	if (!vms) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	chunk->data = vms;
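	/*
	 * Group offsets are relative to base_addr, so step the first group's
	 * vm area address back by its offset to recover base_addr.
	 */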
	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
	pcpu_free_chunk(chunk);
}

static struct page *pcpu_addr_to_page(void *addr)
{
	return vmalloc_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	/* no extra restriction */
	return 0;
}

/**
 * pcpu_should_reclaim_chunk - determine if a chunk should go into reclaim
 * @chunk: chunk of interest
 *
 * This is the entry point for percpu reclaim. If a chunk qualifies, it is then
 * isolated and managed in separate lists at the back of pcpu_slot: sidelined
 * and to_depopulate respectively. The to_depopulate list holds chunks slated
 * for depopulation. They no longer contribute to pcpu_nr_empty_pop_pages once
 * they are on this list. Once depopulated, they are moved onto the sidelined
 * list, which allows them to be pulled back in for allocation if no other
 * chunk can satisfy the allocation.
 */
static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk)
{
	/* do not reclaim either the first chunk or reserved chunk */
	if (chunk == pcpu_first_chunk || chunk == pcpu_reserved_chunk)
		return false;

	/*
	 * If it is isolated, it may be on the sidelined list so move it back to
	 * the to_depopulate list. If at least 1/4 of its pages are empty AND
	 * there is no system-wide shortage of empty pages aside from this
	 * chunk, move it to the to_depopulate list.
	 */
	return ((chunk->isolated && chunk->nr_empty_pop_pages) ||
		(pcpu_nr_empty_pop_pages >
		 (PCPU_EMPTY_POP_PAGES_HIGH + chunk->nr_empty_pop_pages) &&
		 chunk->nr_empty_pop_pages >= chunk->nr_pages / 4));
}