1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/mm/page_alloc.c
4 *
5 * Manages the free list; the system allocates free pages here.
6 * Note that kmalloc() lives in slab.c
7 *
8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
9 * Swap reorganised 29.12.95, Stephen Tweedie
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16 */
17
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/highmem.h>
21 #include <linux/interrupt.h>
22 #include <linux/jiffies.h>
23 #include <linux/compiler.h>
24 #include <linux/kernel.h>
25 #include <linux/kasan.h>
26 #include <linux/kmsan.h>
27 #include <linux/module.h>
28 #include <linux/suspend.h>
29 #include <linux/ratelimit.h>
30 #include <linux/oom.h>
31 #include <linux/topology.h>
32 #include <linux/sysctl.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/memory_hotplug.h>
36 #include <linux/nodemask.h>
37 #include <linux/vmstat.h>
38 #include <linux/fault-inject.h>
39 #include <linux/compaction.h>
40 #include <trace/events/kmem.h>
41 #include <trace/events/oom.h>
42 #include <linux/prefetch.h>
43 #include <linux/mm_inline.h>
44 #include <linux/mmu_notifier.h>
45 #include <linux/migrate.h>
46 #include <linux/sched/mm.h>
47 #include <linux/page_owner.h>
48 #include <linux/page_table_check.h>
49 #include <linux/memcontrol.h>
50 #include <linux/ftrace.h>
51 #include <linux/lockdep.h>
52 #include <linux/psi.h>
53 #include <linux/khugepaged.h>
54 #include <linux/delayacct.h>
55 #include <asm/div64.h>
56 #include "internal.h"
57 #include "shuffle.h"
58 #include "page_reporting.h"
59
60 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
61 typedef int __bitwise fpi_t;
62
63 /* No special request */
64 #define FPI_NONE ((__force fpi_t)0)
65
66 /*
67 * Skip free page reporting notification for the (possibly merged) page.
68 * This does not hinder free page reporting from grabbing the page,
69 * reporting it and marking it "reported" - it only skips notifying
70 * the free page reporting infrastructure about a newly freed page. For
71 * example, used when temporarily pulling a page from a freelist and
72 * putting it back unmodified.
73 */
74 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0))
75
76 /*
77 * Place the (possibly merged) page to the tail of the freelist. Will ignore
78 * page shuffling (relevant code - e.g., memory onlining - is expected to
79 * shuffle the whole zone).
80 *
81 * Note: No code should rely on this flag for correctness - it's purely
82 * to allow for optimizations when handing back either fresh pages
83 * (memory onlining) or untouched pages (page isolation, free page
84 * reporting).
85 */
86 #define FPI_TO_TAIL ((__force fpi_t)BIT(1))
87
88 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
89 static DEFINE_MUTEX(pcp_batch_high_lock);
90 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
91
92 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
93 /*
94 * On SMP, spin_trylock is sufficient protection.
95 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
96 */
97 #define pcp_trylock_prepare(flags) do { } while (0)
98 #define pcp_trylock_finish(flag) do { } while (0)
99 #else
100
101 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
102 #define pcp_trylock_prepare(flags) local_irq_save(flags)
103 #define pcp_trylock_finish(flags) local_irq_restore(flags)
104 #endif
105
106 /*
107 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
108 * a migration causing the wrong PCP to be locked and remote memory being
109 * potentially allocated, pin the task to the CPU for the lookup+lock.
110 * preempt_disable is used on !RT because it is faster than migrate_disable.
111 * migrate_disable is used on RT because otherwise RT spinlock usage is
112 * interfered with and a high priority task cannot preempt the allocator.
113 */
114 #ifndef CONFIG_PREEMPT_RT
115 #define pcpu_task_pin() preempt_disable()
116 #define pcpu_task_unpin() preempt_enable()
117 #else
118 #define pcpu_task_pin() migrate_disable()
119 #define pcpu_task_unpin() migrate_enable()
120 #endif
121
122 /*
123 * Generic helper to look up and lock a per-cpu variable with an embedded spinlock.
124 * Return value should be used with equivalent unlock helper.
125 */
126 #define pcpu_spin_lock(type, member, ptr) \
127 ({ \
128 type *_ret; \
129 pcpu_task_pin(); \
130 _ret = this_cpu_ptr(ptr); \
131 spin_lock(&_ret->member); \
132 _ret; \
133 })
134
135 #define pcpu_spin_trylock(type, member, ptr) \
136 ({ \
137 type *_ret; \
138 pcpu_task_pin(); \
139 _ret = this_cpu_ptr(ptr); \
140 if (!spin_trylock(&_ret->member)) { \
141 pcpu_task_unpin(); \
142 _ret = NULL; \
143 } \
144 _ret; \
145 })
146
147 #define pcpu_spin_unlock(member, ptr) \
148 ({ \
149 spin_unlock(&ptr->member); \
150 pcpu_task_unpin(); \
151 })
152
153 /* struct per_cpu_pages specific helpers. */
154 #define pcp_spin_lock(ptr) \
155 pcpu_spin_lock(struct per_cpu_pages, lock, ptr)
156
157 #define pcp_spin_trylock(ptr) \
158 pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)
159
160 #define pcp_spin_unlock(ptr) \
161 pcpu_spin_unlock(lock, ptr)
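/*
 * Illustrative usage sketch (not called anywhere; the zone->per_cpu_pageset
 * field name is assumed from the rest of the allocator): a caller typically
 * pins the task, locks this CPU's pcp, operates on it and unlocks:
 *
 *	struct per_cpu_pages *pcp;
 *
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 *	if (pcp) {
 *		... operate on pcp->lists[] and pcp->count ...
 *		pcp_spin_unlock(pcp);
 *	}
 *
 * On trylock failure pcpu_spin_trylock() has already unpinned the task, so
 * the caller only needs to fall back to the non-pcp path.
 */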
162
163 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
164 DEFINE_PER_CPU(int, numa_node);
165 EXPORT_PER_CPU_SYMBOL(numa_node);
166 #endif
167
168 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
169
170 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
171 /*
172 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
173 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
174 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
175 * defined in <linux/topology.h>.
176 */
177 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
178 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
179 #endif
180
181 static DEFINE_MUTEX(pcpu_drain_mutex);
182
183 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
184 volatile unsigned long latent_entropy __latent_entropy;
185 EXPORT_SYMBOL(latent_entropy);
186 #endif
187
188 /*
189 * Array of node states.
190 */
191 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
192 [N_POSSIBLE] = NODE_MASK_ALL,
193 [N_ONLINE] = { { [0] = 1UL } },
194 #ifndef CONFIG_NUMA
195 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
196 #ifdef CONFIG_HIGHMEM
197 [N_HIGH_MEMORY] = { { [0] = 1UL } },
198 #endif
199 [N_MEMORY] = { { [0] = 1UL } },
200 [N_CPU] = { { [0] = 1UL } },
201 #endif /* NUMA */
202 };
203 EXPORT_SYMBOL(node_states);
204
205 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
206
207 /*
208 * A cached value of the page's pageblock's migratetype, used when the page is
209 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
210 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
211 * Also the migratetype set in the page does not necessarily match the pcplist
212 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
213 * other index - this ensures that it will be put on the correct CMA freelist.
214 */
215 static inline int get_pcppage_migratetype(struct page *page)
216 {
217 return page->index;
218 }
219
220 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
221 {
222 page->index = migratetype;
223 }
224
225 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
226 unsigned int pageblock_order __read_mostly;
227 #endif
228
229 static void __free_pages_ok(struct page *page, unsigned int order,
230 fpi_t fpi_flags);
231
232 /*
233 * results with 256, 32 in the lowmem_reserve sysctl:
234 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
235 * 1G machine -> (16M dma, 784M normal, 224M high)
236 * NORMAL allocation will leave 784M/256 of RAM reserved in ZONE_DMA
237 * HIGHMEM allocation will leave 224M/32 of RAM reserved in ZONE_NORMAL
238 * HIGHMEM allocation will leave (224M+784M)/256 of RAM reserved in ZONE_DMA
239 *
240 * TBD: should special case ZONE_DMA32 machines here - in those we normally
241 * don't need any ZONE_NORMAL reservation
242 */
243 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
244 #ifdef CONFIG_ZONE_DMA
245 [ZONE_DMA] = 256,
246 #endif
247 #ifdef CONFIG_ZONE_DMA32
248 [ZONE_DMA32] = 256,
249 #endif
250 [ZONE_NORMAL] = 32,
251 #ifdef CONFIG_HIGHMEM
252 [ZONE_HIGHMEM] = 0,
253 #endif
254 [ZONE_MOVABLE] = 0,
255 };
256
257 char * const zone_names[MAX_NR_ZONES] = {
258 #ifdef CONFIG_ZONE_DMA
259 "DMA",
260 #endif
261 #ifdef CONFIG_ZONE_DMA32
262 "DMA32",
263 #endif
264 "Normal",
265 #ifdef CONFIG_HIGHMEM
266 "HighMem",
267 #endif
268 "Movable",
269 #ifdef CONFIG_ZONE_DEVICE
270 "Device",
271 #endif
272 };
273
274 const char * const migratetype_names[MIGRATE_TYPES] = {
275 "Unmovable",
276 "Movable",
277 "Reclaimable",
278 "HighAtomic",
279 #ifdef CONFIG_CMA
280 "CMA",
281 #endif
282 #ifdef CONFIG_MEMORY_ISOLATION
283 "Isolate",
284 #endif
285 };
286
287 int min_free_kbytes = 1024;
288 int user_min_free_kbytes = -1;
289 static int watermark_boost_factor __read_mostly = 15000;
290 static int watermark_scale_factor = 10;
291
292 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
293 int movable_zone;
294 EXPORT_SYMBOL(movable_zone);
295
296 #if MAX_NUMNODES > 1
297 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
298 unsigned int nr_online_nodes __read_mostly = 1;
299 EXPORT_SYMBOL(nr_node_ids);
300 EXPORT_SYMBOL(nr_online_nodes);
301 #endif
302
303 static bool page_contains_unaccepted(struct page *page, unsigned int order);
304 static void accept_page(struct page *page, unsigned int order);
305 static bool cond_accept_memory(struct zone *zone, unsigned int order);
306 static inline bool has_unaccepted_memory(void);
307 static bool __free_unaccepted(struct page *page);
308
309 int page_group_by_mobility_disabled __read_mostly;
310
311 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
312 /*
313 * During boot we initialize deferred pages on-demand, as needed, but once
314 * page_alloc_init_late() has finished, the deferred pages are all initialized,
315 * and we can permanently disable that path.
316 */
317 DEFINE_STATIC_KEY_TRUE(deferred_pages);
318
319 static inline bool deferred_pages_enabled(void)
320 {
321 return static_branch_unlikely(&deferred_pages);
322 }
323
324 /*
325 * deferred_grow_zone() is __init, but it is called from
326 * get_page_from_freelist() during early boot until deferred_pages permanently
327 * disables this call. This is why we have a refdata wrapper to avoid a warning,
328 * and to ensure that the function body gets unloaded.
329 */
330 static bool __ref
331 _deferred_grow_zone(struct zone *zone, unsigned int order)
332 {
333 return deferred_grow_zone(zone, order);
334 }
335 #else
336 static inline bool deferred_pages_enabled(void)
337 {
338 return false;
339 }
340 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
341
342 /* Return a pointer to the bitmap storing bits affecting a block of pages */
343 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
344 unsigned long pfn)
345 {
346 #ifdef CONFIG_SPARSEMEM
347 return section_to_usemap(__pfn_to_section(pfn));
348 #else
349 return page_zone(page)->pageblock_flags;
350 #endif /* CONFIG_SPARSEMEM */
351 }
352
353 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
354 {
355 #ifdef CONFIG_SPARSEMEM
356 pfn &= (PAGES_PER_SECTION-1);
357 #else
358 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
359 #endif /* CONFIG_SPARSEMEM */
360 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
361 }
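/*
 * Worked example (illustrative, assuming pageblock_order == 9 and the
 * NR_PAGEBLOCK_BITS == 4 asserted below): a pfn at offset 0x1200 into its
 * section yields bitidx = (0x1200 >> 9) * 4 = 36, i.e. this pageblock's
 * flags occupy bits 36..39 of the usemap.
 */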
362
363 /**
364 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
365 * @page: The page within the block of interest
366 * @pfn: The target page frame number
367 * @mask: mask of bits that the caller is interested in
368 *
369 * Return: pageblock_bits flags
370 */
371 unsigned long get_pfnblock_flags_mask(const struct page *page,
372 unsigned long pfn, unsigned long mask)
373 {
374 unsigned long *bitmap;
375 unsigned long bitidx, word_bitidx;
376 unsigned long word;
377
378 bitmap = get_pageblock_bitmap(page, pfn);
379 bitidx = pfn_to_bitidx(page, pfn);
380 word_bitidx = bitidx / BITS_PER_LONG;
381 bitidx &= (BITS_PER_LONG-1);
382 /*
383 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
384 * a consistent read of the memory array, so that results, even though
385 * racy, are not corrupted.
386 */
387 word = READ_ONCE(bitmap[word_bitidx]);
388 return (word >> bitidx) & mask;
389 }
390
391 static __always_inline int get_pfnblock_migratetype(const struct page *page,
392 unsigned long pfn)
393 {
394 return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
395 }
396
397 /**
398 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
399 * @page: The page within the block of interest
400 * @flags: The flags to set
401 * @pfn: The target page frame number
402 * @mask: mask of bits that the caller is interested in
403 */
404 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
405 unsigned long pfn,
406 unsigned long mask)
407 {
408 unsigned long *bitmap;
409 unsigned long bitidx, word_bitidx;
410 unsigned long word;
411
412 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
413 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
414
415 bitmap = get_pageblock_bitmap(page, pfn);
416 bitidx = pfn_to_bitidx(page, pfn);
417 word_bitidx = bitidx / BITS_PER_LONG;
418 bitidx &= (BITS_PER_LONG-1);
419
420 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
421
422 mask <<= bitidx;
423 flags <<= bitidx;
424
425 word = READ_ONCE(bitmap[word_bitidx]);
426 do {
427 } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
428 }
429
430 void set_pageblock_migratetype(struct page *page, int migratetype)
431 {
432 if (unlikely(page_group_by_mobility_disabled &&
433 migratetype < MIGRATE_PCPTYPES))
434 migratetype = MIGRATE_UNMOVABLE;
435
436 set_pfnblock_flags_mask(page, (unsigned long)migratetype,
437 page_to_pfn(page), MIGRATETYPE_MASK);
438 }
439
440 #ifdef CONFIG_DEBUG_VM
441 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
442 {
443 int ret;
444 unsigned seq;
445 unsigned long pfn = page_to_pfn(page);
446 unsigned long sp, start_pfn;
447
448 do {
449 seq = zone_span_seqbegin(zone);
450 start_pfn = zone->zone_start_pfn;
451 sp = zone->spanned_pages;
452 ret = !zone_spans_pfn(zone, pfn);
453 } while (zone_span_seqretry(zone, seq));
454
455 if (ret)
456 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
457 pfn, zone_to_nid(zone), zone->name,
458 start_pfn, start_pfn + sp);
459
460 return ret;
461 }
462
463 /*
464 * Temporary debugging check for pages not lying within a given zone.
465 */
466 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
467 {
468 if (page_outside_zone_boundaries(zone, page))
469 return 1;
470 if (zone != page_zone(page))
471 return 1;
472
473 return 0;
474 }
475 #else
476 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
477 {
478 return 0;
479 }
480 #endif
481
482 static void bad_page(struct page *page, const char *reason)
483 {
484 static unsigned long resume;
485 static unsigned long nr_shown;
486 static unsigned long nr_unshown;
487
488 /*
489 * Allow a burst of 60 reports, then keep quiet for that minute;
490 * or allow a steady drip of one report per second.
491 */
492 if (nr_shown == 60) {
493 if (time_before(jiffies, resume)) {
494 nr_unshown++;
495 goto out;
496 }
497 if (nr_unshown) {
498 pr_alert(
499 "BUG: Bad page state: %lu messages suppressed\n",
500 nr_unshown);
501 nr_unshown = 0;
502 }
503 nr_shown = 0;
504 }
505 if (nr_shown++ == 0)
506 resume = jiffies + 60 * HZ;
507
508 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
509 current->comm, page_to_pfn(page));
510 dump_page(page, reason);
511
512 print_modules();
513 dump_stack();
514 out:
515 /* Leave bad fields for debug, except PageBuddy could make trouble */
516 page_mapcount_reset(page); /* remove PageBuddy */
517 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
518 }
519
520 static inline unsigned int order_to_pindex(int migratetype, int order)
521 {
522 bool __maybe_unused movable;
523
524 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
525 if (order > PAGE_ALLOC_COSTLY_ORDER) {
526 VM_BUG_ON(order != pageblock_order);
527
528 movable = migratetype == MIGRATE_MOVABLE;
529
530 return NR_LOWORDER_PCP_LISTS + movable;
531 }
532 #else
533 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
534 #endif
535
536 return (MIGRATE_PCPTYPES * order) + migratetype;
537 }
538
539 static inline int pindex_to_order(unsigned int pindex)
540 {
541 int order = pindex / MIGRATE_PCPTYPES;
542
543 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
544 if (pindex >= NR_LOWORDER_PCP_LISTS)
545 order = pageblock_order;
546 #else
547 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
548 #endif
549
550 return order;
551 }
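/*
 * Worked example (illustrative, assuming MIGRATE_PCPTYPES == 3 and
 * MIGRATE_MOVABLE == 1): an order-2 MIGRATE_MOVABLE page maps to
 * pindex = 3 * 2 + 1 = 7, and pindex_to_order(7) recovers 7 / 3 = 2.
 * THP-sized (pageblock_order) pages use the two dedicated lists at
 * NR_LOWORDER_PCP_LISTS and NR_LOWORDER_PCP_LISTS + 1.
 */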
552
553 static inline bool pcp_allowed_order(unsigned int order)
554 {
555 if (order <= PAGE_ALLOC_COSTLY_ORDER)
556 return true;
557 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
558 if (order == pageblock_order)
559 return true;
560 #endif
561 return false;
562 }
563
564 static inline void free_the_page(struct page *page, unsigned int order)
565 {
566 if (pcp_allowed_order(order)) /* Via pcp? */
567 free_unref_page(page, order);
568 else
569 __free_pages_ok(page, order, FPI_NONE);
570 }
571
572 /*
573 * Higher-order pages are called "compound pages". They are structured thusly:
574 *
575 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
576 *
577 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
578 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the head page.
579 *
580 * The first tail page's ->compound_order holds the order of allocation.
581 * This usage means that zero-order pages may not be compound.
582 */
583
584 void prep_compound_page(struct page *page, unsigned int order)
585 {
586 int i;
587 int nr_pages = 1 << order;
588
589 __SetPageHead(page);
590 for (i = 1; i < nr_pages; i++)
591 prep_compound_tail(page, i);
592
593 prep_compound_head(page, order);
594 }
595
596 void destroy_large_folio(struct folio *folio)
597 {
598 if (folio_test_hugetlb(folio)) {
599 free_huge_folio(folio);
600 return;
601 }
602
603 folio_unqueue_deferred_split(folio);
604 mem_cgroup_uncharge(folio);
605 free_the_page(&folio->page, folio_order(folio));
606 }
607
608 static inline void set_buddy_order(struct page *page, unsigned int order)
609 {
610 set_page_private(page, order);
611 __SetPageBuddy(page);
612 }
613
614 #ifdef CONFIG_COMPACTION
615 static inline struct capture_control *task_capc(struct zone *zone)
616 {
617 struct capture_control *capc = current->capture_control;
618
619 return unlikely(capc) &&
620 !(current->flags & PF_KTHREAD) &&
621 !capc->page &&
622 capc->cc->zone == zone ? capc : NULL;
623 }
624
625 static inline bool
626 compaction_capture(struct capture_control *capc, struct page *page,
627 int order, int migratetype)
628 {
629 if (!capc || order != capc->cc->order)
630 return false;
631
632 /* Do not accidentally pollute CMA or isolated regions*/
633 if (is_migrate_cma(migratetype) ||
634 is_migrate_isolate(migratetype))
635 return false;
636
637 /*
638 * Do not let lower order allocations pollute a movable pageblock.
639 * This might let an unmovable request use a reclaimable pageblock
640 * and vice-versa but no more than normal fallback logic which can
641 * have trouble finding a high-order free page.
642 */
643 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
644 return false;
645
646 capc->page = page;
647 return true;
648 }
649
650 #else
651 static inline struct capture_control *task_capc(struct zone *zone)
652 {
653 return NULL;
654 }
655
656 static inline bool
657 compaction_capture(struct capture_control *capc, struct page *page,
658 int order, int migratetype)
659 {
660 return false;
661 }
662 #endif /* CONFIG_COMPACTION */
663
664 /* Used for pages not on another list */
665 static inline void add_to_free_list(struct page *page, struct zone *zone,
666 unsigned int order, int migratetype)
667 {
668 struct free_area *area = &zone->free_area[order];
669
670 list_add(&page->buddy_list, &area->free_list[migratetype]);
671 area->nr_free++;
672 }
673
674 /* Used for pages not on another list */
675 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
676 unsigned int order, int migratetype)
677 {
678 struct free_area *area = &zone->free_area[order];
679
680 list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
681 area->nr_free++;
682 }
683
684 /*
685 * Used for pages which are on another list. Move the pages to the tail
686 * of the list - so the moved pages won't immediately be considered for
687 * allocation again (e.g., optimization for memory onlining).
688 */
689 static inline void move_to_free_list(struct page *page, struct zone *zone,
690 unsigned int order, int migratetype)
691 {
692 struct free_area *area = &zone->free_area[order];
693
694 list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
695 }
696
697 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
698 unsigned int order)
699 {
700 /* clear reported state and update reported page count */
701 if (page_reported(page))
702 __ClearPageReported(page);
703
704 list_del(&page->buddy_list);
705 __ClearPageBuddy(page);
706 set_page_private(page, 0);
707 zone->free_area[order].nr_free--;
708 }
709
710 static inline struct page *get_page_from_free_area(struct free_area *area,
711 int migratetype)
712 {
713 return list_first_entry_or_null(&area->free_list[migratetype],
714 struct page, buddy_list);
715 }
716
717 /*
718 * If this is not the largest possible page, check if the buddy
719 * of the next-highest order is free. If it is, it's possible
720 * that pages are being freed that will coalesce soon. In case
721 * that is happening, add the free page to the tail of the list
722 * so it's less likely to be used soon and more likely to be merged
723 * as a higher order page.
724 */
725 static inline bool
726 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
727 struct page *page, unsigned int order)
728 {
729 unsigned long higher_page_pfn;
730 struct page *higher_page;
731
732 if (order >= MAX_ORDER - 1)
733 return false;
734
735 higher_page_pfn = buddy_pfn & pfn;
736 higher_page = page + (higher_page_pfn - pfn);
737
738 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
739 NULL) != NULL;
740 }
741
742 /*
743 * Freeing function for a buddy system allocator.
744 *
745 * The concept of a buddy system is to maintain a direct-mapped table
746 * (containing bit values) for memory blocks of various "orders".
747 * The bottom level table contains the map for the smallest allocatable
748 * units of memory (here, pages), and each level above it describes
749 * pairs of units from the levels below, hence, "buddies".
750 * At a high level, all that happens here is marking the table entry
751 * at the bottom level available, and propagating the changes upward
752 * as necessary, plus some accounting needed to play nicely with other
753 * parts of the VM system.
754 * At each level, we keep a list of pages, which are heads of contiguous
755 * free pages of length (1 << order) and marked with PageBuddy.
756 * A page's order is recorded in the page_private(page) field.
757 * So when we are allocating or freeing one, we can derive the state of the
758 * other. That is, if we allocate a small block, and both were
759 * free, the remainder of the region must be split into blocks.
760 * If a block is freed, and its buddy is also free, then this
761 * triggers coalescing into a block of larger size.
762 *
763 * -- nyc
764 */
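/*
 * Worked example (illustrative): the buddy of the block starting at pfn P
 * with order n lives at P ^ (1 << n), and the merged order-(n + 1) block
 * starts at the lower of the two, i.e. buddy_pfn & pfn. Freeing order-0
 * pfn 0x1001 while pfn 0x1000 is already free merges them into an order-1
 * block at 0x1000; if 0x1002..0x1003 is free as well, the next pass merges
 * everything into an order-2 block at 0x1000, and so on up the orders.
 */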
765
766 static inline void __free_one_page(struct page *page,
767 unsigned long pfn,
768 struct zone *zone, unsigned int order,
769 int migratetype, fpi_t fpi_flags)
770 {
771 struct capture_control *capc = task_capc(zone);
772 unsigned long buddy_pfn = 0;
773 unsigned long combined_pfn;
774 struct page *buddy;
775 bool to_tail;
776
777 VM_BUG_ON(!zone_is_initialized(zone));
778 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
779
780 VM_BUG_ON(migratetype == -1);
781 if (likely(!is_migrate_isolate(migratetype)))
782 __mod_zone_freepage_state(zone, 1 << order, migratetype);
783
784 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
785 VM_BUG_ON_PAGE(bad_range(zone, page), page);
786
787 while (order < MAX_ORDER) {
788 if (compaction_capture(capc, page, order, migratetype)) {
789 __mod_zone_freepage_state(zone, -(1 << order),
790 migratetype);
791 return;
792 }
793
794 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
795 if (!buddy)
796 goto done_merging;
797
798 if (unlikely(order >= pageblock_order)) {
799 /*
800 * We want to prevent merging between freepages on a pageblock
801 * without fallbacks and a normal pageblock. Without this,
802 * pageblock isolation could cause incorrect freepage or CMA
803 * accounting or HIGHATOMIC accounting.
804 */
805 int buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);
806
807 if (migratetype != buddy_mt
808 && (!migratetype_is_mergeable(migratetype) ||
809 !migratetype_is_mergeable(buddy_mt)))
810 goto done_merging;
811 }
812
813 /*
814 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
815 * merge with it and move up one order.
816 */
817 if (page_is_guard(buddy))
818 clear_page_guard(zone, buddy, order, migratetype);
819 else
820 del_page_from_free_list(buddy, zone, order);
821 combined_pfn = buddy_pfn & pfn;
822 page = page + (combined_pfn - pfn);
823 pfn = combined_pfn;
824 order++;
825 }
826
827 done_merging:
828 set_buddy_order(page, order);
829
830 if (fpi_flags & FPI_TO_TAIL)
831 to_tail = true;
832 else if (is_shuffle_order(order))
833 to_tail = shuffle_pick_tail();
834 else
835 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
836
837 if (to_tail)
838 add_to_free_list_tail(page, zone, order, migratetype);
839 else
840 add_to_free_list(page, zone, order, migratetype);
841
842 /* Notify page reporting subsystem of freed page */
843 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
844 page_reporting_notify_free(order);
845 }
846
847 /**
848 * split_free_page() -- split a free page at split_pfn_offset
849 * @free_page: the original free page
850 * @order: the order of the page
851 * @split_pfn_offset: split offset within the page
852 *
853 * Return: -ENOENT if the free page is changed, otherwise 0.
854 *
855 * It is used when the free page crosses two pageblocks with different migratetypes
856 * at split_pfn_offset within the page. The split free page will be put into
857 * separate migratetype lists afterwards. Otherwise, the function achieves
858 * nothing.
859 */
860 int split_free_page(struct page *free_page,
861 unsigned int order, unsigned long split_pfn_offset)
862 {
863 struct zone *zone = page_zone(free_page);
864 unsigned long free_page_pfn = page_to_pfn(free_page);
865 unsigned long pfn;
866 unsigned long flags;
867 int free_page_order;
868 int mt;
869 int ret = 0;
870
871 if (split_pfn_offset == 0)
872 return ret;
873
874 spin_lock_irqsave(&zone->lock, flags);
875
876 if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
877 ret = -ENOENT;
878 goto out;
879 }
880
881 mt = get_pfnblock_migratetype(free_page, free_page_pfn);
882 if (likely(!is_migrate_isolate(mt)))
883 __mod_zone_freepage_state(zone, -(1UL << order), mt);
884
885 del_page_from_free_list(free_page, zone, order);
886 for (pfn = free_page_pfn;
887 pfn < free_page_pfn + (1UL << order);) {
888 int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);
889
890 free_page_order = min_t(unsigned int,
891 pfn ? __ffs(pfn) : order,
892 __fls(split_pfn_offset));
893 __free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
894 mt, FPI_NONE);
895 pfn += 1UL << free_page_order;
896 split_pfn_offset -= (1UL << free_page_order);
897 /* we have done the first part, now switch to second part */
898 if (split_pfn_offset == 0)
899 split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
900 }
901 out:
902 spin_unlock_irqrestore(&zone->lock, flags);
903 return ret;
904 }
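/*
 * Worked example (illustrative): splitting an order-4 free page at pfn 256
 * with split_pfn_offset == 4 frees an order-2 block at pfn 256, then an
 * order-2 block at pfn 260 and an order-3 block at pfn 264, so the ranges
 * [256, 260) and [260, 272) can land on freelists of different migratetypes.
 */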
905 /*
906 * A bad page could be due to a number of fields. Instead of multiple branches,
907 * try and check multiple fields with one check. The caller must do a detailed
908 * check if necessary.
909 */
910 static inline bool page_expected_state(struct page *page,
911 unsigned long check_flags)
912 {
913 if (unlikely(atomic_read(&page->_mapcount) != -1))
914 return false;
915
916 if (unlikely((unsigned long)page->mapping |
917 page_ref_count(page) |
918 #ifdef CONFIG_MEMCG
919 page->memcg_data |
920 #endif
921 (page->flags & check_flags)))
922 return false;
923
924 return true;
925 }
926
927 static const char *page_bad_reason(struct page *page, unsigned long flags)
928 {
929 const char *bad_reason = NULL;
930
931 if (unlikely(atomic_read(&page->_mapcount) != -1))
932 bad_reason = "nonzero mapcount";
933 if (unlikely(page->mapping != NULL))
934 bad_reason = "non-NULL mapping";
935 if (unlikely(page_ref_count(page) != 0))
936 bad_reason = "nonzero _refcount";
937 if (unlikely(page->flags & flags)) {
938 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
939 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
940 else
941 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
942 }
943 #ifdef CONFIG_MEMCG
944 if (unlikely(page->memcg_data))
945 bad_reason = "page still charged to cgroup";
946 #endif
947 return bad_reason;
948 }
949
950 static void free_page_is_bad_report(struct page *page)
951 {
952 bad_page(page,
953 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
954 }
955
956 static inline bool free_page_is_bad(struct page *page)
957 {
958 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
959 return false;
960
961 /* Something has gone sideways, find it */
962 free_page_is_bad_report(page);
963 return true;
964 }
965
966 static inline bool is_check_pages_enabled(void)
967 {
968 return static_branch_unlikely(&check_pages_enabled);
969 }
970
971 static int free_tail_page_prepare(struct page *head_page, struct page *page)
972 {
973 struct folio *folio = (struct folio *)head_page;
974 int ret = 1;
975
976 /*
977 * We rely on page->lru.next never having bit 0 set, unless the page
978 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
979 */
980 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
981
982 if (!is_check_pages_enabled()) {
983 ret = 0;
984 goto out;
985 }
986 switch (page - head_page) {
987 case 1:
988 /* the first tail page: these may be in place of ->mapping */
989 if (unlikely(folio_entire_mapcount(folio))) {
990 bad_page(page, "nonzero entire_mapcount");
991 goto out;
992 }
993 if (unlikely(atomic_read(&folio->_nr_pages_mapped))) {
994 bad_page(page, "nonzero nr_pages_mapped");
995 goto out;
996 }
997 if (unlikely(atomic_read(&folio->_pincount))) {
998 bad_page(page, "nonzero pincount");
999 goto out;
1000 }
1001 break;
1002 case 2:
1003 /* the second tail page: deferred_list overlaps ->mapping */
1004 if (unlikely(!list_empty(&folio->_deferred_list))) {
1005 bad_page(page, "on deferred list");
1006 goto out;
1007 }
1008 break;
1009 default:
1010 if (page->mapping != TAIL_MAPPING) {
1011 bad_page(page, "corrupted mapping in tail page");
1012 goto out;
1013 }
1014 break;
1015 }
1016 if (unlikely(!PageTail(page))) {
1017 bad_page(page, "PageTail not set");
1018 goto out;
1019 }
1020 if (unlikely(compound_head(page) != head_page)) {
1021 bad_page(page, "compound_head not consistent");
1022 goto out;
1023 }
1024 ret = 0;
1025 out:
1026 page->mapping = NULL;
1027 clear_compound_head(page);
1028 return ret;
1029 }
1030
1031 /*
1032 * Skip KASAN memory poisoning when either:
1033 *
1034 * 1. For generic KASAN: deferred memory initialization has not yet completed.
1035 * Tag-based KASAN modes skip pages freed via deferred memory initialization
1036 * using page tags instead (see below).
1037 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
1038 * that error detection is disabled for accesses via the page address.
1039 *
1040 * Pages will have match-all tags in the following circumstances:
1041 *
1042 * 1. Pages are being initialized for the first time, including during deferred
1043 * memory init; see the call to page_kasan_tag_reset in __init_single_page.
1044 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
1045 * exception of pages unpoisoned by kasan_unpoison_vmalloc.
1046 * 3. The allocation was excluded from being checked due to sampling,
1047 * see the call to kasan_unpoison_pages.
1048 *
1049 * Poisoning pages during deferred memory init will greatly lengthen the
1050 * process and cause problems in large memory systems as the deferred pages
1051 * initialization is done with interrupts disabled.
1052 *
1053 * Assuming that there will be no reference to those newly initialized
1054 * pages before they are ever allocated, this should have no effect on
1055 * KASAN memory tracking as the poison will be properly inserted at page
1056 * allocation time. The only corner case is when pages are allocated by
1057 * on-demand allocation and then freed again before the deferred pages
1058 * initialization is done, but this is not likely to happen.
1059 */
1060 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
1061 {
1062 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1063 return deferred_pages_enabled();
1064
1065 return page_kasan_tag(page) == 0xff;
1066 }
1067
1068 static void kernel_init_pages(struct page *page, int numpages)
1069 {
1070 int i;
1071
1072 /* s390's use of memset() could override KASAN redzones. */
1073 kasan_disable_current();
1074 for (i = 0; i < numpages; i++)
1075 clear_highpage_kasan_tagged(page + i);
1076 kasan_enable_current();
1077 }
1078
1079 static __always_inline bool free_pages_prepare(struct page *page,
1080 unsigned int order, fpi_t fpi_flags)
1081 {
1082 int bad = 0;
1083 bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
1084 bool init = want_init_on_free();
1085
1086 VM_BUG_ON_PAGE(PageTail(page), page);
1087
1088 trace_mm_page_free(page, order);
1089 kmsan_free_page(page, order);
1090
1091 if (unlikely(PageHWPoison(page)) && !order) {
1092 /*
1093 * Do not let hwpoison pages hit pcplists/buddy
1094 * Untie memcg state and reset page's owner
1095 */
1096 if (memcg_kmem_online() && PageMemcgKmem(page))
1097 __memcg_kmem_uncharge_page(page, order);
1098 reset_page_owner(page, order);
1099 page_table_check_free(page, order);
1100 return false;
1101 }
1102
1103 /*
1104 * Check tail pages before head page information is cleared to
1105 * avoid checking PageCompound for order-0 pages.
1106 */
1107 if (unlikely(order)) {
1108 bool compound = PageCompound(page);
1109 int i;
1110
1111 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1112
1113 if (compound)
1114 page[1].flags &= ~PAGE_FLAGS_SECOND;
1115 for (i = 1; i < (1 << order); i++) {
1116 if (compound)
1117 bad += free_tail_page_prepare(page, page + i);
1118 if (is_check_pages_enabled()) {
1119 if (free_page_is_bad(page + i)) {
1120 bad++;
1121 continue;
1122 }
1123 }
1124 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1125 }
1126 }
1127 if (PageMappingFlags(page))
1128 page->mapping = NULL;
1129 if (memcg_kmem_online() && PageMemcgKmem(page))
1130 __memcg_kmem_uncharge_page(page, order);
1131 if (is_check_pages_enabled()) {
1132 if (free_page_is_bad(page))
1133 bad++;
1134 if (bad)
1135 return false;
1136 }
1137
1138 page_cpupid_reset_last(page);
1139 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1140 reset_page_owner(page, order);
1141 page_table_check_free(page, order);
1142
1143 if (!PageHighMem(page)) {
1144 debug_check_no_locks_freed(page_address(page),
1145 PAGE_SIZE << order);
1146 debug_check_no_obj_freed(page_address(page),
1147 PAGE_SIZE << order);
1148 }
1149
1150 kernel_poison_pages(page, 1 << order);
1151
1152 /*
1153 * As memory initialization might be integrated into KASAN,
1154 * KASAN poisoning and memory initialization code must be
1155 * kept together to avoid discrepancies in behavior.
1156 *
1157 * With hardware tag-based KASAN, memory tags must be set before the
1158 * page becomes unavailable via debug_pagealloc or arch_free_page.
1159 */
1160 if (!skip_kasan_poison) {
1161 kasan_poison_pages(page, order, init);
1162
1163 /* Memory is already initialized if KASAN did it internally. */
1164 if (kasan_has_integrated_init())
1165 init = false;
1166 }
1167 if (init)
1168 kernel_init_pages(page, 1 << order);
1169
1170 /*
1171 * arch_free_page() can make the page's contents inaccessible. s390
1172 * does this. So nothing which can access the page's contents should
1173 * happen after this.
1174 */
1175 arch_free_page(page, order);
1176
1177 debug_pagealloc_unmap_pages(page, 1 << order);
1178
1179 return true;
1180 }
1181
1182 /*
1183 * Frees a number of pages from the PCP lists
1184 * Assumes all pages on list are in same zone.
1185 * count is the number of pages to free.
1186 */
1187 static void free_pcppages_bulk(struct zone *zone, int count,
1188 struct per_cpu_pages *pcp,
1189 int pindex)
1190 {
1191 unsigned long flags;
1192 unsigned int order;
1193 bool isolated_pageblocks;
1194 struct page *page;
1195
1196 /*
1197 * Ensure a proper count is passed; otherwise we would get stuck in the
1198 * while (list_empty(list)) loop below.
1199 */
1200 count = min(pcp->count, count);
1201
1202 /* Ensure requested pindex is drained first. */
1203 pindex = pindex - 1;
1204
1205 spin_lock_irqsave(&zone->lock, flags);
1206 isolated_pageblocks = has_isolate_pageblock(zone);
1207
1208 while (count > 0) {
1209 struct list_head *list;
1210 int nr_pages;
1211
1212 /* Remove pages from lists in a round-robin fashion. */
1213 do {
1214 if (++pindex > NR_PCP_LISTS - 1)
1215 pindex = 0;
1216 list = &pcp->lists[pindex];
1217 } while (list_empty(list));
1218
1219 order = pindex_to_order(pindex);
1220 nr_pages = 1 << order;
1221 do {
1222 int mt;
1223
1224 page = list_last_entry(list, struct page, pcp_list);
1225 mt = get_pcppage_migratetype(page);
1226
1227 /* must delete to avoid corrupting pcp list */
1228 list_del(&page->pcp_list);
1229 count -= nr_pages;
1230 pcp->count -= nr_pages;
1231
1232 /* MIGRATE_ISOLATE page should not go to pcplists */
1233 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1234 /* Pageblock could have been isolated meanwhile */
1235 if (unlikely(isolated_pageblocks))
1236 mt = get_pageblock_migratetype(page);
1237
1238 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1239 trace_mm_page_pcpu_drain(page, order, mt);
1240 } while (count > 0 && !list_empty(list));
1241 }
1242
1243 spin_unlock_irqrestore(&zone->lock, flags);
1244 }
1245
1246 static void free_one_page(struct zone *zone,
1247 struct page *page, unsigned long pfn,
1248 unsigned int order,
1249 int migratetype, fpi_t fpi_flags)
1250 {
1251 unsigned long flags;
1252
1253 spin_lock_irqsave(&zone->lock, flags);
1254 if (unlikely(has_isolate_pageblock(zone) ||
1255 is_migrate_isolate(migratetype))) {
1256 migratetype = get_pfnblock_migratetype(page, pfn);
1257 }
1258 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1259 spin_unlock_irqrestore(&zone->lock, flags);
1260 }
1261
1262 static void __free_pages_ok(struct page *page, unsigned int order,
1263 fpi_t fpi_flags)
1264 {
1265 unsigned long flags;
1266 int migratetype;
1267 unsigned long pfn = page_to_pfn(page);
1268 struct zone *zone = page_zone(page);
1269
1270 if (!free_pages_prepare(page, order, fpi_flags))
1271 return;
1272
1273 /*
1274 * get_pfnblock_migratetype() is called without spin_lock_irqsave() here
1275 * so that the migratetype lookup is not done under the zone lock.
1276 * This reduces the lock holding time.
1277 */
1278 migratetype = get_pfnblock_migratetype(page, pfn);
1279
1280 spin_lock_irqsave(&zone->lock, flags);
1281 if (unlikely(has_isolate_pageblock(zone) ||
1282 is_migrate_isolate(migratetype))) {
1283 migratetype = get_pfnblock_migratetype(page, pfn);
1284 }
1285 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1286 spin_unlock_irqrestore(&zone->lock, flags);
1287
1288 __count_vm_events(PGFREE, 1 << order);
1289 }
1290
1291 void __free_pages_core(struct page *page, unsigned int order)
1292 {
1293 unsigned int nr_pages = 1 << order;
1294 struct page *p = page;
1295 unsigned int loop;
1296
1297 /*
1298 * When initializing the memmap, __init_single_page() sets the refcount
1299 * of all pages to 1 ("allocated"/"not free"). We have to set the
1300 * refcount of all involved pages to 0.
1301 */
1302 prefetchw(p);
1303 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1304 prefetchw(p + 1);
1305 __ClearPageReserved(p);
1306 set_page_count(p, 0);
1307 }
1308 __ClearPageReserved(p);
1309 set_page_count(p, 0);
1310
1311 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1312
1313 if (page_contains_unaccepted(page, order)) {
1314 if (order == MAX_ORDER && __free_unaccepted(page))
1315 return;
1316
1317 accept_page(page, order);
1318 }
1319
1320 /*
1321 * Bypass PCP and place fresh pages right to the tail, primarily
1322 * relevant for memory onlining.
1323 */
1324 __free_pages_ok(page, order, FPI_TO_TAIL);
1325 }
1326
1327 /*
1328 * Check that the whole (or subset of) a pageblock given by the interval of
1329 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1330 * with the migration of free compaction scanner.
1331 *
1332 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1333 *
1334 * It's possible on some configurations to have a setup like node0 node1 node0
1335 * i.e. it's possible that not all pages within a zone's range of pages
1336 * belong to a single zone. We assume that a border between node0 and node1
1337 * can occur within a single pageblock, but not a node0 node1 node0
1338 * interleaving within a single pageblock. It is therefore sufficient to check
1339 * the first and last page of a pageblock and avoid checking each individual
1340 * page in a pageblock.
1341 *
1342 * Note: the function may return non-NULL struct page even for a page block
1343 * which contains a memory hole (i.e. there is no physical memory for a subset
1344 * of the pfn range). For example, a pageblock whose order is MAX_ORDER will
1345 * fall into 2 sub-sections, and the end pfn of the pageblock may be a hole
1346 * even though the start pfn is online and valid. This should be safe most of
1347 * the time because struct pages are still initialized via init_unavailable_range()
1348 * and pfn walkers shouldn't touch any physical memory range for which they do
1349 * not recognize any specific metadata in struct pages.
1350 */
1351 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1352 unsigned long end_pfn, struct zone *zone)
1353 {
1354 struct page *start_page;
1355 struct page *end_page;
1356
1357 /* end_pfn is one past the range we are checking */
1358 end_pfn--;
1359
1360 if (!pfn_valid(end_pfn))
1361 return NULL;
1362
1363 start_page = pfn_to_online_page(start_pfn);
1364 if (!start_page)
1365 return NULL;
1366
1367 if (page_zone(start_page) != zone)
1368 return NULL;
1369
1370 end_page = pfn_to_page(end_pfn);
1371
1372 /* This gives a shorter code than deriving page_zone(end_page) */
1373 if (page_zone_id(start_page) != page_zone_id(end_page))
1374 return NULL;
1375
1376 return start_page;
1377 }
1378
1379 /*
1380 * The order of subdivision here is critical for the IO subsystem.
1381 * Please do not alter this order without good reasons and regression
1382 * testing. Specifically, as large blocks of memory are subdivided,
1383 * the order in which smaller blocks are delivered depends on the order
1384 * they're subdivided in this function. This is the primary factor
1385 * influencing the order in which pages are delivered to the IO
1386 * subsystem according to empirical testing, and this is also justified
1387 * by considering the behavior of a buddy system containing a single
1388 * large block of memory acted on by a series of small allocations.
1389 * This behavior is a critical factor in sglist merging's success.
1390 *
1391 * -- nyc
1392 */
1393 static inline void expand(struct zone *zone, struct page *page,
1394 int low, int high, int migratetype)
1395 {
1396 unsigned long size = 1 << high;
1397
1398 while (high > low) {
1399 high--;
1400 size >>= 1;
1401 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1402
1403 /*
1404 * Mark as guard page(s); this allows the block to be
1405 * merged back into the allocator when the buddy is freed.
1406 * Corresponding page table entries will not be touched;
1407 * the pages will stay not present in the virtual address space.
1408 */
1409 if (set_page_guard(zone, &page[size], high, migratetype))
1410 continue;
1411
1412 add_to_free_list(&page[size], zone, high, migratetype);
1413 set_buddy_order(&page[size], high);
1414 }
1415 }
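/*
 * Worked example (illustrative): satisfying an order-2 request from an
 * order-5 free block of 32 pages splits off the upper halves step by step:
 * an order-4 block (pages 16..31), an order-3 block (pages 8..15) and an
 * order-2 block (pages 4..7) go back onto the free lists, while pages 0..3
 * are handed to the caller.
 */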
1416
1417 static void check_new_page_bad(struct page *page)
1418 {
1419 if (unlikely(page->flags & __PG_HWPOISON)) {
1420 /* Don't complain about hwpoisoned pages */
1421 page_mapcount_reset(page); /* remove PageBuddy */
1422 return;
1423 }
1424
1425 bad_page(page,
1426 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
1427 }
1428
1429 /*
1430 * This page is about to be returned from the page allocator
1431 */
check_new_page(struct page * page)1432 static int check_new_page(struct page *page)
1433 {
1434 if (likely(page_expected_state(page,
1435 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1436 return 0;
1437
1438 check_new_page_bad(page);
1439 return 1;
1440 }
1441
1442 static inline bool check_new_pages(struct page *page, unsigned int order)
1443 {
1444 if (is_check_pages_enabled()) {
1445 for (int i = 0; i < (1 << order); i++) {
1446 struct page *p = page + i;
1447
1448 if (check_new_page(p))
1449 return true;
1450 }
1451 }
1452
1453 return false;
1454 }
1455
1456 static inline bool should_skip_kasan_unpoison(gfp_t flags)
1457 {
1458 /* Don't skip if a software KASAN mode is enabled. */
1459 if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
1460 IS_ENABLED(CONFIG_KASAN_SW_TAGS))
1461 return false;
1462
1463 /* Skip, if hardware tag-based KASAN is not enabled. */
1464 if (!kasan_hw_tags_enabled())
1465 return true;
1466
1467 /*
1468 * With hardware tag-based KASAN enabled, skip if this has been
1469 * requested via __GFP_SKIP_KASAN.
1470 */
1471 return flags & __GFP_SKIP_KASAN;
1472 }
1473
1474 static inline bool should_skip_init(gfp_t flags)
1475 {
1476 /* Don't skip, if hardware tag-based KASAN is not enabled. */
1477 if (!kasan_hw_tags_enabled())
1478 return false;
1479
1480 /* For hardware tag-based KASAN, skip if requested. */
1481 return (flags & __GFP_SKIP_ZERO);
1482 }
1483
1484 inline void post_alloc_hook(struct page *page, unsigned int order,
1485 gfp_t gfp_flags)
1486 {
1487 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
1488 !should_skip_init(gfp_flags);
1489 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
1490 int i;
1491
1492 set_page_private(page, 0);
1493 set_page_refcounted(page);
1494
1495 arch_alloc_page(page, order);
1496 debug_pagealloc_map_pages(page, 1 << order);
1497
1498 /*
1499 * Page unpoisoning must happen before memory initialization.
1500 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
1501 * allocations and the page unpoisoning code will complain.
1502 */
1503 kernel_unpoison_pages(page, 1 << order);
1504
1505 /*
1506 * As memory initialization might be integrated into KASAN,
1507 * KASAN unpoisoning and memory initialization code must be
1508 * kept together to avoid discrepancies in behavior.
1509 */
1510
1511 /*
1512 * If memory tags should be zeroed
1513 * (which happens only when memory should be initialized as well).
1514 */
1515 if (zero_tags) {
1516 /* Initialize both memory and memory tags. */
1517 for (i = 0; i != 1 << order; ++i)
1518 tag_clear_highpage(page + i);
1519
1520 /* Take note that memory was initialized by the loop above. */
1521 init = false;
1522 }
1523 if (!should_skip_kasan_unpoison(gfp_flags) &&
1524 kasan_unpoison_pages(page, order, init)) {
1525 /* Take note that memory was initialized by KASAN. */
1526 if (kasan_has_integrated_init())
1527 init = false;
1528 } else {
1529 /*
1530 * If memory tags have not been set by KASAN, reset the page
1531 * tags to ensure page_address() dereferencing does not fault.
1532 */
1533 for (i = 0; i != 1 << order; ++i)
1534 page_kasan_tag_reset(page + i);
1535 }
1536 /* If memory is still not initialized, initialize it now. */
1537 if (init)
1538 kernel_init_pages(page, 1 << order);
1539
1540 set_page_owner(page, order, gfp_flags);
1541 page_table_check_alloc(page, order);
1542 }
1543
1544 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1545 unsigned int alloc_flags)
1546 {
1547 post_alloc_hook(page, order, gfp_flags);
1548
1549 if (order && (gfp_flags & __GFP_COMP))
1550 prep_compound_page(page, order);
1551
1552 /*
1553 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1554 * allocate the page. The expectation is that the caller is taking
1555 * steps that will free more memory. The caller should avoid the page
1556 * being used for !PFMEMALLOC purposes.
1557 */
1558 if (alloc_flags & ALLOC_NO_WATERMARKS)
1559 set_page_pfmemalloc(page);
1560 else
1561 clear_page_pfmemalloc(page);
1562 }
1563
1564 /*
1565 * Go through the free lists for the given migratetype and remove
1566 * the smallest available page from the freelists
1567 */
1568 static __always_inline
1569 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1570 int migratetype)
1571 {
1572 unsigned int current_order;
1573 struct free_area *area;
1574 struct page *page;
1575
1576 /* Find a page of the appropriate size in the preferred list */
1577 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
1578 area = &(zone->free_area[current_order]);
1579 page = get_page_from_free_area(area, migratetype);
1580 if (!page)
1581 continue;
1582 del_page_from_free_list(page, zone, current_order);
1583 expand(zone, page, order, current_order, migratetype);
1584 set_pcppage_migratetype(page, migratetype);
1585 trace_mm_page_alloc_zone_locked(page, order, migratetype,
1586 pcp_allowed_order(order) &&
1587 migratetype < MIGRATE_PCPTYPES);
1588 return page;
1589 }
1590
1591 return NULL;
1592 }
1593
1594
1595 /*
1596 * This array describes the order in which free lists are fallen back to
1597 * when the free lists for the desired migratetype are depleted.
1598 *
1599 * The other migratetypes do not have fallbacks.
1600 */
1601 static int fallbacks[MIGRATE_TYPES][MIGRATE_PCPTYPES - 1] = {
1602 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
1603 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
1604 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
1605 };
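/*
 * Example (illustrative): a MIGRATE_UNMOVABLE request that finds its own
 * free lists empty tries MIGRATE_RECLAIMABLE pageblocks first and
 * MIGRATE_MOVABLE ones last; HIGHATOMIC, CMA and ISOLATE have no entry here
 * and are handled separately.
 */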
1606
1607 #ifdef CONFIG_CMA
1608 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1609 unsigned int order)
1610 {
1611 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1612 }
1613 #else
1614 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1615 unsigned int order) { return NULL; }
1616 #endif
1617
1618 /*
1619 * Move the free pages in a range to the freelist tail of the requested type.
1620 * Note that start_pfn and end_pfn are not aligned on a pageblock
1621 * boundary. If alignment is required, use move_freepages_block()
1622 */
1623 static int move_freepages(struct zone *zone,
1624 unsigned long start_pfn, unsigned long end_pfn,
1625 int migratetype, int *num_movable)
1626 {
1627 struct page *page;
1628 unsigned long pfn;
1629 unsigned int order;
1630 int pages_moved = 0;
1631
1632 for (pfn = start_pfn; pfn <= end_pfn;) {
1633 page = pfn_to_page(pfn);
1634 if (!PageBuddy(page)) {
1635 /*
1636 * We assume that pages that could be isolated for
1637 * migration are movable. But we don't actually try
1638 * isolating, as that would be expensive.
1639 */
1640 if (num_movable &&
1641 (PageLRU(page) || __PageMovable(page)))
1642 (*num_movable)++;
1643 pfn++;
1644 continue;
1645 }
1646
1647 /* Make sure we are not inadvertently changing nodes */
1648 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1649 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1650
1651 order = buddy_order(page);
1652 move_to_free_list(page, zone, order, migratetype);
1653 pfn += 1 << order;
1654 pages_moved += 1 << order;
1655 }
1656
1657 return pages_moved;
1658 }
1659
1660 int move_freepages_block(struct zone *zone, struct page *page,
1661 int migratetype, int *num_movable)
1662 {
1663 unsigned long start_pfn, end_pfn, pfn;
1664
1665 if (num_movable)
1666 *num_movable = 0;
1667
1668 pfn = page_to_pfn(page);
1669 start_pfn = pageblock_start_pfn(pfn);
1670 end_pfn = pageblock_end_pfn(pfn) - 1;
1671
1672 /* Do not cross zone boundaries */
1673 if (!zone_spans_pfn(zone, start_pfn))
1674 start_pfn = pfn;
1675 if (!zone_spans_pfn(zone, end_pfn))
1676 return 0;
1677
1678 return move_freepages(zone, start_pfn, end_pfn, migratetype,
1679 num_movable);
1680 }
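/*
 * The boundary handling above is deliberately asymmetric: if the pageblock
 * begins outside the zone, the start of the range is clipped to the given
 * page's pfn, but if the pageblock runs past the end of the zone nothing is
 * moved and 0 is returned.
 */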
1681
1682 static void change_pageblock_range(struct page *pageblock_page,
1683 int start_order, int migratetype)
1684 {
1685 int nr_pageblocks = 1 << (start_order - pageblock_order);
1686
1687 while (nr_pageblocks--) {
1688 set_pageblock_migratetype(pageblock_page, migratetype);
1689 pageblock_page += pageblock_nr_pages;
1690 }
1691 }
1692
1693 /*
1694 * When we are falling back to another migratetype during allocation, try to
1695 * steal extra free pages from the same pageblocks to satisfy further
1696 * allocations, instead of polluting multiple pageblocks.
1697 *
1698 * If we are stealing a relatively large buddy page, it is likely there will
1699 * be more free pages in the pageblock, so try to steal them all. For
1700 * reclaimable and unmovable allocations, we steal regardless of page size,
1701 * as fragmentation caused by those allocations polluting movable pageblocks
1702 * is worse than movable allocations stealing from unmovable and reclaimable
1703 * pageblocks.
1704 */
1705 static bool can_steal_fallback(unsigned int order, int start_mt)
1706 {
1707 /*
1708 	 * Keeping this order check is intentional, even though the check
1709 	 * below is more relaxed. The reason is that we can actually steal a
1710 	 * whole pageblock when this condition is met, whereas the check below
1711 	 * is only a heuristic that does not guarantee it and may be changed
1712 	 * at any time.
1713 */
1714 if (order >= pageblock_order)
1715 return true;
1716
1717 if (order >= pageblock_order / 2 ||
1718 start_mt == MIGRATE_RECLAIMABLE ||
1719 start_mt == MIGRATE_UNMOVABLE ||
1720 page_group_by_mobility_disabled)
1721 return true;
1722
1723 return false;
1724 }
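/*
 * Worked example (assuming pageblock_order == 9, i.e. 2MB pageblocks with
 * 4K pages): requests of order 9 or more may always steal the whole block,
 * and so may any request of order 4 (9 / 2) or higher, or any RECLAIMABLE
 * or UNMOVABLE request regardless of order.
 */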
1725
1726 static inline bool boost_watermark(struct zone *zone)
1727 {
1728 unsigned long max_boost;
1729
1730 if (!watermark_boost_factor)
1731 return false;
1732 /*
1733 * Don't bother in zones that are unlikely to produce results.
1734 * On small machines, including kdump capture kernels running
1735 * in a small area, boosting the watermark can cause an out of
1736 * memory situation immediately.
1737 */
1738 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
1739 return false;
1740
1741 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
1742 watermark_boost_factor, 10000);
1743
1744 /*
1745 * high watermark may be uninitialised if fragmentation occurs
1746 * very early in boot so do not boost. We do not fall
1747 * through and boost by pageblock_nr_pages as failing
1748 * allocations that early means that reclaim is not going
1749 * to help and it may even be impossible to reclaim the
1750 * boosted watermark resulting in a hang.
1751 */
1752 if (!max_boost)
1753 return false;
1754
1755 max_boost = max(pageblock_nr_pages, max_boost);
1756
1757 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
1758 max_boost);
1759
1760 return true;
1761 }
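/*
 * Worked example (illustrative numbers only): with a high watermark of
 * 32768 pages and watermark_boost_factor set to 15000, max_boost is
 * 32768 * 15000 / 10000 = 49152 pages; each fallback event then raises
 * zone->watermark_boost by pageblock_nr_pages (512 pages for order-9
 * pageblocks) until that cap is reached.
 */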
1762
1763 /*
1764 * This function implements actual steal behaviour. If order is large enough,
1765 * we can steal whole pageblock. If not, we first move freepages in this
1766 * pageblock to our migratetype and determine how many already-allocated pages
1767 * are there in the pageblock with a compatible migratetype. If at least half
1768 * of pages are free or compatible, we can change migratetype of the pageblock
1769 * itself, so pages freed in the future will be put on the correct free list.
1770 */
1771 static void steal_suitable_fallback(struct zone *zone, struct page *page,
1772 unsigned int alloc_flags, int start_type, bool whole_block)
1773 {
1774 unsigned int current_order = buddy_order(page);
1775 int free_pages, movable_pages, alike_pages;
1776 int old_block_type;
1777
1778 old_block_type = get_pageblock_migratetype(page);
1779
1780 /*
1781 * This can happen due to races and we want to prevent broken
1782 * highatomic accounting.
1783 */
1784 if (is_migrate_highatomic(old_block_type))
1785 goto single_page;
1786
1787 /* Take ownership for orders >= pageblock_order */
1788 if (current_order >= pageblock_order) {
1789 change_pageblock_range(page, current_order, start_type);
1790 goto single_page;
1791 }
1792
1793 /*
1794 * Boost watermarks to increase reclaim pressure to reduce the
1795 * likelihood of future fallbacks. Wake kswapd now as the node
1796 * may be balanced overall and kswapd will not wake naturally.
1797 */
1798 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
1799 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
1800
1801 /* We are not allowed to try stealing from the whole block */
1802 if (!whole_block)
1803 goto single_page;
1804
1805 free_pages = move_freepages_block(zone, page, start_type,
1806 &movable_pages);
1807 /* moving whole block can fail due to zone boundary conditions */
1808 if (!free_pages)
1809 goto single_page;
1810
1811 /*
1812 * Determine how many pages are compatible with our allocation.
1813 * For movable allocation, it's the number of movable pages which
1814 * we just obtained. For other types it's a bit more tricky.
1815 */
1816 if (start_type == MIGRATE_MOVABLE) {
1817 alike_pages = movable_pages;
1818 } else {
1819 /*
1820 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
1821 * to MOVABLE pageblock, consider all non-movable pages as
1822 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
1823 * vice versa, be conservative since we can't distinguish the
1824 * exact migratetype of non-movable pages.
1825 */
1826 if (old_block_type == MIGRATE_MOVABLE)
1827 alike_pages = pageblock_nr_pages
1828 - (free_pages + movable_pages);
1829 else
1830 alike_pages = 0;
1831 }
1832 /*
1833 * If a sufficient number of pages in the block are either free or of
1834 * compatible migratability as our allocation, claim the whole block.
1835 */
1836 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
1837 page_group_by_mobility_disabled)
1838 set_pageblock_migratetype(page, start_type);
1839
1840 return;
1841
1842 single_page:
1843 move_to_free_list(page, zone, current_order, start_type);
1844 }
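/*
 * In other words, the pageblock's migratetype is only rewritten when at
 * least half of it (1 << (pageblock_order - 1) pages, e.g. 256 of 512 for
 * order-9 pageblocks) is free or already occupied by compatible pages, or
 * when page grouping by mobility is disabled; otherwise the pages are moved
 * to the new free list but the block keeps its old type.
 */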
1845
1846 /*
1847 * Check whether there is a suitable fallback freepage with requested order.
1848 * If only_stealable is true, this function returns fallback_mt only if
1849 * we can steal other freepages all together. This would help to reduce
1850 * fragmentation due to mixed migratetype pages in one pageblock.
1851 */
1852 int find_suitable_fallback(struct free_area *area, unsigned int order,
1853 int migratetype, bool only_stealable, bool *can_steal)
1854 {
1855 int i;
1856 int fallback_mt;
1857
1858 if (area->nr_free == 0)
1859 return -1;
1860
1861 *can_steal = false;
1862 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
1863 fallback_mt = fallbacks[migratetype][i];
1864 if (free_area_empty(area, fallback_mt))
1865 continue;
1866
1867 if (can_steal_fallback(order, migratetype))
1868 *can_steal = true;
1869
1870 if (!only_stealable)
1871 return fallback_mt;
1872
1873 if (*can_steal)
1874 return fallback_mt;
1875 }
1876
1877 return -1;
1878 }
1879
1880 /*
1881 * Reserve a pageblock for exclusive use of high-order atomic allocations if
1882 * there are no empty page blocks that contain a page with a suitable order
1883 */
1884 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
1885 {
1886 int mt;
1887 unsigned long max_managed, flags;
1888
1889 /*
1890 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
1891 * Check is race-prone but harmless.
1892 */
1893 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
1894 if (zone->nr_reserved_highatomic >= max_managed)
1895 return;
1896
1897 spin_lock_irqsave(&zone->lock, flags);
1898
1899 /* Recheck the nr_reserved_highatomic limit under the lock */
1900 if (zone->nr_reserved_highatomic >= max_managed)
1901 goto out_unlock;
1902
1903 /* Yoink! */
1904 mt = get_pageblock_migratetype(page);
1905 /* Only reserve normal pageblocks (i.e., they can merge with others) */
1906 if (migratetype_is_mergeable(mt)) {
1907 zone->nr_reserved_highatomic += pageblock_nr_pages;
1908 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
1909 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
1910 }
1911
1912 out_unlock:
1913 spin_unlock_irqrestore(&zone->lock, flags);
1914 }
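/*
 * Rough sizing example (illustrative only): for a zone with about 1M
 * managed pages (~4GB of 4K pages), max_managed is roughly 10500 + 512
 * pages, so on the order of twenty order-9 pageblocks can end up reserved
 * for high-order atomic allocations.
 */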
1915
1916 /*
1917 * Used when an allocation is about to fail under memory pressure. This
1918 * potentially hurts the reliability of high-order allocations when under
1919 * intense memory pressure but failed atomic allocations should be easier
1920 * to recover from than an OOM.
1921 *
1922 * If @force is true, try to unreserve a pageblock even though highatomic
1923 * pageblock is exhausted.
1924 */
1925 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
1926 bool force)
1927 {
1928 struct zonelist *zonelist = ac->zonelist;
1929 unsigned long flags;
1930 struct zoneref *z;
1931 struct zone *zone;
1932 struct page *page;
1933 int order;
1934 bool ret;
1935
1936 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
1937 ac->nodemask) {
1938 /*
1939 * Preserve at least one pageblock unless memory pressure
1940 * is really high.
1941 */
1942 if (!force && zone->nr_reserved_highatomic <=
1943 pageblock_nr_pages)
1944 continue;
1945
1946 spin_lock_irqsave(&zone->lock, flags);
1947 for (order = 0; order < NR_PAGE_ORDERS; order++) {
1948 struct free_area *area = &(zone->free_area[order]);
1949
1950 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
1951 if (!page)
1952 continue;
1953
1954 /*
1955 			 * In the page freeing path, migratetype changes are racy,
1956 			 * so we may encounter several free pages in a pageblock
1957 			 * in this loop even though we changed the pageblock type
1958 			 * from highatomic to ac->migratetype. So we should only
1959 			 * adjust the count once.
1960 */
1961 if (is_migrate_highatomic_page(page)) {
1962 /*
1963 * It should never happen but changes to
1964 * locking could inadvertently allow a per-cpu
1965 * drain to add pages to MIGRATE_HIGHATOMIC
1966 * while unreserving so be safe and watch for
1967 * underflows.
1968 */
1969 zone->nr_reserved_highatomic -= min(
1970 pageblock_nr_pages,
1971 zone->nr_reserved_highatomic);
1972 }
1973
1974 /*
1975 * Convert to ac->migratetype and avoid the normal
1976 * pageblock stealing heuristics. Minimally, the caller
1977 * is doing the work and needs the pages. More
1978 * importantly, if the block was always converted to
1979 * MIGRATE_UNMOVABLE or another type then the number
1980 * of pageblocks that cannot be completely freed
1981 * may increase.
1982 */
1983 set_pageblock_migratetype(page, ac->migratetype);
1984 ret = move_freepages_block(zone, page, ac->migratetype,
1985 NULL);
1986 if (ret) {
1987 spin_unlock_irqrestore(&zone->lock, flags);
1988 return ret;
1989 }
1990 }
1991 spin_unlock_irqrestore(&zone->lock, flags);
1992 }
1993
1994 return false;
1995 }
1996
1997 /*
1998 * Try finding a free buddy page on the fallback list and put it on the free
1999 * list of requested migratetype, possibly along with other pages from the same
2000 * block, depending on fragmentation avoidance heuristics. Returns true if
2001 * fallback was found so that __rmqueue_smallest() can grab it.
2002 *
2003 * The use of signed ints for order and current_order is a deliberate
2004 * deviation from the rest of this file, to make the for loop
2005 * condition simpler.
2006 */
2007 static __always_inline bool
2008 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2009 unsigned int alloc_flags)
2010 {
2011 struct free_area *area;
2012 int current_order;
2013 int min_order = order;
2014 struct page *page;
2015 int fallback_mt;
2016 bool can_steal;
2017
2018 /*
2019 * Do not steal pages from freelists belonging to other pageblocks
2020 * i.e. orders < pageblock_order. If there are no local zones free,
2021 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2022 */
2023 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
2024 min_order = pageblock_order;
2025
2026 /*
2027 * Find the largest available free page in the other list. This roughly
2028 * approximates finding the pageblock with the most free pages, which
2029 * would be too costly to do exactly.
2030 */
2031 for (current_order = MAX_ORDER; current_order >= min_order;
2032 --current_order) {
2033 area = &(zone->free_area[current_order]);
2034 fallback_mt = find_suitable_fallback(area, current_order,
2035 start_migratetype, false, &can_steal);
2036 if (fallback_mt == -1)
2037 continue;
2038
2039 /*
2040 * We cannot steal all free pages from the pageblock and the
2041 * requested migratetype is movable. In that case it's better to
2042 * steal and split the smallest available page instead of the
2043 * largest available page, because even if the next movable
2044 * allocation falls back into a different pageblock than this
2045 * one, it won't cause permanent fragmentation.
2046 */
2047 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2048 && current_order > order)
2049 goto find_smallest;
2050
2051 goto do_steal;
2052 }
2053
2054 return false;
2055
2056 find_smallest:
2057 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
2058 area = &(zone->free_area[current_order]);
2059 fallback_mt = find_suitable_fallback(area, current_order,
2060 start_migratetype, false, &can_steal);
2061 if (fallback_mt != -1)
2062 break;
2063 }
2064
2065 /*
2066 * This should not happen - we already found a suitable fallback
2067 * when looking for the largest page.
2068 */
2069 VM_BUG_ON(current_order > MAX_ORDER);
2070
2071 do_steal:
2072 page = get_page_from_free_area(area, fallback_mt);
2073
2074 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2075 can_steal);
2076
2077 trace_mm_page_alloc_extfrag(page, order, current_order,
2078 start_migratetype, fallback_mt);
2079
2080 return true;
2081
2082 }
2083
2084 /*
2085 * Do the hard work of removing an element from the buddy allocator.
2086 * Call me with the zone->lock already held.
2087 */
2088 static __always_inline struct page *
2089 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2090 unsigned int alloc_flags)
2091 {
2092 struct page *page;
2093
2094 if (IS_ENABLED(CONFIG_CMA)) {
2095 /*
2096 * Balance movable allocations between regular and CMA areas by
2097 * allocating from CMA when over half of the zone's free memory
2098 * is in the CMA area.
2099 */
2100 if (alloc_flags & ALLOC_CMA &&
2101 zone_page_state(zone, NR_FREE_CMA_PAGES) >
2102 zone_page_state(zone, NR_FREE_PAGES) / 2) {
2103 page = __rmqueue_cma_fallback(zone, order);
2104 if (page)
2105 return page;
2106 }
2107 }
2108 retry:
2109 page = __rmqueue_smallest(zone, order, migratetype);
2110 if (unlikely(!page)) {
2111 if (alloc_flags & ALLOC_CMA)
2112 page = __rmqueue_cma_fallback(zone, order);
2113
2114 if (!page && __rmqueue_fallback(zone, order, migratetype,
2115 alloc_flags))
2116 goto retry;
2117 }
2118 return page;
2119 }
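/*
 * Order of preference above: movable allocations may be steered to CMA
 * first when CMA holds more than half of the zone's free pages, then the
 * requested migratetype is tried, then CMA again as a fallback, and only
 * then do we steal from another migratetype's free lists via
 * __rmqueue_fallback() and retry.
 */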
2120
2121 /*
2122 * Obtain a specified number of elements from the buddy allocator, all under
2123 * a single hold of the lock, for efficiency. Add them to the supplied list.
2124 * Returns the number of new pages which were placed at *list.
2125 */
2126 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2127 unsigned long count, struct list_head *list,
2128 int migratetype, unsigned int alloc_flags)
2129 {
2130 unsigned long flags;
2131 int i;
2132
2133 spin_lock_irqsave(&zone->lock, flags);
2134 for (i = 0; i < count; ++i) {
2135 struct page *page = __rmqueue(zone, order, migratetype,
2136 alloc_flags);
2137 if (unlikely(page == NULL))
2138 break;
2139
2140 /*
2141 		 * Split buddy pages returned by expand() are received here in
2142 		 * physical page order. The page is added to the tail of the
2143 		 * caller's list. From the caller's perspective, the linked list
2144 		 * is therefore ordered by page number under some conditions.
2145 		 * This is useful for IO devices that read forward from the
2146 		 * head of the list, and thus also in physical page order, and
2147 		 * for IO devices that can merge IO requests if the physical
2148 		 * pages are ordered properly.
2149 */
2150 list_add_tail(&page->pcp_list, list);
2151 if (is_migrate_cma(get_pcppage_migratetype(page)))
2152 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2153 -(1 << order));
2154 }
2155
2156 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2157 spin_unlock_irqrestore(&zone->lock, flags);
2158
2159 return i;
2160 }
2161
2162 #ifdef CONFIG_NUMA
2163 /*
2164 * Called from the vmstat counter updater to drain pagesets of this
2165 * currently executing processor on remote nodes after they have
2166 * expired.
2167 */
2168 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2169 {
2170 int to_drain, batch;
2171
2172 batch = READ_ONCE(pcp->batch);
2173 to_drain = min(pcp->count, batch);
2174 if (to_drain > 0) {
2175 spin_lock(&pcp->lock);
2176 free_pcppages_bulk(zone, to_drain, pcp, 0);
2177 spin_unlock(&pcp->lock);
2178 }
2179 }
2180 #endif
2181
2182 /*
2183 * Drain pcplists of the indicated processor and zone.
2184 */
2185 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2186 {
2187 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2188 int count;
2189
2190 do {
2191 spin_lock(&pcp->lock);
2192 count = pcp->count;
2193 if (count) {
2194 int to_drain = min(count,
2195 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
2196
2197 free_pcppages_bulk(zone, to_drain, pcp, 0);
2198 count -= to_drain;
2199 }
2200 spin_unlock(&pcp->lock);
2201 } while (count);
2202 }
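/*
 * Note that the drain above proceeds in chunks of at most
 * pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX pages per lock acquisition, so a
 * very large pcplist does not hold the pcp lock for an unbounded time.
 */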
2203
2204 /*
2205 * Drain pcplists of all zones on the indicated processor.
2206 */
2207 static void drain_pages(unsigned int cpu)
2208 {
2209 struct zone *zone;
2210
2211 for_each_populated_zone(zone) {
2212 drain_pages_zone(cpu, zone);
2213 }
2214 }
2215
2216 /*
2217 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2218 */
2219 void drain_local_pages(struct zone *zone)
2220 {
2221 int cpu = smp_processor_id();
2222
2223 if (zone)
2224 drain_pages_zone(cpu, zone);
2225 else
2226 drain_pages(cpu);
2227 }
2228
2229 /*
2230 * The implementation of drain_all_pages(), exposing an extra parameter to
2231 * drain on all cpus.
2232 *
2233 * drain_all_pages() is optimized to only execute on cpus where pcplists are
2234 * not empty. The check for non-emptiness can however race with a free to
2235 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2236 * that need the guarantee that every CPU has drained can disable the
2237 * optimizing racy check.
2238 */
2239 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
2240 {
2241 int cpu;
2242
2243 /*
2244 * Allocate in the BSS so we won't require allocation in
2245 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2246 */
2247 static cpumask_t cpus_with_pcps;
2248
2249 /*
2250 * Do not drain if one is already in progress unless it's specific to
2251 * a zone. Such callers are primarily CMA and memory hotplug and need
2252 * the drain to be complete when the call returns.
2253 */
2254 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2255 if (!zone)
2256 return;
2257 mutex_lock(&pcpu_drain_mutex);
2258 }
2259
2260 /*
2261 * We don't care about racing with CPU hotplug event
2262 * as offline notification will cause the notified
2263 * cpu to drain that CPU pcps and on_each_cpu_mask
2264 * disables preemption as part of its processing
2265 */
2266 for_each_online_cpu(cpu) {
2267 struct per_cpu_pages *pcp;
2268 struct zone *z;
2269 bool has_pcps = false;
2270
2271 if (force_all_cpus) {
2272 /*
2273 * The pcp.count check is racy, some callers need a
2274 * guarantee that no cpu is missed.
2275 */
2276 has_pcps = true;
2277 } else if (zone) {
2278 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2279 if (pcp->count)
2280 has_pcps = true;
2281 } else {
2282 for_each_populated_zone(z) {
2283 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
2284 if (pcp->count) {
2285 has_pcps = true;
2286 break;
2287 }
2288 }
2289 }
2290
2291 if (has_pcps)
2292 cpumask_set_cpu(cpu, &cpus_with_pcps);
2293 else
2294 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2295 }
2296
2297 for_each_cpu(cpu, &cpus_with_pcps) {
2298 if (zone)
2299 drain_pages_zone(cpu, zone);
2300 else
2301 drain_pages(cpu);
2302 }
2303
2304 mutex_unlock(&pcpu_drain_mutex);
2305 }
2306
2307 /*
2308 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2309 *
2310 * When zone parameter is non-NULL, spill just the single zone's pages.
2311 */
2312 void drain_all_pages(struct zone *zone)
2313 {
2314 __drain_all_pages(zone, false);
2315 }
2316
2317 static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
2318 unsigned int order)
2319 {
2320 int migratetype;
2321
2322 if (!free_pages_prepare(page, order, FPI_NONE))
2323 return false;
2324
2325 migratetype = get_pfnblock_migratetype(page, pfn);
2326 set_pcppage_migratetype(page, migratetype);
2327 return true;
2328 }
2329
2330 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high)
2331 {
2332 int min_nr_free, max_nr_free;
2333 int batch = READ_ONCE(pcp->batch);
2334
2335 /* Free everything if batch freeing high-order pages. */
2336 if (unlikely(free_high))
2337 return pcp->count;
2338
2339 /* Check for PCP disabled or boot pageset */
2340 if (unlikely(high < batch))
2341 return 1;
2342
2343 /* Leave at least pcp->batch pages on the list */
2344 min_nr_free = batch;
2345 max_nr_free = high - batch;
2346
2347 /*
2348 * Double the number of pages freed each time there is subsequent
2349 * freeing of pages without any allocation.
2350 */
2351 batch <<= pcp->free_factor;
2352 if (batch < max_nr_free && pcp->free_factor < CONFIG_PCP_BATCH_SCALE_MAX)
2353 pcp->free_factor++;
2354 batch = clamp(batch, min_nr_free, max_nr_free);
2355
2356 return batch;
2357 }
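/*
 * Worked example (illustrative numbers): with batch = 64, high = 512 and
 * free_factor = 2, the candidate batch is 64 << 2 = 256, which lies inside
 * the [64, 448] clamp, so 256 pages are freed; free_factor is also bumped
 * to 3 (assuming CONFIG_PCP_BATCH_SCALE_MAX permits) for the next burst of
 * frees without intervening allocations.
 */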
2358
2359 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
2360 bool free_high)
2361 {
2362 int high = READ_ONCE(pcp->high);
2363
2364 if (unlikely(!high || free_high))
2365 return 0;
2366
2367 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
2368 return high;
2369
2370 /*
2371 * If reclaim is active, limit the number of pages that can be
2372 * stored on pcp lists
2373 */
2374 return min(READ_ONCE(pcp->batch) << 2, high);
2375 }
2376
2377 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
2378 struct page *page, int migratetype,
2379 unsigned int order)
2380 {
2381 int high;
2382 int pindex;
2383 bool free_high;
2384
2385 __count_vm_events(PGFREE, 1 << order);
2386 pindex = order_to_pindex(migratetype, order);
2387 list_add(&page->pcp_list, &pcp->lists[pindex]);
2388 pcp->count += 1 << order;
2389
2390 /*
2391 * As high-order pages other than THP's stored on PCP can contribute
2392 * to fragmentation, limit the number stored when PCP is heavily
2393 * freeing without allocation. The remainder after bulk freeing
2394 * stops will be drained from vmstat refresh context.
2395 */
2396 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);
2397
2398 high = nr_pcp_high(pcp, zone, free_high);
2399 if (pcp->count >= high) {
2400 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex);
2401 }
2402 }
2403
2404 /*
2405 * Free a pcp page
2406 */
2407 void free_unref_page(struct page *page, unsigned int order)
2408 {
2409 unsigned long __maybe_unused UP_flags;
2410 struct per_cpu_pages *pcp;
2411 struct zone *zone;
2412 unsigned long pfn = page_to_pfn(page);
2413 int migratetype, pcpmigratetype;
2414
2415 if (!free_unref_page_prepare(page, pfn, order))
2416 return;
2417
2418 /*
2419 * We only track unmovable, reclaimable and movable on pcp lists.
2420 * Place ISOLATE pages on the isolated list because they are being
2421 * offlined but treat HIGHATOMIC and CMA as movable pages so we can
2422 * get those areas back if necessary. Otherwise, we may have to free
2423 * excessively into the page allocator
2424 */
2425 migratetype = pcpmigratetype = get_pcppage_migratetype(page);
2426 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
2427 if (unlikely(is_migrate_isolate(migratetype))) {
2428 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
2429 return;
2430 }
2431 pcpmigratetype = MIGRATE_MOVABLE;
2432 }
2433
2434 zone = page_zone(page);
2435 pcp_trylock_prepare(UP_flags);
2436 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2437 if (pcp) {
2438 free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
2439 pcp_spin_unlock(pcp);
2440 } else {
2441 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
2442 }
2443 pcp_trylock_finish(UP_flags);
2444 }
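/*
 * If the pcp spinlock cannot be taken above (e.g. the free happens from IRQ
 * context while this CPU already holds its pcp lock), the page is handed
 * straight back to the buddy allocator via free_one_page() rather than
 * spinning.
 */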
2445
2446 /*
2447 * Free a list of 0-order pages
2448 */
2449 void free_unref_page_list(struct list_head *list)
2450 {
2451 unsigned long __maybe_unused UP_flags;
2452 struct page *page, *next;
2453 struct per_cpu_pages *pcp = NULL;
2454 struct zone *locked_zone = NULL;
2455 int batch_count = 0;
2456 int migratetype;
2457
2458 /* Prepare pages for freeing */
2459 list_for_each_entry_safe(page, next, list, lru) {
2460 unsigned long pfn = page_to_pfn(page);
2461 if (!free_unref_page_prepare(page, pfn, 0)) {
2462 list_del(&page->lru);
2463 continue;
2464 }
2465
2466 /*
2467 * Free isolated pages directly to the allocator, see
2468 * comment in free_unref_page.
2469 */
2470 migratetype = get_pcppage_migratetype(page);
2471 if (unlikely(is_migrate_isolate(migratetype))) {
2472 list_del(&page->lru);
2473 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
2474 continue;
2475 }
2476 }
2477
2478 list_for_each_entry_safe(page, next, list, lru) {
2479 struct zone *zone = page_zone(page);
2480
2481 list_del(&page->lru);
2482 migratetype = get_pcppage_migratetype(page);
2483
2484 /*
2485 * Either different zone requiring a different pcp lock or
2486 * excessive lock hold times when freeing a large list of
2487 * pages.
2488 */
2489 if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
2490 if (pcp) {
2491 pcp_spin_unlock(pcp);
2492 pcp_trylock_finish(UP_flags);
2493 }
2494
2495 batch_count = 0;
2496
2497 /*
2498 * trylock is necessary as pages may be getting freed
2499 * from IRQ or SoftIRQ context after an IO completion.
2500 */
2501 pcp_trylock_prepare(UP_flags);
2502 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2503 if (unlikely(!pcp)) {
2504 pcp_trylock_finish(UP_flags);
2505 free_one_page(zone, page, page_to_pfn(page),
2506 0, migratetype, FPI_NONE);
2507 locked_zone = NULL;
2508 continue;
2509 }
2510 locked_zone = zone;
2511 }
2512
2513 /*
2514 * Non-isolated types over MIGRATE_PCPTYPES get added
2515 * to the MIGRATE_MOVABLE pcp list.
2516 */
2517 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
2518 migratetype = MIGRATE_MOVABLE;
2519
2520 trace_mm_page_free_batched(page);
2521 free_unref_page_commit(zone, pcp, page, migratetype, 0);
2522 batch_count++;
2523 }
2524
2525 if (pcp) {
2526 pcp_spin_unlock(pcp);
2527 pcp_trylock_finish(UP_flags);
2528 }
2529 }
2530
2531 /*
2532 * split_page takes a non-compound higher-order page, and splits it into
2533  * n (1<<order) sub-pages: page[0..n-1].
2534 * Each sub-page must be freed individually.
2535 *
2536 * Note: this is probably too low level an operation for use in drivers.
2537 * Please consult with lkml before using this in your driver.
2538 */
2539 void split_page(struct page *page, unsigned int order)
2540 {
2541 int i;
2542
2543 VM_BUG_ON_PAGE(PageCompound(page), page);
2544 VM_BUG_ON_PAGE(!page_count(page), page);
2545
2546 for (i = 1; i < (1 << order); i++)
2547 set_page_refcounted(page + i);
2548 split_page_owner(page, 1 << order);
2549 split_page_memcg(page, 1 << order);
2550 }
2551 EXPORT_SYMBOL_GPL(split_page);
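/*
 * Typical use (a sketch, not code from this file): a caller that needs four
 * physically contiguous but individually freeable pages can do
 *
 *	page = alloc_pages(GFP_KERNEL, 2);	// non-compound order-2 block
 *	if (page) {
 *		split_page(page, 2);
 *		...
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}
 */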
2552
2553 int __isolate_free_page(struct page *page, unsigned int order)
2554 {
2555 struct zone *zone = page_zone(page);
2556 int mt = get_pageblock_migratetype(page);
2557
2558 if (!is_migrate_isolate(mt)) {
2559 unsigned long watermark;
2560 /*
2561 * Obey watermarks as if the page was being allocated. We can
2562 * emulate a high-order watermark check with a raised order-0
2563 * watermark, because we already know our high-order page
2564 * exists.
2565 */
2566 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
2567 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2568 return 0;
2569
2570 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2571 }
2572
2573 del_page_from_free_list(page, zone, order);
2574
2575 /*
2576 * Set the pageblock if the isolated page is at least half of a
2577 * pageblock
2578 */
2579 if (order >= pageblock_order - 1) {
2580 struct page *endpage = page + (1 << order) - 1;
2581 for (; page < endpage; page += pageblock_nr_pages) {
2582 int mt = get_pageblock_migratetype(page);
2583 /*
2584 * Only change normal pageblocks (i.e., they can merge
2585 * with others)
2586 */
2587 if (migratetype_is_mergeable(mt))
2588 set_pageblock_migratetype(page,
2589 MIGRATE_MOVABLE);
2590 }
2591 }
2592
2593 return 1UL << order;
2594 }
2595
2596 /**
2597 * __putback_isolated_page - Return a now-isolated page back where we got it
2598 * @page: Page that was isolated
2599 * @order: Order of the isolated page
2600 * @mt: The page's pageblock's migratetype
2601 *
2602  * This function is meant to return a page pulled from the free lists via
2603  * __isolate_free_page back to the free list it was pulled from.
2604 */
2605 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
2606 {
2607 struct zone *zone = page_zone(page);
2608
2609 /* zone lock should be held when this function is called */
2610 lockdep_assert_held(&zone->lock);
2611
2612 /* Return isolated page to tail of freelist. */
2613 __free_one_page(page, page_to_pfn(page), zone, order, mt,
2614 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
2615 }
2616
2617 /*
2618 * Update NUMA hit/miss statistics
2619 */
2620 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2621 long nr_account)
2622 {
2623 #ifdef CONFIG_NUMA
2624 enum numa_stat_item local_stat = NUMA_LOCAL;
2625
2626 /* skip numa counters update if numa stats is disabled */
2627 if (!static_branch_likely(&vm_numa_stat_key))
2628 return;
2629
2630 if (zone_to_nid(z) != numa_node_id())
2631 local_stat = NUMA_OTHER;
2632
2633 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
2634 __count_numa_events(z, NUMA_HIT, nr_account);
2635 else {
2636 __count_numa_events(z, NUMA_MISS, nr_account);
2637 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
2638 }
2639 __count_numa_events(z, local_stat, nr_account);
2640 #endif
2641 }
2642
2643 static __always_inline
2644 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
2645 unsigned int order, unsigned int alloc_flags,
2646 int migratetype)
2647 {
2648 struct page *page;
2649 unsigned long flags;
2650
2651 do {
2652 page = NULL;
2653 spin_lock_irqsave(&zone->lock, flags);
2654 if (alloc_flags & ALLOC_HIGHATOMIC)
2655 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2656 if (!page) {
2657 page = __rmqueue(zone, order, migratetype, alloc_flags);
2658
2659 /*
2660 * If the allocation fails, allow OOM handling and
2661 * order-0 (atomic) allocs access to HIGHATOMIC
2662 * reserves as failing now is worse than failing a
2663 * high-order atomic allocation in the future.
2664 */
2665 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK)))
2666 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2667
2668 if (!page) {
2669 spin_unlock_irqrestore(&zone->lock, flags);
2670 return NULL;
2671 }
2672 }
2673 __mod_zone_freepage_state(zone, -(1 << order),
2674 get_pcppage_migratetype(page));
2675 spin_unlock_irqrestore(&zone->lock, flags);
2676 } while (check_new_pages(page, order));
2677
2678 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2679 zone_statistics(preferred_zone, zone, 1);
2680
2681 return page;
2682 }
2683
2684 /* Remove page from the per-cpu list, caller must protect the list */
2685 static inline
2686 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
2687 int migratetype,
2688 unsigned int alloc_flags,
2689 struct per_cpu_pages *pcp,
2690 struct list_head *list)
2691 {
2692 struct page *page;
2693
2694 do {
2695 if (list_empty(list)) {
2696 int batch = READ_ONCE(pcp->batch);
2697 int alloced;
2698
2699 /*
2700 * Scale batch relative to order if batch implies
2701 * free pages can be stored on the PCP. Batch can
2702 * be 1 for small zones or for boot pagesets which
2703 * should never store free pages as the pages may
2704 * belong to arbitrary zones.
2705 */
2706 if (batch > 1)
2707 batch = max(batch >> order, 2);
2708 alloced = rmqueue_bulk(zone, order,
2709 batch, list,
2710 migratetype, alloc_flags);
2711
2712 pcp->count += alloced << order;
2713 if (unlikely(list_empty(list)))
2714 return NULL;
2715 }
2716
2717 page = list_first_entry(list, struct page, pcp_list);
2718 list_del(&page->pcp_list);
2719 pcp->count -= 1 << order;
2720 } while (check_new_pages(page, order));
2721
2722 return page;
2723 }
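/*
 * Refill sizing example (illustrative): with pcp->batch = 63 and an order-3
 * request, the bulk refill above asks for max(63 >> 3, 2) = 7 order-3
 * pages, keeping the amount of memory pulled under the zone lock roughly
 * comparable to an order-0 refill.
 */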
2724
2725 /* Lock and remove page from the per-cpu list */
2726 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2727 struct zone *zone, unsigned int order,
2728 int migratetype, unsigned int alloc_flags)
2729 {
2730 struct per_cpu_pages *pcp;
2731 struct list_head *list;
2732 struct page *page;
2733 unsigned long __maybe_unused UP_flags;
2734
2735 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
2736 pcp_trylock_prepare(UP_flags);
2737 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2738 if (!pcp) {
2739 pcp_trylock_finish(UP_flags);
2740 return NULL;
2741 }
2742
2743 /*
2744 * On allocation, reduce the number of pages that are batch freed.
2745 * See nr_pcp_free() where free_factor is increased for subsequent
2746 * frees.
2747 */
2748 pcp->free_factor >>= 1;
2749 list = &pcp->lists[order_to_pindex(migratetype, order)];
2750 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
2751 pcp_spin_unlock(pcp);
2752 pcp_trylock_finish(UP_flags);
2753 if (page) {
2754 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2755 zone_statistics(preferred_zone, zone, 1);
2756 }
2757 return page;
2758 }
2759
2760 /*
2761 * Allocate a page from the given zone.
2762 * Use pcplists for THP or "cheap" high-order allocations.
2763 */
2764
2765 /*
2766 * Do not instrument rmqueue() with KMSAN. This function may call
2767 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
2768 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
2769 * may call rmqueue() again, which will result in a deadlock.
2770 */
2771 __no_sanitize_memory
2772 static inline
2773 struct page *rmqueue(struct zone *preferred_zone,
2774 struct zone *zone, unsigned int order,
2775 gfp_t gfp_flags, unsigned int alloc_flags,
2776 int migratetype)
2777 {
2778 struct page *page;
2779
2780 /*
2781 * We most definitely don't want callers attempting to
2782 * allocate greater than order-1 page units with __GFP_NOFAIL.
2783 */
2784 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2785
2786 if (likely(pcp_allowed_order(order))) {
2787 page = rmqueue_pcplist(preferred_zone, zone, order,
2788 migratetype, alloc_flags);
2789 if (likely(page))
2790 goto out;
2791 }
2792
2793 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
2794 migratetype);
2795
2796 out:
2797 /* Separate test+clear to avoid unnecessary atomics */
2798 if ((alloc_flags & ALLOC_KSWAPD) &&
2799 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
2800 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2801 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
2802 }
2803
2804 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
2805 return page;
2806 }
2807
2808 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2809 {
2810 return __should_fail_alloc_page(gfp_mask, order);
2811 }
2812 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
2813
2814 static inline long __zone_watermark_unusable_free(struct zone *z,
2815 unsigned int order, unsigned int alloc_flags)
2816 {
2817 long unusable_free = (1 << order) - 1;
2818
2819 /*
2820 * If the caller does not have rights to reserves below the min
2821 * watermark then subtract the high-atomic reserves. This will
2822 * over-estimate the size of the atomic reserve but it avoids a search.
2823 */
2824 if (likely(!(alloc_flags & ALLOC_RESERVES)))
2825 unusable_free += z->nr_reserved_highatomic;
2826
2827 #ifdef CONFIG_CMA
2828 /* If allocation can't use CMA areas don't use free CMA pages */
2829 if (!(alloc_flags & ALLOC_CMA))
2830 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
2831 #endif
2832
2833 return unusable_free;
2834 }
2835
2836 /*
2837 * Return true if free base pages are above 'mark'. For high-order checks it
2838  * will return true if the order-0 watermark is reached and there is at least
2839 * one free page of a suitable size. Checking now avoids taking the zone lock
2840 * to check in the allocation paths if no pages are free.
2841 */
2842 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2843 int highest_zoneidx, unsigned int alloc_flags,
2844 long free_pages)
2845 {
2846 long min = mark;
2847 int o;
2848
2849 /* free_pages may go negative - that's OK */
2850 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
2851
2852 if (unlikely(alloc_flags & ALLOC_RESERVES)) {
2853 /*
2854 * __GFP_HIGH allows access to 50% of the min reserve as well
2855 * as OOM.
2856 */
2857 if (alloc_flags & ALLOC_MIN_RESERVE) {
2858 min -= min / 2;
2859
2860 /*
2861 * Non-blocking allocations (e.g. GFP_ATOMIC) can
2862 * access more reserves than just __GFP_HIGH. Other
2863 			 * non-blocking allocation requests such as GFP_NOWAIT
2864 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
2865 * access to the min reserve.
2866 */
2867 if (alloc_flags & ALLOC_NON_BLOCK)
2868 min -= min / 4;
2869 }
2870
2871 /*
2872 * OOM victims can try even harder than the normal reserve
2873 * users on the grounds that it's definitely going to be in
2874 * the exit path shortly and free memory. Any allocation it
2875 * makes during the free path will be small and short-lived.
2876 */
2877 if (alloc_flags & ALLOC_OOM)
2878 min -= min / 2;
2879 }
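	/*
	 * Worked example (illustrative): with mark = 1000 pages, a
	 * __GFP_HIGH request (ALLOC_MIN_RESERVE) checks against 500, a
	 * GFP_ATOMIC request (ALLOC_MIN_RESERVE | ALLOC_NON_BLOCK) against
	 * 375, and ALLOC_OOM halves whatever min remains at that point.
	 */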
2880
2881 /*
2882 * Check watermarks for an order-0 allocation request. If these
2883 * are not met, then a high-order request also cannot go ahead
2884 * even if a suitable page happened to be free.
2885 */
2886 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
2887 return false;
2888
2889 /* If this is an order-0 request then the watermark is fine */
2890 if (!order)
2891 return true;
2892
2893 /* For a high-order request, check at least one suitable page is free */
2894 for (o = order; o < NR_PAGE_ORDERS; o++) {
2895 struct free_area *area = &z->free_area[o];
2896 int mt;
2897
2898 if (!area->nr_free)
2899 continue;
2900
2901 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2902 if (!free_area_empty(area, mt))
2903 return true;
2904 }
2905
2906 #ifdef CONFIG_CMA
2907 if ((alloc_flags & ALLOC_CMA) &&
2908 !free_area_empty(area, MIGRATE_CMA)) {
2909 return true;
2910 }
2911 #endif
2912 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
2913 !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
2914 return true;
2915 }
2916 }
2917 return false;
2918 }
2919
2920 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2921 int highest_zoneidx, unsigned int alloc_flags)
2922 {
2923 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
2924 zone_page_state(z, NR_FREE_PAGES));
2925 }
2926
2927 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
2928 unsigned long mark, int highest_zoneidx,
2929 unsigned int alloc_flags, gfp_t gfp_mask)
2930 {
2931 long free_pages;
2932
2933 free_pages = zone_page_state(z, NR_FREE_PAGES);
2934
2935 /*
2936 * Fast check for order-0 only. If this fails then the reserves
2937 * need to be calculated.
2938 */
2939 if (!order) {
2940 long usable_free;
2941 long reserved;
2942
2943 usable_free = free_pages;
2944 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
2945
2946 		/* reserved may over-estimate high-atomic reserves. */
2947 usable_free -= min(usable_free, reserved);
2948 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
2949 return true;
2950 }
2951
2952 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
2953 free_pages))
2954 return true;
2955
2956 /*
2957 * Ignore watermark boosting for __GFP_HIGH order-0 allocations
2958 * when checking the min watermark. The min watermark is the
2959 * point where boosting is ignored so that kswapd is woken up
2960 * when below the low watermark.
2961 */
2962 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
2963 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
2964 mark = z->_watermark[WMARK_MIN];
2965 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
2966 alloc_flags, free_pages);
2967 }
2968
2969 return false;
2970 }
2971
2972 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
2973 unsigned long mark, int highest_zoneidx)
2974 {
2975 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2976
2977 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2978 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2979
2980 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
2981 free_pages);
2982 }
2983
2984 #ifdef CONFIG_NUMA
2985 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
2986
2987 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2988 {
2989 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
2990 node_reclaim_distance;
2991 }
2992 #else /* CONFIG_NUMA */
2993 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2994 {
2995 return true;
2996 }
2997 #endif /* CONFIG_NUMA */
2998
2999 /*
3000 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3001 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3002 * premature use of a lower zone may cause lowmem pressure problems that
3003 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3004 * probably too small. It only makes sense to spread allocations to avoid
3005 * fragmentation between the Normal and DMA32 zones.
3006 */
3007 static inline unsigned int
3008 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3009 {
3010 unsigned int alloc_flags;
3011
3012 /*
3013 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3014 * to save a branch.
3015 */
3016 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3017
3018 #ifdef CONFIG_ZONE_DMA32
3019 if (!zone)
3020 return alloc_flags;
3021
3022 if (zone_idx(zone) != ZONE_NORMAL)
3023 return alloc_flags;
3024
3025 /*
3026 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3027 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3028 * on UMA that if Normal is populated then so is DMA32.
3029 */
3030 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3031 if (nr_online_nodes > 1 && !populated_zone(--zone))
3032 return alloc_flags;
3033
3034 alloc_flags |= ALLOC_NOFRAGMENT;
3035 #endif /* CONFIG_ZONE_DMA32 */
3036 return alloc_flags;
3037 }
3038
3039 /* Must be called after current_gfp_context() which can change gfp_mask */
3040 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3041 unsigned int alloc_flags)
3042 {
3043 #ifdef CONFIG_CMA
3044 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3045 alloc_flags |= ALLOC_CMA;
3046 #endif
3047 return alloc_flags;
3048 }
3049
3050 /*
3051 * get_page_from_freelist goes through the zonelist trying to allocate
3052 * a page.
3053 */
3054 static struct page *
3055 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3056 const struct alloc_context *ac)
3057 {
3058 struct zoneref *z;
3059 struct zone *zone;
3060 struct pglist_data *last_pgdat = NULL;
3061 bool last_pgdat_dirty_ok = false;
3062 bool no_fallback;
3063
3064 retry:
3065 /*
3066 * Scan zonelist, looking for a zone with enough free.
3067 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c.
3068 */
3069 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3070 z = ac->preferred_zoneref;
3071 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3072 ac->nodemask) {
3073 struct page *page;
3074 unsigned long mark;
3075
3076 if (cpusets_enabled() &&
3077 (alloc_flags & ALLOC_CPUSET) &&
3078 !__cpuset_zone_allowed(zone, gfp_mask))
3079 continue;
3080 /*
3081 * When allocating a page cache page for writing, we
3082 * want to get it from a node that is within its dirty
3083 * limit, such that no single node holds more than its
3084 * proportional share of globally allowed dirty pages.
3085 * The dirty limits take into account the node's
3086 * lowmem reserves and high watermark so that kswapd
3087 * should be able to balance it without having to
3088 * write pages from its LRU list.
3089 *
3090 * XXX: For now, allow allocations to potentially
3091 * exceed the per-node dirty limit in the slowpath
3092 * (spread_dirty_pages unset) before going into reclaim,
3093 * which is important when on a NUMA setup the allowed
3094 * nodes are together not big enough to reach the
3095 * global limit. The proper fix for these situations
3096 * will require awareness of nodes in the
3097 * dirty-throttling and the flusher threads.
3098 */
3099 if (ac->spread_dirty_pages) {
3100 if (last_pgdat != zone->zone_pgdat) {
3101 last_pgdat = zone->zone_pgdat;
3102 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
3103 }
3104
3105 if (!last_pgdat_dirty_ok)
3106 continue;
3107 }
3108
3109 if (no_fallback && nr_online_nodes > 1 &&
3110 zone != ac->preferred_zoneref->zone) {
3111 int local_nid;
3112
3113 /*
3114 * If moving to a remote node, retry but allow
3115 * fragmenting fallbacks. Locality is more important
3116 * than fragmentation avoidance.
3117 */
3118 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3119 if (zone_to_nid(zone) != local_nid) {
3120 alloc_flags &= ~ALLOC_NOFRAGMENT;
3121 goto retry;
3122 }
3123 }
3124
3125 cond_accept_memory(zone, order);
3126
3127 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3128 if (!zone_watermark_fast(zone, order, mark,
3129 ac->highest_zoneidx, alloc_flags,
3130 gfp_mask)) {
3131 int ret;
3132
3133 if (cond_accept_memory(zone, order))
3134 goto try_this_zone;
3135
3136 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3137 /*
3138 * Watermark failed for this zone, but see if we can
3139 * grow this zone if it contains deferred pages.
3140 */
3141 if (deferred_pages_enabled()) {
3142 if (_deferred_grow_zone(zone, order))
3143 goto try_this_zone;
3144 }
3145 #endif
3146 /* Checked here to keep the fast path fast */
3147 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3148 if (alloc_flags & ALLOC_NO_WATERMARKS)
3149 goto try_this_zone;
3150
3151 if (!node_reclaim_enabled() ||
3152 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3153 continue;
3154
3155 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3156 switch (ret) {
3157 case NODE_RECLAIM_NOSCAN:
3158 /* did not scan */
3159 continue;
3160 case NODE_RECLAIM_FULL:
3161 /* scanned but unreclaimable */
3162 continue;
3163 default:
3164 /* did we reclaim enough */
3165 if (zone_watermark_ok(zone, order, mark,
3166 ac->highest_zoneidx, alloc_flags))
3167 goto try_this_zone;
3168
3169 continue;
3170 }
3171 }
3172
3173 try_this_zone:
3174 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3175 gfp_mask, alloc_flags, ac->migratetype);
3176 if (page) {
3177 prep_new_page(page, order, gfp_mask, alloc_flags);
3178
3179 /*
3180 * If this is a high-order atomic allocation then check
3181 * if the pageblock should be reserved for the future
3182 */
3183 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
3184 reserve_highatomic_pageblock(page, zone);
3185
3186 return page;
3187 } else {
3188 if (cond_accept_memory(zone, order))
3189 goto try_this_zone;
3190
3191 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3192 /* Try again if zone has deferred pages */
3193 if (deferred_pages_enabled()) {
3194 if (_deferred_grow_zone(zone, order))
3195 goto try_this_zone;
3196 }
3197 #endif
3198 }
3199 }
3200
3201 /*
3202 * It's possible on a UMA machine to get through all zones that are
3203 * fragmented. If avoiding fragmentation, reset and try again.
3204 */
3205 if (no_fallback) {
3206 alloc_flags &= ~ALLOC_NOFRAGMENT;
3207 goto retry;
3208 }
3209
3210 return NULL;
3211 }
3212
3213 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3214 {
3215 unsigned int filter = SHOW_MEM_FILTER_NODES;
3216
3217 /*
3218 * This documents exceptions given to allocations in certain
3219 * contexts that are allowed to allocate outside current's set
3220 * of allowed nodes.
3221 */
3222 if (!(gfp_mask & __GFP_NOMEMALLOC))
3223 if (tsk_is_oom_victim(current) ||
3224 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3225 filter &= ~SHOW_MEM_FILTER_NODES;
3226 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3227 filter &= ~SHOW_MEM_FILTER_NODES;
3228
3229 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
3230 }
3231
3232 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3233 {
3234 struct va_format vaf;
3235 va_list args;
3236 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
3237
3238 if ((gfp_mask & __GFP_NOWARN) ||
3239 !__ratelimit(&nopage_rs) ||
3240 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
3241 return;
3242
3243 va_start(args, fmt);
3244 vaf.fmt = fmt;
3245 vaf.va = &args;
3246 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
3247 current->comm, &vaf, gfp_mask, &gfp_mask,
3248 nodemask_pr_args(nodemask));
3249 va_end(args);
3250
3251 cpuset_print_current_mems_allowed();
3252 pr_cont("\n");
3253 dump_stack();
3254 warn_alloc_show_mem(gfp_mask, nodemask);
3255 }
3256
3257 static inline struct page *
3258 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3259 unsigned int alloc_flags,
3260 const struct alloc_context *ac)
3261 {
3262 struct page *page;
3263
3264 page = get_page_from_freelist(gfp_mask, order,
3265 alloc_flags|ALLOC_CPUSET, ac);
3266 /*
3267 * fallback to ignore cpuset restriction if our nodes
3268 * are depleted
3269 */
3270 if (!page)
3271 page = get_page_from_freelist(gfp_mask, order,
3272 alloc_flags, ac);
3273
3274 return page;
3275 }
3276
3277 static inline struct page *
3278 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3279 const struct alloc_context *ac, unsigned long *did_some_progress)
3280 {
3281 struct oom_control oc = {
3282 .zonelist = ac->zonelist,
3283 .nodemask = ac->nodemask,
3284 .memcg = NULL,
3285 .gfp_mask = gfp_mask,
3286 .order = order,
3287 };
3288 struct page *page;
3289
3290 *did_some_progress = 0;
3291
3292 /*
3293 * Acquire the oom lock. If that fails, somebody else is
3294 * making progress for us.
3295 */
3296 if (!mutex_trylock(&oom_lock)) {
3297 *did_some_progress = 1;
3298 schedule_timeout_uninterruptible(1);
3299 return NULL;
3300 }
3301
3302 /*
3303 * Go through the zonelist yet one more time, keep very high watermark
3304 * here, this is only to catch a parallel oom killing, we must fail if
3305 * we're still under heavy pressure. But make sure that this reclaim
3306 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
3307 * allocation which will never fail due to oom_lock already held.
3308 */
3309 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3310 ~__GFP_DIRECT_RECLAIM, order,
3311 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3312 if (page)
3313 goto out;
3314
3315 /* Coredumps can quickly deplete all memory reserves */
3316 if (current->flags & PF_DUMPCORE)
3317 goto out;
3318 /* The OOM killer will not help higher order allocs */
3319 if (order > PAGE_ALLOC_COSTLY_ORDER)
3320 goto out;
3321 /*
3322 * We have already exhausted all our reclaim opportunities without any
3323 * success so it is time to admit defeat. We will skip the OOM killer
3324 * because it is very likely that the caller has a more reasonable
3325 * fallback than shooting a random task.
3326 *
3327 * The OOM killer may not free memory on a specific node.
3328 */
3329 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
3330 goto out;
3331 /* The OOM killer does not needlessly kill tasks for lowmem */
3332 if (ac->highest_zoneidx < ZONE_NORMAL)
3333 goto out;
3334 if (pm_suspended_storage())
3335 goto out;
3336 /*
3337 * XXX: GFP_NOFS allocations should rather fail than rely on
3338 * other request to make a forward progress.
3339 * We are in an unfortunate situation where out_of_memory cannot
3340 * do much for this context but let's try it to at least get
3341 * access to memory reserved if the current task is killed (see
3342 * out_of_memory). Once filesystems are ready to handle allocation
3343 * failures more gracefully we should just bail out here.
3344 */
3345
3346 /* Exhausted what can be done so it's blame time */
3347 if (out_of_memory(&oc) ||
3348 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
3349 *did_some_progress = 1;
3350
3351 /*
3352 * Help non-failing allocations by giving them access to memory
3353 * reserves
3354 */
3355 if (gfp_mask & __GFP_NOFAIL)
3356 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3357 ALLOC_NO_WATERMARKS, ac);
3358 }
3359 out:
3360 mutex_unlock(&oom_lock);
3361 return page;
3362 }
3363
3364 /*
* Maximum number of compaction retries with progress before the OOM
* killer is considered the only way to move forward.
3367 */
3368 #define MAX_COMPACT_RETRIES 16
3369
3370 #ifdef CONFIG_COMPACTION
3371 /* Try memory compaction for high-order allocations before reclaim */
3372 static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3374 unsigned int alloc_flags, const struct alloc_context *ac,
3375 enum compact_priority prio, enum compact_result *compact_result)
3376 {
3377 struct page *page = NULL;
3378 unsigned long pflags;
3379 unsigned int noreclaim_flag;
3380
3381 if (!order)
3382 return NULL;
3383
3384 psi_memstall_enter(&pflags);
3385 delayacct_compact_start();
3386 noreclaim_flag = memalloc_noreclaim_save();
3387
3388 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3389 prio, &page);
3390
3391 memalloc_noreclaim_restore(noreclaim_flag);
3392 psi_memstall_leave(&pflags);
3393 delayacct_compact_end();
3394
3395 if (*compact_result == COMPACT_SKIPPED)
3396 return NULL;
3397 /*
3398 * At least in one zone compaction wasn't deferred or skipped, so let's
3399 * count a compaction stall
3400 */
3401 count_vm_event(COMPACTSTALL);
3402
3403 /* Prep a captured page if available */
3404 if (page)
3405 prep_new_page(page, order, gfp_mask, alloc_flags);
3406
/* Try to get a page from the freelist if available */
3408 if (!page)
3409 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3410
3411 if (page) {
3412 struct zone *zone = page_zone(page);
3413
3414 zone->compact_blockskip_flush = false;
3415 compaction_defer_reset(zone, order, true);
3416 count_vm_event(COMPACTSUCCESS);
3417 return page;
3418 }
3419
3420 /*
* It's bad if a compaction run occurs and fails. The most likely reason
3422 * is that pages exist, but not enough to satisfy watermarks.
3423 */
3424 count_vm_event(COMPACTFAIL);
3425
3426 cond_resched();
3427
3428 return NULL;
3429 }
3430
3431 static inline bool
should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3433 enum compact_result compact_result,
3434 enum compact_priority *compact_priority,
3435 int *compaction_retries)
3436 {
3437 int max_retries = MAX_COMPACT_RETRIES;
3438 int min_priority;
3439 bool ret = false;
3440 int retries = *compaction_retries;
3441 enum compact_priority priority = *compact_priority;
3442
3443 if (!order)
3444 return false;
3445
3446 if (fatal_signal_pending(current))
3447 return false;
3448
3449 /*
3450 * Compaction was skipped due to a lack of free order-0
3451 * migration targets. Continue if reclaim can help.
3452 */
3453 if (compact_result == COMPACT_SKIPPED) {
3454 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3455 goto out;
3456 }
3457
3458 /*
3459 * Compaction managed to coalesce some page blocks, but the
* allocation failed, presumably due to a race. Retry a few times.
3461 */
3462 if (compact_result == COMPACT_SUCCESS) {
3463 /*
3464 * !costly requests are much more important than
3465 * __GFP_RETRY_MAYFAIL costly ones because they are de
3466 * facto nofail and invoke OOM killer to move on while
3467 * costly can fail and users are ready to cope with
3468 * that. 1/4 retries is rather arbitrary but we would
3469 * need much more detailed feedback from compaction to
3470 * make a better decision.
3471 */
3472 if (order > PAGE_ALLOC_COSTLY_ORDER)
3473 max_retries /= 4;
3474
3475 if (++(*compaction_retries) <= max_retries) {
3476 ret = true;
3477 goto out;
3478 }
3479 }
3480
3481 /*
3482 * Compaction failed. Retry with increasing priority.
3483 */
3484 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3485 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3486
3487 if (*compact_priority > min_priority) {
3488 (*compact_priority)--;
3489 *compaction_retries = 0;
3490 ret = true;
3491 }
3492 out:
3493 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3494 return ret;
3495 }
3496 #else
3497 static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3499 unsigned int alloc_flags, const struct alloc_context *ac,
3500 enum compact_priority prio, enum compact_result *compact_result)
3501 {
3502 *compact_result = COMPACT_SKIPPED;
3503 return NULL;
3504 }
3505
3506 static inline bool
should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3508 enum compact_result compact_result,
3509 enum compact_priority *compact_priority,
3510 int *compaction_retries)
3511 {
3512 struct zone *zone;
3513 struct zoneref *z;
3514
3515 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3516 return false;
3517
3518 /*
3519 * There are setups with compaction disabled which would prefer to loop
3520 * inside the allocator rather than hit the oom killer prematurely.
3521 * Let's give them a good hope and keep retrying while the order-0
3522 * watermarks are OK.
3523 */
3524 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3525 ac->highest_zoneidx, ac->nodemask) {
3526 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3527 ac->highest_zoneidx, alloc_flags))
3528 return true;
3529 }
3530 return false;
3531 }
3532 #endif /* CONFIG_COMPACTION */
3533
3534 #ifdef CONFIG_LOCKDEP
3535 static struct lockdep_map __fs_reclaim_map =
3536 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
3537
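/*
 * Only allocations that may enter direct reclaim take part in the
 * fs_reclaim lockdep annotation; PF_MEMALLOC contexts and callers that
 * explicitly opted out with __GFP_NOLOCKDEP are skipped.
 */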
static bool __need_reclaim(gfp_t gfp_mask)
3539 {
3540 /* no reclaim without waiting on it */
3541 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3542 return false;
3543
3544 /* this guy won't enter reclaim */
3545 if (current->flags & PF_MEMALLOC)
3546 return false;
3547
3548 if (gfp_mask & __GFP_NOLOCKDEP)
3549 return false;
3550
3551 return true;
3552 }
3553
void __fs_reclaim_acquire(unsigned long ip)
3555 {
3556 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
3557 }
3558
void __fs_reclaim_release(unsigned long ip)
3560 {
3561 lock_release(&__fs_reclaim_map, ip);
3562 }
3563
void fs_reclaim_acquire(gfp_t gfp_mask)
3565 {
3566 gfp_mask = current_gfp_context(gfp_mask);
3567
3568 if (__need_reclaim(gfp_mask)) {
3569 if (gfp_mask & __GFP_FS)
3570 __fs_reclaim_acquire(_RET_IP_);
3571
3572 #ifdef CONFIG_MMU_NOTIFIER
3573 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
3574 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
3575 #endif
3576
3577 }
3578 }
3579 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
3580
void fs_reclaim_release(gfp_t gfp_mask)
3582 {
3583 gfp_mask = current_gfp_context(gfp_mask);
3584
3585 if (__need_reclaim(gfp_mask)) {
3586 if (gfp_mask & __GFP_FS)
3587 __fs_reclaim_release(_RET_IP_);
3588 }
3589 }
3590 EXPORT_SYMBOL_GPL(fs_reclaim_release);
3591 #endif
3592
3593 /*
3594 * Zonelists may change due to hotplug during allocation. Detect when zonelists
* have been rebuilt so the allocation can be retried. Reader side does not lock and
3596 * retries the allocation if zonelist changes. Writer side is protected by the
3597 * embedded spin_lock.
3598 */
3599 static DEFINE_SEQLOCK(zonelist_update_seq);
3600
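/*
 * Snapshot the zonelist seqlock; paired with check_retry_zonelist() to
 * detect a zonelist rebuild that raced with the allocation. Without
 * CONFIG_MEMORY_HOTREMOVE zonelists can only be extended, so the cookie
 * is constant and check_retry_zonelist() never requests a retry.
 */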
static unsigned int zonelist_iter_begin(void)
3602 {
3603 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
3604 return read_seqbegin(&zonelist_update_seq);
3605
3606 return 0;
3607 }
3608
static unsigned int check_retry_zonelist(unsigned int seq)
3610 {
3611 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
3612 return read_seqretry(&zonelist_update_seq, seq);
3613
3614 return seq;
3615 }
3616
3617 /* Perform direct synchronous page reclaim */
3618 static unsigned long
__perform_reclaim(gfp_t gfp_mask, unsigned int order,
3620 const struct alloc_context *ac)
3621 {
3622 unsigned int noreclaim_flag;
3623 unsigned long progress;
3624
3625 cond_resched();
3626
3627 /* We now go into synchronous reclaim */
3628 cpuset_memory_pressure_bump();
3629 fs_reclaim_acquire(gfp_mask);
3630 noreclaim_flag = memalloc_noreclaim_save();
3631
3632 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3633 ac->nodemask);
3634
3635 memalloc_noreclaim_restore(noreclaim_flag);
3636 fs_reclaim_release(gfp_mask);
3637
3638 cond_resched();
3639
3640 return progress;
3641 }
3642
3643 /* The really slow allocator path where we enter direct reclaim */
3644 static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3646 unsigned int alloc_flags, const struct alloc_context *ac,
3647 unsigned long *did_some_progress)
3648 {
3649 struct page *page = NULL;
3650 unsigned long pflags;
3651 bool drained = false;
3652
3653 psi_memstall_enter(&pflags);
3654 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3655 if (unlikely(!(*did_some_progress)))
3656 goto out;
3657
3658 retry:
3659 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3660
3661 /*
3662 * If an allocation failed after direct reclaim, it could be because
3663 * pages are pinned on the per-cpu lists or in high alloc reserves.
3664 * Shrink them and try again
3665 */
3666 if (!page && !drained) {
3667 unreserve_highatomic_pageblock(ac, false);
3668 drain_all_pages(NULL);
3669 drained = true;
3670 goto retry;
3671 }
3672 out:
3673 psi_memstall_leave(&pflags);
3674
3675 return page;
3676 }
3677
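/* Wake kswapd once per node covered by the allocation's zonelist walk. */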
static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
3679 const struct alloc_context *ac)
3680 {
3681 struct zoneref *z;
3682 struct zone *zone;
3683 pg_data_t *last_pgdat = NULL;
3684 enum zone_type highest_zoneidx = ac->highest_zoneidx;
3685
3686 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
3687 ac->nodemask) {
3688 if (!managed_zone(zone))
3689 continue;
3690 if (last_pgdat != zone->zone_pgdat) {
3691 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
3692 last_pgdat = zone->zone_pgdat;
3693 }
3694 }
3695 }
3696
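/*
 * Translate the gfp mask into the ALLOC_* flags used throughout the slow
 * path: which watermark to check, whether memory reserves may be used and
 * whether kswapd should be woken.
 */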
3697 static inline unsigned int
gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
3699 {
3700 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
3701
3702 /*
3703 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE
3704 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3705 * to save two branches.
3706 */
3707 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
3708 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
3709
3710 /*
3711 * The caller may dip into page reserves a bit more if the caller
3712 * cannot run direct reclaim, or if the caller has realtime scheduling
3713 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
3714 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
3715 */
3716 alloc_flags |= (__force int)
3717 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
3718
3719 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
3720 /*
3721 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3722 * if it can't schedule.
3723 */
3724 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
3725 alloc_flags |= ALLOC_NON_BLOCK;
3726
3727 if (order > 0)
3728 alloc_flags |= ALLOC_HIGHATOMIC;
3729 }
3730
3731 /*
3732 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
3733 * GFP_ATOMIC) rather than fail, see the comment for
3734 * cpuset_node_allowed().
3735 */
3736 if (alloc_flags & ALLOC_MIN_RESERVE)
3737 alloc_flags &= ~ALLOC_CPUSET;
3738 } else if (unlikely(rt_task(current)) && in_task())
3739 alloc_flags |= ALLOC_MIN_RESERVE;
3740
3741 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
3742
3743 return alloc_flags;
3744 }
3745
static bool oom_reserves_allowed(struct task_struct *tsk)
3747 {
3748 if (!tsk_is_oom_victim(tsk))
3749 return false;
3750
3751 /*
3752 * !MMU doesn't have oom reaper so give access to memory reserves
3753 * only to the thread with TIF_MEMDIE set
3754 */
3755 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
3756 return false;
3757
3758 return true;
3759 }
3760
3761 /*
3762 * Distinguish requests which really need access to full memory
3763 * reserves from oom victims which can live with a portion of it
3764 */
static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
3766 {
3767 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3768 return 0;
3769 if (gfp_mask & __GFP_MEMALLOC)
3770 return ALLOC_NO_WATERMARKS;
3771 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3772 return ALLOC_NO_WATERMARKS;
3773 if (!in_interrupt()) {
3774 if (current->flags & PF_MEMALLOC)
3775 return ALLOC_NO_WATERMARKS;
3776 else if (oom_reserves_allowed(current))
3777 return ALLOC_OOM;
3778 }
3779
3780 return 0;
3781 }
3782
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3784 {
3785 return !!__gfp_pfmemalloc_flags(gfp_mask);
3786 }
3787
3788 /*
* Checks whether it makes sense to retry the reclaim to make forward progress
3790 * for the given allocation request.
3791 *
3792 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
3793 * without success, or when we couldn't even meet the watermark if we
3794 * reclaimed all remaining pages on the LRU lists.
3795 *
3796 * Returns true if a retry is viable or false to enter the oom path.
3797 */
3798 static inline bool
should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3800 struct alloc_context *ac, int alloc_flags,
3801 bool did_some_progress, int *no_progress_loops)
3802 {
3803 struct zone *zone;
3804 struct zoneref *z;
3805 bool ret = false;
3806
3807 /*
* Costly allocations might have made some progress, but that doesn't mean
* their order will become available due to high fragmentation, so
* always increment the no-progress counter for them.
3811 */
3812 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3813 *no_progress_loops = 0;
3814 else
3815 (*no_progress_loops)++;
3816
3817 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
3818 goto out;
3819
3820
3821 /*
3822 * Keep reclaiming pages while there is a chance this will lead
3823 * somewhere. If none of the target zones can satisfy our allocation
3824 * request even if all reclaimable pages are considered then we are
3825 * screwed and have to go OOM.
3826 */
3827 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3828 ac->highest_zoneidx, ac->nodemask) {
3829 unsigned long available;
3830 unsigned long reclaimable;
3831 unsigned long min_wmark = min_wmark_pages(zone);
3832 bool wmark;
3833
3834 available = reclaimable = zone_reclaimable_pages(zone);
3835 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3836
3837 /*
3838 * Would the allocation succeed if we reclaimed all
3839 * reclaimable pages?
3840 */
3841 wmark = __zone_watermark_ok(zone, order, min_wmark,
3842 ac->highest_zoneidx, alloc_flags, available);
3843 trace_reclaim_retry_zone(z, order, reclaimable,
3844 available, min_wmark, *no_progress_loops, wmark);
3845 if (wmark) {
3846 ret = true;
3847 break;
3848 }
3849 }
3850
3851 /*
3852 * Memory allocation/reclaim might be called from a WQ context and the
3853 * current implementation of the WQ concurrency control doesn't
3854 * recognize that a particular WQ is congested if the worker thread is
3855 * looping without ever sleeping. Therefore we have to do a short sleep
3856 * here rather than calling cond_resched().
3857 */
3858 if (current->flags & PF_WQ_WORKER)
3859 schedule_timeout_uninterruptible(1);
3860 else
3861 cond_resched();
3862 out:
3863 /* Before OOM, exhaust highatomic_reserve */
3864 if (!ret)
3865 return unreserve_highatomic_pageblock(ac, true);
3866
3867 return ret;
3868 }
3869
3870 static inline bool
check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
3872 {
3873 /*
3874 * It's possible that cpuset's mems_allowed and the nodemask from
3875 * mempolicy don't intersect. This should be normally dealt with by
3876 * policy_nodemask(), but it's possible to race with cpuset update in
3877 * such a way the check therein was true, and then it became false
3878 * before we got our cpuset_mems_cookie here.
3879 * This assumes that for all allocations, ac->nodemask can come only
* from MPOL_BIND mempolicy (whose documented semantics are to be ignored
3881 * when it does not intersect with the cpuset restrictions) or the
3882 * caller can deal with a violated nodemask.
3883 */
3884 if (cpusets_enabled() && ac->nodemask &&
3885 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
3886 ac->nodemask = NULL;
3887 return true;
3888 }
3889
3890 /*
3891 * When updating a task's mems_allowed or mempolicy nodemask, it is
3892 * possible to race with parallel threads in such a way that our
3893 * allocation can fail while the mask is being updated. If we are about
3894 * to fail, check if the cpuset changed during allocation and if so,
3895 * retry.
3896 */
3897 if (read_mems_allowed_retry(cpuset_mems_cookie))
3898 return true;
3899
3900 return false;
3901 }
3902
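/*
 * The slow path: wake kswapd, then try compaction, direct reclaim and
 * finally the OOM killer, retrying as long as forward progress is being
 * made (or forever for __GFP_NOFAIL requests).
 */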
3903 static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3905 struct alloc_context *ac)
3906 {
3907 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3908 bool can_compact = gfp_compaction_allowed(gfp_mask);
3909 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
3910 struct page *page = NULL;
3911 unsigned int alloc_flags;
3912 unsigned long did_some_progress;
3913 enum compact_priority compact_priority;
3914 enum compact_result compact_result;
3915 int compaction_retries;
3916 int no_progress_loops;
3917 unsigned int cpuset_mems_cookie;
3918 unsigned int zonelist_iter_cookie;
3919 int reserve_flags;
3920
3921 restart:
3922 compaction_retries = 0;
3923 no_progress_loops = 0;
3924 compact_priority = DEF_COMPACT_PRIORITY;
3925 cpuset_mems_cookie = read_mems_allowed_begin();
3926 zonelist_iter_cookie = zonelist_iter_begin();
3927
3928 /*
3929 * The fast path uses conservative alloc_flags to succeed only until
3930 * kswapd needs to be woken up, and to avoid the cost of setting up
3931 * alloc_flags precisely. So we do that now.
3932 */
3933 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
3934
3935 /*
3936 * We need to recalculate the starting point for the zonelist iterator
3937 * because we might have used different nodemask in the fast path, or
3938 * there was a cpuset modification and we are retrying - otherwise we
3939 * could end up iterating over non-eligible zones endlessly.
3940 */
3941 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3942 ac->highest_zoneidx, ac->nodemask);
3943 if (!ac->preferred_zoneref->zone)
3944 goto nopage;
3945
3946 /*
3947 * Check for insane configurations where the cpuset doesn't contain
3948 * any suitable zone to satisfy the request - e.g. non-movable
3949 * GFP_HIGHUSER allocations from MOVABLE nodes only.
3950 */
3951 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
3952 struct zoneref *z = first_zones_zonelist(ac->zonelist,
3953 ac->highest_zoneidx,
3954 &cpuset_current_mems_allowed);
3955 if (!z->zone)
3956 goto nopage;
3957 }
3958
3959 if (alloc_flags & ALLOC_KSWAPD)
3960 wake_all_kswapds(order, gfp_mask, ac);
3961
3962 /*
3963 * The adjusted alloc_flags might result in immediate success, so try
3964 * that first
3965 */
3966 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3967 if (page)
3968 goto got_pg;
3969
3970 /*
3971 * For costly allocations, try direct compaction first, as it's likely
3972 * that we have enough base pages and don't need to reclaim. For non-
3973 * movable high-order allocations, do that as well, as compaction will
* try to prevent permanent fragmentation by migrating from blocks of the
3975 * same migratetype.
3976 * Don't try this for allocations that are allowed to ignore
3977 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
3978 */
3979 if (can_direct_reclaim && can_compact &&
3980 (costly_order ||
3981 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
3982 && !gfp_pfmemalloc_allowed(gfp_mask)) {
3983 page = __alloc_pages_direct_compact(gfp_mask, order,
3984 alloc_flags, ac,
3985 INIT_COMPACT_PRIORITY,
3986 &compact_result);
3987 if (page)
3988 goto got_pg;
3989
3990 /*
3991 * Checks for costly allocations with __GFP_NORETRY, which
3992 * includes some THP page fault allocations
3993 */
3994 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
3995 /*
3996 * If allocating entire pageblock(s) and compaction
3997 * failed because all zones are below low watermarks
3998 * or is prohibited because it recently failed at this
3999 * order, fail immediately unless the allocator has
4000 * requested compaction and reclaim retry.
4001 *
4002 * Reclaim is
4003 * - potentially very expensive because zones are far
4004 * below their low watermarks or this is part of very
4005 * bursty high order allocations,
4006 * - not guaranteed to help because isolate_freepages()
4007 * may not iterate over freed pages as part of its
4008 * linear scan, and
4009 * - unlikely to make entire pageblocks free on its
4010 * own.
4011 */
4012 if (compact_result == COMPACT_SKIPPED ||
4013 compact_result == COMPACT_DEFERRED)
4014 goto nopage;
4015
4016 /*
4017 * Looks like reclaim/compaction is worth trying, but
4018 * sync compaction could be very expensive, so keep
4019 * using async compaction.
4020 */
4021 compact_priority = INIT_COMPACT_PRIORITY;
4022 }
4023 }
4024
4025 retry:
4026 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4027 if (alloc_flags & ALLOC_KSWAPD)
4028 wake_all_kswapds(order, gfp_mask, ac);
4029
4030 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4031 if (reserve_flags)
4032 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4033 (alloc_flags & ALLOC_KSWAPD);
4034
4035 /*
4036 * Reset the nodemask and zonelist iterators if memory policies can be
4037 * ignored. These allocations are high priority and system rather than
4038 * user oriented.
4039 */
4040 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4041 ac->nodemask = NULL;
4042 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4043 ac->highest_zoneidx, ac->nodemask);
4044 }
4045
4046 /* Attempt with potentially adjusted zonelist and alloc_flags */
4047 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4048 if (page)
4049 goto got_pg;
4050
4051 /* Caller is not willing to reclaim, we can't balance anything */
4052 if (!can_direct_reclaim)
4053 goto nopage;
4054
4055 /* Avoid recursion of direct reclaim */
4056 if (current->flags & PF_MEMALLOC)
4057 goto nopage;
4058
4059 /* Try direct reclaim and then allocating */
4060 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4061 &did_some_progress);
4062 if (page)
4063 goto got_pg;
4064
4065 /* Try direct compaction and then allocating */
4066 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4067 compact_priority, &compact_result);
4068 if (page)
4069 goto got_pg;
4070
4071 /* Do not loop if specifically requested */
4072 if (gfp_mask & __GFP_NORETRY)
4073 goto nopage;
4074
4075 /*
4076 * Do not retry costly high order allocations unless they are
4077 * __GFP_RETRY_MAYFAIL and we can compact
4078 */
4079 if (costly_order && (!can_compact ||
4080 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4081 goto nopage;
4082
4083 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4084 did_some_progress > 0, &no_progress_loops))
4085 goto retry;
4086
4087 /*
4088 * It doesn't make any sense to retry for the compaction if the order-0
4089 * reclaim is not able to make any progress because the current
4090 * implementation of the compaction depends on the sufficient amount
4091 * of free memory (see __compaction_suitable)
4092 */
4093 if (did_some_progress > 0 && can_compact &&
4094 should_compact_retry(ac, order, alloc_flags,
4095 compact_result, &compact_priority,
4096 &compaction_retries))
4097 goto retry;
4098
4099
4100 /*
4101 * Deal with possible cpuset update races or zonelist updates to avoid
* an unnecessary OOM kill.
4103 */
4104 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4105 check_retry_zonelist(zonelist_iter_cookie))
4106 goto restart;
4107
4108 /* Reclaim has failed us, start killing things */
4109 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4110 if (page)
4111 goto got_pg;
4112
4113 /* Avoid allocations with no watermarks from looping endlessly */
4114 if (tsk_is_oom_victim(current) &&
4115 (alloc_flags & ALLOC_OOM ||
4116 (gfp_mask & __GFP_NOMEMALLOC)))
4117 goto nopage;
4118
4119 /* Retry as long as the OOM killer is making progress */
4120 if (did_some_progress) {
4121 no_progress_loops = 0;
4122 goto retry;
4123 }
4124
4125 nopage:
4126 /*
4127 * Deal with possible cpuset update races or zonelist updates to avoid
* an unnecessary OOM kill.
4129 */
4130 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4131 check_retry_zonelist(zonelist_iter_cookie))
4132 goto restart;
4133
4134 /*
4135 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4136 * we always retry
4137 */
4138 if (gfp_mask & __GFP_NOFAIL) {
4139 /*
* All existing users of __GFP_NOFAIL are blockable, so warn
* about any new users that actually require GFP_NOWAIT
4142 */
4143 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
4144 goto fail;
4145
4146 /*
4147 * PF_MEMALLOC request from this context is rather bizarre
* because we cannot reclaim anything and can only loop waiting
* for somebody to do the work for us
4150 */
4151 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
4152
4153 /*
* Non-failing costly orders are a hard requirement which we are not
* prepared to handle, so warn about these users so that we can
* identify them and convert them to something else.
4158 */
4159 WARN_ON_ONCE_GFP(costly_order, gfp_mask);
4160
4161 /*
4162 * Help non-failing allocations by giving some access to memory
4163 * reserves normally used for high priority non-blocking
4164 * allocations but do not use ALLOC_NO_WATERMARKS because this
4165 * could deplete whole memory reserves which would just make
4166 * the situation worse.
4167 */
4168 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4169 if (page)
4170 goto got_pg;
4171
4172 cond_resched();
4173 goto retry;
4174 }
4175 fail:
4176 warn_alloc(gfp_mask, ac->nodemask,
4177 "page allocation failure: order:%u", order);
4178 got_pg:
4179 return page;
4180 }
4181
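/*
 * Set up the alloc_context for one allocation request: zonelist, nodemask,
 * migratetype, cpuset handling and the preferred zoneref used as the
 * starting point of the zonelist iterator. Returns false if fault
 * injection decided this allocation should fail outright.
 */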
static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4183 int preferred_nid, nodemask_t *nodemask,
4184 struct alloc_context *ac, gfp_t *alloc_gfp,
4185 unsigned int *alloc_flags)
4186 {
4187 ac->highest_zoneidx = gfp_zone(gfp_mask);
4188 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4189 ac->nodemask = nodemask;
4190 ac->migratetype = gfp_migratetype(gfp_mask);
4191
4192 if (cpusets_enabled()) {
4193 *alloc_gfp |= __GFP_HARDWALL;
4194 /*
* In interrupt context the current task's cpuset is irrelevant,
* so any node is acceptable.
4197 */
4198 if (in_task() && !ac->nodemask)
4199 ac->nodemask = &cpuset_current_mems_allowed;
4200 else
4201 *alloc_flags |= ALLOC_CPUSET;
4202 }
4203
4204 might_alloc(gfp_mask);
4205
4206 if (should_fail_alloc_page(gfp_mask, order))
4207 return false;
4208
4209 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4210
4211 /* Dirty zone balancing only done in the fast path */
4212 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4213
4214 /*
4215 * The preferred zone is used for statistics but crucially it is
4216 * also used as the starting point for the zonelist iterator. It
4217 * may get reset for allocations that ignore memory policies.
4218 */
4219 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4220 ac->highest_zoneidx, ac->nodemask);
4221
4222 return true;
4223 }
4224
4225 /*
4226 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
4227 * @gfp: GFP flags for the allocation
4228 * @preferred_nid: The preferred NUMA node ID to allocate from
4229 * @nodemask: Set of nodes to allocate from, may be NULL
4230 * @nr_pages: The number of pages desired on the list or array
4231 * @page_list: Optional list to store the allocated pages
4232 * @page_array: Optional array to store the pages
4233 *
4234 * This is a batched version of the page allocator that attempts to
4235 * allocate nr_pages quickly. Pages are added to page_list if page_list
4236 * is not NULL, otherwise it is assumed that the page_array is valid.
4237 *
4238 * For lists, nr_pages is the number of pages that should be allocated.
4239 *
4240 * For arrays, only NULL elements are populated with pages and nr_pages
4241 * is the maximum number of pages that will be stored in the array.
4242 *
4243 * Returns the number of pages on the list or array.
4244 */
*/
unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
4246 nodemask_t *nodemask, int nr_pages,
4247 struct list_head *page_list,
4248 struct page **page_array)
4249 {
4250 struct page *page;
4251 unsigned long __maybe_unused UP_flags;
4252 struct zone *zone;
4253 struct zoneref *z;
4254 struct per_cpu_pages *pcp;
4255 struct list_head *pcp_list;
4256 struct alloc_context ac;
4257 gfp_t alloc_gfp;
4258 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4259 int nr_populated = 0, nr_account = 0;
4260
4261 /*
4262 * Skip populated array elements to determine if any pages need
4263 * to be allocated before disabling IRQs.
4264 */
4265 while (page_array && nr_populated < nr_pages && page_array[nr_populated])
4266 nr_populated++;
4267
4268 /* No pages requested? */
4269 if (unlikely(nr_pages <= 0))
4270 goto out;
4271
4272 /* Already populated array? */
4273 if (unlikely(page_array && nr_pages - nr_populated == 0))
4274 goto out;
4275
4276 /* Bulk allocator does not support memcg accounting. */
4277 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
4278 goto failed;
4279
4280 /* Use the single page allocator for one page. */
4281 if (nr_pages - nr_populated == 1)
4282 goto failed;
4283
4284 #ifdef CONFIG_PAGE_OWNER
4285 /*
4286 * PAGE_OWNER may recurse into the allocator to allocate space to
4287 * save the stack with pagesets.lock held. Releasing/reacquiring
* removes much of the performance benefit of bulk allocation, so
* force the caller to allocate one page at a time; that performs
* similarly without adding complexity to the bulk allocator.
4291 */
4292 if (static_branch_unlikely(&page_owner_inited))
4293 goto failed;
4294 #endif
4295
4296 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
4297 gfp &= gfp_allowed_mask;
4298 alloc_gfp = gfp;
4299 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
4300 goto out;
4301 gfp = alloc_gfp;
4302
4303 /* Find an allowed local zone that meets the low watermark. */
4304 z = ac.preferred_zoneref;
4305 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
4306 unsigned long mark;
4307
4308 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
4309 !__cpuset_zone_allowed(zone, gfp)) {
4310 continue;
4311 }
4312
4313 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
4314 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
4315 goto failed;
4316 }
4317
4318 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
4319 if (zone_watermark_fast(zone, 0, mark,
4320 zonelist_zone_idx(ac.preferred_zoneref),
4321 alloc_flags, gfp)) {
4322 break;
4323 }
4324 }
4325
4326 /*
* If there are no allowed local zones that meet the watermarks then
4328 * try to allocate a single page and reclaim if necessary.
4329 */
4330 if (unlikely(!zone))
4331 goto failed;
4332
4333 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
4334 pcp_trylock_prepare(UP_flags);
4335 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
4336 if (!pcp)
4337 goto failed_irq;
4338
4339 /* Attempt the batch allocation */
4340 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
4341 while (nr_populated < nr_pages) {
4342
4343 /* Skip existing pages */
4344 if (page_array && page_array[nr_populated]) {
4345 nr_populated++;
4346 continue;
4347 }
4348
4349 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
4350 pcp, pcp_list);
4351 if (unlikely(!page)) {
4352 /* Try and allocate at least one page */
4353 if (!nr_account) {
4354 pcp_spin_unlock(pcp);
4355 goto failed_irq;
4356 }
4357 break;
4358 }
4359 nr_account++;
4360
4361 prep_new_page(page, 0, gfp, 0);
4362 if (page_list)
4363 list_add(&page->lru, page_list);
4364 else
4365 page_array[nr_populated] = page;
4366 nr_populated++;
4367 }
4368
4369 pcp_spin_unlock(pcp);
4370 pcp_trylock_finish(UP_flags);
4371
4372 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
4373 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
4374
4375 out:
4376 return nr_populated;
4377
4378 failed_irq:
4379 pcp_trylock_finish(UP_flags);
4380
4381 failed:
4382 page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
4383 if (page) {
4384 if (page_list)
4385 list_add(&page->lru, page_list);
4386 else
4387 page_array[nr_populated] = page;
4388 nr_populated++;
4389 }
4390
4391 goto out;
4392 }
4393 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
4394
4395 /*
4396 * This is the 'heart' of the zoned buddy allocator.
4397 */
struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
4399 nodemask_t *nodemask)
4400 {
4401 struct page *page;
4402 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4403 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
4404 struct alloc_context ac = { };
4405
4406 /*
4407 * There are several places where we assume that the order value is sane
* so bail out early if the request is out of bounds.
4409 */
4410 if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
4411 return NULL;
4412
4413 gfp &= gfp_allowed_mask;
4414 /*
4415 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4416 * resp. GFP_NOIO which has to be inherited for all allocation requests
4417 * from a particular context which has been marked by
4418 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
4419 * movable zones are not used during allocation.
4420 */
4421 gfp = current_gfp_context(gfp);
4422 alloc_gfp = gfp;
4423 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
4424 &alloc_gfp, &alloc_flags))
4425 return NULL;
4426
4427 /*
4428 * Forbid the first pass from falling back to types that fragment
4429 * memory until all local zones are considered.
4430 */
4431 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
4432
4433 /* First allocation attempt */
4434 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
4435 if (likely(page))
4436 goto out;
4437
4438 alloc_gfp = gfp;
4439 ac.spread_dirty_pages = false;
4440
4441 /*
4442 * Restore the original nodemask if it was potentially replaced with
4443 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4444 */
4445 ac.nodemask = nodemask;
4446
4447 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
4448
4449 out:
4450 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
4451 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
4452 __free_pages(page, order);
4453 page = NULL;
4454 }
4455
4456 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
4457 kmsan_alloc_page(page, order, alloc_gfp);
4458
4459 return page;
4460 }
4461 EXPORT_SYMBOL(__alloc_pages);
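/*
 * Illustrative use via the usual wrappers (hypothetical caller, not part
 * of this file): allocate four zeroed, physically contiguous pages and
 * free them again.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	if (page) {
 *		void *buf = page_address(page);
 *		...
 *		__free_pages(page, 2);
 *	}
 */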
4462
struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
4464 nodemask_t *nodemask)
4465 {
4466 struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
4467 preferred_nid, nodemask);
4468 return page_rmappable_folio(page);
4469 }
4470 EXPORT_SYMBOL(__folio_alloc);
4471
4472 /*
4473 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
4474 * address cannot represent highmem pages. Use alloc_pages and then kmap if
4475 * you need to access high mem.
4476 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4478 {
4479 struct page *page;
4480
4481 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
4482 if (!page)
4483 return 0;
4484 return (unsigned long) page_address(page);
4485 }
4486 EXPORT_SYMBOL(__get_free_pages);
4487
unsigned long get_zeroed_page(gfp_t gfp_mask)
4489 {
4490 return __get_free_page(gfp_mask | __GFP_ZERO);
4491 }
4492 EXPORT_SYMBOL(get_zeroed_page);
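/*
 * Illustrative use (hypothetical caller, not part of this file): a two
 * page, directly addressable buffer.
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_pages(addr, 1);
 */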
4493
4494 /**
4495 * __free_pages - Free pages allocated with alloc_pages().
4496 * @page: The page pointer returned from alloc_pages().
4497 * @order: The order of the allocation.
4498 *
4499 * This function can free multi-page allocations that are not compound
4500 * pages. It does not check that the @order passed in matches that of
4501 * the allocation, so it is easy to leak memory. Freeing more memory
4502 * than was allocated will probably emit a warning.
4503 *
4504 * If the last reference to this page is speculative, it will be released
4505 * by put_page() which only frees the first page of a non-compound
4506 * allocation. To prevent the remaining pages from being leaked, we free
4507 * the subsequent pages here. If you want to use the page's reference
4508 * count to decide when to free the allocation, you should allocate a
4509 * compound page, and use put_page() instead of __free_pages().
4510 *
4511 * Context: May be called in interrupt context or while holding a normal
4512 * spinlock, but not in NMI context or while holding a raw spinlock.
4513 */
*/
void __free_pages(struct page *page, unsigned int order)
4515 {
4516 /* get PageHead before we drop reference */
4517 int head = PageHead(page);
4518
4519 if (put_page_testzero(page))
4520 free_the_page(page, order);
4521 else if (!head)
4522 while (order-- > 0)
4523 free_the_page(page + (1 << order), order);
4524 }
4525 EXPORT_SYMBOL(__free_pages);
4526
void free_pages(unsigned long addr, unsigned int order)
4528 {
4529 if (addr != 0) {
4530 VM_BUG_ON(!virt_addr_valid((void *)addr));
4531 __free_pages(virt_to_page((void *)addr), order);
4532 }
4533 }
4534
4535 EXPORT_SYMBOL(free_pages);
4536
4537 /*
4538 * Page Fragment:
4539 * An arbitrary-length arbitrary-offset area of memory which resides
4540 * within a 0 or higher order page. Multiple fragments within that page
4541 * are individually refcounted, in the page's reference counter.
4542 *
4543 * The page_frag functions below provide a simple allocation framework for
4544 * page fragments. This is used by the network stack and network device
4545 * drivers to provide a backing region of memory for use as either an
4546 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4547 */
*/
static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4549 gfp_t gfp_mask)
4550 {
4551 struct page *page = NULL;
4552 gfp_t gfp = gfp_mask;
4553
4554 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4555 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4556 __GFP_NOMEMALLOC;
4557 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4558 PAGE_FRAG_CACHE_MAX_ORDER);
4559 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4560 #endif
4561 if (unlikely(!page))
4562 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4563
4564 nc->va = page ? page_address(page) : NULL;
4565
4566 return page;
4567 }
4568
void __page_frag_cache_drain(struct page *page, unsigned int count)
4570 {
4571 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4572
4573 if (page_ref_sub_and_test(page, count))
4574 free_the_page(page, compound_order(page));
4575 }
4576 EXPORT_SYMBOL(__page_frag_cache_drain);
4577
void *page_frag_alloc_align(struct page_frag_cache *nc,
4579 unsigned int fragsz, gfp_t gfp_mask,
4580 unsigned int align_mask)
4581 {
4582 unsigned int size = PAGE_SIZE;
4583 struct page *page;
4584 int offset;
4585
4586 if (unlikely(!nc->va)) {
4587 refill:
4588 page = __page_frag_cache_refill(nc, gfp_mask);
4589 if (!page)
4590 return NULL;
4591
4592 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4593 /* if size can vary use size else just use PAGE_SIZE */
4594 size = nc->size;
4595 #endif
4596 /* Even if we own the page, we do not use atomic_set().
4597 * This would break get_page_unless_zero() users.
4598 */
4599 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
4600
4601 /* reset page count bias and offset to start of new frag */
4602 nc->pfmemalloc = page_is_pfmemalloc(page);
4603 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4604 nc->offset = size;
4605 }
4606
4607 offset = nc->offset - fragsz;
4608 if (unlikely(offset < 0)) {
4609 page = virt_to_page(nc->va);
4610
4611 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4612 goto refill;
4613
4614 if (unlikely(nc->pfmemalloc)) {
4615 free_the_page(page, compound_order(page));
4616 goto refill;
4617 }
4618
4619 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4620 /* if size can vary use size else just use PAGE_SIZE */
4621 size = nc->size;
4622 #endif
4623 /* OK, page count is 0, we can safely set it */
4624 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
4625
4626 /* reset page count bias and offset to start of new frag */
4627 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4628 offset = size - fragsz;
4629 if (unlikely(offset < 0)) {
4630 /*
4631 * The caller is trying to allocate a fragment
4632 * with fragsz > PAGE_SIZE but the cache isn't big
4633 * enough to satisfy the request, this may
4634 * happen in low memory conditions.
4635 * We don't release the cache page because
4636 * it could make memory pressure worse
4637 * so we simply return NULL here.
4638 */
4639 return NULL;
4640 }
4641 }
4642
4643 nc->pagecnt_bias--;
4644 offset &= align_mask;
4645 nc->offset = offset;
4646
4647 return nc->va + offset;
4648 }
4649 EXPORT_SYMBOL(page_frag_alloc_align);
4650
4651 /*
4652 * Frees a page fragment allocated out of either a compound or order 0 page.
4653 */
void page_frag_free(void *addr)
4655 {
4656 struct page *page = virt_to_head_page(addr);
4657
4658 if (unlikely(put_page_testzero(page)))
4659 free_the_page(page, compound_order(page));
4660 }
4661 EXPORT_SYMBOL(page_frag_free);
4662
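/*
 * Trim a power-of-two allocation down to the exact number of pages needed
 * for @size: split the page metadata, give each retained page its own
 * reference and return the unused tail pages to the buddy allocator.
 */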
static void *make_alloc_exact(unsigned long addr, unsigned int order,
4664 size_t size)
4665 {
4666 if (addr) {
4667 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
4668 struct page *page = virt_to_page((void *)addr);
4669 struct page *last = page + nr;
4670
4671 split_page_owner(page, 1 << order);
4672 split_page_memcg(page, 1 << order);
4673 while (page < --last)
4674 set_page_refcounted(last);
4675
4676 last = page + (1UL << order);
4677 for (page += nr; page < last; page++)
4678 __free_pages_ok(page, 0, FPI_TO_TAIL);
4679 }
4680 return (void *)addr;
4681 }
4682
4683 /**
4684 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
4685 * @size: the number of bytes to allocate
4686 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4687 *
4688 * This function is similar to alloc_pages(), except that it allocates the
4689 * minimum number of pages to satisfy the request. alloc_pages() can only
* allocate memory in power-of-two numbers of pages.
4691 *
4692 * This function is also limited by MAX_ORDER.
4693 *
4694 * Memory allocated by this function must be released by free_pages_exact().
4695 *
4696 * Return: pointer to the allocated area or %NULL in case of error.
4697 */
*/
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4699 {
4700 unsigned int order = get_order(size);
4701 unsigned long addr;
4702
4703 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4704 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4705
4706 addr = __get_free_pages(gfp_mask, order);
4707 return make_alloc_exact(addr, order, size);
4708 }
4709 EXPORT_SYMBOL(alloc_pages_exact);
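/*
 * Illustrative use (hypothetical caller, not part of this file): with 4K
 * pages, a 48K request consumes twelve pages here, whereas alloc_pages()
 * would round it up to an order-4 (sixteen page) block.
 *
 *	void *buf = alloc_pages_exact(48 * 1024, GFP_KERNEL | __GFP_ZERO);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 48 * 1024);
 */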
4710
4711 /**
4712 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4713 * pages on a node.
4714 * @nid: the preferred node ID where memory should be allocated
4715 * @size: the number of bytes to allocate
4716 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4717 *
4718 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4719 * back.
4720 *
4721 * Return: pointer to the allocated area or %NULL in case of error.
4722 */
*/
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4724 {
4725 unsigned int order = get_order(size);
4726 struct page *p;
4727
4728 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4729 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4730
4731 p = alloc_pages_node(nid, gfp_mask, order);
4732 if (!p)
4733 return NULL;
4734 return make_alloc_exact((unsigned long)page_address(p), order, size);
4735 }
4736
4737 /**
4738 * free_pages_exact - release memory allocated via alloc_pages_exact()
4739 * @virt: the value returned by alloc_pages_exact.
4740 * @size: size of allocation, same value as passed to alloc_pages_exact().
4741 *
4742 * Release the memory allocated by a previous call to alloc_pages_exact.
4743 */
*/
void free_pages_exact(void *virt, size_t size)
4745 {
4746 unsigned long addr = (unsigned long)virt;
4747 unsigned long end = addr + PAGE_ALIGN(size);
4748
4749 while (addr < end) {
4750 free_page(addr);
4751 addr += PAGE_SIZE;
4752 }
4753 }
4754 EXPORT_SYMBOL(free_pages_exact);
4755
4756 /**
4757 * nr_free_zone_pages - count number of pages beyond high watermark
4758 * @offset: The zone index of the highest zone
4759 *
4760 * nr_free_zone_pages() counts the number of pages which are beyond the
4761 * high watermark within all zones at or below a given zone index. For each
4762 * zone, the number of pages is calculated as:
4763 *
4764 * nr_free_zone_pages = managed_pages - high_pages
4765 *
4766 * Return: number of pages beyond high watermark.
4767 */
static unsigned long nr_free_zone_pages(int offset)
4769 {
4770 struct zoneref *z;
4771 struct zone *zone;
4772
4773 /* Just pick one node, since fallback list is circular */
4774 unsigned long sum = 0;
4775
4776 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
4777
4778 for_each_zone_zonelist(zone, z, zonelist, offset) {
4779 unsigned long size = zone_managed_pages(zone);
4780 unsigned long high = high_wmark_pages(zone);
4781 if (size > high)
4782 sum += size - high;
4783 }
4784
4785 return sum;
4786 }
4787
4788 /**
4789 * nr_free_buffer_pages - count number of pages beyond high watermark
4790 *
4791 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4792 * watermark within ZONE_DMA and ZONE_NORMAL.
4793 *
4794 * Return: number of pages beyond high watermark within ZONE_DMA and
4795 * ZONE_NORMAL.
4796 */
unsigned long nr_free_buffer_pages(void)
4798 {
4799 return nr_free_zone_pages(gfp_zone(GFP_USER));
4800 }
4801 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
4802
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4804 {
4805 zoneref->zone = zone;
4806 zoneref->zone_idx = zone_idx(zone);
4807 }
4808
4809 /*
4810 * Builds allocation fallback zone lists.
4811 *
4812 * Add all populated zones of a node to the zonelist.
4813 */
static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
4815 {
4816 struct zone *zone;
4817 enum zone_type zone_type = MAX_NR_ZONES;
4818 int nr_zones = 0;
4819
4820 do {
4821 zone_type--;
4822 zone = pgdat->node_zones + zone_type;
4823 if (populated_zone(zone)) {
4824 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
4825 check_highest_zone(zone_type);
4826 }
4827 } while (zone_type);
4828
4829 return nr_zones;
4830 }
4831
4832 #ifdef CONFIG_NUMA
4833
static int __parse_numa_zonelist_order(char *s)
4835 {
4836 /*
* We used to support different zonelist modes but they turned
* out not to be useful. Keep the warning in place in case somebody
* still uses the command line parameter, so that we do not fail it
* silently.
4841 */
4842 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
4843 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
4844 return -EINVAL;
4845 }
4846 return 0;
4847 }
4848
4849 static char numa_zonelist_order[] = "Node";
4850 #define NUMA_ZONELIST_ORDER_LEN 16
4851 /*
4852 * sysctl handler for numa_zonelist_order
4853 */
static int numa_zonelist_order_handler(struct ctl_table *table, int write,
4855 void *buffer, size_t *length, loff_t *ppos)
4856 {
4857 if (write)
4858 return __parse_numa_zonelist_order(buffer);
4859 return proc_dostring(table, write, buffer, length, ppos);
4860 }
4861
4862 static int node_load[MAX_NUMNODES];
4863
4864 /**
4865 * find_next_best_node - find the next node that should appear in a given node's fallback list
4866 * @node: node whose fallback list we're appending
4867 * @used_node_mask: nodemask_t of already used nodes
4868 *
4869 * We use a number of factors to determine which is the next node that should
4870 * appear on a given node's fallback list. The node should not have appeared
4871 * already in @node's fallback list, and it should be the next closest node
4872 * according to the distance array (which contains arbitrary distance values
4873 * from each node to each node in the system), and should also prefer nodes
4874 * with no CPUs, since presumably they'll have very little allocation pressure
4875 * on them otherwise.
4876 *
4877 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
4878 */
*/
int find_next_best_node(int node, nodemask_t *used_node_mask)
4880 {
4881 int n, val;
4882 int min_val = INT_MAX;
4883 int best_node = NUMA_NO_NODE;
4884
4885 /* Use the local node if we haven't already */
4886 if (!node_isset(node, *used_node_mask)) {
4887 node_set(node, *used_node_mask);
4888 return node;
4889 }
4890
4891 for_each_node_state(n, N_MEMORY) {
4892
4893 /* Don't want a node to appear more than once */
4894 if (node_isset(n, *used_node_mask))
4895 continue;
4896
4897 /* Use the distance array to find the distance */
4898 val = node_distance(node, n);
4899
4900 /* Penalize nodes under us ("prefer the next node") */
4901 val += (n < node);
4902
4903 /* Give preference to headless and unused nodes */
4904 if (!cpumask_empty(cpumask_of_node(n)))
4905 val += PENALTY_FOR_NODE_WITH_CPUS;
4906
4907 /* Slight preference for less loaded node */
4908 val *= MAX_NUMNODES;
4909 val += node_load[n];
4910
4911 if (val < min_val) {
4912 min_val = val;
4913 best_node = n;
4914 }
4915 }
4916
4917 if (best_node >= 0)
4918 node_set(best_node, *used_node_mask);
4919
4920 return best_node;
4921 }
4922
4923
4924 /*
4925 * Build zonelists ordered by node and zones within node.
4926 * This results in maximum locality--normal zone overflows into local
4927 * DMA zone, if any--but risks exhausting DMA zone.
4928 */
static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
4930 unsigned nr_nodes)
4931 {
4932 struct zoneref *zonerefs;
4933 int i;
4934
4935 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
4936
4937 for (i = 0; i < nr_nodes; i++) {
4938 int nr_zones;
4939
4940 pg_data_t *node = NODE_DATA(node_order[i]);
4941
4942 nr_zones = build_zonerefs_node(node, zonerefs);
4943 zonerefs += nr_zones;
4944 }
4945 zonerefs->zone = NULL;
4946 zonerefs->zone_idx = 0;
4947 }
4948
4949 /*
4950 * Build gfp_thisnode zonelists
4951 */
static void build_thisnode_zonelists(pg_data_t *pgdat)
4953 {
4954 struct zoneref *zonerefs;
4955 int nr_zones;
4956
4957 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
4958 nr_zones = build_zonerefs_node(pgdat, zonerefs);
4959 zonerefs += nr_zones;
4960 zonerefs->zone = NULL;
4961 zonerefs->zone_idx = 0;
4962 }
4963
4964 /*
4965 * Build zonelists ordered by zone and nodes within zones.
4966 * This results in conserving DMA zone[s] until all Normal memory is
4967 * exhausted, but results in overflowing to remote node while memory
4968 * may still exist in local DMA zone.
4969 */
4970
static void build_zonelists(pg_data_t *pgdat)
4972 {
4973 static int node_order[MAX_NUMNODES];
4974 int node, nr_nodes = 0;
4975 nodemask_t used_mask = NODE_MASK_NONE;
4976 int local_node, prev_node;
4977
4978 /* NUMA-aware ordering of nodes */
4979 local_node = pgdat->node_id;
4980 prev_node = local_node;
4981
4982 memset(node_order, 0, sizeof(node_order));
4983 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4984 /*
4985 * We don't want to pressure a particular node.
4986 * So adding penalty to the first node in same
4987 * distance group to make it round-robin.
4988 */
4989 if (node_distance(local_node, node) !=
4990 node_distance(local_node, prev_node))
4991 node_load[node] += 1;
4992
4993 node_order[nr_nodes++] = node;
4994 prev_node = node;
4995 }
4996
4997 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
4998 build_thisnode_zonelists(pgdat);
4999 pr_info("Fallback order for Node %d: ", local_node);
5000 for (node = 0; node < nr_nodes; node++)
5001 pr_cont("%d ", node_order[node]);
5002 pr_cont("\n");
5003 }
5004
5005 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5006 /*
5007 * Return node id of node used for "local" allocations.
5008 * I.e., first node id of first zone in arg node's generic zonelist.
5009 * Used for initializing percpu 'numa_mem', which is used primarily
5010 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5011 */
5012 int local_memory_node(int node)
5013 {
5014 struct zoneref *z;
5015
5016 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5017 gfp_zone(GFP_KERNEL),
5018 NULL);
5019 return zone_to_nid(z->zone);
5020 }
5021 #endif
5022
5023 static void setup_min_unmapped_ratio(void);
5024 static void setup_min_slab_ratio(void);
5025 #else /* CONFIG_NUMA */
5026
5027 static void build_zonelists(pg_data_t *pgdat)
5028 {
5029 int node, local_node;
5030 struct zoneref *zonerefs;
5031 int nr_zones;
5032
5033 local_node = pgdat->node_id;
5034
5035 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5036 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5037 zonerefs += nr_zones;
5038
5039 /*
5040 * Now we build the zonelist so that it contains the zones
5041 * of all the other nodes.
5042 * We don't want to pressure a particular node, so when
5043 * building the zones for node N, we make sure that the
5044 * zones coming right after the local ones are those from
5045 * node N+1 (wrapping around to node 0 after the last node)
5046 */
5047 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5048 if (!node_online(node))
5049 continue;
5050 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5051 zonerefs += nr_zones;
5052 }
5053 for (node = 0; node < local_node; node++) {
5054 if (!node_online(node))
5055 continue;
5056 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5057 zonerefs += nr_zones;
5058 }
5059
5060 zonerefs->zone = NULL;
5061 zonerefs->zone_idx = 0;
5062 }
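/*
 * Illustrative result of the two loops above (hypothetical four-node
 * layout, local_node == 2, all nodes online): the fallback zonelist
 * visits the nodes in the order 2, 3, 0, 1, so overflow pressure is
 * spread around rather than always landing on node 0 first.
 */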
5063
5064 #endif /* CONFIG_NUMA */
5065
5066 /*
5067 * Boot pageset table. One per cpu which is going to be used for all
5068 * zones and all nodes. The parameters will be set in such a way
5069 * that an item put on a list will immediately be handed over to
5070 * the buddy list. This is safe since pageset manipulation is done
5071 * with interrupts disabled.
5072 *
5073 * The boot_pagesets must be kept even after bootup is complete for
5074 * unused processors and/or zones. They do play a role for bootstrapping
5075 * hotplugged processors.
5076 *
5077 * zoneinfo_show() and maybe other functions do
5078 * not check if the processor is online before following the pageset pointer.
5079 * Other parts of the kernel may not check if the zone is available.
5080 */
5081 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
5082 /* These effectively disable the pcplists in the boot pageset completely */
5083 #define BOOT_PAGESET_HIGH 0
5084 #define BOOT_PAGESET_BATCH 1
5085 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
5086 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
5087
5088 static void __build_all_zonelists(void *data)
5089 {
5090 int nid;
5091 int __maybe_unused cpu;
5092 pg_data_t *self = data;
5093 unsigned long flags;
5094
5095 /*
5096 * The zonelist_update_seq must be acquired with irqsave because the
5097 * reader can be invoked from IRQ with GFP_ATOMIC.
5098 */
5099 write_seqlock_irqsave(&zonelist_update_seq, flags);
5100 /*
5101 * Also disable synchronous printk() to prevent any printk() from
5102 * trying to hold port->lock, for
5103 * tty_insert_flip_string_and_push_buffer() on other CPU might be
5104 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
5105 */
5106 printk_deferred_enter();
5107
5108 #ifdef CONFIG_NUMA
5109 memset(node_load, 0, sizeof(node_load));
5110 #endif
5111
5112 /*
5113 * This node is hotadded and no memory is yet present. So just
5114 * building zonelists is fine - no need to touch other nodes.
5115 */
5116 if (self && !node_online(self->node_id)) {
5117 build_zonelists(self);
5118 } else {
5119 /*
5120 * All possible nodes have pgdat preallocated
5121 * in free_area_init
5122 */
5123 for_each_node(nid) {
5124 pg_data_t *pgdat = NODE_DATA(nid);
5125
5126 build_zonelists(pgdat);
5127 }
5128
5129 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5130 /*
5131 * We now know the "local memory node" for each node--
5132 * i.e., the node of the first zone in the generic zonelist.
5133 * Set up numa_mem percpu variable for on-line cpus. During
5134 * boot, only the boot cpu should be on-line; we'll init the
5135 * secondary cpus' numa_mem as they come on-line. During
5136 * node/memory hotplug, we'll fixup all on-line cpus.
5137 */
5138 for_each_online_cpu(cpu)
5139 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5140 #endif
5141 }
5142
5143 printk_deferred_exit();
5144 write_sequnlock_irqrestore(&zonelist_update_seq, flags);
5145 }
5146
5147 static noinline void __init
5148 build_all_zonelists_init(void)
5149 {
5150 int cpu;
5151
5152 __build_all_zonelists(NULL);
5153
5154 /*
5155 * Initialize the boot_pagesets that are going to be used
5156 * for bootstrapping processors. The real pagesets for
5157 * each zone will be allocated later when the per cpu
5158 * allocator is available.
5159 *
5160 * boot_pagesets are used also for bootstrapping offline
5161 * cpus if the system is already booted because the pagesets
5162 * are needed to initialize allocators on a specific cpu too.
5163 * F.e. the percpu allocator needs the page allocator which
5164 * needs the percpu allocator in order to allocate its pagesets
5165 * (a chicken-egg dilemma).
5166 */
5167 for_each_possible_cpu(cpu)
5168 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
5169
5170 mminit_verify_zonelist();
5171 cpuset_init_current_mems_allowed();
5172 }
5173
5174 /*
5175 * unless system_state == SYSTEM_BOOTING.
5176 *
5177 * __ref due to call of __init annotated helper build_all_zonelists_init
5178 * [protected by SYSTEM_BOOTING].
5179 */
5180 void __ref build_all_zonelists(pg_data_t *pgdat)
5181 {
5182 unsigned long vm_total_pages;
5183
5184 if (system_state == SYSTEM_BOOTING) {
5185 build_all_zonelists_init();
5186 } else {
5187 __build_all_zonelists(pgdat);
5188 /* cpuset refresh routine should be here */
5189 }
5190 /* Get the number of free pages beyond high watermark in all zones. */
5191 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
5192 /*
5193 * Disable grouping by mobility if the number of pages in the
5194 * system is too low to allow the mechanism to work. It would be
5195 * more accurate, but expensive to check per-zone. This check is
5196 * made on memory-hotadd so a system can start with mobility
5197 * disabled and enable it later
5198 */
5199 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5200 page_group_by_mobility_disabled = 1;
5201 else
5202 page_group_by_mobility_disabled = 0;
5203
5204 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
5205 nr_online_nodes,
5206 page_group_by_mobility_disabled ? "off" : "on",
5207 vm_total_pages);
5208 #ifdef CONFIG_NUMA
5209 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5210 #endif
5211 }
5212
5213 static int zone_batchsize(struct zone *zone)
5214 {
5215 #ifdef CONFIG_MMU
5216 int batch;
5217
5218 /*
5219 * The number of pages to batch allocate is either ~0.1%
5220 * of the zone or 1MB, whichever is smaller. The batch
5221 * size is striking a balance between allocation latency
5222 * and zone lock contention.
5223 */
5224 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
5225 batch /= 4; /* We effectively *= 4 below */
5226 if (batch < 1)
5227 batch = 1;
5228
5229 /*
5230 * Clamp the batch to a 2^n - 1 value. Having a power
5231 * of 2 value was found to be more likely to have
5232 * suboptimal cache aliasing properties in some cases.
5233 *
5234 * For example if 2 tasks are alternately allocating
5235 * batches of pages, one task can end up with a lot
5236 * of pages of one half of the possible page colors
5237 * and the other with pages of the other colors.
5238 */
5239 batch = rounddown_pow_of_two(batch + batch/2) - 1;
5240
5241 return batch;
5242
5243 #else
5244 /* The deferral and batching of frees should be suppressed under NOMMU
5245 * conditions.
5246 *
5247 * The problem is that NOMMU needs to be able to allocate large chunks
5248 * of contiguous memory as there's no hardware page translation to
5249 * assemble apparent contiguous memory from discontiguous pages.
5250 *
5251 * Queueing large contiguous runs of pages for batching, however,
5252 * causes the pages to actually be freed in smaller chunks. As there
5253 * can be a significant delay between the individual batches being
5254 * recycled, this leads to the once large chunks of space being
5255 * fragmented and becoming unavailable for high-order allocations.
5256 */
5257 return 0;
5258 #endif
5259 }
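/*
 * Worked example of zone_batchsize() (illustrative, assumes 4 KiB pages
 * and a zone with 4 GiB == 1048576 managed pages):
 *
 *   batch = min(1048576 >> 10, SZ_1M / PAGE_SIZE) = min(1024, 256) = 256
 *   batch /= 4                                     -> 64
 *   rounddown_pow_of_two(64 + 32) - 1              -> 63
 *
 * so each pcplist refill or bulk free moves up to 63 pages at a time.
 */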
5260
5261 static int percpu_pagelist_high_fraction;
5262 static int zone_highsize(struct zone *zone, int batch, int cpu_online)
5263 {
5264 #ifdef CONFIG_MMU
5265 int high;
5266 int nr_split_cpus;
5267 unsigned long total_pages;
5268
5269 if (!percpu_pagelist_high_fraction) {
5270 /*
5271 * By default, the high value of the pcp is based on the zone
5272 * low watermark so that if they are full then background
5273 * reclaim will not be started prematurely.
5274 */
5275 total_pages = low_wmark_pages(zone);
5276 } else {
5277 /*
5278 * If percpu_pagelist_high_fraction is configured, the high
5279 * value is based on a fraction of the managed pages in the
5280 * zone.
5281 */
5282 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
5283 }
5284
5285 /*
5286 * Split the high value across all online CPUs local to the zone. Note
5287 * that early in boot that CPUs may not be online yet and that during
5288 * CPU hotplug that the cpumask is not yet updated when a CPU is being
5289 * onlined. For memory nodes that have no CPUs, split pcp->high across
5290 * all online CPUs to mitigate the risk that reclaim is triggered
5291 * prematurely due to pages stored on pcp lists.
5292 */
5293 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
5294 if (!nr_split_cpus)
5295 nr_split_cpus = num_online_cpus();
5296 high = total_pages / nr_split_cpus;
5297
5298 /*
5299 * Ensure high is at least batch*4. The multiple is based on the
5300 * historical relationship between high and batch.
5301 */
5302 high = max(high, batch << 2);
5303
5304 return high;
5305 #else
5306 return 0;
5307 #endif
5308 }
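/*
 * Worked example of zone_highsize() (illustrative, assumed values):
 * percpu_pagelist_high_fraction == 0, low_wmark_pages(zone) == 16384,
 * 8 online CPUs local to the zone, cpu_online == 0 and batch == 63:
 *
 *   total_pages   = 16384
 *   nr_split_cpus = 8
 *   high          = max(16384 / 8, 63 << 2) = max(2048, 252) = 2048
 *
 * Each CPU may then cache up to ~2048 pages on its pcplist for this
 * zone before the list is drained back to the buddy freelists.
 */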
5309
5310 /*
5311 * pcp->high and pcp->batch values are related and generally batch is lower
5312 * than high. They are also related to pcp->count such that count is lower
5313 * than high, and as soon as it reaches high, the pcplist is flushed.
5314 *
5315 * However, guaranteeing these relations at all times would require e.g. write
5316 * barriers here but also careful usage of read barriers at the read side, and
5317 * thus be error prone and bad for performance. So the update only prevents
5318 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
5319 * can cope with those fields changing asynchronously, and fully trust only the
5320 * pcp->count field on the local CPU with interrupts disabled.
5321 *
5322 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5323 * outside of boot time (or some other assurance that no concurrent updaters
5324 * exist).
5325 */
5326 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5327 unsigned long batch)
5328 {
5329 WRITE_ONCE(pcp->batch, batch);
5330 WRITE_ONCE(pcp->high, high);
5331 }
5332
5333 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
5334 {
5335 int pindex;
5336
5337 memset(pcp, 0, sizeof(*pcp));
5338 memset(pzstats, 0, sizeof(*pzstats));
5339
5340 spin_lock_init(&pcp->lock);
5341 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
5342 INIT_LIST_HEAD(&pcp->lists[pindex]);
5343
5344 /*
5345 * Set batch and high values safe for a boot pageset. A true percpu
5346 * pageset's initialization will update them subsequently. Here we don't
5347 * need to be as careful as pageset_update() as nobody can access the
5348 * pageset yet.
5349 */
5350 pcp->high = BOOT_PAGESET_HIGH;
5351 pcp->batch = BOOT_PAGESET_BATCH;
5352 pcp->free_factor = 0;
5353 }
5354
5355 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
5356 unsigned long batch)
5357 {
5358 struct per_cpu_pages *pcp;
5359 int cpu;
5360
5361 for_each_possible_cpu(cpu) {
5362 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5363 pageset_update(pcp, high, batch);
5364 }
5365 }
5366
5367 /*
5368 * Calculate and set new high and batch values for all per-cpu pagesets of a
5369 * zone based on the zone's size.
5370 */
5371 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
5372 {
5373 int new_high, new_batch;
5374
5375 new_batch = max(1, zone_batchsize(zone));
5376 new_high = zone_highsize(zone, new_batch, cpu_online);
5377
5378 if (zone->pageset_high == new_high &&
5379 zone->pageset_batch == new_batch)
5380 return;
5381
5382 zone->pageset_high = new_high;
5383 zone->pageset_batch = new_batch;
5384
5385 __zone_set_pageset_high_and_batch(zone, new_high, new_batch);
5386 }
5387
5388 void __meminit setup_zone_pageset(struct zone *zone)
5389 {
5390 int cpu;
5391
5392 /* Size may be 0 on !SMP && !NUMA */
5393 if (sizeof(struct per_cpu_zonestat) > 0)
5394 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
5395
5396 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
5397 for_each_possible_cpu(cpu) {
5398 struct per_cpu_pages *pcp;
5399 struct per_cpu_zonestat *pzstats;
5400
5401 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5402 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
5403 per_cpu_pages_init(pcp, pzstats);
5404 }
5405
5406 zone_set_pageset_high_and_batch(zone, 0);
5407 }
5408
5409 /*
5410 * The zone indicated has a new number of managed_pages; batch sizes and percpu
5411 * page high values need to be recalculated.
5412 */
5413 static void zone_pcp_update(struct zone *zone, int cpu_online)
5414 {
5415 mutex_lock(&pcp_batch_high_lock);
5416 zone_set_pageset_high_and_batch(zone, cpu_online);
5417 mutex_unlock(&pcp_batch_high_lock);
5418 }
5419
5420 /*
5421 * Allocate per cpu pagesets and initialize them.
5422 * Before this call only boot pagesets were available.
5423 */
5424 void __init setup_per_cpu_pageset(void)
5425 {
5426 struct pglist_data *pgdat;
5427 struct zone *zone;
5428 int __maybe_unused cpu;
5429
5430 for_each_populated_zone(zone)
5431 setup_zone_pageset(zone);
5432
5433 #ifdef CONFIG_NUMA
5434 /*
5435 * Unpopulated zones continue using the boot pagesets.
5436 * The numa stats for these pagesets need to be reset.
5437 * Otherwise, they will end up skewing the stats of
5438 * the nodes these zones are associated with.
5439 */
5440 for_each_possible_cpu(cpu) {
5441 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
5442 memset(pzstats->vm_numa_event, 0,
5443 sizeof(pzstats->vm_numa_event));
5444 }
5445 #endif
5446
5447 for_each_online_pgdat(pgdat)
5448 pgdat->per_cpu_nodestats =
5449 alloc_percpu(struct per_cpu_nodestat);
5450 }
5451
5452 __meminit void zone_pcp_init(struct zone *zone)
5453 {
5454 /*
5455 * per cpu subsystem is not up at this point. The following code
5456 * relies on the ability of the linker to provide the
5457 * offset of a (static) per cpu variable into the per cpu area.
5458 */
5459 zone->per_cpu_pageset = &boot_pageset;
5460 zone->per_cpu_zonestats = &boot_zonestats;
5461 zone->pageset_high = BOOT_PAGESET_HIGH;
5462 zone->pageset_batch = BOOT_PAGESET_BATCH;
5463
5464 if (populated_zone(zone))
5465 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
5466 zone->present_pages, zone_batchsize(zone));
5467 }
5468
5469 void adjust_managed_page_count(struct page *page, long count)
5470 {
5471 atomic_long_add(count, &page_zone(page)->managed_pages);
5472 totalram_pages_add(count);
5473 #ifdef CONFIG_HIGHMEM
5474 if (PageHighMem(page))
5475 totalhigh_pages_add(count);
5476 #endif
5477 }
5478 EXPORT_SYMBOL(adjust_managed_page_count);
5479
5480 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
5481 {
5482 void *pos;
5483 unsigned long pages = 0;
5484
5485 start = (void *)PAGE_ALIGN((unsigned long)start);
5486 end = (void *)((unsigned long)end & PAGE_MASK);
5487 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
5488 struct page *page = virt_to_page(pos);
5489 void *direct_map_addr;
5490
5491 /*
5492 * 'direct_map_addr' might be different from 'pos'
5493 * because some architectures' virt_to_page()
5494 * work with aliases. Getting the direct map
5495 * address ensures that we get a _writeable_
5496 * alias for the memset().
5497 */
5498 direct_map_addr = page_address(page);
5499 /*
5500 * Perform a kasan-unchecked memset() since this memory
5501 * has not been initialized.
5502 */
5503 direct_map_addr = kasan_reset_tag(direct_map_addr);
5504 if ((unsigned int)poison <= 0xFF)
5505 memset(direct_map_addr, poison, PAGE_SIZE);
5506
5507 free_reserved_page(page);
5508 }
5509
5510 if (pages && s)
5511 pr_info("Freeing %s memory: %ldK\n", s, K(pages));
5512
5513 return pages;
5514 }
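/*
 * Typical use of free_reserved_area() is handing the kernel's __init
 * sections back to the page allocator once boot is complete; a minimal
 * sketch (the section symbols come from the generic linker script):
 *
 *   extern char __init_begin[], __init_end[];
 *
 *   free_reserved_area(&__init_begin, &__init_end,
 *                      POISON_FREE_INITMEM, "unused kernel image");
 */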
5515
5516 static int page_alloc_cpu_dead(unsigned int cpu)
5517 {
5518 struct zone *zone;
5519
5520 lru_add_drain_cpu(cpu);
5521 mlock_drain_remote(cpu);
5522 drain_pages(cpu);
5523
5524 /*
5525 * Spill the event counters of the dead processor
5526 * into the current processors event counters.
5527 * This artificially elevates the count of the current
5528 * processor.
5529 */
5530 vm_events_fold_cpu(cpu);
5531
5532 /*
5533 * Zero the differential counters of the dead processor
5534 * so that the vm statistics are consistent.
5535 *
5536 * This is only okay since the processor is dead and cannot
5537 * race with what we are doing.
5538 */
5539 cpu_vm_stats_fold(cpu);
5540
5541 for_each_populated_zone(zone)
5542 zone_pcp_update(zone, 0);
5543
5544 return 0;
5545 }
5546
5547 static int page_alloc_cpu_online(unsigned int cpu)
5548 {
5549 struct zone *zone;
5550
5551 for_each_populated_zone(zone)
5552 zone_pcp_update(zone, 1);
5553 return 0;
5554 }
5555
5556 void __init page_alloc_init_cpuhp(void)
5557 {
5558 int ret;
5559
5560 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
5561 "mm/page_alloc:pcp",
5562 page_alloc_cpu_online,
5563 page_alloc_cpu_dead);
5564 WARN_ON(ret < 0);
5565 }
5566
5567 /*
5568 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
5569 * or min_free_kbytes changes.
5570 */
5571 static void calculate_totalreserve_pages(void)
5572 {
5573 struct pglist_data *pgdat;
5574 unsigned long reserve_pages = 0;
5575 enum zone_type i, j;
5576
5577 for_each_online_pgdat(pgdat) {
5578
5579 pgdat->totalreserve_pages = 0;
5580
5581 for (i = 0; i < MAX_NR_ZONES; i++) {
5582 struct zone *zone = pgdat->node_zones + i;
5583 long max = 0;
5584 unsigned long managed_pages = zone_managed_pages(zone);
5585
5586 /* Find valid and maximum lowmem_reserve in the zone */
5587 for (j = i; j < MAX_NR_ZONES; j++) {
5588 if (zone->lowmem_reserve[j] > max)
5589 max = zone->lowmem_reserve[j];
5590 }
5591
5592 /* we treat the high watermark as reserved pages. */
5593 max += high_wmark_pages(zone);
5594
5595 if (max > managed_pages)
5596 max = managed_pages;
5597
5598 pgdat->totalreserve_pages += max;
5599
5600 reserve_pages += max;
5601 }
5602 }
5603 totalreserve_pages = reserve_pages;
5604 }
5605
5606 /*
5607 * setup_per_zone_lowmem_reserve - called whenever
5608 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
5609 * has a correct pages reserved value, so an adequate number of
5610 * pages are left in the zone after a successful __alloc_pages().
5611 */
5612 static void setup_per_zone_lowmem_reserve(void)
5613 {
5614 struct pglist_data *pgdat;
5615 enum zone_type i, j;
5616
5617 for_each_online_pgdat(pgdat) {
5618 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
5619 struct zone *zone = &pgdat->node_zones[i];
5620 int ratio = sysctl_lowmem_reserve_ratio[i];
5621 bool clear = !ratio || !zone_managed_pages(zone);
5622 unsigned long managed_pages = 0;
5623
5624 for (j = i + 1; j < MAX_NR_ZONES; j++) {
5625 struct zone *upper_zone = &pgdat->node_zones[j];
5626
5627 managed_pages += zone_managed_pages(upper_zone);
5628
5629 if (clear)
5630 zone->lowmem_reserve[j] = 0;
5631 else
5632 zone->lowmem_reserve[j] = managed_pages / ratio;
5633 }
5634 }
5635 }
5636
5637 /* update totalreserve_pages */
5638 calculate_totalreserve_pages();
5639 }
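/*
 * Worked example of the reserve calculation above (illustrative): with
 * the default sysctl_lowmem_reserve_ratio of 256 for ZONE_DMA32 and a
 * ZONE_NORMAL of 4,000,000 managed pages sitting above it,
 *
 *   DMA32->lowmem_reserve[ZONE_NORMAL] = 4000000 / 256 ~= 15625 pages
 *
 * i.e. an allocation that could have been satisfied from ZONE_NORMAL
 * must leave roughly 15625 extra free pages in ZONE_DMA32 before it is
 * allowed to fall back into that zone.
 */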
5640
5641 static void __setup_per_zone_wmarks(void)
5642 {
5643 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5644 unsigned long lowmem_pages = 0;
5645 struct zone *zone;
5646 unsigned long flags;
5647
5648 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
5649 for_each_zone(zone) {
5650 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
5651 lowmem_pages += zone_managed_pages(zone);
5652 }
5653
5654 for_each_zone(zone) {
5655 u64 tmp;
5656
5657 spin_lock_irqsave(&zone->lock, flags);
5658 tmp = (u64)pages_min * zone_managed_pages(zone);
5659 do_div(tmp, lowmem_pages);
5660 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
5661 /*
5662 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5663 * need highmem and movable zones pages, so cap pages_min
5664 * to a small value here.
5665 *
5666 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
5667 * deltas control async page reclaim, and so should
5668 * not be capped for highmem and movable zones.
5669 */
5670 unsigned long min_pages;
5671
5672 min_pages = zone_managed_pages(zone) / 1024;
5673 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
5674 zone->_watermark[WMARK_MIN] = min_pages;
5675 } else {
5676 /*
5677 * If it's a lowmem zone, reserve a number of pages
5678 * proportionate to the zone's size.
5679 */
5680 zone->_watermark[WMARK_MIN] = tmp;
5681 }
5682
5683 /*
5684 * Set the kswapd watermarks distance according to the
5685 * scale factor in proportion to available memory, but
5686 * ensure a minimum size on small systems.
5687 */
5688 tmp = max_t(u64, tmp >> 2,
5689 mult_frac(zone_managed_pages(zone),
5690 watermark_scale_factor, 10000));
5691
5692 zone->watermark_boost = 0;
5693 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
5694 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
5695 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
5696
5697 spin_unlock_irqrestore(&zone->lock, flags);
5698 }
5699
5700 /* update totalreserve_pages */
5701 calculate_totalreserve_pages();
5702 }
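/*
 * Worked example of the watermark maths above (illustrative, assumed
 * values): min_free_kbytes == 4096, 4 KiB pages, a single lowmem zone
 * holding all 1,000,000 lowmem pages, default watermark_scale_factor
 * of 10:
 *
 *   pages_min = 4096 >> (PAGE_SHIFT - 10) = 1024
 *   tmp       = max(1024 >> 2, 1000000 * 10 / 10000) = 1000
 *
 *   WMARK_MIN  = 1024
 *   WMARK_LOW  = 1024 + 1000 = 2024
 *   WMARK_HIGH = 2024 + 1000 = 3024
 */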
5703
5704 /**
5705 * setup_per_zone_wmarks - called when min_free_kbytes changes
5706 * or when memory is hot-{added|removed}
5707 *
5708 * Ensures that the watermark[min,low,high] values for each zone are set
5709 * correctly with respect to min_free_kbytes.
5710 */
5711 void setup_per_zone_wmarks(void)
5712 {
5713 struct zone *zone;
5714 static DEFINE_SPINLOCK(lock);
5715
5716 spin_lock(&lock);
5717 __setup_per_zone_wmarks();
5718 spin_unlock(&lock);
5719
5720 /*
5721 * The watermark values have changed, so update the pcpu batch
5722 * and high limits or the limits may be inappropriate.
5723 */
5724 for_each_zone(zone)
5725 zone_pcp_update(zone, 0);
5726 }
5727
5728 /*
5729 * Initialise min_free_kbytes.
5730 *
5731 * For small machines we want it small (128k min). For large machines
5732 * we want it large (256MB max). But it is not linear, because network
5733 * bandwidth does not increase linearly with machine size. We use
5734 *
5735 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5736 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
5737 *
5738 * which yields
5739 *
5740 * 16MB: 512k
5741 * 32MB: 724k
5742 * 64MB: 1024k
5743 * 128MB: 1448k
5744 * 256MB: 2048k
5745 * 512MB: 2896k
5746 * 1024MB: 4096k
5747 * 2048MB: 5792k
5748 * 4096MB: 8192k
5749 * 8192MB: 11584k
5750 * 16384MB: 16384k
5751 */
5752 void calculate_min_free_kbytes(void)
5753 {
5754 unsigned long lowmem_kbytes;
5755 int new_min_free_kbytes;
5756
5757 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5758 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5759
5760 if (new_min_free_kbytes > user_min_free_kbytes)
5761 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
5762 else
5763 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
5764 new_min_free_kbytes, user_min_free_kbytes);
5765
5766 }
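/*
 * Example of the sqrt heuristic above (illustrative): with roughly 4 GiB
 * of free lowmem, lowmem_kbytes ~= 4194304, so
 *
 *   min_free_kbytes = int_sqrt(4194304 * 16) = int_sqrt(67108864) = 8192
 *
 * matching the "4096MB: 8192k" row in the table above.
 */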
5767
5768 int __meminit init_per_zone_wmark_min(void)
5769 {
5770 calculate_min_free_kbytes();
5771 setup_per_zone_wmarks();
5772 refresh_zone_stat_thresholds();
5773 setup_per_zone_lowmem_reserve();
5774
5775 #ifdef CONFIG_NUMA
5776 setup_min_unmapped_ratio();
5777 setup_min_slab_ratio();
5778 #endif
5779
5780 khugepaged_min_free_kbytes_update();
5781
5782 return 0;
5783 }
5784 postcore_initcall(init_per_zone_wmark_min)
5785
5786 /*
5787 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5788 * that we can call two helper functions whenever min_free_kbytes
5789 * changes.
5790 */
5791 static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
5792 void *buffer, size_t *length, loff_t *ppos)
5793 {
5794 int rc;
5795
5796 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5797 if (rc)
5798 return rc;
5799
5800 if (write) {
5801 user_min_free_kbytes = min_free_kbytes;
5802 setup_per_zone_wmarks();
5803 }
5804 return 0;
5805 }
5806
5807 static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
5808 void *buffer, size_t *length, loff_t *ppos)
5809 {
5810 int rc;
5811
5812 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5813 if (rc)
5814 return rc;
5815
5816 if (write)
5817 setup_per_zone_wmarks();
5818
5819 return 0;
5820 }
5821
5822 #ifdef CONFIG_NUMA
5823 static void setup_min_unmapped_ratio(void)
5824 {
5825 pg_data_t *pgdat;
5826 struct zone *zone;
5827
5828 for_each_online_pgdat(pgdat)
5829 pgdat->min_unmapped_pages = 0;
5830
5831 for_each_zone(zone)
5832 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
5833 sysctl_min_unmapped_ratio) / 100;
5834 }
5835
5836
5837 static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
5838 void *buffer, size_t *length, loff_t *ppos)
5839 {
5840 int rc;
5841
5842 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5843 if (rc)
5844 return rc;
5845
5846 setup_min_unmapped_ratio();
5847
5848 return 0;
5849 }
5850
5851 static void setup_min_slab_ratio(void)
5852 {
5853 pg_data_t *pgdat;
5854 struct zone *zone;
5855
5856 for_each_online_pgdat(pgdat)
5857 pgdat->min_slab_pages = 0;
5858
5859 for_each_zone(zone)
5860 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
5861 sysctl_min_slab_ratio) / 100;
5862 }
5863
5864 static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
5865 void *buffer, size_t *length, loff_t *ppos)
5866 {
5867 int rc;
5868
5869 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5870 if (rc)
5871 return rc;
5872
5873 setup_min_slab_ratio();
5874
5875 return 0;
5876 }
5877 #endif
5878
5879 /*
5880 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5881 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5882 * whenever sysctl_lowmem_reserve_ratio changes.
5883 *
5884 * The reserve ratio obviously has absolutely no relation with the
5885 * minimum watermarks. The lowmem reserve ratio only makes sense
5886 * in relation to the boot time zone sizes.
5887 */
5888 static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table,
5889 int write, void *buffer, size_t *length, loff_t *ppos)
5890 {
5891 int i;
5892
5893 proc_dointvec_minmax(table, write, buffer, length, ppos);
5894
5895 for (i = 0; i < MAX_NR_ZONES; i++) {
5896 if (sysctl_lowmem_reserve_ratio[i] < 1)
5897 sysctl_lowmem_reserve_ratio[i] = 0;
5898 }
5899
5900 setup_per_zone_lowmem_reserve();
5901 return 0;
5902 }
5903
5904 /*
5905 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
5906 * cpu. It is the fraction of total pages in each zone that a hot per cpu
5907 * pagelist can hold before it gets flushed back to the buddy allocator.
5908 */
5909 static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
5910 int write, void *buffer, size_t *length, loff_t *ppos)
5911 {
5912 struct zone *zone;
5913 int old_percpu_pagelist_high_fraction;
5914 int ret;
5915
5916 mutex_lock(&pcp_batch_high_lock);
5917 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
5918
5919 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5920 if (!write || ret < 0)
5921 goto out;
5922
5923 /* Sanity checking to avoid pcp imbalance */
5924 if (percpu_pagelist_high_fraction &&
5925 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
5926 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
5927 ret = -EINVAL;
5928 goto out;
5929 }
5930
5931 /* No change? */
5932 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
5933 goto out;
5934
5935 for_each_populated_zone(zone)
5936 zone_set_pageset_high_and_batch(zone, 0);
5937 out:
5938 mutex_unlock(&pcp_batch_high_lock);
5939 return ret;
5940 }
5941
5942 static struct ctl_table page_alloc_sysctl_table[] = {
5943 {
5944 .procname = "min_free_kbytes",
5945 .data = &min_free_kbytes,
5946 .maxlen = sizeof(min_free_kbytes),
5947 .mode = 0644,
5948 .proc_handler = min_free_kbytes_sysctl_handler,
5949 .extra1 = SYSCTL_ZERO,
5950 },
5951 {
5952 .procname = "watermark_boost_factor",
5953 .data = &watermark_boost_factor,
5954 .maxlen = sizeof(watermark_boost_factor),
5955 .mode = 0644,
5956 .proc_handler = proc_dointvec_minmax,
5957 .extra1 = SYSCTL_ZERO,
5958 },
5959 {
5960 .procname = "watermark_scale_factor",
5961 .data = &watermark_scale_factor,
5962 .maxlen = sizeof(watermark_scale_factor),
5963 .mode = 0644,
5964 .proc_handler = watermark_scale_factor_sysctl_handler,
5965 .extra1 = SYSCTL_ONE,
5966 .extra2 = SYSCTL_THREE_THOUSAND,
5967 },
5968 {
5969 .procname = "percpu_pagelist_high_fraction",
5970 .data = &percpu_pagelist_high_fraction,
5971 .maxlen = sizeof(percpu_pagelist_high_fraction),
5972 .mode = 0644,
5973 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler,
5974 .extra1 = SYSCTL_ZERO,
5975 },
5976 {
5977 .procname = "lowmem_reserve_ratio",
5978 .data = &sysctl_lowmem_reserve_ratio,
5979 .maxlen = sizeof(sysctl_lowmem_reserve_ratio),
5980 .mode = 0644,
5981 .proc_handler = lowmem_reserve_ratio_sysctl_handler,
5982 },
5983 #ifdef CONFIG_NUMA
5984 {
5985 .procname = "numa_zonelist_order",
5986 .data = &numa_zonelist_order,
5987 .maxlen = NUMA_ZONELIST_ORDER_LEN,
5988 .mode = 0644,
5989 .proc_handler = numa_zonelist_order_handler,
5990 },
5991 {
5992 .procname = "min_unmapped_ratio",
5993 .data = &sysctl_min_unmapped_ratio,
5994 .maxlen = sizeof(sysctl_min_unmapped_ratio),
5995 .mode = 0644,
5996 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler,
5997 .extra1 = SYSCTL_ZERO,
5998 .extra2 = SYSCTL_ONE_HUNDRED,
5999 },
6000 {
6001 .procname = "min_slab_ratio",
6002 .data = &sysctl_min_slab_ratio,
6003 .maxlen = sizeof(sysctl_min_slab_ratio),
6004 .mode = 0644,
6005 .proc_handler = sysctl_min_slab_ratio_sysctl_handler,
6006 .extra1 = SYSCTL_ZERO,
6007 .extra2 = SYSCTL_ONE_HUNDRED,
6008 },
6009 #endif
6010 {}
6011 };
6012
6013 void __init page_alloc_sysctl_init(void)
6014 {
6015 register_sysctl_init("vm", page_alloc_sysctl_table);
6016 }
6017
6018 #ifdef CONFIG_CONTIG_ALLOC
6019 /* Usage: See admin-guide/dynamic-debug-howto.rst */
6020 static void alloc_contig_dump_pages(struct list_head *page_list)
6021 {
6022 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
6023
6024 if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
6025 struct page *page;
6026
6027 dump_stack();
6028 list_for_each_entry(page, page_list, lru)
6029 dump_page(page, "migration failure");
6030 }
6031 }
6032
6033 /* [start, end) must belong to a single zone. */
6034 int __alloc_contig_migrate_range(struct compact_control *cc,
6035 unsigned long start, unsigned long end)
6036 {
6037 /* This function is based on compact_zone() from compaction.c. */
6038 unsigned int nr_reclaimed;
6039 unsigned long pfn = start;
6040 unsigned int tries = 0;
6041 int ret = 0;
6042 struct migration_target_control mtc = {
6043 .nid = zone_to_nid(cc->zone),
6044 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
6045 };
6046
6047 lru_cache_disable();
6048
6049 while (pfn < end || !list_empty(&cc->migratepages)) {
6050 if (fatal_signal_pending(current)) {
6051 ret = -EINTR;
6052 break;
6053 }
6054
6055 if (list_empty(&cc->migratepages)) {
6056 cc->nr_migratepages = 0;
6057 ret = isolate_migratepages_range(cc, pfn, end);
6058 if (ret && ret != -EAGAIN)
6059 break;
6060 pfn = cc->migrate_pfn;
6061 tries = 0;
6062 } else if (++tries == 5) {
6063 ret = -EBUSY;
6064 break;
6065 }
6066
6067 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6068 &cc->migratepages);
6069 cc->nr_migratepages -= nr_reclaimed;
6070
6071 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
6072 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
6073
6074 /*
6075 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
6076 * to retry on this error, so do the same here.
6077 */
6078 if (ret == -ENOMEM)
6079 break;
6080 }
6081
6082 lru_cache_enable();
6083 if (ret < 0) {
6084 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
6085 alloc_contig_dump_pages(&cc->migratepages);
6086 putback_movable_pages(&cc->migratepages);
6087 return ret;
6088 }
6089 return 0;
6090 }
6091
6092 /**
6093 * alloc_contig_range() -- tries to allocate given range of pages
6094 * @start: start PFN to allocate
6095 * @end: one-past-the-last PFN to allocate
6096 * @migratetype: migratetype of the underlying pageblocks (either
6097 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
6098 * in range must have the same migratetype and it must
6099 * be either of the two.
6100 * @gfp_mask: GFP mask to use during compaction
6101 *
6102 * The PFN range does not have to be pageblock aligned. The PFN range must
6103 * belong to a single zone.
6104 *
6105 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
6106 * pageblocks in the range. Once isolated, the pageblocks should not
6107 * be modified by others.
6108 *
6109 * Return: zero on success or negative error code. On success all
6110 * pages which PFN is in [start, end) are allocated for the caller and
6111 * need to be freed with free_contig_range().
6112 */
6113 int alloc_contig_range(unsigned long start, unsigned long end,
6114 unsigned migratetype, gfp_t gfp_mask)
6115 {
6116 unsigned long outer_start, outer_end;
6117 int order;
6118 int ret = 0;
6119
6120 struct compact_control cc = {
6121 .nr_migratepages = 0,
6122 .order = -1,
6123 .zone = page_zone(pfn_to_page(start)),
6124 .mode = MIGRATE_SYNC,
6125 .ignore_skip_hint = true,
6126 .no_set_skip_hint = true,
6127 .gfp_mask = current_gfp_context(gfp_mask),
6128 .alloc_contig = true,
6129 };
6130 INIT_LIST_HEAD(&cc.migratepages);
6131
6132 /*
6133 * What we do here is we mark all pageblocks in range as
6134 * MIGRATE_ISOLATE. Because pageblock and max order pages may
6135 * have different sizes, and due to the way the page allocator
6136 * works, start_isolate_page_range() has special handling for this.
6137 *
6138 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6139 * migrate the pages from an unaligned range (ie. pages that
6140 * we are interested in). This will put all the pages in
6141 * range back to page allocator as MIGRATE_ISOLATE.
6142 *
6143 * When this is done, we take the pages in range from page
6144 * allocator removing them from the buddy system. This way
6145 * page allocator will never consider using them.
6146 *
6147 * This lets us mark the pageblocks back as
6148 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6149 * aligned range but not in the unaligned, original range are
6150 * put back to page allocator so that buddy can use them.
6151 */
6152
6153 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
6154 if (ret)
6155 goto done;
6156
6157 drain_all_pages(cc.zone);
6158
6159 /*
6160 * In case of -EBUSY, we'd like to know which page causes the problem.
6161 * So, just fall through. test_pages_isolated() has a tracepoint
6162 * which will report the busy page.
6163 *
6164 * It is possible that busy pages could become available before
6165 * the call to test_pages_isolated, and the range will actually be
6166 * allocated. So, if we fall through be sure to clear ret so that
6167 * -EBUSY is not accidentally used or returned to caller.
6168 */
6169 ret = __alloc_contig_migrate_range(&cc, start, end);
6170 if (ret && ret != -EBUSY)
6171 goto done;
6172 ret = 0;
6173
6174 /*
6175 * Pages from [start, end) are within a pageblock_nr_pages
6176 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
6177 * more, all pages in [start, end) are free in page allocator.
6178 * What we are going to do is to allocate all pages from
6179 * [start, end) (that is remove them from page allocator).
6180 *
6181 * The only problem is that pages at the beginning and at the
6182 * end of the range of interest may not be aligned with pages that
6183 * the page allocator holds, i.e. they can be part of higher order
6184 * pages. Because of this, we reserve the bigger range and
6185 * once this is done free the pages we are not interested in.
6186 *
6187 * We don't have to hold zone->lock here because the pages are
6188 * isolated thus they won't get removed from buddy.
6189 */
6190
6191 order = 0;
6192 outer_start = start;
6193 while (!PageBuddy(pfn_to_page(outer_start))) {
6194 if (++order > MAX_ORDER) {
6195 outer_start = start;
6196 break;
6197 }
6198 outer_start &= ~0UL << order;
6199 }
6200
6201 if (outer_start != start) {
6202 order = buddy_order(pfn_to_page(outer_start));
6203
6204 /*
6205 * outer_start page could be small order buddy page and
6206 * it doesn't include start page. Adjust outer_start
6207 * in this case to report failed page properly
6208 * on tracepoint in test_pages_isolated()
6209 */
6210 if (outer_start + (1UL << order) <= start)
6211 outer_start = start;
6212 }
6213
6214 /* Make sure the range is really isolated. */
6215 if (test_pages_isolated(outer_start, end, 0)) {
6216 ret = -EBUSY;
6217 goto done;
6218 }
6219
6220 /* Grab isolated pages from freelists. */
6221 outer_end = isolate_freepages_range(&cc, outer_start, end);
6222 if (!outer_end) {
6223 ret = -EBUSY;
6224 goto done;
6225 }
6226
6227 /* Free head and tail (if any) */
6228 if (start != outer_start)
6229 free_contig_range(outer_start, start - outer_start);
6230 if (end != outer_end)
6231 free_contig_range(end, outer_end - end);
6232
6233 done:
6234 undo_isolate_page_range(start, end, migratetype);
6235 return ret;
6236 }
6237 EXPORT_SYMBOL(alloc_contig_range);
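/*
 * Minimal usage sketch for alloc_contig_range()/free_contig_range()
 * (illustrative only; 'start_pfn' is a caller-chosen PFN inside a
 * single zone whose pageblocks are MIGRATE_MOVABLE):
 *
 *   unsigned long nr = SZ_4M / PAGE_SIZE;
 *   int ret = alloc_contig_range(start_pfn, start_pfn + nr,
 *                                MIGRATE_MOVABLE, GFP_KERNEL);
 *   if (!ret) {
 *           ... use pfn_to_page(start_pfn) for nr pages ...
 *           free_contig_range(start_pfn, nr);
 *   }
 */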
6238
6239 static int __alloc_contig_pages(unsigned long start_pfn,
6240 unsigned long nr_pages, gfp_t gfp_mask)
6241 {
6242 unsigned long end_pfn = start_pfn + nr_pages;
6243
6244 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
6245 gfp_mask);
6246 }
6247
6248 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
6249 unsigned long nr_pages)
6250 {
6251 unsigned long i, end_pfn = start_pfn + nr_pages;
6252 struct page *page;
6253
6254 for (i = start_pfn; i < end_pfn; i++) {
6255 page = pfn_to_online_page(i);
6256 if (!page)
6257 return false;
6258
6259 if (page_zone(page) != z)
6260 return false;
6261
6262 if (PageReserved(page))
6263 return false;
6264
6265 if (PageHuge(page))
6266 return false;
6267 }
6268 return true;
6269 }
6270
6271 static bool zone_spans_last_pfn(const struct zone *zone,
6272 unsigned long start_pfn, unsigned long nr_pages)
6273 {
6274 unsigned long last_pfn = start_pfn + nr_pages - 1;
6275
6276 return zone_spans_pfn(zone, last_pfn);
6277 }
6278
6279 /**
6280 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
6281 * @nr_pages: Number of contiguous pages to allocate
6282 * @gfp_mask: GFP mask to limit search and used during compaction
6283 * @nid: Target node
6284 * @nodemask: Mask for other possible nodes
6285 *
6286 * This routine is a wrapper around alloc_contig_range(). It scans over zones
6287 * on an applicable zonelist to find a contiguous pfn range which can then be
6288 * tried for allocation with alloc_contig_range(). This routine is intended
6289 * for allocation requests which can not be fulfilled with the buddy allocator.
6290 *
6291 * The allocated memory is always aligned to a page boundary. If nr_pages is a
6292 * power of two, then the allocated range is also guaranteed to be aligned to
6293 * the same nr_pages (e.g. 1GB request would be aligned to 1GB).
6294 *
6295 * Allocated pages can be freed with free_contig_range() or by manually calling
6296 * __free_page() on each allocated page.
6297 *
6298 * Return: pointer to contiguous pages on success, or NULL if not successful.
6299 */
6300 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
6301 int nid, nodemask_t *nodemask)
6302 {
6303 unsigned long ret, pfn, flags;
6304 struct zonelist *zonelist;
6305 struct zone *zone;
6306 struct zoneref *z;
6307
6308 zonelist = node_zonelist(nid, gfp_mask);
6309 for_each_zone_zonelist_nodemask(zone, z, zonelist,
6310 gfp_zone(gfp_mask), nodemask) {
6311 spin_lock_irqsave(&zone->lock, flags);
6312
6313 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
6314 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
6315 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
6316 /*
6317 * We release the zone lock here because
6318 * alloc_contig_range() will also lock the zone
6319 * at some point. If there's an allocation
6320 * spinning on this lock, it may win the race
6321 * and cause alloc_contig_range() to fail...
6322 */
6323 spin_unlock_irqrestore(&zone->lock, flags);
6324 ret = __alloc_contig_pages(pfn, nr_pages,
6325 gfp_mask);
6326 if (!ret)
6327 return pfn_to_page(pfn);
6328 spin_lock_irqsave(&zone->lock, flags);
6329 }
6330 pfn += nr_pages;
6331 }
6332 spin_unlock_irqrestore(&zone->lock, flags);
6333 }
6334 return NULL;
6335 }
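/*
 * Minimal usage sketch for alloc_contig_pages() (illustrative; 'nid' and
 * 'nr' are caller-chosen values):
 *
 *   struct page *page = alloc_contig_pages(nr, GFP_KERNEL, nid, NULL);
 *   if (page)
 *           free_contig_range(page_to_pfn(page), nr);
 *
 * This is the kind of path used for allocations larger than the buddy
 * allocator can serve directly, e.g. gigantic hugetlb pages.
 */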
6336 #endif /* CONFIG_CONTIG_ALLOC */
6337
6338 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
6339 {
6340 unsigned long count = 0;
6341
6342 for (; nr_pages--; pfn++) {
6343 struct page *page = pfn_to_page(pfn);
6344
6345 count += page_count(page) != 1;
6346 __free_page(page);
6347 }
6348 WARN(count != 0, "%lu pages are still in use!\n", count);
6349 }
6350 EXPORT_SYMBOL(free_contig_range);
6351
6352 /*
6353 * Effectively disable pcplists for the zone by setting the high limit to 0
6354 * and draining all cpus. A concurrent page freeing on another CPU that's about
6355 * to put the page on pcplist will either finish before the drain and the page
6356 * will be drained, or observe the new high limit and skip the pcplist.
6357 *
6358 * Must be paired with a call to zone_pcp_enable().
6359 */
6360 void zone_pcp_disable(struct zone *zone)
6361 {
6362 mutex_lock(&pcp_batch_high_lock);
6363 __zone_set_pageset_high_and_batch(zone, 0, 1);
6364 __drain_all_pages(zone, true);
6365 }
6366
6367 void zone_pcp_enable(struct zone *zone)
6368 {
6369 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
6370 mutex_unlock(&pcp_batch_high_lock);
6371 }
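/*
 * The two helpers above are meant to bracket operations that must not
 * race with pages sitting on pcplists (e.g. isolating or offlining a
 * range); a sketch of the intended pairing:
 *
 *   zone_pcp_disable(zone);
 *   ... operate on the zone while pcplists are drained and disabled ...
 *   zone_pcp_enable(zone);
 */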
6372
6373 void zone_pcp_reset(struct zone *zone)
6374 {
6375 int cpu;
6376 struct per_cpu_zonestat *pzstats;
6377
6378 if (zone->per_cpu_pageset != &boot_pageset) {
6379 for_each_online_cpu(cpu) {
6380 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6381 drain_zonestat(zone, pzstats);
6382 }
6383 free_percpu(zone->per_cpu_pageset);
6384 zone->per_cpu_pageset = &boot_pageset;
6385 if (zone->per_cpu_zonestats != &boot_zonestats) {
6386 free_percpu(zone->per_cpu_zonestats);
6387 zone->per_cpu_zonestats = &boot_zonestats;
6388 }
6389 }
6390 }
6391
6392 #ifdef CONFIG_MEMORY_HOTREMOVE
6393 /*
6394 * All pages in the range must be in a single zone, must not contain holes,
6395 * must span full sections, and must be isolated before calling this function.
6396 */
6397 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6398 {
6399 unsigned long pfn = start_pfn;
6400 struct page *page;
6401 struct zone *zone;
6402 unsigned int order;
6403 unsigned long flags;
6404
6405 offline_mem_sections(pfn, end_pfn);
6406 zone = page_zone(pfn_to_page(pfn));
6407 spin_lock_irqsave(&zone->lock, flags);
6408 while (pfn < end_pfn) {
6409 page = pfn_to_page(pfn);
6410 /*
6411 * The HWPoisoned page may not be in the buddy system, and
6412 * page_count() is not 0.
6413 */
6414 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6415 pfn++;
6416 continue;
6417 }
6418 /*
6419 * At this point all remaining PageOffline() pages have a
6420 * reference count of 0 and can simply be skipped.
6421 */
6422 if (PageOffline(page)) {
6423 BUG_ON(page_count(page));
6424 BUG_ON(PageBuddy(page));
6425 pfn++;
6426 continue;
6427 }
6428
6429 BUG_ON(page_count(page));
6430 BUG_ON(!PageBuddy(page));
6431 order = buddy_order(page);
6432 del_page_from_free_list(page, zone, order);
6433 pfn += (1 << order);
6434 }
6435 spin_unlock_irqrestore(&zone->lock, flags);
6436 }
6437 #endif
6438
6439 /*
6440 * This function returns a stable result only if called under zone lock.
6441 */
6442 bool is_free_buddy_page(struct page *page)
6443 {
6444 unsigned long pfn = page_to_pfn(page);
6445 unsigned int order;
6446
6447 for (order = 0; order < NR_PAGE_ORDERS; order++) {
6448 struct page *page_head = page - (pfn & ((1 << order) - 1));
6449
6450 if (PageBuddy(page_head) &&
6451 buddy_order_unsafe(page_head) >= order)
6452 break;
6453 }
6454
6455 return order <= MAX_ORDER;
6456 }
6457 EXPORT_SYMBOL(is_free_buddy_page);
6458
6459 #ifdef CONFIG_MEMORY_FAILURE
6460 /*
6461 * Break down a higher-order page into sub-pages, and keep our target out of
6462 * the buddy allocator.
6463 */
6464 static void break_down_buddy_pages(struct zone *zone, struct page *page,
6465 struct page *target, int low, int high,
6466 int migratetype)
6467 {
6468 unsigned long size = 1 << high;
6469 struct page *current_buddy, *next_page;
6470
6471 while (high > low) {
6472 high--;
6473 size >>= 1;
6474
6475 if (target >= &page[size]) {
6476 next_page = page + size;
6477 current_buddy = page;
6478 } else {
6479 next_page = page;
6480 current_buddy = page + size;
6481 }
6482 page = next_page;
6483
6484 if (set_page_guard(zone, current_buddy, high, migratetype))
6485 continue;
6486
6487 if (current_buddy != target) {
6488 add_to_free_list(current_buddy, zone, high, migratetype);
6489 set_buddy_order(current_buddy, high);
6490 }
6491 }
6492 }
6493
6494 /*
6495 * Take a page that will be marked as poisoned off the buddy allocator.
6496 */
6497 bool take_page_off_buddy(struct page *page)
6498 {
6499 struct zone *zone = page_zone(page);
6500 unsigned long pfn = page_to_pfn(page);
6501 unsigned long flags;
6502 unsigned int order;
6503 bool ret = false;
6504
6505 spin_lock_irqsave(&zone->lock, flags);
6506 for (order = 0; order < NR_PAGE_ORDERS; order++) {
6507 struct page *page_head = page - (pfn & ((1 << order) - 1));
6508 int page_order = buddy_order(page_head);
6509
6510 if (PageBuddy(page_head) && page_order >= order) {
6511 unsigned long pfn_head = page_to_pfn(page_head);
6512 int migratetype = get_pfnblock_migratetype(page_head,
6513 pfn_head);
6514
6515 del_page_from_free_list(page_head, zone, page_order);
6516 break_down_buddy_pages(zone, page_head, page, 0,
6517 page_order, migratetype);
6518 SetPageHWPoisonTakenOff(page);
6519 if (!is_migrate_isolate(migratetype))
6520 __mod_zone_freepage_state(zone, -1, migratetype);
6521 ret = true;
6522 break;
6523 }
6524 if (page_count(page_head) > 0)
6525 break;
6526 }
6527 spin_unlock_irqrestore(&zone->lock, flags);
6528 return ret;
6529 }
6530
6531 /*
6532 * Cancel takeoff done by take_page_off_buddy().
6533 */
6534 bool put_page_back_buddy(struct page *page)
6535 {
6536 struct zone *zone = page_zone(page);
6537 unsigned long pfn = page_to_pfn(page);
6538 unsigned long flags;
6539 int migratetype = get_pfnblock_migratetype(page, pfn);
6540 bool ret = false;
6541
6542 spin_lock_irqsave(&zone->lock, flags);
6543 if (put_page_testzero(page)) {
6544 ClearPageHWPoisonTakenOff(page);
6545 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
6546 if (TestClearPageHWPoison(page)) {
6547 ret = true;
6548 }
6549 }
6550 spin_unlock_irqrestore(&zone->lock, flags);
6551
6552 return ret;
6553 }
6554 #endif
6555
6556 #ifdef CONFIG_ZONE_DMA
6557 bool has_managed_dma(void)
6558 {
6559 struct pglist_data *pgdat;
6560
6561 for_each_online_pgdat(pgdat) {
6562 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
6563
6564 if (managed_zone(zone))
6565 return true;
6566 }
6567 return false;
6568 }
6569 #endif /* CONFIG_ZONE_DMA */
6570
6571 #ifdef CONFIG_UNACCEPTED_MEMORY
6572
6573 /* Counts number of zones with unaccepted pages. */
6574 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages);
6575
6576 static bool lazy_accept = true;
6577
6578 static int __init accept_memory_parse(char *p)
6579 {
6580 if (!strcmp(p, "lazy")) {
6581 lazy_accept = true;
6582 return 0;
6583 } else if (!strcmp(p, "eager")) {
6584 lazy_accept = false;
6585 return 0;
6586 } else {
6587 return -EINVAL;
6588 }
6589 }
6590 early_param("accept_memory", accept_memory_parse);
6591
6592 static bool page_contains_unaccepted(struct page *page, unsigned int order)
6593 {
6594 phys_addr_t start = page_to_phys(page);
6595 phys_addr_t end = start + (PAGE_SIZE << order);
6596
6597 return range_contains_unaccepted_memory(start, end);
6598 }
6599
6600 static void accept_page(struct page *page, unsigned int order)
6601 {
6602 phys_addr_t start = page_to_phys(page);
6603
6604 accept_memory(start, start + (PAGE_SIZE << order));
6605 }
6606
6607 static bool try_to_accept_memory_one(struct zone *zone)
6608 {
6609 unsigned long flags;
6610 struct page *page;
6611 bool last;
6612
6613 spin_lock_irqsave(&zone->lock, flags);
6614 page = list_first_entry_or_null(&zone->unaccepted_pages,
6615 struct page, lru);
6616 if (!page) {
6617 spin_unlock_irqrestore(&zone->lock, flags);
6618 return false;
6619 }
6620
6621 list_del(&page->lru);
6622 last = list_empty(&zone->unaccepted_pages);
6623
6624 __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
6625 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
6626 spin_unlock_irqrestore(&zone->lock, flags);
6627
6628 accept_page(page, MAX_ORDER);
6629
6630 __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL);
6631
6632 if (last)
6633 static_branch_dec(&zones_with_unaccepted_pages);
6634
6635 return true;
6636 }
6637
6638 static bool cond_accept_memory(struct zone *zone, unsigned int order)
6639 {
6640 long to_accept;
6641 bool ret = false;
6642
6643 if (!has_unaccepted_memory())
6644 return false;
6645
6646 if (list_empty(&zone->unaccepted_pages))
6647 return false;
6648
6649 /* How much to accept to get to high watermark? */
6650 to_accept = high_wmark_pages(zone) -
6651 (zone_page_state(zone, NR_FREE_PAGES) -
6652 __zone_watermark_unusable_free(zone, order, 0) -
6653 zone_page_state(zone, NR_UNACCEPTED));
6654
6655 while (to_accept > 0) {
6656 if (!try_to_accept_memory_one(zone))
6657 break;
6658 ret = true;
6659 to_accept -= MAX_ORDER_NR_PAGES;
6660 }
6661
6662 return ret;
6663 }
6664
6665 static inline bool has_unaccepted_memory(void)
6666 {
6667 return static_branch_unlikely(&zones_with_unaccepted_pages);
6668 }
6669
6670 static bool __free_unaccepted(struct page *page)
6671 {
6672 struct zone *zone = page_zone(page);
6673 unsigned long flags;
6674 bool first = false;
6675
6676 if (!lazy_accept)
6677 return false;
6678
6679 spin_lock_irqsave(&zone->lock, flags);
6680 first = list_empty(&zone->unaccepted_pages);
6681 list_add_tail(&page->lru, &zone->unaccepted_pages);
6682 __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
6683 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
6684 spin_unlock_irqrestore(&zone->lock, flags);
6685
6686 if (first)
6687 static_branch_inc(&zones_with_unaccepted_pages);
6688
6689 return true;
6690 }
6691
6692 #else
6693
6694 static bool page_contains_unaccepted(struct page *page, unsigned int order)
6695 {
6696 return false;
6697 }
6698
6699 static void accept_page(struct page *page, unsigned int order)
6700 {
6701 }
6702
6703 static bool cond_accept_memory(struct zone *zone, unsigned int order)
6704 {
6705 return false;
6706 }
6707
6708 static inline bool has_unaccepted_memory(void)
6709 {
6710 return false;
6711 }
6712
6713 static bool __free_unaccepted(struct page *page)
6714 {
6715 BUILD_BUG();
6716 return false;
6717 }
6718
6719 #endif /* CONFIG_UNACCEPTED_MEMORY */
6720