1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/mm/page_alloc.c
4 *
5 * Manages the free list, the system allocates free pages here.
6 * Note that kmalloc() lives in slab.c
7 *
8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
9 * Swap reorganised 29.12.95, Stephen Tweedie
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16 */
17
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/highmem.h>
21 #include <linux/interrupt.h>
22 #include <linux/jiffies.h>
23 #include <linux/compiler.h>
24 #include <linux/kernel.h>
25 #include <linux/kasan.h>
26 #include <linux/kmsan.h>
27 #include <linux/module.h>
28 #include <linux/suspend.h>
29 #include <linux/ratelimit.h>
30 #include <linux/oom.h>
31 #include <linux/topology.h>
32 #include <linux/sysctl.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/memory_hotplug.h>
36 #include <linux/nodemask.h>
37 #include <linux/vmstat.h>
38 #include <linux/fault-inject.h>
39 #include <linux/compaction.h>
40 #include <trace/events/kmem.h>
41 #include <trace/events/oom.h>
42 #include <linux/prefetch.h>
43 #include <linux/mm_inline.h>
44 #include <linux/mmu_notifier.h>
45 #include <linux/migrate.h>
46 #include <linux/sched/mm.h>
47 #include <linux/page_owner.h>
48 #include <linux/page_table_check.h>
49 #include <linux/memcontrol.h>
50 #include <linux/ftrace.h>
51 #include <linux/lockdep.h>
52 #include <linux/psi.h>
53 #include <linux/khugepaged.h>
54 #include <linux/delayacct.h>
55 #include <asm/div64.h>
56 #include "internal.h"
57 #include "shuffle.h"
58 #include "page_reporting.h"
59
60 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
61 typedef int __bitwise fpi_t;
62
63 /* No special request */
64 #define FPI_NONE ((__force fpi_t)0)
65
66 /*
67 * Skip free page reporting notification for the (possibly merged) page.
68 * This does not hinder free page reporting from grabbing the page,
69 * reporting it and marking it "reported" - it only skips notifying
70 * the free page reporting infrastructure about a newly freed page. For
71 * example, used when temporarily pulling a page from a freelist and
72 * putting it back unmodified.
73 */
74 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0))
75
76 /*
77 * Place the (possibly merged) page to the tail of the freelist. Will ignore
78 * page shuffling (relevant code - e.g., memory onlining - is expected to
79 * shuffle the whole zone).
80 *
81 * Note: No code should rely on this flag for correctness - it's purely
82 * to allow for optimizations when handing back either fresh pages
83 * (memory onlining) or untouched pages (page isolation, free page
84 * reporting).
85 */
86 #define FPI_TO_TAIL ((__force fpi_t)BIT(1))
87
88 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
89 static DEFINE_MUTEX(pcp_batch_high_lock);
90 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
91
92 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
93 /*
94 * On SMP, spin_trylock is sufficient protection.
95 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
96 */
97 #define pcp_trylock_prepare(flags) do { } while (0)
98 #define pcp_trylock_finish(flag) do { } while (0)
99 #else
100
101 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
102 #define pcp_trylock_prepare(flags) local_irq_save(flags)
103 #define pcp_trylock_finish(flags) local_irq_restore(flags)
104 #endif
105
106 /*
107 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
108 * a migration causing the wrong PCP to be locked and remote memory being
109 * potentially allocated, pin the task to the CPU for the lookup+lock.
110 * preempt_disable is used on !RT because it is faster than migrate_disable.
111 * migrate_disable is used on RT because otherwise RT spinlock usage is
112 * interfered with and a high priority task cannot preempt the allocator.
113 */
114 #ifndef CONFIG_PREEMPT_RT
115 #define pcpu_task_pin() preempt_disable()
116 #define pcpu_task_unpin() preempt_enable()
117 #else
118 #define pcpu_task_pin() migrate_disable()
119 #define pcpu_task_unpin() migrate_enable()
120 #endif
121
122 /*
123 * Generic helper to lookup and a per-cpu variable with an embedded spinlock.
124 * Return value should be used with equivalent unlock helper.
125 */
126 #define pcpu_spin_lock(type, member, ptr) \
127 ({ \
128 type *_ret; \
129 pcpu_task_pin(); \
130 _ret = this_cpu_ptr(ptr); \
131 spin_lock(&_ret->member); \
132 _ret; \
133 })
134
135 #define pcpu_spin_trylock(type, member, ptr) \
136 ({ \
137 type *_ret; \
138 pcpu_task_pin(); \
139 _ret = this_cpu_ptr(ptr); \
140 if (!spin_trylock(&_ret->member)) { \
141 pcpu_task_unpin(); \
142 _ret = NULL; \
143 } \
144 _ret; \
145 })
146
147 #define pcpu_spin_unlock(member, ptr) \
148 ({ \
149 spin_unlock(&ptr->member); \
150 pcpu_task_unpin(); \
151 })
152
153 /* struct per_cpu_pages specific helpers. */
154 #define pcp_spin_lock(ptr) \
155 pcpu_spin_lock(struct per_cpu_pages, lock, ptr)
156
157 #define pcp_spin_trylock(ptr) \
158 pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)
159
160 #define pcp_spin_unlock(ptr) \
161 pcpu_spin_unlock(lock, ptr)
162
163 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
164 DEFINE_PER_CPU(int, numa_node);
165 EXPORT_PER_CPU_SYMBOL(numa_node);
166 #endif
167
168 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
169
170 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
171 /*
172 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
173 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
174 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
175 * defined in <linux/topology.h>.
176 */
177 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
178 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
179 #endif
180
181 static DEFINE_MUTEX(pcpu_drain_mutex);
182
183 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
184 volatile unsigned long latent_entropy __latent_entropy;
185 EXPORT_SYMBOL(latent_entropy);
186 #endif
187
188 /*
189 * Array of node states.
190 */
191 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
192 [N_POSSIBLE] = NODE_MASK_ALL,
193 [N_ONLINE] = { { [0] = 1UL } },
194 #ifndef CONFIG_NUMA
195 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
196 #ifdef CONFIG_HIGHMEM
197 [N_HIGH_MEMORY] = { { [0] = 1UL } },
198 #endif
199 [N_MEMORY] = { { [0] = 1UL } },
200 [N_CPU] = { { [0] = 1UL } },
201 #endif /* NUMA */
202 };
203 EXPORT_SYMBOL(node_states);
204
205 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
206
207 /*
208 * A cached value of the page's pageblock's migratetype, used when the page is
209 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
210 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
211 * Also the migratetype set in the page does not necessarily match the pcplist
212 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
213 * other index - this ensures that it will be put on the correct CMA freelist.
214 */
get_pcppage_migratetype(struct page * page)215 static inline int get_pcppage_migratetype(struct page *page)
216 {
217 return page->index;
218 }
219
set_pcppage_migratetype(struct page * page,int migratetype)220 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
221 {
222 page->index = migratetype;
223 }
224
225 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
226 unsigned int pageblock_order __read_mostly;
227 #endif
228
229 static void __free_pages_ok(struct page *page, unsigned int order,
230 fpi_t fpi_flags);
231
232 /*
233 * results with 256, 32 in the lowmem_reserve sysctl:
234 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
235 * 1G machine -> (16M dma, 784M normal, 224M high)
236 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
237 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
238 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
239 *
240 * TBD: should special case ZONE_DMA32 machines here - in those we normally
241 * don't need any ZONE_NORMAL reservation
242 */
243 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
244 #ifdef CONFIG_ZONE_DMA
245 [ZONE_DMA] = 256,
246 #endif
247 #ifdef CONFIG_ZONE_DMA32
248 [ZONE_DMA32] = 256,
249 #endif
250 [ZONE_NORMAL] = 32,
251 #ifdef CONFIG_HIGHMEM
252 [ZONE_HIGHMEM] = 0,
253 #endif
254 [ZONE_MOVABLE] = 0,
255 };
256
257 char * const zone_names[MAX_NR_ZONES] = {
258 #ifdef CONFIG_ZONE_DMA
259 "DMA",
260 #endif
261 #ifdef CONFIG_ZONE_DMA32
262 "DMA32",
263 #endif
264 "Normal",
265 #ifdef CONFIG_HIGHMEM
266 "HighMem",
267 #endif
268 "Movable",
269 #ifdef CONFIG_ZONE_DEVICE
270 "Device",
271 #endif
272 };
273
274 const char * const migratetype_names[MIGRATE_TYPES] = {
275 "Unmovable",
276 "Movable",
277 "Reclaimable",
278 "HighAtomic",
279 #ifdef CONFIG_CMA
280 "CMA",
281 #endif
282 #ifdef CONFIG_MEMORY_ISOLATION
283 "Isolate",
284 #endif
285 };
286
287 int min_free_kbytes = 1024;
288 int user_min_free_kbytes = -1;
289 static int watermark_boost_factor __read_mostly = 15000;
290 static int watermark_scale_factor = 10;
291
292 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
293 int movable_zone;
294 EXPORT_SYMBOL(movable_zone);
295
296 #if MAX_NUMNODES > 1
297 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
298 unsigned int nr_online_nodes __read_mostly = 1;
299 EXPORT_SYMBOL(nr_node_ids);
300 EXPORT_SYMBOL(nr_online_nodes);
301 #endif
302
303 static bool page_contains_unaccepted(struct page *page, unsigned int order);
304 static void accept_page(struct page *page, unsigned int order);
305 static bool cond_accept_memory(struct zone *zone, unsigned int order);
306 static inline bool has_unaccepted_memory(void);
307 static bool __free_unaccepted(struct page *page);
308
309 int page_group_by_mobility_disabled __read_mostly;
310
311 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
312 /*
313 * During boot we initialize deferred pages on-demand, as needed, but once
314 * page_alloc_init_late() has finished, the deferred pages are all initialized,
315 * and we can permanently disable that path.
316 */
317 DEFINE_STATIC_KEY_TRUE(deferred_pages);
318
deferred_pages_enabled(void)319 static inline bool deferred_pages_enabled(void)
320 {
321 return static_branch_unlikely(&deferred_pages);
322 }
323
324 /*
325 * deferred_grow_zone() is __init, but it is called from
326 * get_page_from_freelist() during early boot until deferred_pages permanently
327 * disables this call. This is why we have refdata wrapper to avoid warning,
328 * and to ensure that the function body gets unloaded.
329 */
330 static bool __ref
_deferred_grow_zone(struct zone * zone,unsigned int order)331 _deferred_grow_zone(struct zone *zone, unsigned int order)
332 {
333 return deferred_grow_zone(zone, order);
334 }
335 #else
deferred_pages_enabled(void)336 static inline bool deferred_pages_enabled(void)
337 {
338 return false;
339 }
340 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
341
342 /* Return a pointer to the bitmap storing bits affecting a block of pages */
get_pageblock_bitmap(const struct page * page,unsigned long pfn)343 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
344 unsigned long pfn)
345 {
346 #ifdef CONFIG_SPARSEMEM
347 return section_to_usemap(__pfn_to_section(pfn));
348 #else
349 return page_zone(page)->pageblock_flags;
350 #endif /* CONFIG_SPARSEMEM */
351 }
352
pfn_to_bitidx(const struct page * page,unsigned long pfn)353 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
354 {
355 #ifdef CONFIG_SPARSEMEM
356 pfn &= (PAGES_PER_SECTION-1);
357 #else
358 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
359 #endif /* CONFIG_SPARSEMEM */
360 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
361 }
362
363 /**
364 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
365 * @page: The page within the block of interest
366 * @pfn: The target page frame number
367 * @mask: mask of bits that the caller is interested in
368 *
369 * Return: pageblock_bits flags
370 */
get_pfnblock_flags_mask(const struct page * page,unsigned long pfn,unsigned long mask)371 unsigned long get_pfnblock_flags_mask(const struct page *page,
372 unsigned long pfn, unsigned long mask)
373 {
374 unsigned long *bitmap;
375 unsigned long bitidx, word_bitidx;
376 unsigned long word;
377
378 bitmap = get_pageblock_bitmap(page, pfn);
379 bitidx = pfn_to_bitidx(page, pfn);
380 word_bitidx = bitidx / BITS_PER_LONG;
381 bitidx &= (BITS_PER_LONG-1);
382 /*
383 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
384 * a consistent read of the memory array, so that results, even though
385 * racy, are not corrupted.
386 */
387 word = READ_ONCE(bitmap[word_bitidx]);
388 return (word >> bitidx) & mask;
389 }
390
get_pfnblock_migratetype(const struct page * page,unsigned long pfn)391 static __always_inline int get_pfnblock_migratetype(const struct page *page,
392 unsigned long pfn)
393 {
394 return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
395 }
396
397 /**
398 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
399 * @page: The page within the block of interest
400 * @flags: The flags to set
401 * @pfn: The target page frame number
402 * @mask: mask of bits that the caller is interested in
403 */
set_pfnblock_flags_mask(struct page * page,unsigned long flags,unsigned long pfn,unsigned long mask)404 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
405 unsigned long pfn,
406 unsigned long mask)
407 {
408 unsigned long *bitmap;
409 unsigned long bitidx, word_bitidx;
410 unsigned long word;
411
412 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
413 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
414
415 bitmap = get_pageblock_bitmap(page, pfn);
416 bitidx = pfn_to_bitidx(page, pfn);
417 word_bitidx = bitidx / BITS_PER_LONG;
418 bitidx &= (BITS_PER_LONG-1);
419
420 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
421
422 mask <<= bitidx;
423 flags <<= bitidx;
424
425 word = READ_ONCE(bitmap[word_bitidx]);
426 do {
427 } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
428 }
429
set_pageblock_migratetype(struct page * page,int migratetype)430 void set_pageblock_migratetype(struct page *page, int migratetype)
431 {
432 if (unlikely(page_group_by_mobility_disabled &&
433 migratetype < MIGRATE_PCPTYPES))
434 migratetype = MIGRATE_UNMOVABLE;
435
436 set_pfnblock_flags_mask(page, (unsigned long)migratetype,
437 page_to_pfn(page), MIGRATETYPE_MASK);
438 }
439
440 #ifdef CONFIG_DEBUG_VM
page_outside_zone_boundaries(struct zone * zone,struct page * page)441 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
442 {
443 int ret;
444 unsigned seq;
445 unsigned long pfn = page_to_pfn(page);
446 unsigned long sp, start_pfn;
447
448 do {
449 seq = zone_span_seqbegin(zone);
450 start_pfn = zone->zone_start_pfn;
451 sp = zone->spanned_pages;
452 ret = !zone_spans_pfn(zone, pfn);
453 } while (zone_span_seqretry(zone, seq));
454
455 if (ret)
456 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
457 pfn, zone_to_nid(zone), zone->name,
458 start_pfn, start_pfn + sp);
459
460 return ret;
461 }
462
463 /*
464 * Temporary debugging check for pages not lying within a given zone.
465 */
bad_range(struct zone * zone,struct page * page)466 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
467 {
468 if (page_outside_zone_boundaries(zone, page))
469 return 1;
470 if (zone != page_zone(page))
471 return 1;
472
473 return 0;
474 }
475 #else
bad_range(struct zone * zone,struct page * page)476 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
477 {
478 return 0;
479 }
480 #endif
481
bad_page(struct page * page,const char * reason)482 static void bad_page(struct page *page, const char *reason)
483 {
484 static unsigned long resume;
485 static unsigned long nr_shown;
486 static unsigned long nr_unshown;
487
488 /*
489 * Allow a burst of 60 reports, then keep quiet for that minute;
490 * or allow a steady drip of one report per second.
491 */
492 if (nr_shown == 60) {
493 if (time_before(jiffies, resume)) {
494 nr_unshown++;
495 goto out;
496 }
497 if (nr_unshown) {
498 pr_alert(
499 "BUG: Bad page state: %lu messages suppressed\n",
500 nr_unshown);
501 nr_unshown = 0;
502 }
503 nr_shown = 0;
504 }
505 if (nr_shown++ == 0)
506 resume = jiffies + 60 * HZ;
507
508 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
509 current->comm, page_to_pfn(page));
510 dump_page(page, reason);
511
512 print_modules();
513 dump_stack();
514 out:
515 /* Leave bad fields for debug, except PageBuddy could make trouble */
516 page_mapcount_reset(page); /* remove PageBuddy */
517 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
518 }
519
order_to_pindex(int migratetype,int order)520 static inline unsigned int order_to_pindex(int migratetype, int order)
521 {
522 bool __maybe_unused movable;
523
524 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
525 if (order > PAGE_ALLOC_COSTLY_ORDER) {
526 VM_BUG_ON(order != pageblock_order);
527
528 movable = migratetype == MIGRATE_MOVABLE;
529
530 return NR_LOWORDER_PCP_LISTS + movable;
531 }
532 #else
533 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
534 #endif
535
536 return (MIGRATE_PCPTYPES * order) + migratetype;
537 }
538
pindex_to_order(unsigned int pindex)539 static inline int pindex_to_order(unsigned int pindex)
540 {
541 int order = pindex / MIGRATE_PCPTYPES;
542
543 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
544 if (pindex >= NR_LOWORDER_PCP_LISTS)
545 order = pageblock_order;
546 #else
547 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
548 #endif
549
550 return order;
551 }
552
pcp_allowed_order(unsigned int order)553 static inline bool pcp_allowed_order(unsigned int order)
554 {
555 if (order <= PAGE_ALLOC_COSTLY_ORDER)
556 return true;
557 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
558 if (order == pageblock_order)
559 return true;
560 #endif
561 return false;
562 }
563
free_the_page(struct page * page,unsigned int order)564 static inline void free_the_page(struct page *page, unsigned int order)
565 {
566 if (pcp_allowed_order(order)) /* Via pcp? */
567 free_unref_page(page, order);
568 else
569 __free_pages_ok(page, order, FPI_NONE);
570 }
571
572 /*
573 * Higher-order pages are called "compound pages". They are structured thusly:
574 *
575 * The first PAGE_SIZE page is called the "head page" and have PG_head set.
576 *
577 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
578 * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
579 *
580 * The first tail page's ->compound_order holds the order of allocation.
581 * This usage means that zero-order pages may not be compound.
582 */
583
prep_compound_page(struct page * page,unsigned int order)584 void prep_compound_page(struct page *page, unsigned int order)
585 {
586 int i;
587 int nr_pages = 1 << order;
588
589 __SetPageHead(page);
590 for (i = 1; i < nr_pages; i++)
591 prep_compound_tail(page, i);
592
593 prep_compound_head(page, order);
594 }
595
destroy_large_folio(struct folio * folio)596 void destroy_large_folio(struct folio *folio)
597 {
598 if (folio_test_hugetlb(folio)) {
599 free_huge_folio(folio);
600 return;
601 }
602
603 if (folio_test_large_rmappable(folio))
604 folio_undo_large_rmappable(folio);
605
606 mem_cgroup_uncharge(folio);
607 free_the_page(&folio->page, folio_order(folio));
608 }
609
set_buddy_order(struct page * page,unsigned int order)610 static inline void set_buddy_order(struct page *page, unsigned int order)
611 {
612 set_page_private(page, order);
613 __SetPageBuddy(page);
614 }
615
616 #ifdef CONFIG_COMPACTION
task_capc(struct zone * zone)617 static inline struct capture_control *task_capc(struct zone *zone)
618 {
619 struct capture_control *capc = current->capture_control;
620
621 return unlikely(capc) &&
622 !(current->flags & PF_KTHREAD) &&
623 !capc->page &&
624 capc->cc->zone == zone ? capc : NULL;
625 }
626
627 static inline bool
compaction_capture(struct capture_control * capc,struct page * page,int order,int migratetype)628 compaction_capture(struct capture_control *capc, struct page *page,
629 int order, int migratetype)
630 {
631 if (!capc || order != capc->cc->order)
632 return false;
633
634 /* Do not accidentally pollute CMA or isolated regions*/
635 if (is_migrate_cma(migratetype) ||
636 is_migrate_isolate(migratetype))
637 return false;
638
639 /*
640 * Do not let lower order allocations pollute a movable pageblock.
641 * This might let an unmovable request use a reclaimable pageblock
642 * and vice-versa but no more than normal fallback logic which can
643 * have trouble finding a high-order free page.
644 */
645 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
646 return false;
647
648 capc->page = page;
649 return true;
650 }
651
652 #else
task_capc(struct zone * zone)653 static inline struct capture_control *task_capc(struct zone *zone)
654 {
655 return NULL;
656 }
657
658 static inline bool
compaction_capture(struct capture_control * capc,struct page * page,int order,int migratetype)659 compaction_capture(struct capture_control *capc, struct page *page,
660 int order, int migratetype)
661 {
662 return false;
663 }
664 #endif /* CONFIG_COMPACTION */
665
666 /* Used for pages not on another list */
add_to_free_list(struct page * page,struct zone * zone,unsigned int order,int migratetype)667 static inline void add_to_free_list(struct page *page, struct zone *zone,
668 unsigned int order, int migratetype)
669 {
670 struct free_area *area = &zone->free_area[order];
671
672 list_add(&page->buddy_list, &area->free_list[migratetype]);
673 area->nr_free++;
674 }
675
676 /* Used for pages not on another list */
add_to_free_list_tail(struct page * page,struct zone * zone,unsigned int order,int migratetype)677 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
678 unsigned int order, int migratetype)
679 {
680 struct free_area *area = &zone->free_area[order];
681
682 list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
683 area->nr_free++;
684 }
685
686 /*
687 * Used for pages which are on another list. Move the pages to the tail
688 * of the list - so the moved pages won't immediately be considered for
689 * allocation again (e.g., optimization for memory onlining).
690 */
move_to_free_list(struct page * page,struct zone * zone,unsigned int order,int migratetype)691 static inline void move_to_free_list(struct page *page, struct zone *zone,
692 unsigned int order, int migratetype)
693 {
694 struct free_area *area = &zone->free_area[order];
695
696 list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
697 }
698
del_page_from_free_list(struct page * page,struct zone * zone,unsigned int order)699 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
700 unsigned int order)
701 {
702 /* clear reported state and update reported page count */
703 if (page_reported(page))
704 __ClearPageReported(page);
705
706 list_del(&page->buddy_list);
707 __ClearPageBuddy(page);
708 set_page_private(page, 0);
709 zone->free_area[order].nr_free--;
710 }
711
get_page_from_free_area(struct free_area * area,int migratetype)712 static inline struct page *get_page_from_free_area(struct free_area *area,
713 int migratetype)
714 {
715 return list_first_entry_or_null(&area->free_list[migratetype],
716 struct page, buddy_list);
717 }
718
719 /*
720 * If this is not the largest possible page, check if the buddy
721 * of the next-highest order is free. If it is, it's possible
722 * that pages are being freed that will coalesce soon. In case,
723 * that is happening, add the free page to the tail of the list
724 * so it's less likely to be used soon and more likely to be merged
725 * as a higher order page
726 */
727 static inline bool
buddy_merge_likely(unsigned long pfn,unsigned long buddy_pfn,struct page * page,unsigned int order)728 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
729 struct page *page, unsigned int order)
730 {
731 unsigned long higher_page_pfn;
732 struct page *higher_page;
733
734 if (order >= MAX_ORDER - 1)
735 return false;
736
737 higher_page_pfn = buddy_pfn & pfn;
738 higher_page = page + (higher_page_pfn - pfn);
739
740 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
741 NULL) != NULL;
742 }
743
744 /*
745 * Freeing function for a buddy system allocator.
746 *
747 * The concept of a buddy system is to maintain direct-mapped table
748 * (containing bit values) for memory blocks of various "orders".
749 * The bottom level table contains the map for the smallest allocatable
750 * units of memory (here, pages), and each level above it describes
751 * pairs of units from the levels below, hence, "buddies".
752 * At a high level, all that happens here is marking the table entry
753 * at the bottom level available, and propagating the changes upward
754 * as necessary, plus some accounting needed to play nicely with other
755 * parts of the VM system.
756 * At each level, we keep a list of pages, which are heads of continuous
757 * free pages of length of (1 << order) and marked with PageBuddy.
758 * Page's order is recorded in page_private(page) field.
759 * So when we are allocating or freeing one, we can derive the state of the
760 * other. That is, if we allocate a small block, and both were
761 * free, the remainder of the region must be split into blocks.
762 * If a block is freed, and its buddy is also free, then this
763 * triggers coalescing into a block of larger size.
764 *
765 * -- nyc
766 */
767
__free_one_page(struct page * page,unsigned long pfn,struct zone * zone,unsigned int order,int migratetype,fpi_t fpi_flags)768 static inline void __free_one_page(struct page *page,
769 unsigned long pfn,
770 struct zone *zone, unsigned int order,
771 int migratetype, fpi_t fpi_flags)
772 {
773 struct capture_control *capc = task_capc(zone);
774 unsigned long buddy_pfn = 0;
775 unsigned long combined_pfn;
776 struct page *buddy;
777 bool to_tail;
778
779 VM_BUG_ON(!zone_is_initialized(zone));
780 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
781
782 VM_BUG_ON(migratetype == -1);
783 if (likely(!is_migrate_isolate(migratetype)))
784 __mod_zone_freepage_state(zone, 1 << order, migratetype);
785
786 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
787 VM_BUG_ON_PAGE(bad_range(zone, page), page);
788
789 while (order < MAX_ORDER) {
790 if (compaction_capture(capc, page, order, migratetype)) {
791 __mod_zone_freepage_state(zone, -(1 << order),
792 migratetype);
793 return;
794 }
795
796 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
797 if (!buddy)
798 goto done_merging;
799
800 if (unlikely(order >= pageblock_order)) {
801 /*
802 * We want to prevent merge between freepages on pageblock
803 * without fallbacks and normal pageblock. Without this,
804 * pageblock isolation could cause incorrect freepage or CMA
805 * accounting or HIGHATOMIC accounting.
806 */
807 int buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);
808
809 if (migratetype != buddy_mt
810 && (!migratetype_is_mergeable(migratetype) ||
811 !migratetype_is_mergeable(buddy_mt)))
812 goto done_merging;
813 }
814
815 /*
816 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
817 * merge with it and move up one order.
818 */
819 if (page_is_guard(buddy))
820 clear_page_guard(zone, buddy, order, migratetype);
821 else
822 del_page_from_free_list(buddy, zone, order);
823 combined_pfn = buddy_pfn & pfn;
824 page = page + (combined_pfn - pfn);
825 pfn = combined_pfn;
826 order++;
827 }
828
829 done_merging:
830 set_buddy_order(page, order);
831
832 if (fpi_flags & FPI_TO_TAIL)
833 to_tail = true;
834 else if (is_shuffle_order(order))
835 to_tail = shuffle_pick_tail();
836 else
837 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
838
839 if (to_tail)
840 add_to_free_list_tail(page, zone, order, migratetype);
841 else
842 add_to_free_list(page, zone, order, migratetype);
843
844 /* Notify page reporting subsystem of freed page */
845 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
846 page_reporting_notify_free(order);
847 }
848
849 /**
850 * split_free_page() -- split a free page at split_pfn_offset
851 * @free_page: the original free page
852 * @order: the order of the page
853 * @split_pfn_offset: split offset within the page
854 *
855 * Return -ENOENT if the free page is changed, otherwise 0
856 *
857 * It is used when the free page crosses two pageblocks with different migratetypes
858 * at split_pfn_offset within the page. The split free page will be put into
859 * separate migratetype lists afterwards. Otherwise, the function achieves
860 * nothing.
861 */
split_free_page(struct page * free_page,unsigned int order,unsigned long split_pfn_offset)862 int split_free_page(struct page *free_page,
863 unsigned int order, unsigned long split_pfn_offset)
864 {
865 struct zone *zone = page_zone(free_page);
866 unsigned long free_page_pfn = page_to_pfn(free_page);
867 unsigned long pfn;
868 unsigned long flags;
869 int free_page_order;
870 int mt;
871 int ret = 0;
872
873 if (split_pfn_offset == 0)
874 return ret;
875
876 spin_lock_irqsave(&zone->lock, flags);
877
878 if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
879 ret = -ENOENT;
880 goto out;
881 }
882
883 mt = get_pfnblock_migratetype(free_page, free_page_pfn);
884 if (likely(!is_migrate_isolate(mt)))
885 __mod_zone_freepage_state(zone, -(1UL << order), mt);
886
887 del_page_from_free_list(free_page, zone, order);
888 for (pfn = free_page_pfn;
889 pfn < free_page_pfn + (1UL << order);) {
890 int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);
891
892 free_page_order = min_t(unsigned int,
893 pfn ? __ffs(pfn) : order,
894 __fls(split_pfn_offset));
895 __free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
896 mt, FPI_NONE);
897 pfn += 1UL << free_page_order;
898 split_pfn_offset -= (1UL << free_page_order);
899 /* we have done the first part, now switch to second part */
900 if (split_pfn_offset == 0)
901 split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
902 }
903 out:
904 spin_unlock_irqrestore(&zone->lock, flags);
905 return ret;
906 }
907 /*
908 * A bad page could be due to a number of fields. Instead of multiple branches,
909 * try and check multiple fields with one check. The caller must do a detailed
910 * check if necessary.
911 */
page_expected_state(struct page * page,unsigned long check_flags)912 static inline bool page_expected_state(struct page *page,
913 unsigned long check_flags)
914 {
915 if (unlikely(atomic_read(&page->_mapcount) != -1))
916 return false;
917
918 if (unlikely((unsigned long)page->mapping |
919 page_ref_count(page) |
920 #ifdef CONFIG_MEMCG
921 page->memcg_data |
922 #endif
923 (page->flags & check_flags)))
924 return false;
925
926 return true;
927 }
928
page_bad_reason(struct page * page,unsigned long flags)929 static const char *page_bad_reason(struct page *page, unsigned long flags)
930 {
931 const char *bad_reason = NULL;
932
933 if (unlikely(atomic_read(&page->_mapcount) != -1))
934 bad_reason = "nonzero mapcount";
935 if (unlikely(page->mapping != NULL))
936 bad_reason = "non-NULL mapping";
937 if (unlikely(page_ref_count(page) != 0))
938 bad_reason = "nonzero _refcount";
939 if (unlikely(page->flags & flags)) {
940 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
941 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
942 else
943 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
944 }
945 #ifdef CONFIG_MEMCG
946 if (unlikely(page->memcg_data))
947 bad_reason = "page still charged to cgroup";
948 #endif
949 return bad_reason;
950 }
951
free_page_is_bad_report(struct page * page)952 static void free_page_is_bad_report(struct page *page)
953 {
954 bad_page(page,
955 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
956 }
957
free_page_is_bad(struct page * page)958 static inline bool free_page_is_bad(struct page *page)
959 {
960 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
961 return false;
962
963 /* Something has gone sideways, find it */
964 free_page_is_bad_report(page);
965 return true;
966 }
967
is_check_pages_enabled(void)968 static inline bool is_check_pages_enabled(void)
969 {
970 return static_branch_unlikely(&check_pages_enabled);
971 }
972
free_tail_page_prepare(struct page * head_page,struct page * page)973 static int free_tail_page_prepare(struct page *head_page, struct page *page)
974 {
975 struct folio *folio = (struct folio *)head_page;
976 int ret = 1;
977
978 /*
979 * We rely page->lru.next never has bit 0 set, unless the page
980 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
981 */
982 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
983
984 if (!is_check_pages_enabled()) {
985 ret = 0;
986 goto out;
987 }
988 switch (page - head_page) {
989 case 1:
990 /* the first tail page: these may be in place of ->mapping */
991 if (unlikely(folio_entire_mapcount(folio))) {
992 bad_page(page, "nonzero entire_mapcount");
993 goto out;
994 }
995 if (unlikely(atomic_read(&folio->_nr_pages_mapped))) {
996 bad_page(page, "nonzero nr_pages_mapped");
997 goto out;
998 }
999 if (unlikely(atomic_read(&folio->_pincount))) {
1000 bad_page(page, "nonzero pincount");
1001 goto out;
1002 }
1003 break;
1004 case 2:
1005 /*
1006 * the second tail page: ->mapping is
1007 * deferred_list.next -- ignore value.
1008 */
1009 break;
1010 default:
1011 if (page->mapping != TAIL_MAPPING) {
1012 bad_page(page, "corrupted mapping in tail page");
1013 goto out;
1014 }
1015 break;
1016 }
1017 if (unlikely(!PageTail(page))) {
1018 bad_page(page, "PageTail not set");
1019 goto out;
1020 }
1021 if (unlikely(compound_head(page) != head_page)) {
1022 bad_page(page, "compound_head not consistent");
1023 goto out;
1024 }
1025 ret = 0;
1026 out:
1027 page->mapping = NULL;
1028 clear_compound_head(page);
1029 return ret;
1030 }
1031
1032 /*
1033 * Skip KASAN memory poisoning when either:
1034 *
1035 * 1. For generic KASAN: deferred memory initialization has not yet completed.
1036 * Tag-based KASAN modes skip pages freed via deferred memory initialization
1037 * using page tags instead (see below).
1038 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
1039 * that error detection is disabled for accesses via the page address.
1040 *
1041 * Pages will have match-all tags in the following circumstances:
1042 *
1043 * 1. Pages are being initialized for the first time, including during deferred
1044 * memory init; see the call to page_kasan_tag_reset in __init_single_page.
1045 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
1046 * exception of pages unpoisoned by kasan_unpoison_vmalloc.
1047 * 3. The allocation was excluded from being checked due to sampling,
1048 * see the call to kasan_unpoison_pages.
1049 *
1050 * Poisoning pages during deferred memory init will greatly lengthen the
1051 * process and cause problem in large memory systems as the deferred pages
1052 * initialization is done with interrupt disabled.
1053 *
1054 * Assuming that there will be no reference to those newly initialized
1055 * pages before they are ever allocated, this should have no effect on
1056 * KASAN memory tracking as the poison will be properly inserted at page
1057 * allocation time. The only corner case is when pages are allocated by
1058 * on-demand allocation and then freed again before the deferred pages
1059 * initialization is done, but this is not likely to happen.
1060 */
should_skip_kasan_poison(struct page * page,fpi_t fpi_flags)1061 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
1062 {
1063 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1064 return deferred_pages_enabled();
1065
1066 return page_kasan_tag(page) == 0xff;
1067 }
1068
kernel_init_pages(struct page * page,int numpages)1069 static void kernel_init_pages(struct page *page, int numpages)
1070 {
1071 int i;
1072
1073 /* s390's use of memset() could override KASAN redzones. */
1074 kasan_disable_current();
1075 for (i = 0; i < numpages; i++)
1076 clear_highpage_kasan_tagged(page + i);
1077 kasan_enable_current();
1078 }
1079
free_pages_prepare(struct page * page,unsigned int order,fpi_t fpi_flags)1080 static __always_inline bool free_pages_prepare(struct page *page,
1081 unsigned int order, fpi_t fpi_flags)
1082 {
1083 int bad = 0;
1084 bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
1085 bool init = want_init_on_free();
1086
1087 VM_BUG_ON_PAGE(PageTail(page), page);
1088
1089 trace_mm_page_free(page, order);
1090 kmsan_free_page(page, order);
1091
1092 if (unlikely(PageHWPoison(page)) && !order) {
1093 /*
1094 * Do not let hwpoison pages hit pcplists/buddy
1095 * Untie memcg state and reset page's owner
1096 */
1097 if (memcg_kmem_online() && PageMemcgKmem(page))
1098 __memcg_kmem_uncharge_page(page, order);
1099 reset_page_owner(page, order);
1100 page_table_check_free(page, order);
1101 return false;
1102 }
1103
1104 /*
1105 * Check tail pages before head page information is cleared to
1106 * avoid checking PageCompound for order-0 pages.
1107 */
1108 if (unlikely(order)) {
1109 bool compound = PageCompound(page);
1110 int i;
1111
1112 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1113
1114 if (compound)
1115 page[1].flags &= ~PAGE_FLAGS_SECOND;
1116 for (i = 1; i < (1 << order); i++) {
1117 if (compound)
1118 bad += free_tail_page_prepare(page, page + i);
1119 if (is_check_pages_enabled()) {
1120 if (free_page_is_bad(page + i)) {
1121 bad++;
1122 continue;
1123 }
1124 }
1125 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1126 }
1127 }
1128 if (PageMappingFlags(page))
1129 page->mapping = NULL;
1130 if (memcg_kmem_online() && PageMemcgKmem(page))
1131 __memcg_kmem_uncharge_page(page, order);
1132 if (is_check_pages_enabled()) {
1133 if (free_page_is_bad(page))
1134 bad++;
1135 if (bad)
1136 return false;
1137 }
1138
1139 page_cpupid_reset_last(page);
1140 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1141 reset_page_owner(page, order);
1142 page_table_check_free(page, order);
1143
1144 if (!PageHighMem(page)) {
1145 debug_check_no_locks_freed(page_address(page),
1146 PAGE_SIZE << order);
1147 debug_check_no_obj_freed(page_address(page),
1148 PAGE_SIZE << order);
1149 }
1150
1151 kernel_poison_pages(page, 1 << order);
1152
1153 /*
1154 * As memory initialization might be integrated into KASAN,
1155 * KASAN poisoning and memory initialization code must be
1156 * kept together to avoid discrepancies in behavior.
1157 *
1158 * With hardware tag-based KASAN, memory tags must be set before the
1159 * page becomes unavailable via debug_pagealloc or arch_free_page.
1160 */
1161 if (!skip_kasan_poison) {
1162 kasan_poison_pages(page, order, init);
1163
1164 /* Memory is already initialized if KASAN did it internally. */
1165 if (kasan_has_integrated_init())
1166 init = false;
1167 }
1168 if (init)
1169 kernel_init_pages(page, 1 << order);
1170
1171 /*
1172 * arch_free_page() can make the page's contents inaccessible. s390
1173 * does this. So nothing which can access the page's contents should
1174 * happen after this.
1175 */
1176 arch_free_page(page, order);
1177
1178 debug_pagealloc_unmap_pages(page, 1 << order);
1179
1180 return true;
1181 }
1182
1183 /*
1184 * Frees a number of pages from the PCP lists
1185 * Assumes all pages on list are in same zone.
1186 * count is the number of pages to free.
1187 */
free_pcppages_bulk(struct zone * zone,int count,struct per_cpu_pages * pcp,int pindex)1188 static void free_pcppages_bulk(struct zone *zone, int count,
1189 struct per_cpu_pages *pcp,
1190 int pindex)
1191 {
1192 unsigned long flags;
1193 unsigned int order;
1194 bool isolated_pageblocks;
1195 struct page *page;
1196
1197 /*
1198 * Ensure proper count is passed which otherwise would stuck in the
1199 * below while (list_empty(list)) loop.
1200 */
1201 count = min(pcp->count, count);
1202
1203 /* Ensure requested pindex is drained first. */
1204 pindex = pindex - 1;
1205
1206 spin_lock_irqsave(&zone->lock, flags);
1207 isolated_pageblocks = has_isolate_pageblock(zone);
1208
1209 while (count > 0) {
1210 struct list_head *list;
1211 int nr_pages;
1212
1213 /* Remove pages from lists in a round-robin fashion. */
1214 do {
1215 if (++pindex > NR_PCP_LISTS - 1)
1216 pindex = 0;
1217 list = &pcp->lists[pindex];
1218 } while (list_empty(list));
1219
1220 order = pindex_to_order(pindex);
1221 nr_pages = 1 << order;
1222 do {
1223 int mt;
1224
1225 page = list_last_entry(list, struct page, pcp_list);
1226 mt = get_pcppage_migratetype(page);
1227
1228 /* must delete to avoid corrupting pcp list */
1229 list_del(&page->pcp_list);
1230 count -= nr_pages;
1231 pcp->count -= nr_pages;
1232
1233 /* MIGRATE_ISOLATE page should not go to pcplists */
1234 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1235 /* Pageblock could have been isolated meanwhile */
1236 if (unlikely(isolated_pageblocks))
1237 mt = get_pageblock_migratetype(page);
1238
1239 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1240 trace_mm_page_pcpu_drain(page, order, mt);
1241 } while (count > 0 && !list_empty(list));
1242 }
1243
1244 spin_unlock_irqrestore(&zone->lock, flags);
1245 }
1246
free_one_page(struct zone * zone,struct page * page,unsigned long pfn,unsigned int order,int migratetype,fpi_t fpi_flags)1247 static void free_one_page(struct zone *zone,
1248 struct page *page, unsigned long pfn,
1249 unsigned int order,
1250 int migratetype, fpi_t fpi_flags)
1251 {
1252 unsigned long flags;
1253
1254 spin_lock_irqsave(&zone->lock, flags);
1255 if (unlikely(has_isolate_pageblock(zone) ||
1256 is_migrate_isolate(migratetype))) {
1257 migratetype = get_pfnblock_migratetype(page, pfn);
1258 }
1259 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1260 spin_unlock_irqrestore(&zone->lock, flags);
1261 }
1262
__free_pages_ok(struct page * page,unsigned int order,fpi_t fpi_flags)1263 static void __free_pages_ok(struct page *page, unsigned int order,
1264 fpi_t fpi_flags)
1265 {
1266 unsigned long flags;
1267 int migratetype;
1268 unsigned long pfn = page_to_pfn(page);
1269 struct zone *zone = page_zone(page);
1270
1271 if (!free_pages_prepare(page, order, fpi_flags))
1272 return;
1273
1274 /*
1275 * Calling get_pfnblock_migratetype() without spin_lock_irqsave() here
1276 * is used to avoid calling get_pfnblock_migratetype() under the lock.
1277 * This will reduce the lock holding time.
1278 */
1279 migratetype = get_pfnblock_migratetype(page, pfn);
1280
1281 spin_lock_irqsave(&zone->lock, flags);
1282 if (unlikely(has_isolate_pageblock(zone) ||
1283 is_migrate_isolate(migratetype))) {
1284 migratetype = get_pfnblock_migratetype(page, pfn);
1285 }
1286 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1287 spin_unlock_irqrestore(&zone->lock, flags);
1288
1289 __count_vm_events(PGFREE, 1 << order);
1290 }
1291
__free_pages_core(struct page * page,unsigned int order)1292 void __free_pages_core(struct page *page, unsigned int order)
1293 {
1294 unsigned int nr_pages = 1 << order;
1295 struct page *p = page;
1296 unsigned int loop;
1297
1298 /*
1299 * When initializing the memmap, __init_single_page() sets the refcount
1300 * of all pages to 1 ("allocated"/"not free"). We have to set the
1301 * refcount of all involved pages to 0.
1302 */
1303 prefetchw(p);
1304 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1305 prefetchw(p + 1);
1306 __ClearPageReserved(p);
1307 set_page_count(p, 0);
1308 }
1309 __ClearPageReserved(p);
1310 set_page_count(p, 0);
1311
1312 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1313
1314 if (page_contains_unaccepted(page, order)) {
1315 if (order == MAX_ORDER && __free_unaccepted(page))
1316 return;
1317
1318 accept_page(page, order);
1319 }
1320
1321 /*
1322 * Bypass PCP and place fresh pages right to the tail, primarily
1323 * relevant for memory onlining.
1324 */
1325 __free_pages_ok(page, order, FPI_TO_TAIL);
1326 }
1327
1328 /*
1329 * Check that the whole (or subset of) a pageblock given by the interval of
1330 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1331 * with the migration of free compaction scanner.
1332 *
1333 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1334 *
1335 * It's possible on some configurations to have a setup like node0 node1 node0
1336 * i.e. it's possible that all pages within a zones range of pages do not
1337 * belong to a single zone. We assume that a border between node0 and node1
1338 * can occur within a single pageblock, but not a node0 node1 node0
1339 * interleaving within a single pageblock. It is therefore sufficient to check
1340 * the first and last page of a pageblock and avoid checking each individual
1341 * page in a pageblock.
1342 *
1343 * Note: the function may return non-NULL struct page even for a page block
1344 * which contains a memory hole (i.e. there is no physical memory for a subset
1345 * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
1346 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole
1347 * even though the start pfn is online and valid. This should be safe most of
1348 * the time because struct pages are still initialized via init_unavailable_range()
1349 * and pfn walkers shouldn't touch any physical memory range for which they do
1350 * not recognize any specific metadata in struct pages.
1351 */
__pageblock_pfn_to_page(unsigned long start_pfn,unsigned long end_pfn,struct zone * zone)1352 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1353 unsigned long end_pfn, struct zone *zone)
1354 {
1355 struct page *start_page;
1356 struct page *end_page;
1357
1358 /* end_pfn is one past the range we are checking */
1359 end_pfn--;
1360
1361 if (!pfn_valid(end_pfn))
1362 return NULL;
1363
1364 start_page = pfn_to_online_page(start_pfn);
1365 if (!start_page)
1366 return NULL;
1367
1368 if (page_zone(start_page) != zone)
1369 return NULL;
1370
1371 end_page = pfn_to_page(end_pfn);
1372
1373 /* This gives a shorter code than deriving page_zone(end_page) */
1374 if (page_zone_id(start_page) != page_zone_id(end_page))
1375 return NULL;
1376
1377 return start_page;
1378 }
1379
1380 /*
1381 * The order of subdivision here is critical for the IO subsystem.
1382 * Please do not alter this order without good reasons and regression
1383 * testing. Specifically, as large blocks of memory are subdivided,
1384 * the order in which smaller blocks are delivered depends on the order
1385 * they're subdivided in this function. This is the primary factor
1386 * influencing the order in which pages are delivered to the IO
1387 * subsystem according to empirical testing, and this is also justified
1388 * by considering the behavior of a buddy system containing a single
1389 * large block of memory acted on by a series of small allocations.
1390 * This behavior is a critical factor in sglist merging's success.
1391 *
1392 * -- nyc
1393 */
expand(struct zone * zone,struct page * page,int low,int high,int migratetype)1394 static inline void expand(struct zone *zone, struct page *page,
1395 int low, int high, int migratetype)
1396 {
1397 unsigned long size = 1 << high;
1398
1399 while (high > low) {
1400 high--;
1401 size >>= 1;
1402 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1403
1404 /*
1405 * Mark as guard pages (or page), that will allow to
1406 * merge back to allocator when buddy will be freed.
1407 * Corresponding page table entries will not be touched,
1408 * pages will stay not present in virtual address space
1409 */
1410 if (set_page_guard(zone, &page[size], high, migratetype))
1411 continue;
1412
1413 add_to_free_list(&page[size], zone, high, migratetype);
1414 set_buddy_order(&page[size], high);
1415 }
1416 }
1417
check_new_page_bad(struct page * page)1418 static void check_new_page_bad(struct page *page)
1419 {
1420 if (unlikely(page->flags & __PG_HWPOISON)) {
1421 /* Don't complain about hwpoisoned pages */
1422 page_mapcount_reset(page); /* remove PageBuddy */
1423 return;
1424 }
1425
1426 bad_page(page,
1427 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
1428 }
1429
1430 /*
1431 * This page is about to be returned from the page allocator
1432 */
check_new_page(struct page * page)1433 static int check_new_page(struct page *page)
1434 {
1435 if (likely(page_expected_state(page,
1436 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1437 return 0;
1438
1439 check_new_page_bad(page);
1440 return 1;
1441 }
1442
check_new_pages(struct page * page,unsigned int order)1443 static inline bool check_new_pages(struct page *page, unsigned int order)
1444 {
1445 if (is_check_pages_enabled()) {
1446 for (int i = 0; i < (1 << order); i++) {
1447 struct page *p = page + i;
1448
1449 if (check_new_page(p))
1450 return true;
1451 }
1452 }
1453
1454 return false;
1455 }
1456
should_skip_kasan_unpoison(gfp_t flags)1457 static inline bool should_skip_kasan_unpoison(gfp_t flags)
1458 {
1459 /* Don't skip if a software KASAN mode is enabled. */
1460 if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
1461 IS_ENABLED(CONFIG_KASAN_SW_TAGS))
1462 return false;
1463
1464 /* Skip, if hardware tag-based KASAN is not enabled. */
1465 if (!kasan_hw_tags_enabled())
1466 return true;
1467
1468 /*
1469 * With hardware tag-based KASAN enabled, skip if this has been
1470 * requested via __GFP_SKIP_KASAN.
1471 */
1472 return flags & __GFP_SKIP_KASAN;
1473 }
1474
should_skip_init(gfp_t flags)1475 static inline bool should_skip_init(gfp_t flags)
1476 {
1477 /* Don't skip, if hardware tag-based KASAN is not enabled. */
1478 if (!kasan_hw_tags_enabled())
1479 return false;
1480
1481 /* For hardware tag-based KASAN, skip if requested. */
1482 return (flags & __GFP_SKIP_ZERO);
1483 }
1484
post_alloc_hook(struct page * page,unsigned int order,gfp_t gfp_flags)1485 inline void post_alloc_hook(struct page *page, unsigned int order,
1486 gfp_t gfp_flags)
1487 {
1488 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
1489 !should_skip_init(gfp_flags);
1490 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
1491 int i;
1492
1493 set_page_private(page, 0);
1494 set_page_refcounted(page);
1495
1496 arch_alloc_page(page, order);
1497 debug_pagealloc_map_pages(page, 1 << order);
1498
1499 /*
1500 * Page unpoisoning must happen before memory initialization.
1501 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
1502 * allocations and the page unpoisoning code will complain.
1503 */
1504 kernel_unpoison_pages(page, 1 << order);
1505
1506 /*
1507 * As memory initialization might be integrated into KASAN,
1508 * KASAN unpoisoning and memory initializion code must be
1509 * kept together to avoid discrepancies in behavior.
1510 */
1511
1512 /*
1513 * If memory tags should be zeroed
1514 * (which happens only when memory should be initialized as well).
1515 */
1516 if (zero_tags) {
1517 /* Initialize both memory and memory tags. */
1518 for (i = 0; i != 1 << order; ++i)
1519 tag_clear_highpage(page + i);
1520
1521 /* Take note that memory was initialized by the loop above. */
1522 init = false;
1523 }
1524 if (!should_skip_kasan_unpoison(gfp_flags) &&
1525 kasan_unpoison_pages(page, order, init)) {
1526 /* Take note that memory was initialized by KASAN. */
1527 if (kasan_has_integrated_init())
1528 init = false;
1529 } else {
1530 /*
1531 * If memory tags have not been set by KASAN, reset the page
1532 * tags to ensure page_address() dereferencing does not fault.
1533 */
1534 for (i = 0; i != 1 << order; ++i)
1535 page_kasan_tag_reset(page + i);
1536 }
1537 /* If memory is still not initialized, initialize it now. */
1538 if (init)
1539 kernel_init_pages(page, 1 << order);
1540
1541 set_page_owner(page, order, gfp_flags);
1542 page_table_check_alloc(page, order);
1543 }
1544
prep_new_page(struct page * page,unsigned int order,gfp_t gfp_flags,unsigned int alloc_flags)1545 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1546 unsigned int alloc_flags)
1547 {
1548 post_alloc_hook(page, order, gfp_flags);
1549
1550 if (order && (gfp_flags & __GFP_COMP))
1551 prep_compound_page(page, order);
1552
1553 /*
1554 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1555 * allocate the page. The expectation is that the caller is taking
1556 * steps that will free more memory. The caller should avoid the page
1557 * being used for !PFMEMALLOC purposes.
1558 */
1559 if (alloc_flags & ALLOC_NO_WATERMARKS)
1560 set_page_pfmemalloc(page);
1561 else
1562 clear_page_pfmemalloc(page);
1563 }
1564
1565 /*
1566 * Go through the free lists for the given migratetype and remove
1567 * the smallest available page from the freelists
1568 */
1569 static __always_inline
__rmqueue_smallest(struct zone * zone,unsigned int order,int migratetype)1570 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1571 int migratetype)
1572 {
1573 unsigned int current_order;
1574 struct free_area *area;
1575 struct page *page;
1576
1577 /* Find a page of the appropriate size in the preferred list */
1578 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
1579 area = &(zone->free_area[current_order]);
1580 page = get_page_from_free_area(area, migratetype);
1581 if (!page)
1582 continue;
1583 del_page_from_free_list(page, zone, current_order);
1584 expand(zone, page, order, current_order, migratetype);
1585 set_pcppage_migratetype(page, migratetype);
1586 trace_mm_page_alloc_zone_locked(page, order, migratetype,
1587 pcp_allowed_order(order) &&
1588 migratetype < MIGRATE_PCPTYPES);
1589 return page;
1590 }
1591
1592 return NULL;
1593 }
1594
1595
1596 /*
1597 * This array describes the order lists are fallen back to when
1598 * the free lists for the desirable migrate type are depleted
1599 *
1600 * The other migratetypes do not have fallbacks.
1601 */
1602 static int fallbacks[MIGRATE_TYPES][MIGRATE_PCPTYPES - 1] = {
1603 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
1604 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
1605 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
1606 };
1607
1608 #ifdef CONFIG_CMA
__rmqueue_cma_fallback(struct zone * zone,unsigned int order)1609 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1610 unsigned int order)
1611 {
1612 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1613 }
1614 #else
__rmqueue_cma_fallback(struct zone * zone,unsigned int order)1615 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1616 unsigned int order) { return NULL; }
1617 #endif
1618
1619 /*
1620 * Move the free pages in a range to the freelist tail of the requested type.
1621 * Note that start_page and end_pages are not aligned on a pageblock
1622 * boundary. If alignment is required, use move_freepages_block()
1623 */
move_freepages(struct zone * zone,unsigned long start_pfn,unsigned long end_pfn,int migratetype,int * num_movable)1624 static int move_freepages(struct zone *zone,
1625 unsigned long start_pfn, unsigned long end_pfn,
1626 int migratetype, int *num_movable)
1627 {
1628 struct page *page;
1629 unsigned long pfn;
1630 unsigned int order;
1631 int pages_moved = 0;
1632
1633 for (pfn = start_pfn; pfn <= end_pfn;) {
1634 page = pfn_to_page(pfn);
1635 if (!PageBuddy(page)) {
1636 /*
1637 * We assume that pages that could be isolated for
1638 * migration are movable. But we don't actually try
1639 * isolating, as that would be expensive.
1640 */
1641 if (num_movable &&
1642 (PageLRU(page) || __PageMovable(page)))
1643 (*num_movable)++;
1644 pfn++;
1645 continue;
1646 }
1647
1648 /* Make sure we are not inadvertently changing nodes */
1649 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1650 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1651
1652 order = buddy_order(page);
1653 move_to_free_list(page, zone, order, migratetype);
1654 pfn += 1 << order;
1655 pages_moved += 1 << order;
1656 }
1657
1658 return pages_moved;
1659 }
1660
move_freepages_block(struct zone * zone,struct page * page,int migratetype,int * num_movable)1661 int move_freepages_block(struct zone *zone, struct page *page,
1662 int migratetype, int *num_movable)
1663 {
1664 unsigned long start_pfn, end_pfn, pfn;
1665
1666 if (num_movable)
1667 *num_movable = 0;
1668
1669 pfn = page_to_pfn(page);
1670 start_pfn = pageblock_start_pfn(pfn);
1671 end_pfn = pageblock_end_pfn(pfn) - 1;
1672
1673 /* Do not cross zone boundaries */
1674 if (!zone_spans_pfn(zone, start_pfn))
1675 start_pfn = pfn;
1676 if (!zone_spans_pfn(zone, end_pfn))
1677 return 0;
1678
1679 return move_freepages(zone, start_pfn, end_pfn, migratetype,
1680 num_movable);
1681 }
1682
change_pageblock_range(struct page * pageblock_page,int start_order,int migratetype)1683 static void change_pageblock_range(struct page *pageblock_page,
1684 int start_order, int migratetype)
1685 {
1686 int nr_pageblocks = 1 << (start_order - pageblock_order);
1687
1688 while (nr_pageblocks--) {
1689 set_pageblock_migratetype(pageblock_page, migratetype);
1690 pageblock_page += pageblock_nr_pages;
1691 }
1692 }
1693
1694 /*
1695 * When we are falling back to another migratetype during allocation, try to
1696 * steal extra free pages from the same pageblocks to satisfy further
1697 * allocations, instead of polluting multiple pageblocks.
1698 *
1699 * If we are stealing a relatively large buddy page, it is likely there will
1700 * be more free pages in the pageblock, so try to steal them all. For
1701 * reclaimable and unmovable allocations, we steal regardless of page size,
1702 * as fragmentation caused by those allocations polluting movable pageblocks
1703 * is worse than movable allocations stealing from unmovable and reclaimable
1704 * pageblocks.
1705 */
can_steal_fallback(unsigned int order,int start_mt)1706 static bool can_steal_fallback(unsigned int order, int start_mt)
1707 {
1708 /*
1709 * Leaving this order check is intended, although there is
1710 * relaxed order check in next check. The reason is that
1711 * we can actually steal whole pageblock if this condition met,
1712 * but, below check doesn't guarantee it and that is just heuristic
1713 * so could be changed anytime.
1714 */
1715 if (order >= pageblock_order)
1716 return true;
1717
1718 if (order >= pageblock_order / 2 ||
1719 start_mt == MIGRATE_RECLAIMABLE ||
1720 start_mt == MIGRATE_UNMOVABLE ||
1721 page_group_by_mobility_disabled)
1722 return true;
1723
1724 return false;
1725 }
1726
boost_watermark(struct zone * zone)1727 static inline bool boost_watermark(struct zone *zone)
1728 {
1729 unsigned long max_boost;
1730
1731 if (!watermark_boost_factor)
1732 return false;
1733 /*
1734 * Don't bother in zones that are unlikely to produce results.
1735 * On small machines, including kdump capture kernels running
1736 * in a small area, boosting the watermark can cause an out of
1737 * memory situation immediately.
1738 */
1739 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
1740 return false;
1741
1742 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
1743 watermark_boost_factor, 10000);
1744
1745 /*
1746 * high watermark may be uninitialised if fragmentation occurs
1747 * very early in boot so do not boost. We do not fall
1748 * through and boost by pageblock_nr_pages as failing
1749 * allocations that early means that reclaim is not going
1750 * to help and it may even be impossible to reclaim the
1751 * boosted watermark resulting in a hang.
1752 */
1753 if (!max_boost)
1754 return false;
1755
1756 max_boost = max(pageblock_nr_pages, max_boost);
1757
1758 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
1759 max_boost);
1760
1761 return true;
1762 }
1763
1764 /*
1765 * This function implements actual steal behaviour. If order is large enough,
1766 * we can steal whole pageblock. If not, we first move freepages in this
1767 * pageblock to our migratetype and determine how many already-allocated pages
1768 * are there in the pageblock with a compatible migratetype. If at least half
1769 * of pages are free or compatible, we can change migratetype of the pageblock
1770 * itself, so pages freed in the future will be put on the correct free list.
1771 */
steal_suitable_fallback(struct zone * zone,struct page * page,unsigned int alloc_flags,int start_type,bool whole_block)1772 static void steal_suitable_fallback(struct zone *zone, struct page *page,
1773 unsigned int alloc_flags, int start_type, bool whole_block)
1774 {
1775 unsigned int current_order = buddy_order(page);
1776 int free_pages, movable_pages, alike_pages;
1777 int old_block_type;
1778
1779 old_block_type = get_pageblock_migratetype(page);
1780
1781 /*
1782 * This can happen due to races and we want to prevent broken
1783 * highatomic accounting.
1784 */
1785 if (is_migrate_highatomic(old_block_type))
1786 goto single_page;
1787
1788 /* Take ownership for orders >= pageblock_order */
1789 if (current_order >= pageblock_order) {
1790 change_pageblock_range(page, current_order, start_type);
1791 goto single_page;
1792 }
1793
1794 /*
1795 * Boost watermarks to increase reclaim pressure to reduce the
1796 * likelihood of future fallbacks. Wake kswapd now as the node
1797 * may be balanced overall and kswapd will not wake naturally.
1798 */
1799 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
1800 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
1801
1802 /* We are not allowed to try stealing from the whole block */
1803 if (!whole_block)
1804 goto single_page;
1805
1806 free_pages = move_freepages_block(zone, page, start_type,
1807 &movable_pages);
1808 /* moving whole block can fail due to zone boundary conditions */
1809 if (!free_pages)
1810 goto single_page;
1811
1812 /*
1813 * Determine how many pages are compatible with our allocation.
1814 * For movable allocation, it's the number of movable pages which
1815 * we just obtained. For other types it's a bit more tricky.
1816 */
1817 if (start_type == MIGRATE_MOVABLE) {
1818 alike_pages = movable_pages;
1819 } else {
1820 /*
1821 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
1822 * to MOVABLE pageblock, consider all non-movable pages as
1823 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
1824 * vice versa, be conservative since we can't distinguish the
1825 * exact migratetype of non-movable pages.
1826 */
1827 if (old_block_type == MIGRATE_MOVABLE)
1828 alike_pages = pageblock_nr_pages
1829 - (free_pages + movable_pages);
1830 else
1831 alike_pages = 0;
1832 }
1833 /*
1834 * If a sufficient number of pages in the block are either free or of
1835 * compatible migratability as our allocation, claim the whole block.
1836 */
1837 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
1838 page_group_by_mobility_disabled)
1839 set_pageblock_migratetype(page, start_type);
1840
1841 return;
1842
1843 single_page:
1844 move_to_free_list(page, zone, current_order, start_type);
1845 }
1846
1847 /*
1848 * Check whether there is a suitable fallback freepage with requested order.
1849 * If only_stealable is true, this function returns fallback_mt only if
1850 * we can steal other freepages all together. This would help to reduce
1851 * fragmentation due to mixed migratetype pages in one pageblock.
1852 */
find_suitable_fallback(struct free_area * area,unsigned int order,int migratetype,bool only_stealable,bool * can_steal)1853 int find_suitable_fallback(struct free_area *area, unsigned int order,
1854 int migratetype, bool only_stealable, bool *can_steal)
1855 {
1856 int i;
1857 int fallback_mt;
1858
1859 if (area->nr_free == 0)
1860 return -1;
1861
1862 *can_steal = false;
1863 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
1864 fallback_mt = fallbacks[migratetype][i];
1865 if (free_area_empty(area, fallback_mt))
1866 continue;
1867
1868 if (can_steal_fallback(order, migratetype))
1869 *can_steal = true;
1870
1871 if (!only_stealable)
1872 return fallback_mt;
1873
1874 if (*can_steal)
1875 return fallback_mt;
1876 }
1877
1878 return -1;
1879 }
1880
1881 /*
1882 * Reserve a pageblock for exclusive use of high-order atomic allocations if
1883 * there are no empty page blocks that contain a page with a suitable order
1884 */
reserve_highatomic_pageblock(struct page * page,struct zone * zone)1885 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
1886 {
1887 int mt;
1888 unsigned long max_managed, flags;
1889
1890 /*
1891 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
1892 * Check is race-prone but harmless.
1893 */
1894 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
1895 if (zone->nr_reserved_highatomic >= max_managed)
1896 return;
1897
1898 spin_lock_irqsave(&zone->lock, flags);
1899
1900 /* Recheck the nr_reserved_highatomic limit under the lock */
1901 if (zone->nr_reserved_highatomic >= max_managed)
1902 goto out_unlock;
1903
1904 /* Yoink! */
1905 mt = get_pageblock_migratetype(page);
1906 /* Only reserve normal pageblocks (i.e., they can merge with others) */
1907 if (migratetype_is_mergeable(mt)) {
1908 zone->nr_reserved_highatomic += pageblock_nr_pages;
1909 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
1910 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
1911 }
1912
1913 out_unlock:
1914 spin_unlock_irqrestore(&zone->lock, flags);
1915 }
1916
1917 /*
1918 * Used when an allocation is about to fail under memory pressure. This
1919 * potentially hurts the reliability of high-order allocations when under
1920 * intense memory pressure but failed atomic allocations should be easier
1921 * to recover from than an OOM.
1922 *
1923 * If @force is true, try to unreserve a pageblock even though highatomic
1924 * pageblock is exhausted.
1925 */
unreserve_highatomic_pageblock(const struct alloc_context * ac,bool force)1926 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
1927 bool force)
1928 {
1929 struct zonelist *zonelist = ac->zonelist;
1930 unsigned long flags;
1931 struct zoneref *z;
1932 struct zone *zone;
1933 struct page *page;
1934 int order;
1935 bool ret;
1936
1937 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
1938 ac->nodemask) {
1939 /*
1940 * Preserve at least one pageblock unless memory pressure
1941 * is really high.
1942 */
1943 if (!force && zone->nr_reserved_highatomic <=
1944 pageblock_nr_pages)
1945 continue;
1946
1947 spin_lock_irqsave(&zone->lock, flags);
1948 for (order = 0; order < NR_PAGE_ORDERS; order++) {
1949 struct free_area *area = &(zone->free_area[order]);
1950
1951 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
1952 if (!page)
1953 continue;
1954
1955 /*
1956 * In page freeing path, migratetype change is racy so
1957 * we can counter several free pages in a pageblock
1958 * in this loop although we changed the pageblock type
1959 * from highatomic to ac->migratetype. So we should
1960 * adjust the count once.
1961 */
1962 if (is_migrate_highatomic_page(page)) {
1963 /*
1964 * It should never happen but changes to
1965 * locking could inadvertently allow a per-cpu
1966 * drain to add pages to MIGRATE_HIGHATOMIC
1967 * while unreserving so be safe and watch for
1968 * underflows.
1969 */
1970 zone->nr_reserved_highatomic -= min(
1971 pageblock_nr_pages,
1972 zone->nr_reserved_highatomic);
1973 }
1974
1975 /*
1976 * Convert to ac->migratetype and avoid the normal
1977 * pageblock stealing heuristics. Minimally, the caller
1978 * is doing the work and needs the pages. More
1979 * importantly, if the block was always converted to
1980 * MIGRATE_UNMOVABLE or another type then the number
1981 * of pageblocks that cannot be completely freed
1982 * may increase.
1983 */
1984 set_pageblock_migratetype(page, ac->migratetype);
1985 ret = move_freepages_block(zone, page, ac->migratetype,
1986 NULL);
1987 if (ret) {
1988 spin_unlock_irqrestore(&zone->lock, flags);
1989 return ret;
1990 }
1991 }
1992 spin_unlock_irqrestore(&zone->lock, flags);
1993 }
1994
1995 return false;
1996 }
1997
1998 /*
1999 * Try finding a free buddy page on the fallback list and put it on the free
2000 * list of requested migratetype, possibly along with other pages from the same
2001 * block, depending on fragmentation avoidance heuristics. Returns true if
2002 * fallback was found so that __rmqueue_smallest() can grab it.
2003 *
2004 * The use of signed ints for order and current_order is a deliberate
2005 * deviation from the rest of this file, to make the for loop
2006 * condition simpler.
2007 */
2008 static __always_inline bool
__rmqueue_fallback(struct zone * zone,int order,int start_migratetype,unsigned int alloc_flags)2009 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2010 unsigned int alloc_flags)
2011 {
2012 struct free_area *area;
2013 int current_order;
2014 int min_order = order;
2015 struct page *page;
2016 int fallback_mt;
2017 bool can_steal;
2018
2019 /*
2020 * Do not steal pages from freelists belonging to other pageblocks
2021 * i.e. orders < pageblock_order. If there are no local zones free,
2022 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2023 */
2024 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
2025 min_order = pageblock_order;
2026
2027 /*
2028 * Find the largest available free page in the other list. This roughly
2029 * approximates finding the pageblock with the most free pages, which
2030 * would be too costly to do exactly.
2031 */
2032 for (current_order = MAX_ORDER; current_order >= min_order;
2033 --current_order) {
2034 area = &(zone->free_area[current_order]);
2035 fallback_mt = find_suitable_fallback(area, current_order,
2036 start_migratetype, false, &can_steal);
2037 if (fallback_mt == -1)
2038 continue;
2039
2040 /*
2041 * We cannot steal all free pages from the pageblock and the
2042 * requested migratetype is movable. In that case it's better to
2043 * steal and split the smallest available page instead of the
2044 * largest available page, because even if the next movable
2045 * allocation falls back into a different pageblock than this
2046 * one, it won't cause permanent fragmentation.
2047 */
2048 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2049 && current_order > order)
2050 goto find_smallest;
2051
2052 goto do_steal;
2053 }
2054
2055 return false;
2056
2057 find_smallest:
2058 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
2059 area = &(zone->free_area[current_order]);
2060 fallback_mt = find_suitable_fallback(area, current_order,
2061 start_migratetype, false, &can_steal);
2062 if (fallback_mt != -1)
2063 break;
2064 }
2065
2066 /*
2067 * This should not happen - we already found a suitable fallback
2068 * when looking for the largest page.
2069 */
2070 VM_BUG_ON(current_order > MAX_ORDER);
2071
2072 do_steal:
2073 page = get_page_from_free_area(area, fallback_mt);
2074
2075 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2076 can_steal);
2077
2078 trace_mm_page_alloc_extfrag(page, order, current_order,
2079 start_migratetype, fallback_mt);
2080
2081 return true;
2082
2083 }
2084
2085 /*
2086 * Do the hard work of removing an element from the buddy allocator.
2087 * Call me with the zone->lock already held.
2088 */
2089 static __always_inline struct page *
__rmqueue(struct zone * zone,unsigned int order,int migratetype,unsigned int alloc_flags)2090 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2091 unsigned int alloc_flags)
2092 {
2093 struct page *page;
2094
2095 if (IS_ENABLED(CONFIG_CMA)) {
2096 /*
2097 * Balance movable allocations between regular and CMA areas by
2098 * allocating from CMA when over half of the zone's free memory
2099 * is in the CMA area.
2100 */
2101 if (alloc_flags & ALLOC_CMA &&
2102 zone_page_state(zone, NR_FREE_CMA_PAGES) >
2103 zone_page_state(zone, NR_FREE_PAGES) / 2) {
2104 page = __rmqueue_cma_fallback(zone, order);
2105 if (page)
2106 return page;
2107 }
2108 }
2109 retry:
2110 page = __rmqueue_smallest(zone, order, migratetype);
2111 if (unlikely(!page)) {
2112 if (alloc_flags & ALLOC_CMA)
2113 page = __rmqueue_cma_fallback(zone, order);
2114
2115 if (!page && __rmqueue_fallback(zone, order, migratetype,
2116 alloc_flags))
2117 goto retry;
2118 }
2119 return page;
2120 }
2121
2122 /*
2123 * Obtain a specified number of elements from the buddy allocator, all under
2124 * a single hold of the lock, for efficiency. Add them to the supplied list.
2125 * Returns the number of new pages which were placed at *list.
2126 */
rmqueue_bulk(struct zone * zone,unsigned int order,unsigned long count,struct list_head * list,int migratetype,unsigned int alloc_flags)2127 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2128 unsigned long count, struct list_head *list,
2129 int migratetype, unsigned int alloc_flags)
2130 {
2131 unsigned long flags;
2132 int i;
2133
2134 spin_lock_irqsave(&zone->lock, flags);
2135 for (i = 0; i < count; ++i) {
2136 struct page *page = __rmqueue(zone, order, migratetype,
2137 alloc_flags);
2138 if (unlikely(page == NULL))
2139 break;
2140
2141 /*
2142 * Split buddy pages returned by expand() are received here in
2143 * physical page order. The page is added to the tail of
2144 * caller's list. From the callers perspective, the linked list
2145 * is ordered by page number under some conditions. This is
2146 * useful for IO devices that can forward direction from the
2147 * head, thus also in the physical page order. This is useful
2148 * for IO devices that can merge IO requests if the physical
2149 * pages are ordered properly.
2150 */
2151 list_add_tail(&page->pcp_list, list);
2152 if (is_migrate_cma(get_pcppage_migratetype(page)))
2153 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2154 -(1 << order));
2155 }
2156
2157 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2158 spin_unlock_irqrestore(&zone->lock, flags);
2159
2160 return i;
2161 }
2162
2163 #ifdef CONFIG_NUMA
2164 /*
2165 * Called from the vmstat counter updater to drain pagesets of this
2166 * currently executing processor on remote nodes after they have
2167 * expired.
2168 */
drain_zone_pages(struct zone * zone,struct per_cpu_pages * pcp)2169 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2170 {
2171 int to_drain, batch;
2172
2173 batch = READ_ONCE(pcp->batch);
2174 to_drain = min(pcp->count, batch);
2175 if (to_drain > 0) {
2176 spin_lock(&pcp->lock);
2177 free_pcppages_bulk(zone, to_drain, pcp, 0);
2178 spin_unlock(&pcp->lock);
2179 }
2180 }
2181 #endif
2182
2183 /*
2184 * Drain pcplists of the indicated processor and zone.
2185 */
drain_pages_zone(unsigned int cpu,struct zone * zone)2186 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2187 {
2188 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2189 int count;
2190
2191 do {
2192 spin_lock(&pcp->lock);
2193 count = pcp->count;
2194 if (count) {
2195 int to_drain = min(count,
2196 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
2197
2198 free_pcppages_bulk(zone, to_drain, pcp, 0);
2199 count -= to_drain;
2200 }
2201 spin_unlock(&pcp->lock);
2202 } while (count);
2203 }
2204
2205 /*
2206 * Drain pcplists of all zones on the indicated processor.
2207 */
drain_pages(unsigned int cpu)2208 static void drain_pages(unsigned int cpu)
2209 {
2210 struct zone *zone;
2211
2212 for_each_populated_zone(zone) {
2213 drain_pages_zone(cpu, zone);
2214 }
2215 }
2216
2217 /*
2218 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2219 */
drain_local_pages(struct zone * zone)2220 void drain_local_pages(struct zone *zone)
2221 {
2222 int cpu = smp_processor_id();
2223
2224 if (zone)
2225 drain_pages_zone(cpu, zone);
2226 else
2227 drain_pages(cpu);
2228 }
2229
2230 /*
2231 * The implementation of drain_all_pages(), exposing an extra parameter to
2232 * drain on all cpus.
2233 *
2234 * drain_all_pages() is optimized to only execute on cpus where pcplists are
2235 * not empty. The check for non-emptiness can however race with a free to
2236 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2237 * that need the guarantee that every CPU has drained can disable the
2238 * optimizing racy check.
2239 */
__drain_all_pages(struct zone * zone,bool force_all_cpus)2240 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
2241 {
2242 int cpu;
2243
2244 /*
2245 * Allocate in the BSS so we won't require allocation in
2246 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2247 */
2248 static cpumask_t cpus_with_pcps;
2249
2250 /*
2251 * Do not drain if one is already in progress unless it's specific to
2252 * a zone. Such callers are primarily CMA and memory hotplug and need
2253 * the drain to be complete when the call returns.
2254 */
2255 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2256 if (!zone)
2257 return;
2258 mutex_lock(&pcpu_drain_mutex);
2259 }
2260
2261 /*
2262 * We don't care about racing with CPU hotplug event
2263 * as offline notification will cause the notified
2264 * cpu to drain that CPU pcps and on_each_cpu_mask
2265 * disables preemption as part of its processing
2266 */
2267 for_each_online_cpu(cpu) {
2268 struct per_cpu_pages *pcp;
2269 struct zone *z;
2270 bool has_pcps = false;
2271
2272 if (force_all_cpus) {
2273 /*
2274 * The pcp.count check is racy, some callers need a
2275 * guarantee that no cpu is missed.
2276 */
2277 has_pcps = true;
2278 } else if (zone) {
2279 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2280 if (pcp->count)
2281 has_pcps = true;
2282 } else {
2283 for_each_populated_zone(z) {
2284 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
2285 if (pcp->count) {
2286 has_pcps = true;
2287 break;
2288 }
2289 }
2290 }
2291
2292 if (has_pcps)
2293 cpumask_set_cpu(cpu, &cpus_with_pcps);
2294 else
2295 cpumask_clear_cpu(cpu, &cpus_with_pcps);
2296 }
2297
2298 for_each_cpu(cpu, &cpus_with_pcps) {
2299 if (zone)
2300 drain_pages_zone(cpu, zone);
2301 else
2302 drain_pages(cpu);
2303 }
2304
2305 mutex_unlock(&pcpu_drain_mutex);
2306 }
2307
2308 /*
2309 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2310 *
2311 * When zone parameter is non-NULL, spill just the single zone's pages.
2312 */
drain_all_pages(struct zone * zone)2313 void drain_all_pages(struct zone *zone)
2314 {
2315 __drain_all_pages(zone, false);
2316 }
2317
free_unref_page_prepare(struct page * page,unsigned long pfn,unsigned int order)2318 static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
2319 unsigned int order)
2320 {
2321 int migratetype;
2322
2323 if (!free_pages_prepare(page, order, FPI_NONE))
2324 return false;
2325
2326 migratetype = get_pfnblock_migratetype(page, pfn);
2327 set_pcppage_migratetype(page, migratetype);
2328 return true;
2329 }
2330
nr_pcp_free(struct per_cpu_pages * pcp,int high,bool free_high)2331 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high)
2332 {
2333 int min_nr_free, max_nr_free;
2334 int batch = READ_ONCE(pcp->batch);
2335
2336 /* Free everything if batch freeing high-order pages. */
2337 if (unlikely(free_high))
2338 return pcp->count;
2339
2340 /* Check for PCP disabled or boot pageset */
2341 if (unlikely(high < batch))
2342 return 1;
2343
2344 /* Leave at least pcp->batch pages on the list */
2345 min_nr_free = batch;
2346 max_nr_free = high - batch;
2347
2348 /*
2349 * Double the number of pages freed each time there is subsequent
2350 * freeing of pages without any allocation.
2351 */
2352 batch <<= pcp->free_factor;
2353 if (batch < max_nr_free && pcp->free_factor < CONFIG_PCP_BATCH_SCALE_MAX)
2354 pcp->free_factor++;
2355 batch = clamp(batch, min_nr_free, max_nr_free);
2356
2357 return batch;
2358 }
2359
nr_pcp_high(struct per_cpu_pages * pcp,struct zone * zone,bool free_high)2360 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
2361 bool free_high)
2362 {
2363 int high = READ_ONCE(pcp->high);
2364
2365 if (unlikely(!high || free_high))
2366 return 0;
2367
2368 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
2369 return high;
2370
2371 /*
2372 * If reclaim is active, limit the number of pages that can be
2373 * stored on pcp lists
2374 */
2375 return min(READ_ONCE(pcp->batch) << 2, high);
2376 }
2377
free_unref_page_commit(struct zone * zone,struct per_cpu_pages * pcp,struct page * page,int migratetype,unsigned int order)2378 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
2379 struct page *page, int migratetype,
2380 unsigned int order)
2381 {
2382 int high;
2383 int pindex;
2384 bool free_high;
2385
2386 __count_vm_events(PGFREE, 1 << order);
2387 pindex = order_to_pindex(migratetype, order);
2388 list_add(&page->pcp_list, &pcp->lists[pindex]);
2389 pcp->count += 1 << order;
2390
2391 /*
2392 * As high-order pages other than THP's stored on PCP can contribute
2393 * to fragmentation, limit the number stored when PCP is heavily
2394 * freeing without allocation. The remainder after bulk freeing
2395 * stops will be drained from vmstat refresh context.
2396 */
2397 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);
2398
2399 high = nr_pcp_high(pcp, zone, free_high);
2400 if (pcp->count >= high) {
2401 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex);
2402 }
2403 }
2404
2405 /*
2406 * Free a pcp page
2407 */
free_unref_page(struct page * page,unsigned int order)2408 void free_unref_page(struct page *page, unsigned int order)
2409 {
2410 unsigned long __maybe_unused UP_flags;
2411 struct per_cpu_pages *pcp;
2412 struct zone *zone;
2413 unsigned long pfn = page_to_pfn(page);
2414 int migratetype, pcpmigratetype;
2415
2416 if (!free_unref_page_prepare(page, pfn, order))
2417 return;
2418
2419 /*
2420 * We only track unmovable, reclaimable and movable on pcp lists.
2421 * Place ISOLATE pages on the isolated list because they are being
2422 * offlined but treat HIGHATOMIC and CMA as movable pages so we can
2423 * get those areas back if necessary. Otherwise, we may have to free
2424 * excessively into the page allocator
2425 */
2426 migratetype = pcpmigratetype = get_pcppage_migratetype(page);
2427 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
2428 if (unlikely(is_migrate_isolate(migratetype))) {
2429 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
2430 return;
2431 }
2432 pcpmigratetype = MIGRATE_MOVABLE;
2433 }
2434
2435 zone = page_zone(page);
2436 pcp_trylock_prepare(UP_flags);
2437 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2438 if (pcp) {
2439 free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
2440 pcp_spin_unlock(pcp);
2441 } else {
2442 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
2443 }
2444 pcp_trylock_finish(UP_flags);
2445 }
2446
2447 /*
2448 * Free a list of 0-order pages
2449 */
free_unref_page_list(struct list_head * list)2450 void free_unref_page_list(struct list_head *list)
2451 {
2452 unsigned long __maybe_unused UP_flags;
2453 struct page *page, *next;
2454 struct per_cpu_pages *pcp = NULL;
2455 struct zone *locked_zone = NULL;
2456 int batch_count = 0;
2457 int migratetype;
2458
2459 /* Prepare pages for freeing */
2460 list_for_each_entry_safe(page, next, list, lru) {
2461 unsigned long pfn = page_to_pfn(page);
2462 if (!free_unref_page_prepare(page, pfn, 0)) {
2463 list_del(&page->lru);
2464 continue;
2465 }
2466
2467 /*
2468 * Free isolated pages directly to the allocator, see
2469 * comment in free_unref_page.
2470 */
2471 migratetype = get_pcppage_migratetype(page);
2472 if (unlikely(is_migrate_isolate(migratetype))) {
2473 list_del(&page->lru);
2474 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
2475 continue;
2476 }
2477 }
2478
2479 list_for_each_entry_safe(page, next, list, lru) {
2480 struct zone *zone = page_zone(page);
2481
2482 list_del(&page->lru);
2483 migratetype = get_pcppage_migratetype(page);
2484
2485 /*
2486 * Either different zone requiring a different pcp lock or
2487 * excessive lock hold times when freeing a large list of
2488 * pages.
2489 */
2490 if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
2491 if (pcp) {
2492 pcp_spin_unlock(pcp);
2493 pcp_trylock_finish(UP_flags);
2494 }
2495
2496 batch_count = 0;
2497
2498 /*
2499 * trylock is necessary as pages may be getting freed
2500 * from IRQ or SoftIRQ context after an IO completion.
2501 */
2502 pcp_trylock_prepare(UP_flags);
2503 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2504 if (unlikely(!pcp)) {
2505 pcp_trylock_finish(UP_flags);
2506 free_one_page(zone, page, page_to_pfn(page),
2507 0, migratetype, FPI_NONE);
2508 locked_zone = NULL;
2509 continue;
2510 }
2511 locked_zone = zone;
2512 }
2513
2514 /*
2515 * Non-isolated types over MIGRATE_PCPTYPES get added
2516 * to the MIGRATE_MOVABLE pcp list.
2517 */
2518 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
2519 migratetype = MIGRATE_MOVABLE;
2520
2521 trace_mm_page_free_batched(page);
2522 free_unref_page_commit(zone, pcp, page, migratetype, 0);
2523 batch_count++;
2524 }
2525
2526 if (pcp) {
2527 pcp_spin_unlock(pcp);
2528 pcp_trylock_finish(UP_flags);
2529 }
2530 }
2531
2532 /*
2533 * split_page takes a non-compound higher-order page, and splits it into
2534 * n (1<<order) sub-pages: page[0..n]
2535 * Each sub-page must be freed individually.
2536 *
2537 * Note: this is probably too low level an operation for use in drivers.
2538 * Please consult with lkml before using this in your driver.
2539 */
split_page(struct page * page,unsigned int order)2540 void split_page(struct page *page, unsigned int order)
2541 {
2542 int i;
2543
2544 VM_BUG_ON_PAGE(PageCompound(page), page);
2545 VM_BUG_ON_PAGE(!page_count(page), page);
2546
2547 for (i = 1; i < (1 << order); i++)
2548 set_page_refcounted(page + i);
2549 split_page_owner(page, 1 << order);
2550 split_page_memcg(page, 1 << order);
2551 }
2552 EXPORT_SYMBOL_GPL(split_page);
2553
__isolate_free_page(struct page * page,unsigned int order)2554 int __isolate_free_page(struct page *page, unsigned int order)
2555 {
2556 struct zone *zone = page_zone(page);
2557 int mt = get_pageblock_migratetype(page);
2558
2559 if (!is_migrate_isolate(mt)) {
2560 unsigned long watermark;
2561 /*
2562 * Obey watermarks as if the page was being allocated. We can
2563 * emulate a high-order watermark check with a raised order-0
2564 * watermark, because we already know our high-order page
2565 * exists.
2566 */
2567 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
2568 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2569 return 0;
2570
2571 __mod_zone_freepage_state(zone, -(1UL << order), mt);
2572 }
2573
2574 del_page_from_free_list(page, zone, order);
2575
2576 /*
2577 * Set the pageblock if the isolated page is at least half of a
2578 * pageblock
2579 */
2580 if (order >= pageblock_order - 1) {
2581 struct page *endpage = page + (1 << order) - 1;
2582 for (; page < endpage; page += pageblock_nr_pages) {
2583 int mt = get_pageblock_migratetype(page);
2584 /*
2585 * Only change normal pageblocks (i.e., they can merge
2586 * with others)
2587 */
2588 if (migratetype_is_mergeable(mt))
2589 set_pageblock_migratetype(page,
2590 MIGRATE_MOVABLE);
2591 }
2592 }
2593
2594 return 1UL << order;
2595 }
2596
2597 /**
2598 * __putback_isolated_page - Return a now-isolated page back where we got it
2599 * @page: Page that was isolated
2600 * @order: Order of the isolated page
2601 * @mt: The page's pageblock's migratetype
2602 *
2603 * This function is meant to return a page pulled from the free lists via
2604 * __isolate_free_page back to the free lists they were pulled from.
2605 */
__putback_isolated_page(struct page * page,unsigned int order,int mt)2606 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
2607 {
2608 struct zone *zone = page_zone(page);
2609
2610 /* zone lock should be held when this function is called */
2611 lockdep_assert_held(&zone->lock);
2612
2613 /* Return isolated page to tail of freelist. */
2614 __free_one_page(page, page_to_pfn(page), zone, order, mt,
2615 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
2616 }
2617
2618 /*
2619 * Update NUMA hit/miss statistics
2620 */
zone_statistics(struct zone * preferred_zone,struct zone * z,long nr_account)2621 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2622 long nr_account)
2623 {
2624 #ifdef CONFIG_NUMA
2625 enum numa_stat_item local_stat = NUMA_LOCAL;
2626
2627 /* skip numa counters update if numa stats is disabled */
2628 if (!static_branch_likely(&vm_numa_stat_key))
2629 return;
2630
2631 if (zone_to_nid(z) != numa_node_id())
2632 local_stat = NUMA_OTHER;
2633
2634 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
2635 __count_numa_events(z, NUMA_HIT, nr_account);
2636 else {
2637 __count_numa_events(z, NUMA_MISS, nr_account);
2638 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
2639 }
2640 __count_numa_events(z, local_stat, nr_account);
2641 #endif
2642 }
2643
2644 static __always_inline
rmqueue_buddy(struct zone * preferred_zone,struct zone * zone,unsigned int order,unsigned int alloc_flags,int migratetype)2645 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
2646 unsigned int order, unsigned int alloc_flags,
2647 int migratetype)
2648 {
2649 struct page *page;
2650 unsigned long flags;
2651
2652 do {
2653 page = NULL;
2654 spin_lock_irqsave(&zone->lock, flags);
2655 if (alloc_flags & ALLOC_HIGHATOMIC)
2656 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2657 if (!page) {
2658 page = __rmqueue(zone, order, migratetype, alloc_flags);
2659
2660 /*
2661 * If the allocation fails, allow OOM handling access
2662 * to HIGHATOMIC reserves as failing now is worse than
2663 * failing a high-order atomic allocation in the
2664 * future.
2665 */
2666 if (!page && (alloc_flags & ALLOC_OOM))
2667 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2668
2669 if (!page) {
2670 spin_unlock_irqrestore(&zone->lock, flags);
2671 return NULL;
2672 }
2673 }
2674 __mod_zone_freepage_state(zone, -(1 << order),
2675 get_pcppage_migratetype(page));
2676 spin_unlock_irqrestore(&zone->lock, flags);
2677 } while (check_new_pages(page, order));
2678
2679 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2680 zone_statistics(preferred_zone, zone, 1);
2681
2682 return page;
2683 }
2684
2685 /* Remove page from the per-cpu list, caller must protect the list */
2686 static inline
__rmqueue_pcplist(struct zone * zone,unsigned int order,int migratetype,unsigned int alloc_flags,struct per_cpu_pages * pcp,struct list_head * list)2687 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
2688 int migratetype,
2689 unsigned int alloc_flags,
2690 struct per_cpu_pages *pcp,
2691 struct list_head *list)
2692 {
2693 struct page *page;
2694
2695 do {
2696 if (list_empty(list)) {
2697 int batch = READ_ONCE(pcp->batch);
2698 int alloced;
2699
2700 /*
2701 * Scale batch relative to order if batch implies
2702 * free pages can be stored on the PCP. Batch can
2703 * be 1 for small zones or for boot pagesets which
2704 * should never store free pages as the pages may
2705 * belong to arbitrary zones.
2706 */
2707 if (batch > 1)
2708 batch = max(batch >> order, 2);
2709 alloced = rmqueue_bulk(zone, order,
2710 batch, list,
2711 migratetype, alloc_flags);
2712
2713 pcp->count += alloced << order;
2714 if (unlikely(list_empty(list)))
2715 return NULL;
2716 }
2717
2718 page = list_first_entry(list, struct page, pcp_list);
2719 list_del(&page->pcp_list);
2720 pcp->count -= 1 << order;
2721 } while (check_new_pages(page, order));
2722
2723 return page;
2724 }
2725
2726 /* Lock and remove page from the per-cpu list */
rmqueue_pcplist(struct zone * preferred_zone,struct zone * zone,unsigned int order,int migratetype,unsigned int alloc_flags)2727 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2728 struct zone *zone, unsigned int order,
2729 int migratetype, unsigned int alloc_flags)
2730 {
2731 struct per_cpu_pages *pcp;
2732 struct list_head *list;
2733 struct page *page;
2734 unsigned long __maybe_unused UP_flags;
2735
2736 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
2737 pcp_trylock_prepare(UP_flags);
2738 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2739 if (!pcp) {
2740 pcp_trylock_finish(UP_flags);
2741 return NULL;
2742 }
2743
2744 /*
2745 * On allocation, reduce the number of pages that are batch freed.
2746 * See nr_pcp_free() where free_factor is increased for subsequent
2747 * frees.
2748 */
2749 pcp->free_factor >>= 1;
2750 list = &pcp->lists[order_to_pindex(migratetype, order)];
2751 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
2752 pcp_spin_unlock(pcp);
2753 pcp_trylock_finish(UP_flags);
2754 if (page) {
2755 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2756 zone_statistics(preferred_zone, zone, 1);
2757 }
2758 return page;
2759 }
2760
2761 /*
2762 * Allocate a page from the given zone.
2763 * Use pcplists for THP or "cheap" high-order allocations.
2764 */
2765
2766 /*
2767 * Do not instrument rmqueue() with KMSAN. This function may call
2768 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
2769 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
2770 * may call rmqueue() again, which will result in a deadlock.
2771 */
2772 __no_sanitize_memory
2773 static inline
rmqueue(struct zone * preferred_zone,struct zone * zone,unsigned int order,gfp_t gfp_flags,unsigned int alloc_flags,int migratetype)2774 struct page *rmqueue(struct zone *preferred_zone,
2775 struct zone *zone, unsigned int order,
2776 gfp_t gfp_flags, unsigned int alloc_flags,
2777 int migratetype)
2778 {
2779 struct page *page;
2780
2781 /*
2782 * We most definitely don't want callers attempting to
2783 * allocate greater than order-1 page units with __GFP_NOFAIL.
2784 */
2785 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
2786
2787 if (likely(pcp_allowed_order(order))) {
2788 page = rmqueue_pcplist(preferred_zone, zone, order,
2789 migratetype, alloc_flags);
2790 if (likely(page))
2791 goto out;
2792 }
2793
2794 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
2795 migratetype);
2796
2797 out:
2798 /* Separate test+clear to avoid unnecessary atomics */
2799 if ((alloc_flags & ALLOC_KSWAPD) &&
2800 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
2801 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2802 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
2803 }
2804
2805 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
2806 return page;
2807 }
2808
should_fail_alloc_page(gfp_t gfp_mask,unsigned int order)2809 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2810 {
2811 return __should_fail_alloc_page(gfp_mask, order);
2812 }
2813 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
2814
__zone_watermark_unusable_free(struct zone * z,unsigned int order,unsigned int alloc_flags)2815 static inline long __zone_watermark_unusable_free(struct zone *z,
2816 unsigned int order, unsigned int alloc_flags)
2817 {
2818 long unusable_free = (1 << order) - 1;
2819
2820 /*
2821 * If the caller does not have rights to reserves below the min
2822 * watermark then subtract the high-atomic reserves. This will
2823 * over-estimate the size of the atomic reserve but it avoids a search.
2824 */
2825 if (likely(!(alloc_flags & ALLOC_RESERVES)))
2826 unusable_free += z->nr_reserved_highatomic;
2827
2828 #ifdef CONFIG_CMA
2829 /* If allocation can't use CMA areas don't use free CMA pages */
2830 if (!(alloc_flags & ALLOC_CMA))
2831 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
2832 #endif
2833
2834 return unusable_free;
2835 }
2836
2837 /*
2838 * Return true if free base pages are above 'mark'. For high-order checks it
2839 * will return true of the order-0 watermark is reached and there is at least
2840 * one free page of a suitable size. Checking now avoids taking the zone lock
2841 * to check in the allocation paths if no pages are free.
2842 */
__zone_watermark_ok(struct zone * z,unsigned int order,unsigned long mark,int highest_zoneidx,unsigned int alloc_flags,long free_pages)2843 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2844 int highest_zoneidx, unsigned int alloc_flags,
2845 long free_pages)
2846 {
2847 long min = mark;
2848 int o;
2849
2850 /* free_pages may go negative - that's OK */
2851 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
2852
2853 if (unlikely(alloc_flags & ALLOC_RESERVES)) {
2854 /*
2855 * __GFP_HIGH allows access to 50% of the min reserve as well
2856 * as OOM.
2857 */
2858 if (alloc_flags & ALLOC_MIN_RESERVE) {
2859 min -= min / 2;
2860
2861 /*
2862 * Non-blocking allocations (e.g. GFP_ATOMIC) can
2863 * access more reserves than just __GFP_HIGH. Other
2864 * non-blocking allocations requests such as GFP_NOWAIT
2865 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
2866 * access to the min reserve.
2867 */
2868 if (alloc_flags & ALLOC_NON_BLOCK)
2869 min -= min / 4;
2870 }
2871
2872 /*
2873 * OOM victims can try even harder than the normal reserve
2874 * users on the grounds that it's definitely going to be in
2875 * the exit path shortly and free memory. Any allocation it
2876 * makes during the free path will be small and short-lived.
2877 */
2878 if (alloc_flags & ALLOC_OOM)
2879 min -= min / 2;
2880 }
2881
2882 /*
2883 * Check watermarks for an order-0 allocation request. If these
2884 * are not met, then a high-order request also cannot go ahead
2885 * even if a suitable page happened to be free.
2886 */
2887 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
2888 return false;
2889
2890 /* If this is an order-0 request then the watermark is fine */
2891 if (!order)
2892 return true;
2893
2894 /* For a high-order request, check at least one suitable page is free */
2895 for (o = order; o < NR_PAGE_ORDERS; o++) {
2896 struct free_area *area = &z->free_area[o];
2897 int mt;
2898
2899 if (!area->nr_free)
2900 continue;
2901
2902 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2903 if (!free_area_empty(area, mt))
2904 return true;
2905 }
2906
2907 #ifdef CONFIG_CMA
2908 if ((alloc_flags & ALLOC_CMA) &&
2909 !free_area_empty(area, MIGRATE_CMA)) {
2910 return true;
2911 }
2912 #endif
2913 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
2914 !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
2915 return true;
2916 }
2917 }
2918 return false;
2919 }
2920
zone_watermark_ok(struct zone * z,unsigned int order,unsigned long mark,int highest_zoneidx,unsigned int alloc_flags)2921 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
2922 int highest_zoneidx, unsigned int alloc_flags)
2923 {
2924 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
2925 zone_page_state(z, NR_FREE_PAGES));
2926 }
2927
zone_watermark_fast(struct zone * z,unsigned int order,unsigned long mark,int highest_zoneidx,unsigned int alloc_flags,gfp_t gfp_mask)2928 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
2929 unsigned long mark, int highest_zoneidx,
2930 unsigned int alloc_flags, gfp_t gfp_mask)
2931 {
2932 long free_pages;
2933
2934 free_pages = zone_page_state(z, NR_FREE_PAGES);
2935
2936 /*
2937 * Fast check for order-0 only. If this fails then the reserves
2938 * need to be calculated.
2939 */
2940 if (!order) {
2941 long usable_free;
2942 long reserved;
2943
2944 usable_free = free_pages;
2945 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
2946
2947 /* reserved may over estimate high-atomic reserves. */
2948 usable_free -= min(usable_free, reserved);
2949 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
2950 return true;
2951 }
2952
2953 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
2954 free_pages))
2955 return true;
2956
2957 /*
2958 * Ignore watermark boosting for __GFP_HIGH order-0 allocations
2959 * when checking the min watermark. The min watermark is the
2960 * point where boosting is ignored so that kswapd is woken up
2961 * when below the low watermark.
2962 */
2963 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
2964 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
2965 mark = z->_watermark[WMARK_MIN];
2966 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
2967 alloc_flags, free_pages);
2968 }
2969
2970 return false;
2971 }
2972
zone_watermark_ok_safe(struct zone * z,unsigned int order,unsigned long mark,int highest_zoneidx)2973 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
2974 unsigned long mark, int highest_zoneidx)
2975 {
2976 long free_pages = zone_page_state(z, NR_FREE_PAGES);
2977
2978 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
2979 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
2980
2981 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
2982 free_pages);
2983 }
2984
2985 #ifdef CONFIG_NUMA
2986 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
2987
zone_allows_reclaim(struct zone * local_zone,struct zone * zone)2988 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2989 {
2990 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
2991 node_reclaim_distance;
2992 }
2993 #else /* CONFIG_NUMA */
zone_allows_reclaim(struct zone * local_zone,struct zone * zone)2994 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2995 {
2996 return true;
2997 }
2998 #endif /* CONFIG_NUMA */
2999
3000 /*
3001 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3002 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3003 * premature use of a lower zone may cause lowmem pressure problems that
3004 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3005 * probably too small. It only makes sense to spread allocations to avoid
3006 * fragmentation between the Normal and DMA32 zones.
3007 */
3008 static inline unsigned int
alloc_flags_nofragment(struct zone * zone,gfp_t gfp_mask)3009 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3010 {
3011 unsigned int alloc_flags;
3012
3013 /*
3014 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3015 * to save a branch.
3016 */
3017 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3018
3019 #ifdef CONFIG_ZONE_DMA32
3020 if (!zone)
3021 return alloc_flags;
3022
3023 if (zone_idx(zone) != ZONE_NORMAL)
3024 return alloc_flags;
3025
3026 /*
3027 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3028 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3029 * on UMA that if Normal is populated then so is DMA32.
3030 */
3031 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3032 if (nr_online_nodes > 1 && !populated_zone(--zone))
3033 return alloc_flags;
3034
3035 alloc_flags |= ALLOC_NOFRAGMENT;
3036 #endif /* CONFIG_ZONE_DMA32 */
3037 return alloc_flags;
3038 }
3039
3040 /* Must be called after current_gfp_context() which can change gfp_mask */
gfp_to_alloc_flags_cma(gfp_t gfp_mask,unsigned int alloc_flags)3041 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3042 unsigned int alloc_flags)
3043 {
3044 #ifdef CONFIG_CMA
3045 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3046 alloc_flags |= ALLOC_CMA;
3047 #endif
3048 return alloc_flags;
3049 }
3050
3051 /*
3052 * get_page_from_freelist goes through the zonelist trying to allocate
3053 * a page.
3054 */
3055 static struct page *
get_page_from_freelist(gfp_t gfp_mask,unsigned int order,int alloc_flags,const struct alloc_context * ac)3056 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3057 const struct alloc_context *ac)
3058 {
3059 struct zoneref *z;
3060 struct zone *zone;
3061 struct pglist_data *last_pgdat = NULL;
3062 bool last_pgdat_dirty_ok = false;
3063 bool no_fallback;
3064
3065 retry:
3066 /*
3067 * Scan zonelist, looking for a zone with enough free.
3068 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c.
3069 */
3070 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3071 z = ac->preferred_zoneref;
3072 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3073 ac->nodemask) {
3074 struct page *page;
3075 unsigned long mark;
3076
3077 if (cpusets_enabled() &&
3078 (alloc_flags & ALLOC_CPUSET) &&
3079 !__cpuset_zone_allowed(zone, gfp_mask))
3080 continue;
3081 /*
3082 * When allocating a page cache page for writing, we
3083 * want to get it from a node that is within its dirty
3084 * limit, such that no single node holds more than its
3085 * proportional share of globally allowed dirty pages.
3086 * The dirty limits take into account the node's
3087 * lowmem reserves and high watermark so that kswapd
3088 * should be able to balance it without having to
3089 * write pages from its LRU list.
3090 *
3091 * XXX: For now, allow allocations to potentially
3092 * exceed the per-node dirty limit in the slowpath
3093 * (spread_dirty_pages unset) before going into reclaim,
3094 * which is important when on a NUMA setup the allowed
3095 * nodes are together not big enough to reach the
3096 * global limit. The proper fix for these situations
3097 * will require awareness of nodes in the
3098 * dirty-throttling and the flusher threads.
3099 */
3100 if (ac->spread_dirty_pages) {
3101 if (last_pgdat != zone->zone_pgdat) {
3102 last_pgdat = zone->zone_pgdat;
3103 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
3104 }
3105
3106 if (!last_pgdat_dirty_ok)
3107 continue;
3108 }
3109
3110 if (no_fallback && nr_online_nodes > 1 &&
3111 zone != ac->preferred_zoneref->zone) {
3112 int local_nid;
3113
3114 /*
3115 * If moving to a remote node, retry but allow
3116 * fragmenting fallbacks. Locality is more important
3117 * than fragmentation avoidance.
3118 */
3119 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3120 if (zone_to_nid(zone) != local_nid) {
3121 alloc_flags &= ~ALLOC_NOFRAGMENT;
3122 goto retry;
3123 }
3124 }
3125
3126 cond_accept_memory(zone, order);
3127
3128 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3129 if (!zone_watermark_fast(zone, order, mark,
3130 ac->highest_zoneidx, alloc_flags,
3131 gfp_mask)) {
3132 int ret;
3133
3134 if (cond_accept_memory(zone, order))
3135 goto try_this_zone;
3136
3137 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3138 /*
3139 * Watermark failed for this zone, but see if we can
3140 * grow this zone if it contains deferred pages.
3141 */
3142 if (deferred_pages_enabled()) {
3143 if (_deferred_grow_zone(zone, order))
3144 goto try_this_zone;
3145 }
3146 #endif
3147 /* Checked here to keep the fast path fast */
3148 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3149 if (alloc_flags & ALLOC_NO_WATERMARKS)
3150 goto try_this_zone;
3151
3152 if (!node_reclaim_enabled() ||
3153 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3154 continue;
3155
3156 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3157 switch (ret) {
3158 case NODE_RECLAIM_NOSCAN:
3159 /* did not scan */
3160 continue;
3161 case NODE_RECLAIM_FULL:
3162 /* scanned but unreclaimable */
3163 continue;
3164 default:
3165 /* did we reclaim enough */
3166 if (zone_watermark_ok(zone, order, mark,
3167 ac->highest_zoneidx, alloc_flags))
3168 goto try_this_zone;
3169
3170 continue;
3171 }
3172 }
3173
3174 try_this_zone:
3175 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3176 gfp_mask, alloc_flags, ac->migratetype);
3177 if (page) {
3178 prep_new_page(page, order, gfp_mask, alloc_flags);
3179
3180 /*
3181 * If this is a high-order atomic allocation then check
3182 * if the pageblock should be reserved for the future
3183 */
3184 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
3185 reserve_highatomic_pageblock(page, zone);
3186
3187 return page;
3188 } else {
3189 if (cond_accept_memory(zone, order))
3190 goto try_this_zone;
3191
3192 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3193 /* Try again if zone has deferred pages */
3194 if (deferred_pages_enabled()) {
3195 if (_deferred_grow_zone(zone, order))
3196 goto try_this_zone;
3197 }
3198 #endif
3199 }
3200 }
3201
3202 /*
3203 * It's possible on a UMA machine to get through all zones that are
3204 * fragmented. If avoiding fragmentation, reset and try again.
3205 */
3206 if (no_fallback) {
3207 alloc_flags &= ~ALLOC_NOFRAGMENT;
3208 goto retry;
3209 }
3210
3211 return NULL;
3212 }
3213
warn_alloc_show_mem(gfp_t gfp_mask,nodemask_t * nodemask)3214 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3215 {
3216 unsigned int filter = SHOW_MEM_FILTER_NODES;
3217
3218 /*
3219 * This documents exceptions given to allocations in certain
3220 * contexts that are allowed to allocate outside current's set
3221 * of allowed nodes.
3222 */
3223 if (!(gfp_mask & __GFP_NOMEMALLOC))
3224 if (tsk_is_oom_victim(current) ||
3225 (current->flags & (PF_MEMALLOC | PF_EXITING)))
3226 filter &= ~SHOW_MEM_FILTER_NODES;
3227 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3228 filter &= ~SHOW_MEM_FILTER_NODES;
3229
3230 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
3231 }
3232
warn_alloc(gfp_t gfp_mask,nodemask_t * nodemask,const char * fmt,...)3233 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3234 {
3235 struct va_format vaf;
3236 va_list args;
3237 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
3238
3239 if ((gfp_mask & __GFP_NOWARN) ||
3240 !__ratelimit(&nopage_rs) ||
3241 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
3242 return;
3243
3244 va_start(args, fmt);
3245 vaf.fmt = fmt;
3246 vaf.va = &args;
3247 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
3248 current->comm, &vaf, gfp_mask, &gfp_mask,
3249 nodemask_pr_args(nodemask));
3250 va_end(args);
3251
3252 cpuset_print_current_mems_allowed();
3253 pr_cont("\n");
3254 dump_stack();
3255 warn_alloc_show_mem(gfp_mask, nodemask);
3256 }
3257
3258 static inline struct page *
__alloc_pages_cpuset_fallback(gfp_t gfp_mask,unsigned int order,unsigned int alloc_flags,const struct alloc_context * ac)3259 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3260 unsigned int alloc_flags,
3261 const struct alloc_context *ac)
3262 {
3263 struct page *page;
3264
3265 page = get_page_from_freelist(gfp_mask, order,
3266 alloc_flags|ALLOC_CPUSET, ac);
3267 /*
3268 * fallback to ignore cpuset restriction if our nodes
3269 * are depleted
3270 */
3271 if (!page)
3272 page = get_page_from_freelist(gfp_mask, order,
3273 alloc_flags, ac);
3274
3275 return page;
3276 }
3277
3278 static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask,unsigned int order,const struct alloc_context * ac,unsigned long * did_some_progress)3279 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3280 const struct alloc_context *ac, unsigned long *did_some_progress)
3281 {
3282 struct oom_control oc = {
3283 .zonelist = ac->zonelist,
3284 .nodemask = ac->nodemask,
3285 .memcg = NULL,
3286 .gfp_mask = gfp_mask,
3287 .order = order,
3288 };
3289 struct page *page;
3290
3291 *did_some_progress = 0;
3292
3293 /*
3294 * Acquire the oom lock. If that fails, somebody else is
3295 * making progress for us.
3296 */
3297 if (!mutex_trylock(&oom_lock)) {
3298 *did_some_progress = 1;
3299 schedule_timeout_uninterruptible(1);
3300 return NULL;
3301 }
3302
3303 /*
3304 * Go through the zonelist yet one more time, keep very high watermark
3305 * here, this is only to catch a parallel oom killing, we must fail if
3306 * we're still under heavy pressure. But make sure that this reclaim
3307 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
3308 * allocation which will never fail due to oom_lock already held.
3309 */
3310 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3311 ~__GFP_DIRECT_RECLAIM, order,
3312 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3313 if (page)
3314 goto out;
3315
3316 /* Coredumps can quickly deplete all memory reserves */
3317 if (current->flags & PF_DUMPCORE)
3318 goto out;
3319 /* The OOM killer will not help higher order allocs */
3320 if (order > PAGE_ALLOC_COSTLY_ORDER)
3321 goto out;
3322 /*
3323 * We have already exhausted all our reclaim opportunities without any
3324 * success so it is time to admit defeat. We will skip the OOM killer
3325 * because it is very likely that the caller has a more reasonable
3326 * fallback than shooting a random task.
3327 *
3328 * The OOM killer may not free memory on a specific node.
3329 */
3330 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
3331 goto out;
3332 /* The OOM killer does not needlessly kill tasks for lowmem */
3333 if (ac->highest_zoneidx < ZONE_NORMAL)
3334 goto out;
3335 if (pm_suspended_storage())
3336 goto out;
3337 /*
3338 * XXX: GFP_NOFS allocations should rather fail than rely on
3339 * other request to make a forward progress.
3340 * We are in an unfortunate situation where out_of_memory cannot
3341 * do much for this context but let's try it to at least get
3342 * access to memory reserved if the current task is killed (see
3343 * out_of_memory). Once filesystems are ready to handle allocation
3344 * failures more gracefully we should just bail out here.
3345 */
3346
3347 /* Exhausted what can be done so it's blame time */
3348 if (out_of_memory(&oc) ||
3349 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
3350 *did_some_progress = 1;
3351
3352 /*
3353 * Help non-failing allocations by giving them access to memory
3354 * reserves
3355 */
3356 if (gfp_mask & __GFP_NOFAIL)
3357 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3358 ALLOC_NO_WATERMARKS, ac);
3359 }
3360 out:
3361 mutex_unlock(&oom_lock);
3362 return page;
3363 }
3364
3365 /*
3366 * Maximum number of compaction retries with a progress before OOM
3367 * killer is consider as the only way to move forward.
3368 */
3369 #define MAX_COMPACT_RETRIES 16
3370
3371 #ifdef CONFIG_COMPACTION
3372 /* Try memory compaction for high-order allocations before reclaim */
3373 static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask,unsigned int order,unsigned int alloc_flags,const struct alloc_context * ac,enum compact_priority prio,enum compact_result * compact_result)3374 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3375 unsigned int alloc_flags, const struct alloc_context *ac,
3376 enum compact_priority prio, enum compact_result *compact_result)
3377 {
3378 struct page *page = NULL;
3379 unsigned long pflags;
3380 unsigned int noreclaim_flag;
3381
3382 if (!order)
3383 return NULL;
3384
3385 psi_memstall_enter(&pflags);
3386 delayacct_compact_start();
3387 noreclaim_flag = memalloc_noreclaim_save();
3388
3389 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3390 prio, &page);
3391
3392 memalloc_noreclaim_restore(noreclaim_flag);
3393 psi_memstall_leave(&pflags);
3394 delayacct_compact_end();
3395
3396 if (*compact_result == COMPACT_SKIPPED)
3397 return NULL;
3398 /*
3399 * At least in one zone compaction wasn't deferred or skipped, so let's
3400 * count a compaction stall
3401 */
3402 count_vm_event(COMPACTSTALL);
3403
3404 /* Prep a captured page if available */
3405 if (page)
3406 prep_new_page(page, order, gfp_mask, alloc_flags);
3407
3408 /* Try get a page from the freelist if available */
3409 if (!page)
3410 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3411
3412 if (page) {
3413 struct zone *zone = page_zone(page);
3414
3415 zone->compact_blockskip_flush = false;
3416 compaction_defer_reset(zone, order, true);
3417 count_vm_event(COMPACTSUCCESS);
3418 return page;
3419 }
3420
3421 /*
3422 * It's bad if compaction run occurs and fails. The most likely reason
3423 * is that pages exist, but not enough to satisfy watermarks.
3424 */
3425 count_vm_event(COMPACTFAIL);
3426
3427 cond_resched();
3428
3429 return NULL;
3430 }
3431
3432 static inline bool
should_compact_retry(struct alloc_context * ac,int order,int alloc_flags,enum compact_result compact_result,enum compact_priority * compact_priority,int * compaction_retries)3433 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3434 enum compact_result compact_result,
3435 enum compact_priority *compact_priority,
3436 int *compaction_retries)
3437 {
3438 int max_retries = MAX_COMPACT_RETRIES;
3439 int min_priority;
3440 bool ret = false;
3441 int retries = *compaction_retries;
3442 enum compact_priority priority = *compact_priority;
3443
3444 if (!order)
3445 return false;
3446
3447 if (fatal_signal_pending(current))
3448 return false;
3449
3450 /*
3451 * Compaction was skipped due to a lack of free order-0
3452 * migration targets. Continue if reclaim can help.
3453 */
3454 if (compact_result == COMPACT_SKIPPED) {
3455 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3456 goto out;
3457 }
3458
3459 /*
3460 * Compaction managed to coalesce some page blocks, but the
3461 * allocation failed presumably due to a race. Retry some.
3462 */
3463 if (compact_result == COMPACT_SUCCESS) {
3464 /*
3465 * !costly requests are much more important than
3466 * __GFP_RETRY_MAYFAIL costly ones because they are de
3467 * facto nofail and invoke OOM killer to move on while
3468 * costly can fail and users are ready to cope with
3469 * that. 1/4 retries is rather arbitrary but we would
3470 * need much more detailed feedback from compaction to
3471 * make a better decision.
3472 */
3473 if (order > PAGE_ALLOC_COSTLY_ORDER)
3474 max_retries /= 4;
3475
3476 if (++(*compaction_retries) <= max_retries) {
3477 ret = true;
3478 goto out;
3479 }
3480 }
3481
3482 /*
3483 * Compaction failed. Retry with increasing priority.
3484 */
3485 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3486 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3487
3488 if (*compact_priority > min_priority) {
3489 (*compact_priority)--;
3490 *compaction_retries = 0;
3491 ret = true;
3492 }
3493 out:
3494 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3495 return ret;
3496 }
3497 #else
3498 static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask,unsigned int order,unsigned int alloc_flags,const struct alloc_context * ac,enum compact_priority prio,enum compact_result * compact_result)3499 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3500 unsigned int alloc_flags, const struct alloc_context *ac,
3501 enum compact_priority prio, enum compact_result *compact_result)
3502 {
3503 *compact_result = COMPACT_SKIPPED;
3504 return NULL;
3505 }
3506
3507 static inline bool
should_compact_retry(struct alloc_context * ac,unsigned int order,int alloc_flags,enum compact_result compact_result,enum compact_priority * compact_priority,int * compaction_retries)3508 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3509 enum compact_result compact_result,
3510 enum compact_priority *compact_priority,
3511 int *compaction_retries)
3512 {
3513 struct zone *zone;
3514 struct zoneref *z;
3515
3516 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3517 return false;
3518
3519 /*
3520 * There are setups with compaction disabled which would prefer to loop
3521 * inside the allocator rather than hit the oom killer prematurely.
3522 * Let's give them a good hope and keep retrying while the order-0
3523 * watermarks are OK.
3524 */
3525 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3526 ac->highest_zoneidx, ac->nodemask) {
3527 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3528 ac->highest_zoneidx, alloc_flags))
3529 return true;
3530 }
3531 return false;
3532 }
3533 #endif /* CONFIG_COMPACTION */
3534
3535 #ifdef CONFIG_LOCKDEP
3536 static struct lockdep_map __fs_reclaim_map =
3537 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
3538
__need_reclaim(gfp_t gfp_mask)3539 static bool __need_reclaim(gfp_t gfp_mask)
3540 {
3541 /* no reclaim without waiting on it */
3542 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3543 return false;
3544
3545 /* this guy won't enter reclaim */
3546 if (current->flags & PF_MEMALLOC)
3547 return false;
3548
3549 if (gfp_mask & __GFP_NOLOCKDEP)
3550 return false;
3551
3552 return true;
3553 }
3554
__fs_reclaim_acquire(unsigned long ip)3555 void __fs_reclaim_acquire(unsigned long ip)
3556 {
3557 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
3558 }
3559
__fs_reclaim_release(unsigned long ip)3560 void __fs_reclaim_release(unsigned long ip)
3561 {
3562 lock_release(&__fs_reclaim_map, ip);
3563 }
3564
fs_reclaim_acquire(gfp_t gfp_mask)3565 void fs_reclaim_acquire(gfp_t gfp_mask)
3566 {
3567 gfp_mask = current_gfp_context(gfp_mask);
3568
3569 if (__need_reclaim(gfp_mask)) {
3570 if (gfp_mask & __GFP_FS)
3571 __fs_reclaim_acquire(_RET_IP_);
3572
3573 #ifdef CONFIG_MMU_NOTIFIER
3574 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
3575 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
3576 #endif
3577
3578 }
3579 }
3580 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
3581
fs_reclaim_release(gfp_t gfp_mask)3582 void fs_reclaim_release(gfp_t gfp_mask)
3583 {
3584 gfp_mask = current_gfp_context(gfp_mask);
3585
3586 if (__need_reclaim(gfp_mask)) {
3587 if (gfp_mask & __GFP_FS)
3588 __fs_reclaim_release(_RET_IP_);
3589 }
3590 }
3591 EXPORT_SYMBOL_GPL(fs_reclaim_release);
3592 #endif
3593
3594 /*
3595 * Zonelists may change due to hotplug during allocation. Detect when zonelists
3596 * have been rebuilt so allocation retries. Reader side does not lock and
3597 * retries the allocation if zonelist changes. Writer side is protected by the
3598 * embedded spin_lock.
3599 */
3600 static DEFINE_SEQLOCK(zonelist_update_seq);
3601
zonelist_iter_begin(void)3602 static unsigned int zonelist_iter_begin(void)
3603 {
3604 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
3605 return read_seqbegin(&zonelist_update_seq);
3606
3607 return 0;
3608 }
3609
check_retry_zonelist(unsigned int seq)3610 static unsigned int check_retry_zonelist(unsigned int seq)
3611 {
3612 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
3613 return read_seqretry(&zonelist_update_seq, seq);
3614
3615 return seq;
3616 }
3617
3618 /* Perform direct synchronous page reclaim */
3619 static unsigned long
__perform_reclaim(gfp_t gfp_mask,unsigned int order,const struct alloc_context * ac)3620 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3621 const struct alloc_context *ac)
3622 {
3623 unsigned int noreclaim_flag;
3624 unsigned long progress;
3625
3626 cond_resched();
3627
3628 /* We now go into synchronous reclaim */
3629 cpuset_memory_pressure_bump();
3630 fs_reclaim_acquire(gfp_mask);
3631 noreclaim_flag = memalloc_noreclaim_save();
3632
3633 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3634 ac->nodemask);
3635
3636 memalloc_noreclaim_restore(noreclaim_flag);
3637 fs_reclaim_release(gfp_mask);
3638
3639 cond_resched();
3640
3641 return progress;
3642 }
3643
3644 /* The really slow allocator path where we enter direct reclaim */
3645 static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask,unsigned int order,unsigned int alloc_flags,const struct alloc_context * ac,unsigned long * did_some_progress)3646 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3647 unsigned int alloc_flags, const struct alloc_context *ac,
3648 unsigned long *did_some_progress)
3649 {
3650 struct page *page = NULL;
3651 unsigned long pflags;
3652 bool drained = false;
3653
3654 psi_memstall_enter(&pflags);
3655 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3656 if (unlikely(!(*did_some_progress)))
3657 goto out;
3658
3659 retry:
3660 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3661
3662 /*
3663 * If an allocation failed after direct reclaim, it could be because
3664 * pages are pinned on the per-cpu lists or in high alloc reserves.
3665 * Shrink them and try again
3666 */
3667 if (!page && !drained) {
3668 unreserve_highatomic_pageblock(ac, false);
3669 drain_all_pages(NULL);
3670 drained = true;
3671 goto retry;
3672 }
3673 out:
3674 psi_memstall_leave(&pflags);
3675
3676 return page;
3677 }
3678
wake_all_kswapds(unsigned int order,gfp_t gfp_mask,const struct alloc_context * ac)3679 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
3680 const struct alloc_context *ac)
3681 {
3682 struct zoneref *z;
3683 struct zone *zone;
3684 pg_data_t *last_pgdat = NULL;
3685 enum zone_type highest_zoneidx = ac->highest_zoneidx;
3686
3687 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
3688 ac->nodemask) {
3689 if (!managed_zone(zone))
3690 continue;
3691 if (last_pgdat != zone->zone_pgdat) {
3692 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
3693 last_pgdat = zone->zone_pgdat;
3694 }
3695 }
3696 }
3697
3698 static inline unsigned int
gfp_to_alloc_flags(gfp_t gfp_mask,unsigned int order)3699 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
3700 {
3701 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
3702
3703 /*
3704 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE
3705 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3706 * to save two branches.
3707 */
3708 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
3709 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
3710
3711 /*
3712 * The caller may dip into page reserves a bit more if the caller
3713 * cannot run direct reclaim, or if the caller has realtime scheduling
3714 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
3715 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
3716 */
3717 alloc_flags |= (__force int)
3718 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
3719
3720 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
3721 /*
3722 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3723 * if it can't schedule.
3724 */
3725 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
3726 alloc_flags |= ALLOC_NON_BLOCK;
3727
3728 if (order > 0)
3729 alloc_flags |= ALLOC_HIGHATOMIC;
3730 }
3731
3732 /*
3733 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
3734 * GFP_ATOMIC) rather than fail, see the comment for
3735 * cpuset_node_allowed().
3736 */
3737 if (alloc_flags & ALLOC_MIN_RESERVE)
3738 alloc_flags &= ~ALLOC_CPUSET;
3739 } else if (unlikely(rt_task(current)) && in_task())
3740 alloc_flags |= ALLOC_MIN_RESERVE;
3741
3742 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
3743
3744 return alloc_flags;
3745 }
3746
oom_reserves_allowed(struct task_struct * tsk)3747 static bool oom_reserves_allowed(struct task_struct *tsk)
3748 {
3749 if (!tsk_is_oom_victim(tsk))
3750 return false;
3751
3752 /*
3753 * !MMU doesn't have oom reaper so give access to memory reserves
3754 * only to the thread with TIF_MEMDIE set
3755 */
3756 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
3757 return false;
3758
3759 return true;
3760 }
3761
3762 /*
3763 * Distinguish requests which really need access to full memory
3764 * reserves from oom victims which can live with a portion of it
3765 */
__gfp_pfmemalloc_flags(gfp_t gfp_mask)3766 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
3767 {
3768 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3769 return 0;
3770 if (gfp_mask & __GFP_MEMALLOC)
3771 return ALLOC_NO_WATERMARKS;
3772 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
3773 return ALLOC_NO_WATERMARKS;
3774 if (!in_interrupt()) {
3775 if (current->flags & PF_MEMALLOC)
3776 return ALLOC_NO_WATERMARKS;
3777 else if (oom_reserves_allowed(current))
3778 return ALLOC_OOM;
3779 }
3780
3781 return 0;
3782 }
3783
gfp_pfmemalloc_allowed(gfp_t gfp_mask)3784 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3785 {
3786 return !!__gfp_pfmemalloc_flags(gfp_mask);
3787 }
3788
3789 /*
3790 * Checks whether it makes sense to retry the reclaim to make a forward progress
3791 * for the given allocation request.
3792 *
3793 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
3794 * without success, or when we couldn't even meet the watermark if we
3795 * reclaimed all remaining pages on the LRU lists.
3796 *
3797 * Returns true if a retry is viable or false to enter the oom path.
3798 */
3799 static inline bool
should_reclaim_retry(gfp_t gfp_mask,unsigned order,struct alloc_context * ac,int alloc_flags,bool did_some_progress,int * no_progress_loops)3800 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3801 struct alloc_context *ac, int alloc_flags,
3802 bool did_some_progress, int *no_progress_loops)
3803 {
3804 struct zone *zone;
3805 struct zoneref *z;
3806 bool ret = false;
3807
3808 /*
3809 * Costly allocations might have made a progress but this doesn't mean
3810 * their order will become available due to high fragmentation so
3811 * always increment the no progress counter for them
3812 */
3813 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
3814 *no_progress_loops = 0;
3815 else
3816 (*no_progress_loops)++;
3817
3818 if (*no_progress_loops > MAX_RECLAIM_RETRIES)
3819 goto out;
3820
3821
3822 /*
3823 * Keep reclaiming pages while there is a chance this will lead
3824 * somewhere. If none of the target zones can satisfy our allocation
3825 * request even if all reclaimable pages are considered then we are
3826 * screwed and have to go OOM.
3827 */
3828 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3829 ac->highest_zoneidx, ac->nodemask) {
3830 unsigned long available;
3831 unsigned long reclaimable;
3832 unsigned long min_wmark = min_wmark_pages(zone);
3833 bool wmark;
3834
3835 available = reclaimable = zone_reclaimable_pages(zone);
3836 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
3837
3838 /*
3839 * Would the allocation succeed if we reclaimed all
3840 * reclaimable pages?
3841 */
3842 wmark = __zone_watermark_ok(zone, order, min_wmark,
3843 ac->highest_zoneidx, alloc_flags, available);
3844 trace_reclaim_retry_zone(z, order, reclaimable,
3845 available, min_wmark, *no_progress_loops, wmark);
3846 if (wmark) {
3847 ret = true;
3848 break;
3849 }
3850 }
3851
3852 /*
3853 * Memory allocation/reclaim might be called from a WQ context and the
3854 * current implementation of the WQ concurrency control doesn't
3855 * recognize that a particular WQ is congested if the worker thread is
3856 * looping without ever sleeping. Therefore we have to do a short sleep
3857 * here rather than calling cond_resched().
3858 */
3859 if (current->flags & PF_WQ_WORKER)
3860 schedule_timeout_uninterruptible(1);
3861 else
3862 cond_resched();
3863 out:
3864 /* Before OOM, exhaust highatomic_reserve */
3865 if (!ret)
3866 return unreserve_highatomic_pageblock(ac, true);
3867
3868 return ret;
3869 }
3870
3871 static inline bool
check_retry_cpuset(int cpuset_mems_cookie,struct alloc_context * ac)3872 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
3873 {
3874 /*
3875 * It's possible that cpuset's mems_allowed and the nodemask from
3876 * mempolicy don't intersect. This should be normally dealt with by
3877 * policy_nodemask(), but it's possible to race with cpuset update in
3878 * such a way the check therein was true, and then it became false
3879 * before we got our cpuset_mems_cookie here.
3880 * This assumes that for all allocations, ac->nodemask can come only
3881 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
3882 * when it does not intersect with the cpuset restrictions) or the
3883 * caller can deal with a violated nodemask.
3884 */
3885 if (cpusets_enabled() && ac->nodemask &&
3886 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
3887 ac->nodemask = NULL;
3888 return true;
3889 }
3890
3891 /*
3892 * When updating a task's mems_allowed or mempolicy nodemask, it is
3893 * possible to race with parallel threads in such a way that our
3894 * allocation can fail while the mask is being updated. If we are about
3895 * to fail, check if the cpuset changed during allocation and if so,
3896 * retry.
3897 */
3898 if (read_mems_allowed_retry(cpuset_mems_cookie))
3899 return true;
3900
3901 return false;
3902 }
3903
3904 static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask,unsigned int order,struct alloc_context * ac)3905 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3906 struct alloc_context *ac)
3907 {
3908 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3909 bool can_compact = gfp_compaction_allowed(gfp_mask);
3910 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
3911 struct page *page = NULL;
3912 unsigned int alloc_flags;
3913 unsigned long did_some_progress;
3914 enum compact_priority compact_priority;
3915 enum compact_result compact_result;
3916 int compaction_retries;
3917 int no_progress_loops;
3918 unsigned int cpuset_mems_cookie;
3919 unsigned int zonelist_iter_cookie;
3920 int reserve_flags;
3921
3922 restart:
3923 compaction_retries = 0;
3924 no_progress_loops = 0;
3925 compact_priority = DEF_COMPACT_PRIORITY;
3926 cpuset_mems_cookie = read_mems_allowed_begin();
3927 zonelist_iter_cookie = zonelist_iter_begin();
3928
3929 /*
3930 * The fast path uses conservative alloc_flags to succeed only until
3931 * kswapd needs to be woken up, and to avoid the cost of setting up
3932 * alloc_flags precisely. So we do that now.
3933 */
3934 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
3935
3936 /*
3937 * We need to recalculate the starting point for the zonelist iterator
3938 * because we might have used different nodemask in the fast path, or
3939 * there was a cpuset modification and we are retrying - otherwise we
3940 * could end up iterating over non-eligible zones endlessly.
3941 */
3942 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3943 ac->highest_zoneidx, ac->nodemask);
3944 if (!ac->preferred_zoneref->zone)
3945 goto nopage;
3946
3947 /*
3948 * Check for insane configurations where the cpuset doesn't contain
3949 * any suitable zone to satisfy the request - e.g. non-movable
3950 * GFP_HIGHUSER allocations from MOVABLE nodes only.
3951 */
3952 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
3953 struct zoneref *z = first_zones_zonelist(ac->zonelist,
3954 ac->highest_zoneidx,
3955 &cpuset_current_mems_allowed);
3956 if (!z->zone)
3957 goto nopage;
3958 }
3959
3960 if (alloc_flags & ALLOC_KSWAPD)
3961 wake_all_kswapds(order, gfp_mask, ac);
3962
3963 /*
3964 * The adjusted alloc_flags might result in immediate success, so try
3965 * that first
3966 */
3967 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3968 if (page)
3969 goto got_pg;
3970
3971 /*
3972 * For costly allocations, try direct compaction first, as it's likely
3973 * that we have enough base pages and don't need to reclaim. For non-
3974 * movable high-order allocations, do that as well, as compaction will
3975 * try prevent permanent fragmentation by migrating from blocks of the
3976 * same migratetype.
3977 * Don't try this for allocations that are allowed to ignore
3978 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
3979 */
3980 if (can_direct_reclaim && can_compact &&
3981 (costly_order ||
3982 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
3983 && !gfp_pfmemalloc_allowed(gfp_mask)) {
3984 page = __alloc_pages_direct_compact(gfp_mask, order,
3985 alloc_flags, ac,
3986 INIT_COMPACT_PRIORITY,
3987 &compact_result);
3988 if (page)
3989 goto got_pg;
3990
3991 /*
3992 * Checks for costly allocations with __GFP_NORETRY, which
3993 * includes some THP page fault allocations
3994 */
3995 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
3996 /*
3997 * If allocating entire pageblock(s) and compaction
3998 * failed because all zones are below low watermarks
3999 * or is prohibited because it recently failed at this
4000 * order, fail immediately unless the allocator has
4001 * requested compaction and reclaim retry.
4002 *
4003 * Reclaim is
4004 * - potentially very expensive because zones are far
4005 * below their low watermarks or this is part of very
4006 * bursty high order allocations,
4007 * - not guaranteed to help because isolate_freepages()
4008 * may not iterate over freed pages as part of its
4009 * linear scan, and
4010 * - unlikely to make entire pageblocks free on its
4011 * own.
4012 */
4013 if (compact_result == COMPACT_SKIPPED ||
4014 compact_result == COMPACT_DEFERRED)
4015 goto nopage;
4016
4017 /*
4018 * Looks like reclaim/compaction is worth trying, but
4019 * sync compaction could be very expensive, so keep
4020 * using async compaction.
4021 */
4022 compact_priority = INIT_COMPACT_PRIORITY;
4023 }
4024 }
4025
4026 retry:
4027 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4028 if (alloc_flags & ALLOC_KSWAPD)
4029 wake_all_kswapds(order, gfp_mask, ac);
4030
4031 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4032 if (reserve_flags)
4033 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4034 (alloc_flags & ALLOC_KSWAPD);
4035
4036 /*
4037 * Reset the nodemask and zonelist iterators if memory policies can be
4038 * ignored. These allocations are high priority and system rather than
4039 * user oriented.
4040 */
4041 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4042 ac->nodemask = NULL;
4043 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4044 ac->highest_zoneidx, ac->nodemask);
4045 }
4046
4047 /* Attempt with potentially adjusted zonelist and alloc_flags */
4048 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4049 if (page)
4050 goto got_pg;
4051
4052 /* Caller is not willing to reclaim, we can't balance anything */
4053 if (!can_direct_reclaim)
4054 goto nopage;
4055
4056 /* Avoid recursion of direct reclaim */
4057 if (current->flags & PF_MEMALLOC)
4058 goto nopage;
4059
4060 /* Try direct reclaim and then allocating */
4061 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4062 &did_some_progress);
4063 if (page)
4064 goto got_pg;
4065
4066 /* Try direct compaction and then allocating */
4067 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4068 compact_priority, &compact_result);
4069 if (page)
4070 goto got_pg;
4071
4072 /* Do not loop if specifically requested */
4073 if (gfp_mask & __GFP_NORETRY)
4074 goto nopage;
4075
4076 /*
4077 * Do not retry costly high order allocations unless they are
4078 * __GFP_RETRY_MAYFAIL and we can compact
4079 */
4080 if (costly_order && (!can_compact ||
4081 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4082 goto nopage;
4083
4084 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4085 did_some_progress > 0, &no_progress_loops))
4086 goto retry;
4087
4088 /*
4089 * It doesn't make any sense to retry for the compaction if the order-0
4090 * reclaim is not able to make any progress because the current
4091 * implementation of the compaction depends on the sufficient amount
4092 * of free memory (see __compaction_suitable)
4093 */
4094 if (did_some_progress > 0 && can_compact &&
4095 should_compact_retry(ac, order, alloc_flags,
4096 compact_result, &compact_priority,
4097 &compaction_retries))
4098 goto retry;
4099
4100
4101 /*
4102 * Deal with possible cpuset update races or zonelist updates to avoid
4103 * a unnecessary OOM kill.
4104 */
4105 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4106 check_retry_zonelist(zonelist_iter_cookie))
4107 goto restart;
4108
4109 /* Reclaim has failed us, start killing things */
4110 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4111 if (page)
4112 goto got_pg;
4113
4114 /* Avoid allocations with no watermarks from looping endlessly */
4115 if (tsk_is_oom_victim(current) &&
4116 (alloc_flags & ALLOC_OOM ||
4117 (gfp_mask & __GFP_NOMEMALLOC)))
4118 goto nopage;
4119
4120 /* Retry as long as the OOM killer is making progress */
4121 if (did_some_progress) {
4122 no_progress_loops = 0;
4123 goto retry;
4124 }
4125
4126 nopage:
4127 /*
4128 * Deal with possible cpuset update races or zonelist updates to avoid
4129 * a unnecessary OOM kill.
4130 */
4131 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4132 check_retry_zonelist(zonelist_iter_cookie))
4133 goto restart;
4134
4135 /*
4136 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
4137 * we always retry
4138 */
4139 if (gfp_mask & __GFP_NOFAIL) {
4140 /*
4141 * All existing users of the __GFP_NOFAIL are blockable, so warn
4142 * of any new users that actually require GFP_NOWAIT
4143 */
4144 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
4145 goto fail;
4146
4147 /*
4148 * PF_MEMALLOC request from this context is rather bizarre
4149 * because we cannot reclaim anything and only can loop waiting
4150 * for somebody to do a work for us
4151 */
4152 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
4153
4154 /*
4155 * non failing costly orders are a hard requirement which we
4156 * are not prepared for much so let's warn about these users
4157 * so that we can identify them and convert them to something
4158 * else.
4159 */
4160 WARN_ON_ONCE_GFP(costly_order, gfp_mask);
4161
4162 /*
4163 * Help non-failing allocations by giving some access to memory
4164 * reserves normally used for high priority non-blocking
4165 * allocations but do not use ALLOC_NO_WATERMARKS because this
4166 * could deplete whole memory reserves which would just make
4167 * the situation worse.
4168 */
4169 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4170 if (page)
4171 goto got_pg;
4172
4173 cond_resched();
4174 goto retry;
4175 }
4176 fail:
4177 warn_alloc(gfp_mask, ac->nodemask,
4178 "page allocation failure: order:%u", order);
4179 got_pg:
4180 return page;
4181 }
4182
prepare_alloc_pages(gfp_t gfp_mask,unsigned int order,int preferred_nid,nodemask_t * nodemask,struct alloc_context * ac,gfp_t * alloc_gfp,unsigned int * alloc_flags)4183 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4184 int preferred_nid, nodemask_t *nodemask,
4185 struct alloc_context *ac, gfp_t *alloc_gfp,
4186 unsigned int *alloc_flags)
4187 {
4188 ac->highest_zoneidx = gfp_zone(gfp_mask);
4189 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4190 ac->nodemask = nodemask;
4191 ac->migratetype = gfp_migratetype(gfp_mask);
4192
4193 if (cpusets_enabled()) {
4194 *alloc_gfp |= __GFP_HARDWALL;
4195 /*
4196 * When we are in the interrupt context, it is irrelevant
4197 * to the current task context. It means that any node ok.
4198 */
4199 if (in_task() && !ac->nodemask)
4200 ac->nodemask = &cpuset_current_mems_allowed;
4201 else
4202 *alloc_flags |= ALLOC_CPUSET;
4203 }
4204
4205 might_alloc(gfp_mask);
4206
4207 if (should_fail_alloc_page(gfp_mask, order))
4208 return false;
4209
4210 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4211
4212 /* Dirty zone balancing only done in the fast path */
4213 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4214
4215 /*
4216 * The preferred zone is used for statistics but crucially it is
4217 * also used as the starting point for the zonelist iterator. It
4218 * may get reset for allocations that ignore memory policies.
4219 */
4220 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4221 ac->highest_zoneidx, ac->nodemask);
4222
4223 return true;
4224 }
4225
4226 /*
4227 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
4228 * @gfp: GFP flags for the allocation
4229 * @preferred_nid: The preferred NUMA node ID to allocate from
4230 * @nodemask: Set of nodes to allocate from, may be NULL
4231 * @nr_pages: The number of pages desired on the list or array
4232 * @page_list: Optional list to store the allocated pages
4233 * @page_array: Optional array to store the pages
4234 *
4235 * This is a batched version of the page allocator that attempts to
4236 * allocate nr_pages quickly. Pages are added to page_list if page_list
4237 * is not NULL, otherwise it is assumed that the page_array is valid.
4238 *
4239 * For lists, nr_pages is the number of pages that should be allocated.
4240 *
4241 * For arrays, only NULL elements are populated with pages and nr_pages
4242 * is the maximum number of pages that will be stored in the array.
4243 *
4244 * Returns the number of pages on the list or array.
4245 */
__alloc_pages_bulk(gfp_t gfp,int preferred_nid,nodemask_t * nodemask,int nr_pages,struct list_head * page_list,struct page ** page_array)4246 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
4247 nodemask_t *nodemask, int nr_pages,
4248 struct list_head *page_list,
4249 struct page **page_array)
4250 {
4251 struct page *page;
4252 unsigned long __maybe_unused UP_flags;
4253 struct zone *zone;
4254 struct zoneref *z;
4255 struct per_cpu_pages *pcp;
4256 struct list_head *pcp_list;
4257 struct alloc_context ac;
4258 gfp_t alloc_gfp;
4259 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4260 int nr_populated = 0, nr_account = 0;
4261
4262 /*
4263 * Skip populated array elements to determine if any pages need
4264 * to be allocated before disabling IRQs.
4265 */
4266 while (page_array && nr_populated < nr_pages && page_array[nr_populated])
4267 nr_populated++;
4268
4269 /* No pages requested? */
4270 if (unlikely(nr_pages <= 0))
4271 goto out;
4272
4273 /* Already populated array? */
4274 if (unlikely(page_array && nr_pages - nr_populated == 0))
4275 goto out;
4276
4277 /* Bulk allocator does not support memcg accounting. */
4278 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
4279 goto failed;
4280
4281 /* Use the single page allocator for one page. */
4282 if (nr_pages - nr_populated == 1)
4283 goto failed;
4284
4285 #ifdef CONFIG_PAGE_OWNER
4286 /*
4287 * PAGE_OWNER may recurse into the allocator to allocate space to
4288 * save the stack with pagesets.lock held. Releasing/reacquiring
4289 * removes much of the performance benefit of bulk allocation so
4290 * force the caller to allocate one page at a time as it'll have
4291 * similar performance to added complexity to the bulk allocator.
4292 */
4293 if (static_branch_unlikely(&page_owner_inited))
4294 goto failed;
4295 #endif
4296
4297 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
4298 gfp &= gfp_allowed_mask;
4299 alloc_gfp = gfp;
4300 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
4301 goto out;
4302 gfp = alloc_gfp;
4303
4304 /* Find an allowed local zone that meets the low watermark. */
4305 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
4306 unsigned long mark;
4307
4308 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
4309 !__cpuset_zone_allowed(zone, gfp)) {
4310 continue;
4311 }
4312
4313 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
4314 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
4315 goto failed;
4316 }
4317
4318 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
4319 if (zone_watermark_fast(zone, 0, mark,
4320 zonelist_zone_idx(ac.preferred_zoneref),
4321 alloc_flags, gfp)) {
4322 break;
4323 }
4324 }
4325
4326 /*
4327 * If there are no allowed local zones that meets the watermarks then
4328 * try to allocate a single page and reclaim if necessary.
4329 */
4330 if (unlikely(!zone))
4331 goto failed;
4332
4333 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
4334 pcp_trylock_prepare(UP_flags);
4335 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
4336 if (!pcp)
4337 goto failed_irq;
4338
4339 /* Attempt the batch allocation */
4340 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
4341 while (nr_populated < nr_pages) {
4342
4343 /* Skip existing pages */
4344 if (page_array && page_array[nr_populated]) {
4345 nr_populated++;
4346 continue;
4347 }
4348
4349 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
4350 pcp, pcp_list);
4351 if (unlikely(!page)) {
4352 /* Try and allocate at least one page */
4353 if (!nr_account) {
4354 pcp_spin_unlock(pcp);
4355 goto failed_irq;
4356 }
4357 break;
4358 }
4359 nr_account++;
4360
4361 prep_new_page(page, 0, gfp, 0);
4362 if (page_list)
4363 list_add(&page->lru, page_list);
4364 else
4365 page_array[nr_populated] = page;
4366 nr_populated++;
4367 }
4368
4369 pcp_spin_unlock(pcp);
4370 pcp_trylock_finish(UP_flags);
4371
4372 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
4373 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
4374
4375 out:
4376 return nr_populated;
4377
4378 failed_irq:
4379 pcp_trylock_finish(UP_flags);
4380
4381 failed:
4382 page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
4383 if (page) {
4384 if (page_list)
4385 list_add(&page->lru, page_list);
4386 else
4387 page_array[nr_populated] = page;
4388 nr_populated++;
4389 }
4390
4391 goto out;
4392 }
4393 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
4394
4395 /*
4396 * This is the 'heart' of the zoned buddy allocator.
4397 */
__alloc_pages(gfp_t gfp,unsigned int order,int preferred_nid,nodemask_t * nodemask)4398 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
4399 nodemask_t *nodemask)
4400 {
4401 struct page *page;
4402 unsigned int alloc_flags = ALLOC_WMARK_LOW;
4403 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
4404 struct alloc_context ac = { };
4405
4406 /*
4407 * There are several places where we assume that the order value is sane
4408 * so bail out early if the request is out of bound.
4409 */
4410 if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
4411 return NULL;
4412
4413 gfp &= gfp_allowed_mask;
4414 /*
4415 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4416 * resp. GFP_NOIO which has to be inherited for all allocation requests
4417 * from a particular context which has been marked by
4418 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
4419 * movable zones are not used during allocation.
4420 */
4421 gfp = current_gfp_context(gfp);
4422 alloc_gfp = gfp;
4423 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
4424 &alloc_gfp, &alloc_flags))
4425 return NULL;
4426
4427 /*
4428 * Forbid the first pass from falling back to types that fragment
4429 * memory until all local zones are considered.
4430 */
4431 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
4432
4433 /* First allocation attempt */
4434 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
4435 if (likely(page))
4436 goto out;
4437
4438 alloc_gfp = gfp;
4439 ac.spread_dirty_pages = false;
4440
4441 /*
4442 * Restore the original nodemask if it was potentially replaced with
4443 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4444 */
4445 ac.nodemask = nodemask;
4446
4447 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
4448
4449 out:
4450 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
4451 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
4452 __free_pages(page, order);
4453 page = NULL;
4454 }
4455
4456 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
4457 kmsan_alloc_page(page, order, alloc_gfp);
4458
4459 return page;
4460 }
4461 EXPORT_SYMBOL(__alloc_pages);
4462
__folio_alloc(gfp_t gfp,unsigned int order,int preferred_nid,nodemask_t * nodemask)4463 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
4464 nodemask_t *nodemask)
4465 {
4466 struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
4467 preferred_nid, nodemask);
4468 struct folio *folio = (struct folio *)page;
4469
4470 if (folio && order > 1)
4471 folio_prep_large_rmappable(folio);
4472 return folio;
4473 }
4474 EXPORT_SYMBOL(__folio_alloc);
4475
4476 /*
4477 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
4478 * address cannot represent highmem pages. Use alloc_pages and then kmap if
4479 * you need to access high mem.
4480 */
__get_free_pages(gfp_t gfp_mask,unsigned int order)4481 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4482 {
4483 struct page *page;
4484
4485 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
4486 if (!page)
4487 return 0;
4488 return (unsigned long) page_address(page);
4489 }
4490 EXPORT_SYMBOL(__get_free_pages);
4491
get_zeroed_page(gfp_t gfp_mask)4492 unsigned long get_zeroed_page(gfp_t gfp_mask)
4493 {
4494 return __get_free_page(gfp_mask | __GFP_ZERO);
4495 }
4496 EXPORT_SYMBOL(get_zeroed_page);
4497
4498 /**
4499 * __free_pages - Free pages allocated with alloc_pages().
4500 * @page: The page pointer returned from alloc_pages().
4501 * @order: The order of the allocation.
4502 *
4503 * This function can free multi-page allocations that are not compound
4504 * pages. It does not check that the @order passed in matches that of
4505 * the allocation, so it is easy to leak memory. Freeing more memory
4506 * than was allocated will probably emit a warning.
4507 *
4508 * If the last reference to this page is speculative, it will be released
4509 * by put_page() which only frees the first page of a non-compound
4510 * allocation. To prevent the remaining pages from being leaked, we free
4511 * the subsequent pages here. If you want to use the page's reference
4512 * count to decide when to free the allocation, you should allocate a
4513 * compound page, and use put_page() instead of __free_pages().
4514 *
4515 * Context: May be called in interrupt context or while holding a normal
4516 * spinlock, but not in NMI context or while holding a raw spinlock.
4517 */
__free_pages(struct page * page,unsigned int order)4518 void __free_pages(struct page *page, unsigned int order)
4519 {
4520 /* get PageHead before we drop reference */
4521 int head = PageHead(page);
4522
4523 if (put_page_testzero(page))
4524 free_the_page(page, order);
4525 else if (!head)
4526 while (order-- > 0)
4527 free_the_page(page + (1 << order), order);
4528 }
4529 EXPORT_SYMBOL(__free_pages);
4530
free_pages(unsigned long addr,unsigned int order)4531 void free_pages(unsigned long addr, unsigned int order)
4532 {
4533 if (addr != 0) {
4534 VM_BUG_ON(!virt_addr_valid((void *)addr));
4535 __free_pages(virt_to_page((void *)addr), order);
4536 }
4537 }
4538
4539 EXPORT_SYMBOL(free_pages);
4540
4541 /*
4542 * Page Fragment:
4543 * An arbitrary-length arbitrary-offset area of memory which resides
4544 * within a 0 or higher order page. Multiple fragments within that page
4545 * are individually refcounted, in the page's reference counter.
4546 *
4547 * The page_frag functions below provide a simple allocation framework for
4548 * page fragments. This is used by the network stack and network device
4549 * drivers to provide a backing region of memory for use as either an
4550 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4551 */
__page_frag_cache_refill(struct page_frag_cache * nc,gfp_t gfp_mask)4552 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4553 gfp_t gfp_mask)
4554 {
4555 struct page *page = NULL;
4556 gfp_t gfp = gfp_mask;
4557
4558 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4559 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4560 __GFP_NOMEMALLOC;
4561 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4562 PAGE_FRAG_CACHE_MAX_ORDER);
4563 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4564 #endif
4565 if (unlikely(!page))
4566 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4567
4568 nc->va = page ? page_address(page) : NULL;
4569
4570 return page;
4571 }
4572
__page_frag_cache_drain(struct page * page,unsigned int count)4573 void __page_frag_cache_drain(struct page *page, unsigned int count)
4574 {
4575 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4576
4577 if (page_ref_sub_and_test(page, count))
4578 free_the_page(page, compound_order(page));
4579 }
4580 EXPORT_SYMBOL(__page_frag_cache_drain);
4581
page_frag_alloc_align(struct page_frag_cache * nc,unsigned int fragsz,gfp_t gfp_mask,unsigned int align_mask)4582 void *page_frag_alloc_align(struct page_frag_cache *nc,
4583 unsigned int fragsz, gfp_t gfp_mask,
4584 unsigned int align_mask)
4585 {
4586 unsigned int size = PAGE_SIZE;
4587 struct page *page;
4588 int offset;
4589
4590 if (unlikely(!nc->va)) {
4591 refill:
4592 page = __page_frag_cache_refill(nc, gfp_mask);
4593 if (!page)
4594 return NULL;
4595
4596 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4597 /* if size can vary use size else just use PAGE_SIZE */
4598 size = nc->size;
4599 #endif
4600 /* Even if we own the page, we do not use atomic_set().
4601 * This would break get_page_unless_zero() users.
4602 */
4603 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
4604
4605 /* reset page count bias and offset to start of new frag */
4606 nc->pfmemalloc = page_is_pfmemalloc(page);
4607 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4608 nc->offset = size;
4609 }
4610
4611 offset = nc->offset - fragsz;
4612 if (unlikely(offset < 0)) {
4613 page = virt_to_page(nc->va);
4614
4615 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4616 goto refill;
4617
4618 if (unlikely(nc->pfmemalloc)) {
4619 free_the_page(page, compound_order(page));
4620 goto refill;
4621 }
4622
4623 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4624 /* if size can vary use size else just use PAGE_SIZE */
4625 size = nc->size;
4626 #endif
4627 /* OK, page count is 0, we can safely set it */
4628 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
4629
4630 /* reset page count bias and offset to start of new frag */
4631 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4632 offset = size - fragsz;
4633 if (unlikely(offset < 0)) {
4634 /*
4635 * The caller is trying to allocate a fragment
4636 * with fragsz > PAGE_SIZE but the cache isn't big
4637 * enough to satisfy the request, this may
4638 * happen in low memory conditions.
4639 * We don't release the cache page because
4640 * it could make memory pressure worse
4641 * so we simply return NULL here.
4642 */
4643 return NULL;
4644 }
4645 }
4646
4647 nc->pagecnt_bias--;
4648 offset &= align_mask;
4649 nc->offset = offset;
4650
4651 return nc->va + offset;
4652 }
4653 EXPORT_SYMBOL(page_frag_alloc_align);
4654
4655 /*
4656 * Frees a page fragment allocated out of either a compound or order 0 page.
4657 */
page_frag_free(void * addr)4658 void page_frag_free(void *addr)
4659 {
4660 struct page *page = virt_to_head_page(addr);
4661
4662 if (unlikely(put_page_testzero(page)))
4663 free_the_page(page, compound_order(page));
4664 }
4665 EXPORT_SYMBOL(page_frag_free);
4666
make_alloc_exact(unsigned long addr,unsigned int order,size_t size)4667 static void *make_alloc_exact(unsigned long addr, unsigned int order,
4668 size_t size)
4669 {
4670 if (addr) {
4671 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
4672 struct page *page = virt_to_page((void *)addr);
4673 struct page *last = page + nr;
4674
4675 split_page_owner(page, 1 << order);
4676 split_page_memcg(page, 1 << order);
4677 while (page < --last)
4678 set_page_refcounted(last);
4679
4680 last = page + (1UL << order);
4681 for (page += nr; page < last; page++)
4682 __free_pages_ok(page, 0, FPI_TO_TAIL);
4683 }
4684 return (void *)addr;
4685 }
4686
4687 /**
4688 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
4689 * @size: the number of bytes to allocate
4690 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4691 *
4692 * This function is similar to alloc_pages(), except that it allocates the
4693 * minimum number of pages to satisfy the request. alloc_pages() can only
4694 * allocate memory in power-of-two pages.
4695 *
4696 * This function is also limited by MAX_ORDER.
4697 *
4698 * Memory allocated by this function must be released by free_pages_exact().
4699 *
4700 * Return: pointer to the allocated area or %NULL in case of error.
4701 */
alloc_pages_exact(size_t size,gfp_t gfp_mask)4702 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4703 {
4704 unsigned int order = get_order(size);
4705 unsigned long addr;
4706
4707 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4708 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4709
4710 addr = __get_free_pages(gfp_mask, order);
4711 return make_alloc_exact(addr, order, size);
4712 }
4713 EXPORT_SYMBOL(alloc_pages_exact);
4714
4715 /**
4716 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4717 * pages on a node.
4718 * @nid: the preferred node ID where memory should be allocated
4719 * @size: the number of bytes to allocate
4720 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4721 *
4722 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4723 * back.
4724 *
4725 * Return: pointer to the allocated area or %NULL in case of error.
4726 */
alloc_pages_exact_nid(int nid,size_t size,gfp_t gfp_mask)4727 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4728 {
4729 unsigned int order = get_order(size);
4730 struct page *p;
4731
4732 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4733 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4734
4735 p = alloc_pages_node(nid, gfp_mask, order);
4736 if (!p)
4737 return NULL;
4738 return make_alloc_exact((unsigned long)page_address(p), order, size);
4739 }
4740
4741 /**
4742 * free_pages_exact - release memory allocated via alloc_pages_exact()
4743 * @virt: the value returned by alloc_pages_exact.
4744 * @size: size of allocation, same value as passed to alloc_pages_exact().
4745 *
4746 * Release the memory allocated by a previous call to alloc_pages_exact.
4747 */
free_pages_exact(void * virt,size_t size)4748 void free_pages_exact(void *virt, size_t size)
4749 {
4750 unsigned long addr = (unsigned long)virt;
4751 unsigned long end = addr + PAGE_ALIGN(size);
4752
4753 while (addr < end) {
4754 free_page(addr);
4755 addr += PAGE_SIZE;
4756 }
4757 }
4758 EXPORT_SYMBOL(free_pages_exact);
4759
4760 /**
4761 * nr_free_zone_pages - count number of pages beyond high watermark
4762 * @offset: The zone index of the highest zone
4763 *
4764 * nr_free_zone_pages() counts the number of pages which are beyond the
4765 * high watermark within all zones at or below a given zone index. For each
4766 * zone, the number of pages is calculated as:
4767 *
4768 * nr_free_zone_pages = managed_pages - high_pages
4769 *
4770 * Return: number of pages beyond high watermark.
4771 */
nr_free_zone_pages(int offset)4772 static unsigned long nr_free_zone_pages(int offset)
4773 {
4774 struct zoneref *z;
4775 struct zone *zone;
4776
4777 /* Just pick one node, since fallback list is circular */
4778 unsigned long sum = 0;
4779
4780 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
4781
4782 for_each_zone_zonelist(zone, z, zonelist, offset) {
4783 unsigned long size = zone_managed_pages(zone);
4784 unsigned long high = high_wmark_pages(zone);
4785 if (size > high)
4786 sum += size - high;
4787 }
4788
4789 return sum;
4790 }
4791
4792 /**
4793 * nr_free_buffer_pages - count number of pages beyond high watermark
4794 *
4795 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4796 * watermark within ZONE_DMA and ZONE_NORMAL.
4797 *
4798 * Return: number of pages beyond high watermark within ZONE_DMA and
4799 * ZONE_NORMAL.
4800 */
nr_free_buffer_pages(void)4801 unsigned long nr_free_buffer_pages(void)
4802 {
4803 return nr_free_zone_pages(gfp_zone(GFP_USER));
4804 }
4805 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
4806
zoneref_set_zone(struct zone * zone,struct zoneref * zoneref)4807 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
4808 {
4809 zoneref->zone = zone;
4810 zoneref->zone_idx = zone_idx(zone);
4811 }
4812
4813 /*
4814 * Builds allocation fallback zone lists.
4815 *
4816 * Add all populated zones of a node to the zonelist.
4817 */
build_zonerefs_node(pg_data_t * pgdat,struct zoneref * zonerefs)4818 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
4819 {
4820 struct zone *zone;
4821 enum zone_type zone_type = MAX_NR_ZONES;
4822 int nr_zones = 0;
4823
4824 do {
4825 zone_type--;
4826 zone = pgdat->node_zones + zone_type;
4827 if (populated_zone(zone)) {
4828 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
4829 check_highest_zone(zone_type);
4830 }
4831 } while (zone_type);
4832
4833 return nr_zones;
4834 }
4835
4836 #ifdef CONFIG_NUMA
4837
__parse_numa_zonelist_order(char * s)4838 static int __parse_numa_zonelist_order(char *s)
4839 {
4840 /*
4841 * We used to support different zonelists modes but they turned
4842 * out to be just not useful. Let's keep the warning in place
4843 * if somebody still use the cmd line parameter so that we do
4844 * not fail it silently
4845 */
4846 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
4847 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
4848 return -EINVAL;
4849 }
4850 return 0;
4851 }
4852
4853 static char numa_zonelist_order[] = "Node";
4854 #define NUMA_ZONELIST_ORDER_LEN 16
4855 /*
4856 * sysctl handler for numa_zonelist_order
4857 */
numa_zonelist_order_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)4858 static int numa_zonelist_order_handler(struct ctl_table *table, int write,
4859 void *buffer, size_t *length, loff_t *ppos)
4860 {
4861 if (write)
4862 return __parse_numa_zonelist_order(buffer);
4863 return proc_dostring(table, write, buffer, length, ppos);
4864 }
4865
4866 static int node_load[MAX_NUMNODES];
4867
4868 /**
4869 * find_next_best_node - find the next node that should appear in a given node's fallback list
4870 * @node: node whose fallback list we're appending
4871 * @used_node_mask: nodemask_t of already used nodes
4872 *
4873 * We use a number of factors to determine which is the next node that should
4874 * appear on a given node's fallback list. The node should not have appeared
4875 * already in @node's fallback list, and it should be the next closest node
4876 * according to the distance array (which contains arbitrary distance values
4877 * from each node to each node in the system), and should also prefer nodes
4878 * with no CPUs, since presumably they'll have very little allocation pressure
4879 * on them otherwise.
4880 *
4881 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
4882 */
find_next_best_node(int node,nodemask_t * used_node_mask)4883 int find_next_best_node(int node, nodemask_t *used_node_mask)
4884 {
4885 int n, val;
4886 int min_val = INT_MAX;
4887 int best_node = NUMA_NO_NODE;
4888
4889 /* Use the local node if we haven't already */
4890 if (!node_isset(node, *used_node_mask)) {
4891 node_set(node, *used_node_mask);
4892 return node;
4893 }
4894
4895 for_each_node_state(n, N_MEMORY) {
4896
4897 /* Don't want a node to appear more than once */
4898 if (node_isset(n, *used_node_mask))
4899 continue;
4900
4901 /* Use the distance array to find the distance */
4902 val = node_distance(node, n);
4903
4904 /* Penalize nodes under us ("prefer the next node") */
4905 val += (n < node);
4906
4907 /* Give preference to headless and unused nodes */
4908 if (!cpumask_empty(cpumask_of_node(n)))
4909 val += PENALTY_FOR_NODE_WITH_CPUS;
4910
4911 /* Slight preference for less loaded node */
4912 val *= MAX_NUMNODES;
4913 val += node_load[n];
4914
4915 if (val < min_val) {
4916 min_val = val;
4917 best_node = n;
4918 }
4919 }
4920
4921 if (best_node >= 0)
4922 node_set(best_node, *used_node_mask);
4923
4924 return best_node;
4925 }
4926
4927
4928 /*
4929 * Build zonelists ordered by node and zones within node.
4930 * This results in maximum locality--normal zone overflows into local
4931 * DMA zone, if any--but risks exhausting DMA zone.
4932 */
build_zonelists_in_node_order(pg_data_t * pgdat,int * node_order,unsigned nr_nodes)4933 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
4934 unsigned nr_nodes)
4935 {
4936 struct zoneref *zonerefs;
4937 int i;
4938
4939 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
4940
4941 for (i = 0; i < nr_nodes; i++) {
4942 int nr_zones;
4943
4944 pg_data_t *node = NODE_DATA(node_order[i]);
4945
4946 nr_zones = build_zonerefs_node(node, zonerefs);
4947 zonerefs += nr_zones;
4948 }
4949 zonerefs->zone = NULL;
4950 zonerefs->zone_idx = 0;
4951 }
4952
4953 /*
4954 * Build gfp_thisnode zonelists
4955 */
build_thisnode_zonelists(pg_data_t * pgdat)4956 static void build_thisnode_zonelists(pg_data_t *pgdat)
4957 {
4958 struct zoneref *zonerefs;
4959 int nr_zones;
4960
4961 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
4962 nr_zones = build_zonerefs_node(pgdat, zonerefs);
4963 zonerefs += nr_zones;
4964 zonerefs->zone = NULL;
4965 zonerefs->zone_idx = 0;
4966 }
4967
4968 /*
4969 * Build zonelists ordered by zone and nodes within zones.
4970 * This results in conserving DMA zone[s] until all Normal memory is
4971 * exhausted, but results in overflowing to remote node while memory
4972 * may still exist in local DMA zone.
4973 */
4974
build_zonelists(pg_data_t * pgdat)4975 static void build_zonelists(pg_data_t *pgdat)
4976 {
4977 static int node_order[MAX_NUMNODES];
4978 int node, nr_nodes = 0;
4979 nodemask_t used_mask = NODE_MASK_NONE;
4980 int local_node, prev_node;
4981
4982 /* NUMA-aware ordering of nodes */
4983 local_node = pgdat->node_id;
4984 prev_node = local_node;
4985
4986 memset(node_order, 0, sizeof(node_order));
4987 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
4988 /*
4989 * We don't want to pressure a particular node.
4990 * So adding penalty to the first node in same
4991 * distance group to make it round-robin.
4992 */
4993 if (node_distance(local_node, node) !=
4994 node_distance(local_node, prev_node))
4995 node_load[node] += 1;
4996
4997 node_order[nr_nodes++] = node;
4998 prev_node = node;
4999 }
5000
5001 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5002 build_thisnode_zonelists(pgdat);
5003 pr_info("Fallback order for Node %d: ", local_node);
5004 for (node = 0; node < nr_nodes; node++)
5005 pr_cont("%d ", node_order[node]);
5006 pr_cont("\n");
5007 }
5008
5009 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5010 /*
5011 * Return node id of node used for "local" allocations.
5012 * I.e., first node id of first zone in arg node's generic zonelist.
5013 * Used for initializing percpu 'numa_mem', which is used primarily
5014 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5015 */
local_memory_node(int node)5016 int local_memory_node(int node)
5017 {
5018 struct zoneref *z;
5019
5020 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5021 gfp_zone(GFP_KERNEL),
5022 NULL);
5023 return zone_to_nid(z->zone);
5024 }
5025 #endif
5026
5027 static void setup_min_unmapped_ratio(void);
5028 static void setup_min_slab_ratio(void);
5029 #else /* CONFIG_NUMA */
5030
build_zonelists(pg_data_t * pgdat)5031 static void build_zonelists(pg_data_t *pgdat)
5032 {
5033 int node, local_node;
5034 struct zoneref *zonerefs;
5035 int nr_zones;
5036
5037 local_node = pgdat->node_id;
5038
5039 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5040 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5041 zonerefs += nr_zones;
5042
5043 /*
5044 * Now we build the zonelist so that it contains the zones
5045 * of all the other nodes.
5046 * We don't want to pressure a particular node, so when
5047 * building the zones for node N, we make sure that the
5048 * zones coming right after the local ones are those from
5049 * node N+1 (modulo N)
5050 */
5051 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5052 if (!node_online(node))
5053 continue;
5054 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5055 zonerefs += nr_zones;
5056 }
5057 for (node = 0; node < local_node; node++) {
5058 if (!node_online(node))
5059 continue;
5060 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5061 zonerefs += nr_zones;
5062 }
5063
5064 zonerefs->zone = NULL;
5065 zonerefs->zone_idx = 0;
5066 }
5067
5068 #endif /* CONFIG_NUMA */
5069
5070 /*
5071 * Boot pageset table. One per cpu which is going to be used for all
5072 * zones and all nodes. The parameters will be set in such a way
5073 * that an item put on a list will immediately be handed over to
5074 * the buddy list. This is safe since pageset manipulation is done
5075 * with interrupts disabled.
5076 *
5077 * The boot_pagesets must be kept even after bootup is complete for
5078 * unused processors and/or zones. They do play a role for bootstrapping
5079 * hotplugged processors.
5080 *
5081 * zoneinfo_show() and maybe other functions do
5082 * not check if the processor is online before following the pageset pointer.
5083 * Other parts of the kernel may not check if the zone is available.
5084 */
5085 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
5086 /* These effectively disable the pcplists in the boot pageset completely */
5087 #define BOOT_PAGESET_HIGH 0
5088 #define BOOT_PAGESET_BATCH 1
5089 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
5090 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
5091
__build_all_zonelists(void * data)5092 static void __build_all_zonelists(void *data)
5093 {
5094 int nid;
5095 int __maybe_unused cpu;
5096 pg_data_t *self = data;
5097 unsigned long flags;
5098
5099 /*
5100 * The zonelist_update_seq must be acquired with irqsave because the
5101 * reader can be invoked from IRQ with GFP_ATOMIC.
5102 */
5103 write_seqlock_irqsave(&zonelist_update_seq, flags);
5104 /*
5105 * Also disable synchronous printk() to prevent any printk() from
5106 * trying to hold port->lock, for
5107 * tty_insert_flip_string_and_push_buffer() on other CPU might be
5108 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
5109 */
5110 printk_deferred_enter();
5111
5112 #ifdef CONFIG_NUMA
5113 memset(node_load, 0, sizeof(node_load));
5114 #endif
5115
5116 /*
5117 * This node is hotadded and no memory is yet present. So just
5118 * building zonelists is fine - no need to touch other nodes.
5119 */
5120 if (self && !node_online(self->node_id)) {
5121 build_zonelists(self);
5122 } else {
5123 /*
5124 * All possible nodes have pgdat preallocated
5125 * in free_area_init
5126 */
5127 for_each_node(nid) {
5128 pg_data_t *pgdat = NODE_DATA(nid);
5129
5130 build_zonelists(pgdat);
5131 }
5132
5133 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5134 /*
5135 * We now know the "local memory node" for each node--
5136 * i.e., the node of the first zone in the generic zonelist.
5137 * Set up numa_mem percpu variable for on-line cpus. During
5138 * boot, only the boot cpu should be on-line; we'll init the
5139 * secondary cpus' numa_mem as they come on-line. During
5140 * node/memory hotplug, we'll fixup all on-line cpus.
5141 */
5142 for_each_online_cpu(cpu)
5143 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5144 #endif
5145 }
5146
5147 printk_deferred_exit();
5148 write_sequnlock_irqrestore(&zonelist_update_seq, flags);
5149 }
5150
5151 static noinline void __init
build_all_zonelists_init(void)5152 build_all_zonelists_init(void)
5153 {
5154 int cpu;
5155
5156 __build_all_zonelists(NULL);
5157
5158 /*
5159 * Initialize the boot_pagesets that are going to be used
5160 * for bootstrapping processors. The real pagesets for
5161 * each zone will be allocated later when the per cpu
5162 * allocator is available.
5163 *
5164 * boot_pagesets are used also for bootstrapping offline
5165 * cpus if the system is already booted because the pagesets
5166 * are needed to initialize allocators on a specific cpu too.
5167 * F.e. the percpu allocator needs the page allocator which
5168 * needs the percpu allocator in order to allocate its pagesets
5169 * (a chicken-egg dilemma).
5170 */
5171 for_each_possible_cpu(cpu)
5172 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
5173
5174 mminit_verify_zonelist();
5175 cpuset_init_current_mems_allowed();
5176 }
5177
5178 /*
5179 * unless system_state == SYSTEM_BOOTING.
5180 *
5181 * __ref due to call of __init annotated helper build_all_zonelists_init
5182 * [protected by SYSTEM_BOOTING].
5183 */
build_all_zonelists(pg_data_t * pgdat)5184 void __ref build_all_zonelists(pg_data_t *pgdat)
5185 {
5186 unsigned long vm_total_pages;
5187
5188 if (system_state == SYSTEM_BOOTING) {
5189 build_all_zonelists_init();
5190 } else {
5191 __build_all_zonelists(pgdat);
5192 /* cpuset refresh routine should be here */
5193 }
5194 /* Get the number of free pages beyond high watermark in all zones. */
5195 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
5196 /*
5197 * Disable grouping by mobility if the number of pages in the
5198 * system is too low to allow the mechanism to work. It would be
5199 * more accurate, but expensive to check per-zone. This check is
5200 * made on memory-hotadd so a system can start with mobility
5201 * disabled and enable it later
5202 */
5203 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5204 page_group_by_mobility_disabled = 1;
5205 else
5206 page_group_by_mobility_disabled = 0;
5207
5208 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
5209 nr_online_nodes,
5210 page_group_by_mobility_disabled ? "off" : "on",
5211 vm_total_pages);
5212 #ifdef CONFIG_NUMA
5213 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5214 #endif
5215 }
5216
zone_batchsize(struct zone * zone)5217 static int zone_batchsize(struct zone *zone)
5218 {
5219 #ifdef CONFIG_MMU
5220 int batch;
5221
5222 /*
5223 * The number of pages to batch allocate is either ~0.1%
5224 * of the zone or 1MB, whichever is smaller. The batch
5225 * size is striking a balance between allocation latency
5226 * and zone lock contention.
5227 */
5228 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
5229 batch /= 4; /* We effectively *= 4 below */
5230 if (batch < 1)
5231 batch = 1;
5232
5233 /*
5234 * Clamp the batch to a 2^n - 1 value. Having a power
5235 * of 2 value was found to be more likely to have
5236 * suboptimal cache aliasing properties in some cases.
5237 *
5238 * For example if 2 tasks are alternately allocating
5239 * batches of pages, one task can end up with a lot
5240 * of pages of one half of the possible page colors
5241 * and the other with pages of the other colors.
5242 */
5243 batch = rounddown_pow_of_two(batch + batch/2) - 1;
5244
5245 return batch;
5246
5247 #else
5248 /* The deferral and batching of frees should be suppressed under NOMMU
5249 * conditions.
5250 *
5251 * The problem is that NOMMU needs to be able to allocate large chunks
5252 * of contiguous memory as there's no hardware page translation to
5253 * assemble apparent contiguous memory from discontiguous pages.
5254 *
5255 * Queueing large contiguous runs of pages for batching, however,
5256 * causes the pages to actually be freed in smaller chunks. As there
5257 * can be a significant delay between the individual batches being
5258 * recycled, this leads to the once large chunks of space being
5259 * fragmented and becoming unavailable for high-order allocations.
5260 */
5261 return 0;
5262 #endif
5263 }
5264
5265 static int percpu_pagelist_high_fraction;
zone_highsize(struct zone * zone,int batch,int cpu_online)5266 static int zone_highsize(struct zone *zone, int batch, int cpu_online)
5267 {
5268 #ifdef CONFIG_MMU
5269 int high;
5270 int nr_split_cpus;
5271 unsigned long total_pages;
5272
5273 if (!percpu_pagelist_high_fraction) {
5274 /*
5275 * By default, the high value of the pcp is based on the zone
5276 * low watermark so that if they are full then background
5277 * reclaim will not be started prematurely.
5278 */
5279 total_pages = low_wmark_pages(zone);
5280 } else {
5281 /*
5282 * If percpu_pagelist_high_fraction is configured, the high
5283 * value is based on a fraction of the managed pages in the
5284 * zone.
5285 */
5286 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
5287 }
5288
5289 /*
5290 * Split the high value across all online CPUs local to the zone. Note
5291 * that early in boot that CPUs may not be online yet and that during
5292 * CPU hotplug that the cpumask is not yet updated when a CPU is being
5293 * onlined. For memory nodes that have no CPUs, split pcp->high across
5294 * all online CPUs to mitigate the risk that reclaim is triggered
5295 * prematurely due to pages stored on pcp lists.
5296 */
5297 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
5298 if (!nr_split_cpus)
5299 nr_split_cpus = num_online_cpus();
5300 high = total_pages / nr_split_cpus;
5301
5302 /*
5303 * Ensure high is at least batch*4. The multiple is based on the
5304 * historical relationship between high and batch.
5305 */
5306 high = max(high, batch << 2);
5307
5308 return high;
5309 #else
5310 return 0;
5311 #endif
5312 }
5313
5314 /*
5315 * pcp->high and pcp->batch values are related and generally batch is lower
5316 * than high. They are also related to pcp->count such that count is lower
5317 * than high, and as soon as it reaches high, the pcplist is flushed.
5318 *
5319 * However, guaranteeing these relations at all times would require e.g. write
5320 * barriers here but also careful usage of read barriers at the read side, and
5321 * thus be prone to error and bad for performance. Thus the update only prevents
5322 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
5323 * can cope with those fields changing asynchronously, and fully trust only the
5324 * pcp->count field on the local CPU with interrupts disabled.
5325 *
5326 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5327 * outside of boot time (or some other assurance that no concurrent updaters
5328 * exist).
5329 */
pageset_update(struct per_cpu_pages * pcp,unsigned long high,unsigned long batch)5330 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5331 unsigned long batch)
5332 {
5333 WRITE_ONCE(pcp->batch, batch);
5334 WRITE_ONCE(pcp->high, high);
5335 }
5336
per_cpu_pages_init(struct per_cpu_pages * pcp,struct per_cpu_zonestat * pzstats)5337 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
5338 {
5339 int pindex;
5340
5341 memset(pcp, 0, sizeof(*pcp));
5342 memset(pzstats, 0, sizeof(*pzstats));
5343
5344 spin_lock_init(&pcp->lock);
5345 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
5346 INIT_LIST_HEAD(&pcp->lists[pindex]);
5347
5348 /*
5349 * Set batch and high values safe for a boot pageset. A true percpu
5350 * pageset's initialization will update them subsequently. Here we don't
5351 * need to be as careful as pageset_update() as nobody can access the
5352 * pageset yet.
5353 */
5354 pcp->high = BOOT_PAGESET_HIGH;
5355 pcp->batch = BOOT_PAGESET_BATCH;
5356 pcp->free_factor = 0;
5357 }
5358
__zone_set_pageset_high_and_batch(struct zone * zone,unsigned long high,unsigned long batch)5359 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
5360 unsigned long batch)
5361 {
5362 struct per_cpu_pages *pcp;
5363 int cpu;
5364
5365 for_each_possible_cpu(cpu) {
5366 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5367 pageset_update(pcp, high, batch);
5368 }
5369 }
5370
5371 /*
5372 * Calculate and set new high and batch values for all per-cpu pagesets of a
5373 * zone based on the zone's size.
5374 */
zone_set_pageset_high_and_batch(struct zone * zone,int cpu_online)5375 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
5376 {
5377 int new_high, new_batch;
5378
5379 new_batch = max(1, zone_batchsize(zone));
5380 new_high = zone_highsize(zone, new_batch, cpu_online);
5381
5382 if (zone->pageset_high == new_high &&
5383 zone->pageset_batch == new_batch)
5384 return;
5385
5386 zone->pageset_high = new_high;
5387 zone->pageset_batch = new_batch;
5388
5389 __zone_set_pageset_high_and_batch(zone, new_high, new_batch);
5390 }
5391
setup_zone_pageset(struct zone * zone)5392 void __meminit setup_zone_pageset(struct zone *zone)
5393 {
5394 int cpu;
5395
5396 /* Size may be 0 on !SMP && !NUMA */
5397 if (sizeof(struct per_cpu_zonestat) > 0)
5398 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
5399
5400 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
5401 for_each_possible_cpu(cpu) {
5402 struct per_cpu_pages *pcp;
5403 struct per_cpu_zonestat *pzstats;
5404
5405 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5406 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
5407 per_cpu_pages_init(pcp, pzstats);
5408 }
5409
5410 zone_set_pageset_high_and_batch(zone, 0);
5411 }
5412
5413 /*
5414 * The zone indicated has a new number of managed_pages; batch sizes and percpu
5415 * page high values need to be recalculated.
5416 */
zone_pcp_update(struct zone * zone,int cpu_online)5417 static void zone_pcp_update(struct zone *zone, int cpu_online)
5418 {
5419 mutex_lock(&pcp_batch_high_lock);
5420 zone_set_pageset_high_and_batch(zone, cpu_online);
5421 mutex_unlock(&pcp_batch_high_lock);
5422 }
5423
5424 /*
5425 * Allocate per cpu pagesets and initialize them.
5426 * Before this call only boot pagesets were available.
5427 */
setup_per_cpu_pageset(void)5428 void __init setup_per_cpu_pageset(void)
5429 {
5430 struct pglist_data *pgdat;
5431 struct zone *zone;
5432 int __maybe_unused cpu;
5433
5434 for_each_populated_zone(zone)
5435 setup_zone_pageset(zone);
5436
5437 #ifdef CONFIG_NUMA
5438 /*
5439 * Unpopulated zones continue using the boot pagesets.
5440 * The numa stats for these pagesets need to be reset.
5441 * Otherwise, they will end up skewing the stats of
5442 * the nodes these zones are associated with.
5443 */
5444 for_each_possible_cpu(cpu) {
5445 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
5446 memset(pzstats->vm_numa_event, 0,
5447 sizeof(pzstats->vm_numa_event));
5448 }
5449 #endif
5450
5451 for_each_online_pgdat(pgdat)
5452 pgdat->per_cpu_nodestats =
5453 alloc_percpu(struct per_cpu_nodestat);
5454 }
5455
zone_pcp_init(struct zone * zone)5456 __meminit void zone_pcp_init(struct zone *zone)
5457 {
5458 /*
5459 * per cpu subsystem is not up at this point. The following code
5460 * relies on the ability of the linker to provide the
5461 * offset of a (static) per cpu variable into the per cpu area.
5462 */
5463 zone->per_cpu_pageset = &boot_pageset;
5464 zone->per_cpu_zonestats = &boot_zonestats;
5465 zone->pageset_high = BOOT_PAGESET_HIGH;
5466 zone->pageset_batch = BOOT_PAGESET_BATCH;
5467
5468 if (populated_zone(zone))
5469 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
5470 zone->present_pages, zone_batchsize(zone));
5471 }
5472
adjust_managed_page_count(struct page * page,long count)5473 void adjust_managed_page_count(struct page *page, long count)
5474 {
5475 atomic_long_add(count, &page_zone(page)->managed_pages);
5476 totalram_pages_add(count);
5477 #ifdef CONFIG_HIGHMEM
5478 if (PageHighMem(page))
5479 totalhigh_pages_add(count);
5480 #endif
5481 }
5482 EXPORT_SYMBOL(adjust_managed_page_count);
5483
free_reserved_area(void * start,void * end,int poison,const char * s)5484 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
5485 {
5486 void *pos;
5487 unsigned long pages = 0;
5488
5489 start = (void *)PAGE_ALIGN((unsigned long)start);
5490 end = (void *)((unsigned long)end & PAGE_MASK);
5491 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
5492 struct page *page = virt_to_page(pos);
5493 void *direct_map_addr;
5494
5495 /*
5496 * 'direct_map_addr' might be different from 'pos'
5497 * because some architectures' virt_to_page()
5498 * work with aliases. Getting the direct map
5499 * address ensures that we get a _writeable_
5500 * alias for the memset().
5501 */
5502 direct_map_addr = page_address(page);
5503 /*
5504 * Perform a kasan-unchecked memset() since this memory
5505 * has not been initialized.
5506 */
5507 direct_map_addr = kasan_reset_tag(direct_map_addr);
5508 if ((unsigned int)poison <= 0xFF)
5509 memset(direct_map_addr, poison, PAGE_SIZE);
5510
5511 free_reserved_page(page);
5512 }
5513
5514 if (pages && s)
5515 pr_info("Freeing %s memory: %ldK\n", s, K(pages));
5516
5517 return pages;
5518 }
5519
page_alloc_cpu_dead(unsigned int cpu)5520 static int page_alloc_cpu_dead(unsigned int cpu)
5521 {
5522 struct zone *zone;
5523
5524 lru_add_drain_cpu(cpu);
5525 mlock_drain_remote(cpu);
5526 drain_pages(cpu);
5527
5528 /*
5529 * Spill the event counters of the dead processor
5530 * into the current processors event counters.
5531 * This artificially elevates the count of the current
5532 * processor.
5533 */
5534 vm_events_fold_cpu(cpu);
5535
5536 /*
5537 * Zero the differential counters of the dead processor
5538 * so that the vm statistics are consistent.
5539 *
5540 * This is only okay since the processor is dead and cannot
5541 * race with what we are doing.
5542 */
5543 cpu_vm_stats_fold(cpu);
5544
5545 for_each_populated_zone(zone)
5546 zone_pcp_update(zone, 0);
5547
5548 return 0;
5549 }
5550
page_alloc_cpu_online(unsigned int cpu)5551 static int page_alloc_cpu_online(unsigned int cpu)
5552 {
5553 struct zone *zone;
5554
5555 for_each_populated_zone(zone)
5556 zone_pcp_update(zone, 1);
5557 return 0;
5558 }
5559
page_alloc_init_cpuhp(void)5560 void __init page_alloc_init_cpuhp(void)
5561 {
5562 int ret;
5563
5564 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
5565 "mm/page_alloc:pcp",
5566 page_alloc_cpu_online,
5567 page_alloc_cpu_dead);
5568 WARN_ON(ret < 0);
5569 }
5570
5571 /*
5572 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
5573 * or min_free_kbytes changes.
5574 */
calculate_totalreserve_pages(void)5575 static void calculate_totalreserve_pages(void)
5576 {
5577 struct pglist_data *pgdat;
5578 unsigned long reserve_pages = 0;
5579 enum zone_type i, j;
5580
5581 for_each_online_pgdat(pgdat) {
5582
5583 pgdat->totalreserve_pages = 0;
5584
5585 for (i = 0; i < MAX_NR_ZONES; i++) {
5586 struct zone *zone = pgdat->node_zones + i;
5587 long max = 0;
5588 unsigned long managed_pages = zone_managed_pages(zone);
5589
5590 /* Find valid and maximum lowmem_reserve in the zone */
5591 for (j = i; j < MAX_NR_ZONES; j++) {
5592 if (zone->lowmem_reserve[j] > max)
5593 max = zone->lowmem_reserve[j];
5594 }
5595
5596 /* we treat the high watermark as reserved pages. */
5597 max += high_wmark_pages(zone);
5598
5599 if (max > managed_pages)
5600 max = managed_pages;
5601
5602 pgdat->totalreserve_pages += max;
5603
5604 reserve_pages += max;
5605 }
5606 }
5607 totalreserve_pages = reserve_pages;
5608 }
5609
5610 /*
5611 * setup_per_zone_lowmem_reserve - called whenever
5612 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
5613 * has a correct pages reserved value, so an adequate number of
5614 * pages are left in the zone after a successful __alloc_pages().
5615 */
setup_per_zone_lowmem_reserve(void)5616 static void setup_per_zone_lowmem_reserve(void)
5617 {
5618 struct pglist_data *pgdat;
5619 enum zone_type i, j;
5620
5621 for_each_online_pgdat(pgdat) {
5622 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
5623 struct zone *zone = &pgdat->node_zones[i];
5624 int ratio = sysctl_lowmem_reserve_ratio[i];
5625 bool clear = !ratio || !zone_managed_pages(zone);
5626 unsigned long managed_pages = 0;
5627
5628 for (j = i + 1; j < MAX_NR_ZONES; j++) {
5629 struct zone *upper_zone = &pgdat->node_zones[j];
5630
5631 managed_pages += zone_managed_pages(upper_zone);
5632
5633 if (clear)
5634 zone->lowmem_reserve[j] = 0;
5635 else
5636 zone->lowmem_reserve[j] = managed_pages / ratio;
5637 }
5638 }
5639 }
5640
5641 /* update totalreserve_pages */
5642 calculate_totalreserve_pages();
5643 }
5644
__setup_per_zone_wmarks(void)5645 static void __setup_per_zone_wmarks(void)
5646 {
5647 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5648 unsigned long lowmem_pages = 0;
5649 struct zone *zone;
5650 unsigned long flags;
5651
5652 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
5653 for_each_zone(zone) {
5654 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
5655 lowmem_pages += zone_managed_pages(zone);
5656 }
5657
5658 for_each_zone(zone) {
5659 u64 tmp;
5660
5661 spin_lock_irqsave(&zone->lock, flags);
5662 tmp = (u64)pages_min * zone_managed_pages(zone);
5663 do_div(tmp, lowmem_pages);
5664 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
5665 /*
5666 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5667 * need highmem and movable zones pages, so cap pages_min
5668 * to a small value here.
5669 *
5670 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
5671 * deltas control async page reclaim, and so should
5672 * not be capped for highmem and movable zones.
5673 */
5674 unsigned long min_pages;
5675
5676 min_pages = zone_managed_pages(zone) / 1024;
5677 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
5678 zone->_watermark[WMARK_MIN] = min_pages;
5679 } else {
5680 /*
5681 * If it's a lowmem zone, reserve a number of pages
5682 * proportionate to the zone's size.
5683 */
5684 zone->_watermark[WMARK_MIN] = tmp;
5685 }
5686
5687 /*
5688 * Set the kswapd watermarks distance according to the
5689 * scale factor in proportion to available memory, but
5690 * ensure a minimum size on small systems.
5691 */
5692 tmp = max_t(u64, tmp >> 2,
5693 mult_frac(zone_managed_pages(zone),
5694 watermark_scale_factor, 10000));
5695
5696 zone->watermark_boost = 0;
5697 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
5698 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
5699 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
5700
5701 spin_unlock_irqrestore(&zone->lock, flags);
5702 }
5703
5704 /* update totalreserve_pages */
5705 calculate_totalreserve_pages();
5706 }
5707
5708 /**
5709 * setup_per_zone_wmarks - called when min_free_kbytes changes
5710 * or when memory is hot-{added|removed}
5711 *
5712 * Ensures that the watermark[min,low,high] values for each zone are set
5713 * correctly with respect to min_free_kbytes.
5714 */
setup_per_zone_wmarks(void)5715 void setup_per_zone_wmarks(void)
5716 {
5717 struct zone *zone;
5718 static DEFINE_SPINLOCK(lock);
5719
5720 spin_lock(&lock);
5721 __setup_per_zone_wmarks();
5722 spin_unlock(&lock);
5723
5724 /*
5725 * The watermark size have changed so update the pcpu batch
5726 * and high limits or the limits may be inappropriate.
5727 */
5728 for_each_zone(zone)
5729 zone_pcp_update(zone, 0);
5730 }
5731
5732 /*
5733 * Initialise min_free_kbytes.
5734 *
5735 * For small machines we want it small (128k min). For large machines
5736 * we want it large (256MB max). But it is not linear, because network
5737 * bandwidth does not increase linearly with machine size. We use
5738 *
5739 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5740 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
5741 *
5742 * which yields
5743 *
5744 * 16MB: 512k
5745 * 32MB: 724k
5746 * 64MB: 1024k
5747 * 128MB: 1448k
5748 * 256MB: 2048k
5749 * 512MB: 2896k
5750 * 1024MB: 4096k
5751 * 2048MB: 5792k
5752 * 4096MB: 8192k
5753 * 8192MB: 11584k
5754 * 16384MB: 16384k
5755 */
calculate_min_free_kbytes(void)5756 void calculate_min_free_kbytes(void)
5757 {
5758 unsigned long lowmem_kbytes;
5759 int new_min_free_kbytes;
5760
5761 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5762 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5763
5764 if (new_min_free_kbytes > user_min_free_kbytes)
5765 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
5766 else
5767 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
5768 new_min_free_kbytes, user_min_free_kbytes);
5769
5770 }
5771
init_per_zone_wmark_min(void)5772 int __meminit init_per_zone_wmark_min(void)
5773 {
5774 calculate_min_free_kbytes();
5775 setup_per_zone_wmarks();
5776 refresh_zone_stat_thresholds();
5777 setup_per_zone_lowmem_reserve();
5778
5779 #ifdef CONFIG_NUMA
5780 setup_min_unmapped_ratio();
5781 setup_min_slab_ratio();
5782 #endif
5783
5784 khugepaged_min_free_kbytes_update();
5785
5786 return 0;
5787 }
postcore_initcall(init_per_zone_wmark_min)5788 postcore_initcall(init_per_zone_wmark_min)
5789
5790 /*
5791 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5792 * that we can call two helper functions whenever min_free_kbytes
5793 * changes.
5794 */
5795 static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
5796 void *buffer, size_t *length, loff_t *ppos)
5797 {
5798 int rc;
5799
5800 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5801 if (rc)
5802 return rc;
5803
5804 if (write) {
5805 user_min_free_kbytes = min_free_kbytes;
5806 setup_per_zone_wmarks();
5807 }
5808 return 0;
5809 }
5810
watermark_scale_factor_sysctl_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)5811 static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
5812 void *buffer, size_t *length, loff_t *ppos)
5813 {
5814 int rc;
5815
5816 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5817 if (rc)
5818 return rc;
5819
5820 if (write)
5821 setup_per_zone_wmarks();
5822
5823 return 0;
5824 }
5825
5826 #ifdef CONFIG_NUMA
setup_min_unmapped_ratio(void)5827 static void setup_min_unmapped_ratio(void)
5828 {
5829 pg_data_t *pgdat;
5830 struct zone *zone;
5831
5832 for_each_online_pgdat(pgdat)
5833 pgdat->min_unmapped_pages = 0;
5834
5835 for_each_zone(zone)
5836 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
5837 sysctl_min_unmapped_ratio) / 100;
5838 }
5839
5840
sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)5841 static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
5842 void *buffer, size_t *length, loff_t *ppos)
5843 {
5844 int rc;
5845
5846 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5847 if (rc)
5848 return rc;
5849
5850 setup_min_unmapped_ratio();
5851
5852 return 0;
5853 }
5854
setup_min_slab_ratio(void)5855 static void setup_min_slab_ratio(void)
5856 {
5857 pg_data_t *pgdat;
5858 struct zone *zone;
5859
5860 for_each_online_pgdat(pgdat)
5861 pgdat->min_slab_pages = 0;
5862
5863 for_each_zone(zone)
5864 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
5865 sysctl_min_slab_ratio) / 100;
5866 }
5867
sysctl_min_slab_ratio_sysctl_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)5868 static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
5869 void *buffer, size_t *length, loff_t *ppos)
5870 {
5871 int rc;
5872
5873 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5874 if (rc)
5875 return rc;
5876
5877 setup_min_slab_ratio();
5878
5879 return 0;
5880 }
5881 #endif
5882
5883 /*
5884 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5885 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5886 * whenever sysctl_lowmem_reserve_ratio changes.
5887 *
5888 * The reserve ratio obviously has absolutely no relation with the
5889 * minimum watermarks. The lowmem reserve ratio can only make sense
5890 * if in function of the boot time zone sizes.
5891 */
lowmem_reserve_ratio_sysctl_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)5892 static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table,
5893 int write, void *buffer, size_t *length, loff_t *ppos)
5894 {
5895 int i;
5896
5897 proc_dointvec_minmax(table, write, buffer, length, ppos);
5898
5899 for (i = 0; i < MAX_NR_ZONES; i++) {
5900 if (sysctl_lowmem_reserve_ratio[i] < 1)
5901 sysctl_lowmem_reserve_ratio[i] = 0;
5902 }
5903
5904 setup_per_zone_lowmem_reserve();
5905 return 0;
5906 }
5907
5908 /*
5909 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
5910 * cpu. It is the fraction of total pages in each zone that a hot per cpu
5911 * pagelist can have before it gets flushed back to buddy allocator.
5912 */
percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)5913 static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
5914 int write, void *buffer, size_t *length, loff_t *ppos)
5915 {
5916 struct zone *zone;
5917 int old_percpu_pagelist_high_fraction;
5918 int ret;
5919
5920 mutex_lock(&pcp_batch_high_lock);
5921 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
5922
5923 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5924 if (!write || ret < 0)
5925 goto out;
5926
5927 /* Sanity checking to avoid pcp imbalance */
5928 if (percpu_pagelist_high_fraction &&
5929 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
5930 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
5931 ret = -EINVAL;
5932 goto out;
5933 }
5934
5935 /* No change? */
5936 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
5937 goto out;
5938
5939 for_each_populated_zone(zone)
5940 zone_set_pageset_high_and_batch(zone, 0);
5941 out:
5942 mutex_unlock(&pcp_batch_high_lock);
5943 return ret;
5944 }
5945
5946 static struct ctl_table page_alloc_sysctl_table[] = {
5947 {
5948 .procname = "min_free_kbytes",
5949 .data = &min_free_kbytes,
5950 .maxlen = sizeof(min_free_kbytes),
5951 .mode = 0644,
5952 .proc_handler = min_free_kbytes_sysctl_handler,
5953 .extra1 = SYSCTL_ZERO,
5954 },
5955 {
5956 .procname = "watermark_boost_factor",
5957 .data = &watermark_boost_factor,
5958 .maxlen = sizeof(watermark_boost_factor),
5959 .mode = 0644,
5960 .proc_handler = proc_dointvec_minmax,
5961 .extra1 = SYSCTL_ZERO,
5962 },
5963 {
5964 .procname = "watermark_scale_factor",
5965 .data = &watermark_scale_factor,
5966 .maxlen = sizeof(watermark_scale_factor),
5967 .mode = 0644,
5968 .proc_handler = watermark_scale_factor_sysctl_handler,
5969 .extra1 = SYSCTL_ONE,
5970 .extra2 = SYSCTL_THREE_THOUSAND,
5971 },
5972 {
5973 .procname = "percpu_pagelist_high_fraction",
5974 .data = &percpu_pagelist_high_fraction,
5975 .maxlen = sizeof(percpu_pagelist_high_fraction),
5976 .mode = 0644,
5977 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler,
5978 .extra1 = SYSCTL_ZERO,
5979 },
5980 {
5981 .procname = "lowmem_reserve_ratio",
5982 .data = &sysctl_lowmem_reserve_ratio,
5983 .maxlen = sizeof(sysctl_lowmem_reserve_ratio),
5984 .mode = 0644,
5985 .proc_handler = lowmem_reserve_ratio_sysctl_handler,
5986 },
5987 #ifdef CONFIG_NUMA
5988 {
5989 .procname = "numa_zonelist_order",
5990 .data = &numa_zonelist_order,
5991 .maxlen = NUMA_ZONELIST_ORDER_LEN,
5992 .mode = 0644,
5993 .proc_handler = numa_zonelist_order_handler,
5994 },
5995 {
5996 .procname = "min_unmapped_ratio",
5997 .data = &sysctl_min_unmapped_ratio,
5998 .maxlen = sizeof(sysctl_min_unmapped_ratio),
5999 .mode = 0644,
6000 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler,
6001 .extra1 = SYSCTL_ZERO,
6002 .extra2 = SYSCTL_ONE_HUNDRED,
6003 },
6004 {
6005 .procname = "min_slab_ratio",
6006 .data = &sysctl_min_slab_ratio,
6007 .maxlen = sizeof(sysctl_min_slab_ratio),
6008 .mode = 0644,
6009 .proc_handler = sysctl_min_slab_ratio_sysctl_handler,
6010 .extra1 = SYSCTL_ZERO,
6011 .extra2 = SYSCTL_ONE_HUNDRED,
6012 },
6013 #endif
6014 {}
6015 };
6016
page_alloc_sysctl_init(void)6017 void __init page_alloc_sysctl_init(void)
6018 {
6019 register_sysctl_init("vm", page_alloc_sysctl_table);
6020 }
6021
6022 #ifdef CONFIG_CONTIG_ALLOC
6023 /* Usage: See admin-guide/dynamic-debug-howto.rst */
alloc_contig_dump_pages(struct list_head * page_list)6024 static void alloc_contig_dump_pages(struct list_head *page_list)
6025 {
6026 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
6027
6028 if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
6029 struct page *page;
6030
6031 dump_stack();
6032 list_for_each_entry(page, page_list, lru)
6033 dump_page(page, "migration failure");
6034 }
6035 }
6036
6037 /* [start, end) must belong to a single zone. */
__alloc_contig_migrate_range(struct compact_control * cc,unsigned long start,unsigned long end)6038 int __alloc_contig_migrate_range(struct compact_control *cc,
6039 unsigned long start, unsigned long end)
6040 {
6041 /* This function is based on compact_zone() from compaction.c. */
6042 unsigned int nr_reclaimed;
6043 unsigned long pfn = start;
6044 unsigned int tries = 0;
6045 int ret = 0;
6046 struct migration_target_control mtc = {
6047 .nid = zone_to_nid(cc->zone),
6048 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
6049 };
6050
6051 lru_cache_disable();
6052
6053 while (pfn < end || !list_empty(&cc->migratepages)) {
6054 if (fatal_signal_pending(current)) {
6055 ret = -EINTR;
6056 break;
6057 }
6058
6059 if (list_empty(&cc->migratepages)) {
6060 cc->nr_migratepages = 0;
6061 ret = isolate_migratepages_range(cc, pfn, end);
6062 if (ret && ret != -EAGAIN)
6063 break;
6064 pfn = cc->migrate_pfn;
6065 tries = 0;
6066 } else if (++tries == 5) {
6067 ret = -EBUSY;
6068 break;
6069 }
6070
6071 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6072 &cc->migratepages);
6073 cc->nr_migratepages -= nr_reclaimed;
6074
6075 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
6076 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
6077
6078 /*
6079 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
6080 * to retry again over this error, so do the same here.
6081 */
6082 if (ret == -ENOMEM)
6083 break;
6084 }
6085
6086 lru_cache_enable();
6087 if (ret < 0) {
6088 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
6089 alloc_contig_dump_pages(&cc->migratepages);
6090 putback_movable_pages(&cc->migratepages);
6091 return ret;
6092 }
6093 return 0;
6094 }
6095
6096 /**
6097 * alloc_contig_range() -- tries to allocate given range of pages
6098 * @start: start PFN to allocate
6099 * @end: one-past-the-last PFN to allocate
6100 * @migratetype: migratetype of the underlying pageblocks (either
6101 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
6102 * in range must have the same migratetype and it must
6103 * be either of the two.
6104 * @gfp_mask: GFP mask to use during compaction
6105 *
6106 * The PFN range does not have to be pageblock aligned. The PFN range must
6107 * belong to a single zone.
6108 *
6109 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
6110 * pageblocks in the range. Once isolated, the pageblocks should not
6111 * be modified by others.
6112 *
6113 * Return: zero on success or negative error code. On success all
6114 * pages which PFN is in [start, end) are allocated for the caller and
6115 * need to be freed with free_contig_range().
6116 */
alloc_contig_range(unsigned long start,unsigned long end,unsigned migratetype,gfp_t gfp_mask)6117 int alloc_contig_range(unsigned long start, unsigned long end,
6118 unsigned migratetype, gfp_t gfp_mask)
6119 {
6120 unsigned long outer_start, outer_end;
6121 int order;
6122 int ret = 0;
6123
6124 struct compact_control cc = {
6125 .nr_migratepages = 0,
6126 .order = -1,
6127 .zone = page_zone(pfn_to_page(start)),
6128 .mode = MIGRATE_SYNC,
6129 .ignore_skip_hint = true,
6130 .no_set_skip_hint = true,
6131 .gfp_mask = current_gfp_context(gfp_mask),
6132 .alloc_contig = true,
6133 };
6134 INIT_LIST_HEAD(&cc.migratepages);
6135
6136 /*
6137 * What we do here is we mark all pageblocks in range as
6138 * MIGRATE_ISOLATE. Because pageblock and max order pages may
6139 * have different sizes, and due to the way page allocator
6140 * work, start_isolate_page_range() has special handlings for this.
6141 *
6142 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6143 * migrate the pages from an unaligned range (ie. pages that
6144 * we are interested in). This will put all the pages in
6145 * range back to page allocator as MIGRATE_ISOLATE.
6146 *
6147 * When this is done, we take the pages in range from page
6148 * allocator removing them from the buddy system. This way
6149 * page allocator will never consider using them.
6150 *
6151 * This lets us mark the pageblocks back as
6152 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6153 * aligned range but not in the unaligned, original range are
6154 * put back to page allocator so that buddy can use them.
6155 */
6156
6157 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
6158 if (ret)
6159 goto done;
6160
6161 drain_all_pages(cc.zone);
6162
6163 /*
6164 * In case of -EBUSY, we'd like to know which page causes problem.
6165 * So, just fall through. test_pages_isolated() has a tracepoint
6166 * which will report the busy page.
6167 *
6168 * It is possible that busy pages could become available before
6169 * the call to test_pages_isolated, and the range will actually be
6170 * allocated. So, if we fall through be sure to clear ret so that
6171 * -EBUSY is not accidentally used or returned to caller.
6172 */
6173 ret = __alloc_contig_migrate_range(&cc, start, end);
6174 if (ret && ret != -EBUSY)
6175 goto done;
6176 ret = 0;
6177
6178 /*
6179 * Pages from [start, end) are within a pageblock_nr_pages
6180 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
6181 * more, all pages in [start, end) are free in page allocator.
6182 * What we are going to do is to allocate all pages from
6183 * [start, end) (that is remove them from page allocator).
6184 *
6185 * The only problem is that pages at the beginning and at the
6186 * end of interesting range may be not aligned with pages that
6187 * page allocator holds, ie. they can be part of higher order
6188 * pages. Because of this, we reserve the bigger range and
6189 * once this is done free the pages we are not interested in.
6190 *
6191 * We don't have to hold zone->lock here because the pages are
6192 * isolated thus they won't get removed from buddy.
6193 */
6194
6195 order = 0;
6196 outer_start = start;
6197 while (!PageBuddy(pfn_to_page(outer_start))) {
6198 if (++order > MAX_ORDER) {
6199 outer_start = start;
6200 break;
6201 }
6202 outer_start &= ~0UL << order;
6203 }
6204
6205 if (outer_start != start) {
6206 order = buddy_order(pfn_to_page(outer_start));
6207
6208 /*
6209 * outer_start page could be small order buddy page and
6210 * it doesn't include start page. Adjust outer_start
6211 * in this case to report failed page properly
6212 * on tracepoint in test_pages_isolated()
6213 */
6214 if (outer_start + (1UL << order) <= start)
6215 outer_start = start;
6216 }
6217
6218 /* Make sure the range is really isolated. */
6219 if (test_pages_isolated(outer_start, end, 0)) {
6220 ret = -EBUSY;
6221 goto done;
6222 }
6223
6224 /* Grab isolated pages from freelists. */
6225 outer_end = isolate_freepages_range(&cc, outer_start, end);
6226 if (!outer_end) {
6227 ret = -EBUSY;
6228 goto done;
6229 }
6230
6231 /* Free head and tail (if any) */
6232 if (start != outer_start)
6233 free_contig_range(outer_start, start - outer_start);
6234 if (end != outer_end)
6235 free_contig_range(end, outer_end - end);
6236
6237 done:
6238 undo_isolate_page_range(start, end, migratetype);
6239 return ret;
6240 }
6241 EXPORT_SYMBOL(alloc_contig_range);
6242
__alloc_contig_pages(unsigned long start_pfn,unsigned long nr_pages,gfp_t gfp_mask)6243 static int __alloc_contig_pages(unsigned long start_pfn,
6244 unsigned long nr_pages, gfp_t gfp_mask)
6245 {
6246 unsigned long end_pfn = start_pfn + nr_pages;
6247
6248 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
6249 gfp_mask);
6250 }
6251
pfn_range_valid_contig(struct zone * z,unsigned long start_pfn,unsigned long nr_pages)6252 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
6253 unsigned long nr_pages)
6254 {
6255 unsigned long i, end_pfn = start_pfn + nr_pages;
6256 struct page *page;
6257
6258 for (i = start_pfn; i < end_pfn; i++) {
6259 page = pfn_to_online_page(i);
6260 if (!page)
6261 return false;
6262
6263 if (page_zone(page) != z)
6264 return false;
6265
6266 if (PageReserved(page))
6267 return false;
6268
6269 if (PageHuge(page))
6270 return false;
6271 }
6272 return true;
6273 }
6274
zone_spans_last_pfn(const struct zone * zone,unsigned long start_pfn,unsigned long nr_pages)6275 static bool zone_spans_last_pfn(const struct zone *zone,
6276 unsigned long start_pfn, unsigned long nr_pages)
6277 {
6278 unsigned long last_pfn = start_pfn + nr_pages - 1;
6279
6280 return zone_spans_pfn(zone, last_pfn);
6281 }
6282
6283 /**
6284 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
6285 * @nr_pages: Number of contiguous pages to allocate
6286 * @gfp_mask: GFP mask to limit search and used during compaction
6287 * @nid: Target node
6288 * @nodemask: Mask for other possible nodes
6289 *
6290 * This routine is a wrapper around alloc_contig_range(). It scans over zones
6291 * on an applicable zonelist to find a contiguous pfn range which can then be
6292 * tried for allocation with alloc_contig_range(). This routine is intended
6293 * for allocation requests which can not be fulfilled with the buddy allocator.
6294 *
6295 * The allocated memory is always aligned to a page boundary. If nr_pages is a
6296 * power of two, then allocated range is also guaranteed to be aligned to same
6297 * nr_pages (e.g. 1GB request would be aligned to 1GB).
6298 *
6299 * Allocated pages can be freed with free_contig_range() or by manually calling
6300 * __free_page() on each allocated page.
6301 *
6302 * Return: pointer to contiguous pages on success, or NULL if not successful.
6303 */
alloc_contig_pages(unsigned long nr_pages,gfp_t gfp_mask,int nid,nodemask_t * nodemask)6304 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
6305 int nid, nodemask_t *nodemask)
6306 {
6307 unsigned long ret, pfn, flags;
6308 struct zonelist *zonelist;
6309 struct zone *zone;
6310 struct zoneref *z;
6311
6312 zonelist = node_zonelist(nid, gfp_mask);
6313 for_each_zone_zonelist_nodemask(zone, z, zonelist,
6314 gfp_zone(gfp_mask), nodemask) {
6315 spin_lock_irqsave(&zone->lock, flags);
6316
6317 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
6318 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
6319 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
6320 /*
6321 * We release the zone lock here because
6322 * alloc_contig_range() will also lock the zone
6323 * at some point. If there's an allocation
6324 * spinning on this lock, it may win the race
6325 * and cause alloc_contig_range() to fail...
6326 */
6327 spin_unlock_irqrestore(&zone->lock, flags);
6328 ret = __alloc_contig_pages(pfn, nr_pages,
6329 gfp_mask);
6330 if (!ret)
6331 return pfn_to_page(pfn);
6332 spin_lock_irqsave(&zone->lock, flags);
6333 }
6334 pfn += nr_pages;
6335 }
6336 spin_unlock_irqrestore(&zone->lock, flags);
6337 }
6338 return NULL;
6339 }
6340 #endif /* CONFIG_CONTIG_ALLOC */
6341
free_contig_range(unsigned long pfn,unsigned long nr_pages)6342 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
6343 {
6344 unsigned long count = 0;
6345
6346 for (; nr_pages--; pfn++) {
6347 struct page *page = pfn_to_page(pfn);
6348
6349 count += page_count(page) != 1;
6350 __free_page(page);
6351 }
6352 WARN(count != 0, "%lu pages are still in use!\n", count);
6353 }
6354 EXPORT_SYMBOL(free_contig_range);
6355
6356 /*
6357 * Effectively disable pcplists for the zone by setting the high limit to 0
6358 * and draining all cpus. A concurrent page freeing on another CPU that's about
6359 * to put the page on pcplist will either finish before the drain and the page
6360 * will be drained, or observe the new high limit and skip the pcplist.
6361 *
6362 * Must be paired with a call to zone_pcp_enable().
6363 */
zone_pcp_disable(struct zone * zone)6364 void zone_pcp_disable(struct zone *zone)
6365 {
6366 mutex_lock(&pcp_batch_high_lock);
6367 __zone_set_pageset_high_and_batch(zone, 0, 1);
6368 __drain_all_pages(zone, true);
6369 }
6370
zone_pcp_enable(struct zone * zone)6371 void zone_pcp_enable(struct zone *zone)
6372 {
6373 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
6374 mutex_unlock(&pcp_batch_high_lock);
6375 }
6376
zone_pcp_reset(struct zone * zone)6377 void zone_pcp_reset(struct zone *zone)
6378 {
6379 int cpu;
6380 struct per_cpu_zonestat *pzstats;
6381
6382 if (zone->per_cpu_pageset != &boot_pageset) {
6383 for_each_online_cpu(cpu) {
6384 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6385 drain_zonestat(zone, pzstats);
6386 }
6387 free_percpu(zone->per_cpu_pageset);
6388 zone->per_cpu_pageset = &boot_pageset;
6389 if (zone->per_cpu_zonestats != &boot_zonestats) {
6390 free_percpu(zone->per_cpu_zonestats);
6391 zone->per_cpu_zonestats = &boot_zonestats;
6392 }
6393 }
6394 }
6395
6396 #ifdef CONFIG_MEMORY_HOTREMOVE
6397 /*
6398 * All pages in the range must be in a single zone, must not contain holes,
6399 * must span full sections, and must be isolated before calling this function.
6400 */
__offline_isolated_pages(unsigned long start_pfn,unsigned long end_pfn)6401 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6402 {
6403 unsigned long pfn = start_pfn;
6404 struct page *page;
6405 struct zone *zone;
6406 unsigned int order;
6407 unsigned long flags;
6408
6409 offline_mem_sections(pfn, end_pfn);
6410 zone = page_zone(pfn_to_page(pfn));
6411 spin_lock_irqsave(&zone->lock, flags);
6412 while (pfn < end_pfn) {
6413 page = pfn_to_page(pfn);
6414 /*
6415 * The HWPoisoned page may be not in buddy system, and
6416 * page_count() is not 0.
6417 */
6418 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6419 pfn++;
6420 continue;
6421 }
6422 /*
6423 * At this point all remaining PageOffline() pages have a
6424 * reference count of 0 and can simply be skipped.
6425 */
6426 if (PageOffline(page)) {
6427 BUG_ON(page_count(page));
6428 BUG_ON(PageBuddy(page));
6429 pfn++;
6430 continue;
6431 }
6432
6433 BUG_ON(page_count(page));
6434 BUG_ON(!PageBuddy(page));
6435 order = buddy_order(page);
6436 del_page_from_free_list(page, zone, order);
6437 pfn += (1 << order);
6438 }
6439 spin_unlock_irqrestore(&zone->lock, flags);
6440 }
6441 #endif
6442
6443 /*
6444 * This function returns a stable result only if called under zone lock.
6445 */
is_free_buddy_page(struct page * page)6446 bool is_free_buddy_page(struct page *page)
6447 {
6448 unsigned long pfn = page_to_pfn(page);
6449 unsigned int order;
6450
6451 for (order = 0; order < NR_PAGE_ORDERS; order++) {
6452 struct page *page_head = page - (pfn & ((1 << order) - 1));
6453
6454 if (PageBuddy(page_head) &&
6455 buddy_order_unsafe(page_head) >= order)
6456 break;
6457 }
6458
6459 return order <= MAX_ORDER;
6460 }
6461 EXPORT_SYMBOL(is_free_buddy_page);
6462
6463 #ifdef CONFIG_MEMORY_FAILURE
6464 /*
6465 * Break down a higher-order page in sub-pages, and keep our target out of
6466 * buddy allocator.
6467 */
break_down_buddy_pages(struct zone * zone,struct page * page,struct page * target,int low,int high,int migratetype)6468 static void break_down_buddy_pages(struct zone *zone, struct page *page,
6469 struct page *target, int low, int high,
6470 int migratetype)
6471 {
6472 unsigned long size = 1 << high;
6473 struct page *current_buddy, *next_page;
6474
6475 while (high > low) {
6476 high--;
6477 size >>= 1;
6478
6479 if (target >= &page[size]) {
6480 next_page = page + size;
6481 current_buddy = page;
6482 } else {
6483 next_page = page;
6484 current_buddy = page + size;
6485 }
6486 page = next_page;
6487
6488 if (set_page_guard(zone, current_buddy, high, migratetype))
6489 continue;
6490
6491 if (current_buddy != target) {
6492 add_to_free_list(current_buddy, zone, high, migratetype);
6493 set_buddy_order(current_buddy, high);
6494 }
6495 }
6496 }
6497
6498 /*
6499 * Take a page that will be marked as poisoned off the buddy allocator.
6500 */
take_page_off_buddy(struct page * page)6501 bool take_page_off_buddy(struct page *page)
6502 {
6503 struct zone *zone = page_zone(page);
6504 unsigned long pfn = page_to_pfn(page);
6505 unsigned long flags;
6506 unsigned int order;
6507 bool ret = false;
6508
6509 spin_lock_irqsave(&zone->lock, flags);
6510 for (order = 0; order < NR_PAGE_ORDERS; order++) {
6511 struct page *page_head = page - (pfn & ((1 << order) - 1));
6512 int page_order = buddy_order(page_head);
6513
6514 if (PageBuddy(page_head) && page_order >= order) {
6515 unsigned long pfn_head = page_to_pfn(page_head);
6516 int migratetype = get_pfnblock_migratetype(page_head,
6517 pfn_head);
6518
6519 del_page_from_free_list(page_head, zone, page_order);
6520 break_down_buddy_pages(zone, page_head, page, 0,
6521 page_order, migratetype);
6522 SetPageHWPoisonTakenOff(page);
6523 if (!is_migrate_isolate(migratetype))
6524 __mod_zone_freepage_state(zone, -1, migratetype);
6525 ret = true;
6526 break;
6527 }
6528 if (page_count(page_head) > 0)
6529 break;
6530 }
6531 spin_unlock_irqrestore(&zone->lock, flags);
6532 return ret;
6533 }
6534
6535 /*
6536 * Cancel takeoff done by take_page_off_buddy().
6537 */
put_page_back_buddy(struct page * page)6538 bool put_page_back_buddy(struct page *page)
6539 {
6540 struct zone *zone = page_zone(page);
6541 unsigned long pfn = page_to_pfn(page);
6542 unsigned long flags;
6543 int migratetype = get_pfnblock_migratetype(page, pfn);
6544 bool ret = false;
6545
6546 spin_lock_irqsave(&zone->lock, flags);
6547 if (put_page_testzero(page)) {
6548 ClearPageHWPoisonTakenOff(page);
6549 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
6550 if (TestClearPageHWPoison(page)) {
6551 ret = true;
6552 }
6553 }
6554 spin_unlock_irqrestore(&zone->lock, flags);
6555
6556 return ret;
6557 }
6558 #endif
6559
6560 #ifdef CONFIG_ZONE_DMA
has_managed_dma(void)6561 bool has_managed_dma(void)
6562 {
6563 struct pglist_data *pgdat;
6564
6565 for_each_online_pgdat(pgdat) {
6566 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
6567
6568 if (managed_zone(zone))
6569 return true;
6570 }
6571 return false;
6572 }
6573 #endif /* CONFIG_ZONE_DMA */
6574
6575 #ifdef CONFIG_UNACCEPTED_MEMORY
6576
6577 /* Counts number of zones with unaccepted pages. */
6578 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages);
6579
6580 static bool lazy_accept = true;
6581
accept_memory_parse(char * p)6582 static int __init accept_memory_parse(char *p)
6583 {
6584 if (!strcmp(p, "lazy")) {
6585 lazy_accept = true;
6586 return 0;
6587 } else if (!strcmp(p, "eager")) {
6588 lazy_accept = false;
6589 return 0;
6590 } else {
6591 return -EINVAL;
6592 }
6593 }
6594 early_param("accept_memory", accept_memory_parse);
6595
page_contains_unaccepted(struct page * page,unsigned int order)6596 static bool page_contains_unaccepted(struct page *page, unsigned int order)
6597 {
6598 phys_addr_t start = page_to_phys(page);
6599 phys_addr_t end = start + (PAGE_SIZE << order);
6600
6601 return range_contains_unaccepted_memory(start, end);
6602 }
6603
accept_page(struct page * page,unsigned int order)6604 static void accept_page(struct page *page, unsigned int order)
6605 {
6606 phys_addr_t start = page_to_phys(page);
6607
6608 accept_memory(start, start + (PAGE_SIZE << order));
6609 }
6610
try_to_accept_memory_one(struct zone * zone)6611 static bool try_to_accept_memory_one(struct zone *zone)
6612 {
6613 unsigned long flags;
6614 struct page *page;
6615 bool last;
6616
6617 spin_lock_irqsave(&zone->lock, flags);
6618 page = list_first_entry_or_null(&zone->unaccepted_pages,
6619 struct page, lru);
6620 if (!page) {
6621 spin_unlock_irqrestore(&zone->lock, flags);
6622 return false;
6623 }
6624
6625 list_del(&page->lru);
6626 last = list_empty(&zone->unaccepted_pages);
6627
6628 __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
6629 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
6630 spin_unlock_irqrestore(&zone->lock, flags);
6631
6632 accept_page(page, MAX_ORDER);
6633
6634 __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL);
6635
6636 if (last)
6637 static_branch_dec(&zones_with_unaccepted_pages);
6638
6639 return true;
6640 }
6641
cond_accept_memory(struct zone * zone,unsigned int order)6642 static bool cond_accept_memory(struct zone *zone, unsigned int order)
6643 {
6644 long to_accept;
6645 bool ret = false;
6646
6647 if (!has_unaccepted_memory())
6648 return false;
6649
6650 if (list_empty(&zone->unaccepted_pages))
6651 return false;
6652
6653 /* How much to accept to get to high watermark? */
6654 to_accept = high_wmark_pages(zone) -
6655 (zone_page_state(zone, NR_FREE_PAGES) -
6656 __zone_watermark_unusable_free(zone, order, 0) -
6657 zone_page_state(zone, NR_UNACCEPTED));
6658
6659 while (to_accept > 0) {
6660 if (!try_to_accept_memory_one(zone))
6661 break;
6662 ret = true;
6663 to_accept -= MAX_ORDER_NR_PAGES;
6664 }
6665
6666 return ret;
6667 }
6668
has_unaccepted_memory(void)6669 static inline bool has_unaccepted_memory(void)
6670 {
6671 return static_branch_unlikely(&zones_with_unaccepted_pages);
6672 }
6673
__free_unaccepted(struct page * page)6674 static bool __free_unaccepted(struct page *page)
6675 {
6676 struct zone *zone = page_zone(page);
6677 unsigned long flags;
6678 bool first = false;
6679
6680 if (!lazy_accept)
6681 return false;
6682
6683 spin_lock_irqsave(&zone->lock, flags);
6684 first = list_empty(&zone->unaccepted_pages);
6685 list_add_tail(&page->lru, &zone->unaccepted_pages);
6686 __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
6687 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
6688 spin_unlock_irqrestore(&zone->lock, flags);
6689
6690 if (first)
6691 static_branch_inc(&zones_with_unaccepted_pages);
6692
6693 return true;
6694 }
6695
6696 #else
6697
page_contains_unaccepted(struct page * page,unsigned int order)6698 static bool page_contains_unaccepted(struct page *page, unsigned int order)
6699 {
6700 return false;
6701 }
6702
accept_page(struct page * page,unsigned int order)6703 static void accept_page(struct page *page, unsigned int order)
6704 {
6705 }
6706
cond_accept_memory(struct zone * zone,unsigned int order)6707 static bool cond_accept_memory(struct zone *zone, unsigned int order)
6708 {
6709 return false;
6710 }
6711
has_unaccepted_memory(void)6712 static inline bool has_unaccepted_memory(void)
6713 {
6714 return false;
6715 }
6716
__free_unaccepted(struct page * page)6717 static bool __free_unaccepted(struct page *page)
6718 {
6719 BUILD_BUG();
6720 return false;
6721 }
6722
6723 #endif /* CONFIG_UNACCEPTED_MEMORY */
6724