xref: /openbmc/linux/mm/Kconfig (revision 54ad9c76)
1# SPDX-License-Identifier: GPL-2.0-only
2
3menu "Memory Management options"
4
5#
6# For some reason microblaze and nios2 hard code SWAP=n.  Hopefully we can
7# add proper SWAP support to them, in which case this can be removed.
8#
9config ARCH_NO_SWAP
10	bool
11
12config ZPOOL
13	bool
14
15menuconfig SWAP
16	bool "Support for paging of anonymous memory (swap)"
17	depends on MMU && BLOCK && !ARCH_NO_SWAP
18	default y
19	help
20	  This option allows you to choose whether you want to have support
21	  for so called swap devices or swap files in your kernel that are
22	  used to provide more virtual memory than the actual RAM present
23	  in your computer.  If unsure say Y.
24
25config ZSWAP
26	bool "Compressed cache for swap pages"
27	depends on SWAP
28	select CRYPTO
29	select ZPOOL
30	help
31	  A lightweight compressed cache for swap pages.  It takes
32	  pages that are in the process of being swapped out and attempts to
33	  compress them into a dynamically allocated RAM-based memory pool.
34	  This can result in a significant I/O reduction on swap device and,
35	  in the case where decompressing from RAM is faster than swap device
36	  reads, can also improve workload performance.
37
38config ZSWAP_DEFAULT_ON
39	bool "Enable the compressed cache for swap pages by default"
40	depends on ZSWAP
41	help
42	  If selected, the compressed cache for swap pages will be enabled
43	  at boot, otherwise it will be disabled.
44
45	  The selection made here can be overridden by using the kernel
46	  command line 'zswap.enabled=' option.
47
48config ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON
49	bool "Invalidate zswap entries when pages are loaded"
50	depends on ZSWAP
51	help
52	  If selected, exclusive loads for zswap will be enabled at boot,
53	  otherwise it will be disabled.
54
55	  If exclusive loads are enabled, when a page is loaded from zswap,
56	  the zswap entry is invalidated at once, as opposed to leaving it
57	  in zswap until the swap entry is freed.
58
59	  This avoids having two copies of the same page in memory
60	  (compressed and uncompressed) after faulting in a page from zswap.
61	  The cost is that if the page was never dirtied and needs to be
62	  swapped out again, it will be re-compressed.
63
64choice
65	prompt "Default compressor"
66	depends on ZSWAP
67	default ZSWAP_COMPRESSOR_DEFAULT_LZO
68	help
69	  Selects the default compression algorithm for the compressed cache
70	  for swap pages.
71
72	  For an overview of what kind of performance can be expected from
73	  a particular compression algorithm please refer to the benchmarks
74	  available at the following LWN page:
75	  https://lwn.net/Articles/751795/
76
77	  If in doubt, select 'LZO'.
78
79	  The selection made here can be overridden by using the kernel
80	  command line 'zswap.compressor=' option.
81
82config ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
83	bool "Deflate"
84	select CRYPTO_DEFLATE
85	help
86	  Use the Deflate algorithm as the default compression algorithm.
87
88config ZSWAP_COMPRESSOR_DEFAULT_LZO
89	bool "LZO"
90	select CRYPTO_LZO
91	help
92	  Use the LZO algorithm as the default compression algorithm.
93
94config ZSWAP_COMPRESSOR_DEFAULT_842
95	bool "842"
96	select CRYPTO_842
97	help
98	  Use the 842 algorithm as the default compression algorithm.
99
100config ZSWAP_COMPRESSOR_DEFAULT_LZ4
101	bool "LZ4"
102	select CRYPTO_LZ4
103	help
104	  Use the LZ4 algorithm as the default compression algorithm.
105
106config ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
107	bool "LZ4HC"
108	select CRYPTO_LZ4HC
109	help
110	  Use the LZ4HC algorithm as the default compression algorithm.
111
112config ZSWAP_COMPRESSOR_DEFAULT_ZSTD
113	bool "zstd"
114	select CRYPTO_ZSTD
115	help
116	  Use the zstd algorithm as the default compression algorithm.
117endchoice
118
119config ZSWAP_COMPRESSOR_DEFAULT
120       string
121       depends on ZSWAP
122       default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
123       default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
124       default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
125       default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
126       default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
127       default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
128       default ""
129
130choice
131	prompt "Default allocator"
132	depends on ZSWAP
133	default ZSWAP_ZPOOL_DEFAULT_ZBUD
134	help
135	  Selects the default allocator for the compressed cache for
136	  swap pages.
137	  The default is 'zbud' for compatibility, however please do
138	  read the description of each of the allocators below before
139	  making the right choice.
140
141	  The selection made here can be overridden by using the kernel
142	  command line 'zswap.zpool=' option.
143
144config ZSWAP_ZPOOL_DEFAULT_ZBUD
145	bool "zbud"
146	select ZBUD
147	help
148	  Use the zbud allocator as the default allocator.
149
150config ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
151	bool "z3fold (DEPRECATED)"
152	select Z3FOLD_DEPRECATED
153	help
154	  Use the z3fold allocator as the default allocator.
155
156	  Deprecated and scheduled for removal in a few cycles,
157	  see CONFIG_Z3FOLD_DEPRECATED.
158
159config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
160	bool "zsmalloc"
161	select ZSMALLOC
162	help
163	  Use the zsmalloc allocator as the default allocator.
164endchoice
165
166config ZSWAP_ZPOOL_DEFAULT
167       string
168       depends on ZSWAP
169       default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
170       default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
171       default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
172       default ""
173
174config ZBUD
175	tristate "2:1 compression allocator (zbud)"
176	depends on ZSWAP
177	help
178	  A special purpose allocator for storing compressed pages.
179	  It is designed to store up to two compressed pages per physical
180	  page.  While this design limits storage density, it has simple and
181	  deterministic reclaim properties that make it preferable to a higher
182	  density approach when reclaim will be used.
183
184config Z3FOLD_DEPRECATED
185	tristate "3:1 compression allocator (z3fold) (DEPRECATED)"
186	depends on ZSWAP
187	help
188	  Deprecated and scheduled for removal in a few cycles. If you have
189	  a good reason for using Z3FOLD over ZSMALLOC, please contact
190	  linux-mm@kvack.org and the zswap maintainers.
191
192	  A special purpose allocator for storing compressed pages.
193	  It is designed to store up to three compressed pages per physical
194	  page. It is a ZBUD derivative so the simplicity and determinism are
195	  still there.
196
197config Z3FOLD
198	tristate
199	default y if Z3FOLD_DEPRECATED=y
200	default m if Z3FOLD_DEPRECATED=m
201	depends on Z3FOLD_DEPRECATED
202
203config ZSMALLOC
204	tristate
205	prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
206	depends on MMU
207	help
208	  zsmalloc is a slab-based memory allocator designed to store
209	  pages of various compression levels efficiently. It achieves
210	  the highest storage density with the least amount of fragmentation.
211
212config ZSMALLOC_STAT
213	bool "Export zsmalloc statistics"
214	depends on ZSMALLOC
215	select DEBUG_FS
216	help
217	  This option enables code in the zsmalloc to collect various
218	  statistics about what's happening in zsmalloc and exports that
219	  information to userspace via debugfs.
220	  If unsure, say N.
221
222config ZSMALLOC_CHAIN_SIZE
223	int "Maximum number of physical pages per-zspage"
224	default 8
225	range 4 16
226	depends on ZSMALLOC
227	help
228	  This option sets the upper limit on the number of physical pages
229	  that a zsmalloc page (zspage) can consist of. The optimal zspage
230	  chain size is calculated for each size class during the
231	  initialization of the pool.
232
233	  Changing this option can alter the characteristics of size classes,
234	  such as the number of pages per zspage and the number of objects
235	  per zspage. This can also result in different configurations of
236	  the pool, as zsmalloc merges size classes with similar
237	  characteristics.
238
239	  For more information, see zsmalloc documentation.
240
241menu "SLAB allocator options"
242
243choice
244	prompt "Choose SLAB allocator"
245	default SLUB
246	help
247	   This option allows you to select a slab allocator.
248
249config SLAB_DEPRECATED
250	bool "SLAB (DEPRECATED)"
251	depends on !PREEMPT_RT
252	help
253	  Deprecated and scheduled for removal in a few cycles. Replaced by
254	  SLUB.
255
256	  If you cannot migrate to SLUB, please contact linux-mm@kvack.org
257	  and the people listed in the SLAB ALLOCATOR section of MAINTAINERS
258	  file, explaining why.
259
260	  The regular slab allocator that is established and known to work
261	  well in all environments. It organizes cache hot objects in
262	  per cpu and per node queues.
263
264config SLUB
265	bool "SLUB (Unqueued Allocator)"
266	help
267	   SLUB is a slab allocator that minimizes cache line usage
268	   instead of managing queues of cached objects (SLAB approach).
269	   Per cpu caching is realized using slabs of objects instead
270	   of queues of objects. SLUB can use memory efficiently
271	   and has enhanced diagnostics. SLUB is the default choice for
272	   a slab allocator.
273
274endchoice
275
276config SLAB
277	bool
278	default y
279	depends on SLAB_DEPRECATED
280
281config SLUB_TINY
282	bool "Configure SLUB for minimal memory footprint"
283	depends on SLUB && EXPERT
284	select SLAB_MERGE_DEFAULT
285	help
286	   Configures the SLUB allocator in a way to achieve minimal memory
287	   footprint, sacrificing scalability, debugging and other features.
288	   This is intended only for the smallest system that had used the
289	   SLOB allocator and is not recommended for systems with more than
290	   16MB RAM.
291
292	   If unsure, say N.
293
294config SLAB_MERGE_DEFAULT
295	bool "Allow slab caches to be merged"
296	default y
297	depends on SLAB || SLUB
298	help
299	  For reduced kernel memory fragmentation, slab caches can be
300	  merged when they share the same size and other characteristics.
301	  This carries a risk of kernel heap overflows being able to
302	  overwrite objects from merged caches (and more easily control
303	  cache layout), which makes such heap attacks easier to exploit
304	  by attackers. By keeping caches unmerged, these kinds of exploits
305	  can usually only damage objects in the same cache. To disable
306	  merging at runtime, "slab_nomerge" can be passed on the kernel
307	  command line.
308
309config SLAB_FREELIST_RANDOM
310	bool "Randomize slab freelist"
311	depends on SLAB || (SLUB && !SLUB_TINY)
312	help
313	  Randomizes the freelist order used on creating new pages. This
314	  security feature reduces the predictability of the kernel slab
315	  allocator against heap overflows.
316
317config SLAB_FREELIST_HARDENED
318	bool "Harden slab freelist metadata"
319	depends on SLAB || (SLUB && !SLUB_TINY)
320	help
321	  Many kernel heap attacks try to target slab cache metadata and
322	  other infrastructure. This option makes minor performance
323	  sacrifices to harden the kernel slab allocator against common
324	  freelist exploit methods. Some slab implementations have more
325	  sanity-checking than others. This option is most effective with
326	  CONFIG_SLUB.
327
328config SLUB_STATS
329	default n
330	bool "Enable SLUB performance statistics"
331	depends on SLUB && SYSFS && !SLUB_TINY
332	help
333	  SLUB statistics are useful to debug SLUBs allocation behavior in
334	  order to find ways to optimize the allocator. This should never be
335	  enabled for production use since keeping statistics slows down
336	  the allocator by a few percentage points. The slabinfo command
337	  supports the determination of the most active slabs to figure
338	  out which slabs are relevant to a particular load.
339	  Try running: slabinfo -DA
340
341config SLUB_CPU_PARTIAL
342	default y
343	depends on SLUB && SMP && !SLUB_TINY
344	bool "SLUB per cpu partial cache"
345	help
346	  Per cpu partial caches accelerate objects allocation and freeing
347	  that is local to a processor at the price of more indeterminism
348	  in the latency of the free. On overflow these caches will be cleared
349	  which requires the taking of locks that may cause latency spikes.
350	  Typically one would choose no for a realtime system.
351
352config RANDOM_KMALLOC_CACHES
353	default n
354	depends on SLUB && !SLUB_TINY
355	bool "Randomize slab caches for normal kmalloc"
356	help
357	  A hardening feature that creates multiple copies of slab caches for
358	  normal kmalloc allocation and makes kmalloc randomly pick one based
359	  on code address, which makes it more difficult for attackers to spray
360	  vulnerable memory objects on the heap for the purpose of exploiting
361	  memory vulnerabilities.
362
363	  Currently the number of copies is set to 16, a reasonably large value
364	  that effectively diverges the memory objects allocated for different
365	  subsystems or modules into different caches, at the expense of a
366	  limited degree of memory and CPU overhead that relates to hardware and
367	  system workload.
368
369endmenu # SLAB allocator options
370
371config SHUFFLE_PAGE_ALLOCATOR
372	bool "Page allocator randomization"
373	default SLAB_FREELIST_RANDOM && ACPI_NUMA
374	help
375	  Randomization of the page allocator improves the average
376	  utilization of a direct-mapped memory-side-cache. See section
377	  5.2.27 Heterogeneous Memory Attribute Table (HMAT) in the ACPI
378	  6.2a specification for an example of how a platform advertises
379	  the presence of a memory-side-cache. There are also incidental
380	  security benefits as it reduces the predictability of page
381	  allocations to complement SLAB_FREELIST_RANDOM, but the
382	  default granularity of shuffling on the MAX_ORDER i.e., 10th
383	  order of pages is selected based on cache utilization benefits
384	  on x86.
385
386	  While the randomization improves cache utilization it may
387	  negatively impact workloads on platforms without a cache. For
388	  this reason, by default, the randomization is enabled only
389	  after runtime detection of a direct-mapped memory-side-cache.
390	  Otherwise, the randomization may be force enabled with the
391	  'page_alloc.shuffle' kernel command line parameter.
392
393	  Say Y if unsure.
394
395config COMPAT_BRK
396	bool "Disable heap randomization"
397	default y
398	help
399	  Randomizing heap placement makes heap exploits harder, but it
400	  also breaks ancient binaries (including anything libc5 based).
401	  This option changes the bootup default to heap randomization
402	  disabled, and can be overridden at runtime by setting
403	  /proc/sys/kernel/randomize_va_space to 2.
404
405	  On non-ancient distros (post-2000 ones) N is usually a safe choice.
406
407config MMAP_ALLOW_UNINITIALIZED
408	bool "Allow mmapped anonymous memory to be uninitialized"
409	depends on EXPERT && !MMU
410	default n
411	help
412	  Normally, and according to the Linux spec, anonymous memory obtained
413	  from mmap() has its contents cleared before it is passed to
414	  userspace.  Enabling this config option allows you to request that
415	  mmap() skip that if it is given an MAP_UNINITIALIZED flag, thus
416	  providing a huge performance boost.  If this option is not enabled,
417	  then the flag will be ignored.
418
419	  This is taken advantage of by uClibc's malloc(), and also by
420	  ELF-FDPIC binfmt's brk and stack allocator.
421
422	  Because of the obvious security issues, this option should only be
423	  enabled on embedded devices where you control what is run in
424	  userspace.  Since that isn't generally a problem on no-MMU systems,
425	  it is normally safe to say Y here.
426
427	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
428
429config SELECT_MEMORY_MODEL
430	def_bool y
431	depends on ARCH_SELECT_MEMORY_MODEL
432
433choice
434	prompt "Memory model"
435	depends on SELECT_MEMORY_MODEL
436	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
437	default FLATMEM_MANUAL
438	help
439	  This option allows you to change some of the ways that
440	  Linux manages its memory internally. Most users will
441	  only have one option here selected by the architecture
442	  configuration. This is normal.
443
444config FLATMEM_MANUAL
445	bool "Flat Memory"
446	depends on !ARCH_SPARSEMEM_ENABLE || ARCH_FLATMEM_ENABLE
447	help
448	  This option is best suited for non-NUMA systems with
449	  flat address space. The FLATMEM is the most efficient
450	  system in terms of performance and resource consumption
451	  and it is the best option for smaller systems.
452
453	  For systems that have holes in their physical address
454	  spaces and for features like NUMA and memory hotplug,
455	  choose "Sparse Memory".
456
457	  If unsure, choose this option (Flat Memory) over any other.
458
459config SPARSEMEM_MANUAL
460	bool "Sparse Memory"
461	depends on ARCH_SPARSEMEM_ENABLE
462	help
463	  This will be the only option for some systems, including
464	  memory hot-plug systems.  This is normal.
465
466	  This option provides efficient support for systems with
467	  holes in their physical address space and allows memory
468	  hot-plug and hot-remove.
469
470	  If unsure, choose "Flat Memory" over this option.
471
472endchoice
473
474config SPARSEMEM
475	def_bool y
476	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL
477
478config FLATMEM
479	def_bool y
480	depends on !SPARSEMEM || FLATMEM_MANUAL
481
482#
483# SPARSEMEM_EXTREME (which is the default) does some bootmem
484# allocations when sparse_init() is called.  If this cannot
485# be done on your architecture, select this option.  However,
486# statically allocating the mem_section[] array can potentially
487# consume vast quantities of .bss, so be careful.
488#
489# This option will also potentially produce smaller runtime code
490# with gcc 3.4 and later.
491#
492config SPARSEMEM_STATIC
493	bool
494
495#
496# Architecture platforms which require a two level mem_section in SPARSEMEM
497# must select this option. This is usually for architecture platforms with
498# an extremely sparse physical address space.
499#
500config SPARSEMEM_EXTREME
501	def_bool y
502	depends on SPARSEMEM && !SPARSEMEM_STATIC
503
504config SPARSEMEM_VMEMMAP_ENABLE
505	bool
506
507config SPARSEMEM_VMEMMAP
508	bool "Sparse Memory virtual memmap"
509	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
510	default y
511	help
512	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
513	  pfn_to_page and page_to_pfn operations.  This is the most
514	  efficient option when sufficient kernel resources are available.
515#
516# Select this config option from the architecture Kconfig, if it is preferred
517# to enable the feature of HugeTLB/dev_dax vmemmap optimization.
518#
519config ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
520	bool
521
522config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
523	bool
524
525config HAVE_MEMBLOCK_PHYS_MAP
526	bool
527
528config HAVE_FAST_GUP
529	depends on MMU
530	bool
531
532# Don't discard allocated memory used to track "memory" and "reserved" memblocks
533# after early boot, so it can still be used to test for validity of memory.
534# Also, memblocks are updated with memory hot(un)plug.
535config ARCH_KEEP_MEMBLOCK
536	bool
537
538# Keep arch NUMA mapping infrastructure post-init.
539config NUMA_KEEP_MEMINFO
540	bool
541
542config MEMORY_ISOLATION
543	bool
544
545# IORESOURCE_SYSTEM_RAM regions in the kernel resource tree that are marked
546# IORESOURCE_EXCLUSIVE cannot be mapped to user space, for example, via
547# /dev/mem.
548config EXCLUSIVE_SYSTEM_RAM
549	def_bool y
550	depends on !DEVMEM || STRICT_DEVMEM
551
552#
553# Only be set on architectures that have completely implemented memory hotplug
554# feature. If you are not sure, don't touch it.
555#
556config HAVE_BOOTMEM_INFO_NODE
557	def_bool n
558
559config ARCH_ENABLE_MEMORY_HOTPLUG
560	bool
561
562config ARCH_ENABLE_MEMORY_HOTREMOVE
563	bool
564
565# eventually, we can have this option just 'select SPARSEMEM'
566menuconfig MEMORY_HOTPLUG
567	bool "Memory hotplug"
568	select MEMORY_ISOLATION
569	depends on SPARSEMEM
570	depends on ARCH_ENABLE_MEMORY_HOTPLUG
571	depends on 64BIT
572	select NUMA_KEEP_MEMINFO if NUMA
573
574if MEMORY_HOTPLUG
575
576config MEMORY_HOTPLUG_DEFAULT_ONLINE
577	bool "Online the newly added memory blocks by default"
578	depends on MEMORY_HOTPLUG
579	help
580	  This option sets the default policy setting for memory hotplug
581	  onlining policy (/sys/devices/system/memory/auto_online_blocks) which
582	  determines what happens to newly added memory regions. Policy setting
583	  can always be changed at runtime.
584	  See Documentation/admin-guide/mm/memory-hotplug.rst for more information.
585
586	  Say Y here if you want all hot-plugged memory blocks to appear in
587	  'online' state by default.
588	  Say N here if you want the default policy to keep all hot-plugged
589	  memory blocks in 'offline' state.
590
591config MEMORY_HOTREMOVE
592	bool "Allow for memory hot remove"
593	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
594	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
595	depends on MIGRATION
596
597config MHP_MEMMAP_ON_MEMORY
598	def_bool y
599	depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP
600	depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
601
602endif # MEMORY_HOTPLUG
603
604config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
605       bool
606
607# Heavily threaded applications may benefit from splitting the mm-wide
608# page_table_lock, so that faults on different parts of the user address
609# space can be handled with less contention: split it at this NR_CPUS.
610# Default to 4 for wider testing, though 8 might be more appropriate.
611# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
612# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
613# SPARC32 allocates multiple pte tables within a single page, and therefore
614# a per-page lock leads to problems when multiple tables need to be locked
615# at the same time (e.g. copy_page_range()).
616# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
617#
618config SPLIT_PTLOCK_CPUS
619	int
620	default "999999" if !MMU
621	default "999999" if ARM && !CPU_CACHE_VIPT
622	default "999999" if PARISC && !PA20
623	default "999999" if SPARC32
624	default "4"
625
626config ARCH_ENABLE_SPLIT_PMD_PTLOCK
627	bool
628
629#
630# support for memory balloon
631config MEMORY_BALLOON
632	bool
633
634#
635# support for memory balloon compaction
636config BALLOON_COMPACTION
637	bool "Allow for balloon memory compaction/migration"
638	def_bool y
639	depends on COMPACTION && MEMORY_BALLOON
640	help
641	  Memory fragmentation introduced by ballooning might reduce
642	  significantly the number of 2MB contiguous memory blocks that can be
643	  used within a guest, thus imposing performance penalties associated
644	  with the reduced number of transparent huge pages that could be used
645	  by the guest workload. Allowing the compaction & migration for memory
646	  pages enlisted as being part of memory balloon devices avoids the
647	  aforementioned scenario and helps improve memory defragmentation.
648
649#
650# support for memory compaction
651config COMPACTION
652	bool "Allow for memory compaction"
653	def_bool y
654	select MIGRATION
655	depends on MMU
656	help
657	  Compaction is the only memory management component to form
658	  high order (larger physically contiguous) memory blocks
659	  reliably. The page allocator relies on compaction heavily and
660	  the lack of the feature can lead to unexpected OOM killer
661	  invocations for high order memory requests. You shouldn't
662	  disable this option unless there really is a strong reason for
663	  it and then we would be really interested to hear about that at
664	  linux-mm@kvack.org.
665
666config COMPACT_UNEVICTABLE_DEFAULT
667	int
668	depends on COMPACTION
669	default 0 if PREEMPT_RT
670	default 1
671
672#
673# support for free page reporting
674config PAGE_REPORTING
675	bool "Free page reporting"
676	def_bool n
677	help
678	  Free page reporting allows for the incremental acquisition of
679	  free pages from the buddy allocator for the purpose of reporting
680	  those pages to another entity, such as a hypervisor, so that the
681	  memory can be freed within the host for other uses.
682
683#
684# support for page migration
685#
686config MIGRATION
687	bool "Page migration"
688	def_bool y
689	depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
690	help
691	  Allows the migration of the physical location of pages of processes
692	  while the virtual addresses are not changed. This is useful in
693	  two situations. The first is on NUMA systems to put pages nearer
694	  to the processors accessing. The second is when allocating huge
695	  pages as migration can relocate pages to satisfy a huge page
696	  allocation instead of reclaiming.
697
698config DEVICE_MIGRATION
699	def_bool MIGRATION && ZONE_DEVICE
700
701config ARCH_ENABLE_HUGEPAGE_MIGRATION
702	bool
703
704config ARCH_ENABLE_THP_MIGRATION
705	bool
706
707config HUGETLB_PAGE_SIZE_VARIABLE
708	def_bool n
709	help
710	  Allows the pageblock_order value to be dynamic instead of just standard
711	  HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
712	  on a platform.
713
714	  Note that the pageblock_order cannot exceed MAX_ORDER and will be
715	  clamped down to MAX_ORDER.
716
717config CONTIG_ALLOC
718	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
719
720config PCP_BATCH_SCALE_MAX
721	int "Maximum scale factor of PCP (Per-CPU pageset) batch allocate/free"
722	default 5
723	range 0 6
724	help
725	  In page allocator, PCP (Per-CPU pageset) is refilled and drained in
726	  batches.  The batch number is scaled automatically to improve page
727	  allocation/free throughput.  But too large scale factor may hurt
728	  latency.  This option sets the upper limit of scale factor to limit
729	  the maximum latency.
730
731config PHYS_ADDR_T_64BIT
732	def_bool 64BIT
733
734config BOUNCE
735	bool "Enable bounce buffers"
736	default y
737	depends on BLOCK && MMU && HIGHMEM
738	help
739	  Enable bounce buffers for devices that cannot access the full range of
740	  memory available to the CPU. Enabled by default when HIGHMEM is
741	  selected, but you may say n to override this.
742
743config MMU_NOTIFIER
744	bool
745	select INTERVAL_TREE
746
747config KSM
748	bool "Enable KSM for page merging"
749	depends on MMU
750	select XXHASH
751	help
752	  Enable Kernel Samepage Merging: KSM periodically scans those areas
753	  of an application's address space that an app has advised may be
754	  mergeable.  When it finds pages of identical content, it replaces
755	  the many instances by a single page with that content, so
756	  saving memory until one or another app needs to modify the content.
757	  Recommended for use with KVM, or with other duplicative applications.
758	  See Documentation/mm/ksm.rst for more information: KSM is inactive
759	  until a program has madvised that an area is MADV_MERGEABLE, and
760	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
761
762config DEFAULT_MMAP_MIN_ADDR
763	int "Low address space to protect from user allocation"
764	depends on MMU
765	default 4096
766	help
767	  This is the portion of low virtual memory which should be protected
768	  from userspace allocation.  Keeping a user from writing to low pages
769	  can help reduce the impact of kernel NULL pointer bugs.
770
771	  For most ia64, ppc64 and x86 users with lots of address space
772	  a value of 65536 is reasonable and should cause no problems.
773	  On arm and other archs it should not be higher than 32768.
774	  Programs which use vm86 functionality or have some need to map
775	  this low address space will need CAP_SYS_RAWIO or disable this
776	  protection by setting the value to 0.
777
778	  This value can be changed after boot using the
779	  /proc/sys/vm/mmap_min_addr tunable.
780
781config ARCH_SUPPORTS_MEMORY_FAILURE
782	bool
783
784config MEMORY_FAILURE
785	depends on MMU
786	depends on ARCH_SUPPORTS_MEMORY_FAILURE
787	bool "Enable recovery from hardware memory errors"
788	select MEMORY_ISOLATION
789	select RAS
790	help
791	  Enables code to recover from some memory failures on systems
792	  with MCA recovery. This allows a system to continue running
793	  even when some of its memory has uncorrected errors. This requires
794	  special hardware support and typically ECC memory.
795
796config HWPOISON_INJECT
797	tristate "HWPoison pages injector"
798	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
799	select PROC_PAGE_MONITOR
800
801config NOMMU_INITIAL_TRIM_EXCESS
802	int "Turn on mmap() excess space trimming before booting"
803	depends on !MMU
804	default 1
805	help
806	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
807	  of memory on which to store mappings, but it can only ask the system
808	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
809	  more than it requires.  To deal with this, mmap() is able to trim off
810	  the excess and return it to the allocator.
811
812	  If trimming is enabled, the excess is trimmed off and returned to the
813	  system allocator, which can cause extra fragmentation, particularly
814	  if there are a lot of transient processes.
815
816	  If trimming is disabled, the excess is kept, but not used, which for
817	  long-term mappings means that the space is wasted.
818
819	  Trimming can be dynamically controlled through a sysctl option
820	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
821	  excess pages there must be before trimming should occur, or zero if
822	  no trimming is to occur.
823
824	  This option specifies the initial value of this option.  The default
825	  of 1 says that all excess pages should be trimmed.
826
827	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
828
829config ARCH_WANT_GENERAL_HUGETLB
830	bool
831
832config ARCH_WANTS_THP_SWAP
833	def_bool n
834
835menuconfig TRANSPARENT_HUGEPAGE
836	bool "Transparent Hugepage Support"
837	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT
838	select COMPACTION
839	select XARRAY_MULTI
840	help
841	  Transparent Hugepages allows the kernel to use huge pages and
842	  huge tlb transparently to the applications whenever possible.
843	  This feature can improve computing performance to certain
844	  applications by speeding up page faults during memory
845	  allocation, by reducing the number of tlb misses and by speeding
846	  up the pagetable walking.
847
848	  If memory constrained on embedded, you may want to say N.
849
850if TRANSPARENT_HUGEPAGE
851
852choice
853	prompt "Transparent Hugepage Support sysfs defaults"
854	depends on TRANSPARENT_HUGEPAGE
855	default TRANSPARENT_HUGEPAGE_ALWAYS
856	help
857	  Selects the sysfs defaults for Transparent Hugepage Support.
858
859	config TRANSPARENT_HUGEPAGE_ALWAYS
860		bool "always"
861	help
862	  Enabling Transparent Hugepage always, can increase the
863	  memory footprint of applications without a guaranteed
864	  benefit but it will work automatically for all applications.
865
866	config TRANSPARENT_HUGEPAGE_MADVISE
867		bool "madvise"
868	help
869	  Enabling Transparent Hugepage madvise, will only provide a
870	  performance improvement benefit to the applications using
871	  madvise(MADV_HUGEPAGE) but it won't risk to increase the
872	  memory footprint of applications without a guaranteed
873	  benefit.
874endchoice
875
876config THP_SWAP
877	def_bool y
878	depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP && 64BIT
879	help
880	  Swap transparent huge pages in one piece, without splitting.
881	  XXX: For now, swap cluster backing transparent huge page
882	  will be split after swapout.
883
884	  For selection by architectures with reasonable THP sizes.
885
config READ_ONLY_THP_FOR_FS
	bool "Read-only THP for filesystems (EXPERIMENTAL)"
	depends on TRANSPARENT_HUGEPAGE && SHMEM
	help
	  Allow khugepaged to put read-only file-backed pages in THP.

	  This is marked experimental because it is a new feature. Write
	  support of file THPs will be developed in the next few release
	  cycles.
896
897endif # TRANSPARENT_HUGEPAGE
898
899#
900# UP and nommu archs use km based percpu allocator
901#
902config NEED_PER_CPU_KM
903	depends on !SMP || !MMU
904	bool
905	default y
906
907config NEED_PER_CPU_EMBED_FIRST_CHUNK
908	bool
909
910config NEED_PER_CPU_PAGE_FIRST_CHUNK
911	bool
912
913config USE_PERCPU_NUMA_NODE_ID
914	bool
915
916config HAVE_SETUP_PER_CPU_AREA
917	bool
918
config CMA
	bool "Contiguous Memory Allocator"
	depends on MMU
	select MIGRATION
	select MEMORY_ISOLATION
	help
	  This enables the Contiguous Memory Allocator which allows other
	  subsystems to allocate big physically-contiguous blocks of memory.
	  CMA reserves a region of memory and allows only movable pages to
	  be allocated from it. This way, the kernel can use the memory for
	  pagecache, and when a subsystem requests a contiguous area, the
	  allocated pages are migrated away to serve the contiguous request.

	  If unsure, say "n".
933
# Developer-facing CMA introspection and debugging options.
config CMA_DEBUG
	bool "CMA debug messages (DEVELOPMENT)"
	depends on DEBUG_KERNEL && CMA
	help
	  Turns on debug messages in CMA.  This produces KERN_DEBUG
	  messages for every CMA call as well as various messages while
	  processing calls such as dma_alloc_from_contiguous().
	  This option does not affect warning and error messages.

config CMA_DEBUGFS
	bool "CMA debugfs interface"
	depends on CMA && DEBUG_FS
	help
	  Turns on the DebugFS interface for CMA.

config CMA_SYSFS
	bool "CMA information through sysfs interface"
	depends on CMA && SYSFS
	help
	  This option exposes some sysfs attributes to get information
	  from CMA.
955
config CMA_AREAS
	int "Maximum count of the CMA areas"
	depends on CMA
	default 19 if NUMA
	default 7
	help
	  CMA allows creating CMA areas for a particular purpose, mainly
	  used as device private areas. This parameter sets the maximum
	  number of CMA areas in the system.

	  If unsure, leave the default value "7" in UMA and "19" in NUMA.
967
config MEM_SOFT_DIRTY
	bool "Track memory changes"
	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
	select PROC_PAGE_MONITOR
	help
	  This option enables memory changes tracking by introducing a
	  soft-dirty bit on pte-s. This bit is set when someone writes
	  into a page just as regular dirty bit, but unlike the latter
	  it can be cleared by hand.

	  See Documentation/admin-guide/mm/soft-dirty.rst for more details.
979
# Invisible arch opt-in (no prompt).
config GENERIC_EARLY_IOREMAP
	bool

config STACK_MAX_DEFAULT_SIZE_MB
	int "Default maximum user stack size for 32-bit processes (MB)"
	default 100
	range 8 2048
	depends on STACK_GROWSUP && (!64BIT || COMPAT)
	help
	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
	  user processes when the stack grows upwards (currently only on parisc
	  arch) when the RLIMIT_STACK hard limit is unlimited.

	  A sane initial value is 100 MB.
994
# Note: "depends on !NEED_PER_CPU_KM" effectively requires SMP && MMU,
# since NEED_PER_CPU_KM defaults to y on !SMP || !MMU (see above).
config DEFERRED_STRUCT_PAGE_INIT
	bool "Defer initialisation of struct pages to kthreads"
	depends on SPARSEMEM
	depends on !NEED_PER_CPU_KM
	depends on 64BIT
	select PADATA
	help
	  Ordinarily all struct pages are initialised during early boot in a
	  single thread. On very large machines this can take a considerable
	  amount of time. If this option is set, large machines will bring up
	  a subset of memmap at boot and then initialise the rest in parallel.
	  This has a potential performance impact on tasks running early in the
	  lifetime of the system until these kthreads finish the
	  initialisation.
1009
# Invisible helper symbol; selected by IDLE_PAGE_TRACKING below.
config PAGE_IDLE_FLAG
	bool
	select PAGE_EXTENSION if !64BIT
	help
	  This adds PG_idle and PG_young flags to 'struct page'.  PTE Accessed
	  bit writers can set the state of the bit in the flags so that PTE
	  Accessed bit readers may avoid disturbance.
1017
config IDLE_PAGE_TRACKING
	bool "Enable idle page tracking"
	depends on SYSFS && MMU
	select PAGE_IDLE_FLAG
	help
	  This feature allows estimating the amount of user pages that have
	  not been touched during a given period of time. This information can
	  be useful to tune memory cgroup limits and/or for job placement
	  within a compute cluster.

	  See Documentation/admin-guide/mm/idle_page_tracking.rst for
	  more details.
1030
# Invisible arch capability flags (no prompts); presumably selected from
# architecture Kconfig files.
config ARCH_HAS_CACHE_LINE_SIZE
	bool

config ARCH_HAS_CURRENT_STACK_POINTER
	bool
	help
	  In support of HARDENED_USERCOPY performing stack variable lifetime
	  checking, an architecture-agnostic way to find the stack pointer
	  is needed. Once an architecture defines an unsigned long global
	  register alias named "current_stack_pointer", this config can be
	  selected.

config ARCH_HAS_PTE_DEVMAP
	bool

config ARCH_HAS_ZONE_DMA_SET
	bool
1048
# The prompts below are only visible when the architecture declares
# ARCH_HAS_ZONE_DMA_SET; otherwise the defaults apply unconditionally.
config ZONE_DMA
	bool "Support DMA zone" if ARCH_HAS_ZONE_DMA_SET
	default y if ARM64 || X86

config ZONE_DMA32
	bool "Support DMA32 zone" if ARCH_HAS_ZONE_DMA_SET
	depends on !X86_32
	default y if ARM64
1057
config ZONE_DEVICE
	bool "Device memory (pmem, HMM, etc...) hotplug support"
	depends on MEMORY_HOTPLUG
	depends on MEMORY_HOTREMOVE
	depends on SPARSEMEM_VMEMMAP
	depends on ARCH_HAS_PTE_DEVMAP
	select XARRAY_MULTI
	help
	  Device memory hotplug support allows for establishing pmem,
	  or other device driver discovered memory regions, in the
	  memmap. This allows pfn_to_page() lookups of otherwise
	  "device-physical" addresses which is needed for using a DAX
	  mapping in an O_DIRECT operation, among other things.

	  If FS_DAX is enabled, then say Y.
1074
1075#
1076# Helpers to mirror range of the CPU page tables of a process into device page
1077# tables.
1078#
1079config HMM_MIRROR
1080	bool
1081	depends on MMU
1082
1083config GET_FREE_REGION
1084	depends on SPARSEMEM
1085	bool
1086
config DEVICE_PRIVATE
	bool "Unaddressable device memory (GPU memory, ...)"
	depends on ZONE_DEVICE
	select GET_FREE_REGION
	help
	  Allows creation of struct pages to represent unaddressable device
	  memory; i.e., memory that is only accessible from the device (or
	  group of devices). You likely also want to select HMM_MIRROR.
1096
config VMAP_PFN
	bool

config ARCH_USES_HIGH_VMA_FLAGS
	bool

config ARCH_HAS_PKEYS
	bool

config ARCH_USES_PG_ARCH_X
	bool
	help
	  Enable the definition of PG_arch_x page flags with x > 1. Only
	  suitable for 64-bit architectures with CONFIG_FLATMEM or
	  CONFIG_SPARSEMEM_VMEMMAP enabled, otherwise there may not be
	  enough room for additional bits in page->flags.
1112
# Statistics toggles; VM_EVENT_COUNTERS defaults to y and is only
# de-selectable on EXPERT configurations.
config VM_EVENT_COUNTERS
	default y
	bool "Enable VM event counters for /proc/vmstat" if EXPERT
	help
	  VM event counters are needed for event counts to be shown.
	  This option allows the disabling of the VM event counters
	  on EXPERT systems.  /proc/vmstat will only show page counts
	  if VM event counters are disabled.

config PERCPU_STATS
	bool "Collect percpu memory statistics"
	help
	  This feature collects and exposes statistics via debugfs. The
	  information includes global and per chunk statistics, which can
	  be used to help understand percpu memory usage.
1128
# Test-only infrastructure; driven from the selftest referenced below.
config GUP_TEST
	bool "Enable infrastructure for get_user_pages()-related unit tests"
	depends on DEBUG_FS
	help
	  Provides /sys/kernel/debug/gup_test, which in turn provides a way
	  to make ioctl calls that can launch kernel-based unit tests for
	  the get_user_pages*() and pin_user_pages*() family of API calls.

	  These tests include benchmark testing of the _fast variants of
	  get_user_pages*() and pin_user_pages*(), as well as smoke tests of
	  the non-_fast variants.

	  There is also a sub-test that allows running dump_page() on any
	  of up to eight pages (selected by command line args) within the
	  range of user-space addresses. These pages are either pinned via
	  pin_user_pages*(), or pinned via get_user_pages*(), as specified
	  by other command line arguments.

	  See tools/testing/selftests/mm/gup_test.c
1148
# Menu hint shown when GUP_TEST is unavailable because DEBUG_FS is off.
comment "GUP_TEST needs to have DEBUG_FS enabled"
	depends on !GUP_TEST && !DEBUG_FS

config GUP_GET_PXX_LOW_HIGH
	bool
1154
# tristate: may be built as a loadable test module.
config DMAPOOL_TEST
	tristate "Enable a module to run time tests on dma_pool"
	depends on HAS_DMA
	help
	  Provides a test module that will allocate and free many blocks of
	  various sizes and report how long it takes. This is intended to
	  provide a consistent way to measure how changes to the
	  dma_pool_alloc/free routines affect performance.
1163
config ARCH_HAS_PTE_SPECIAL
	bool

#
# Some architectures require a special hugepage directory format that is
# required to support multiple hugepage sizes. For example a4fe3ce76
# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
# introduced it on powerpc.  This allows for more flexible hugepage
# pagetable layouts.
#
config ARCH_HAS_HUGEPD
	bool
1176
config MAPPING_DIRTY_HELPERS
	bool

config KMAP_LOCAL
	bool

config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
	bool
1185
# struct io_mapping based helper.  Selected by drivers that need it.
config IO_MAPPING
	bool

# Prompt only visible under EXPERT.
config MEMFD_CREATE
	bool "Enable memfd_create() system call" if EXPERT
1192
# Defaults to y; the prompt is only visible under EXPERT, and the option
# requires ARCH_HAS_SET_DIRECT_MAP support from the architecture.
config SECRETMEM
	default y
	bool "Enable memfd_secret() system call" if EXPERT
	depends on ARCH_HAS_SET_DIRECT_MAP
	help
	  Enable the memfd_secret() system call with the ability to create
	  memory areas visible only in the context of the owning process and
	  not mapped to other processes and other kernel page tables.
1201
config ANON_VMA_NAME
	bool "Anonymous VMA name support"
	depends on PROC_FS && ADVISE_SYSCALLS && MMU
	help
	  Allow naming anonymous virtual memory areas.

	  This feature allows assigning names to virtual memory areas. Assigned
	  names can be later retrieved from /proc/pid/maps and /proc/pid/smaps
	  and help identifying individual anonymous memory areas.
	  Assigning a name to anonymous virtual memory area might prevent that
	  area from being merged with adjacent virtual memory areas due to the
	  difference in their name.
1215
config USERFAULTFD
	bool "Enable userfaultfd() system call"
	depends on MMU
	help
	  Enable the userfaultfd() system call that allows to intercept and
	  handle page faults in userland.

# Invisible arch capability flags for the userfaultfd features below.
config HAVE_ARCH_USERFAULTFD_WP
	bool
	help
	  Arch has userfaultfd write protection support

config HAVE_ARCH_USERFAULTFD_MINOR
	bool
	help
	  Arch has userfaultfd minor fault support
1232
config PTE_MARKER_UFFD_WP
	bool "Userfaultfd write protection support for shmem/hugetlbfs"
	default y
	depends on HAVE_ARCH_USERFAULTFD_WP
	help
	  Allows creating marker PTEs for userfaultfd write protection
	  purposes.  It is required to enable userfaultfd write protection on
	  file-backed memory types like shmem and hugetlbfs.
1242
# multi-gen LRU {
config LRU_GEN
	bool "Multi-Gen LRU"
	depends on MMU
	# make sure folio->flags has enough spare bits
	depends on 64BIT || !SPARSEMEM || SPARSEMEM_VMEMMAP
	help
	  A high performance LRU implementation to overcommit memory. See
	  Documentation/admin-guide/mm/multigen_lru.rst for details.

config LRU_GEN_ENABLED
	bool "Enable by default"
	depends on LRU_GEN
	help
	  This option enables the multi-gen LRU by default.

config LRU_GEN_STATS
	bool "Full stats for debugging"
	depends on LRU_GEN
	help
	  Do not enable this option unless you plan to look at historical stats
	  from evicted generations for debugging purposes.

	  This option has a per-memcg and per-node memory overhead.
# }
1268
config ARCH_SUPPORTS_PER_VMA_LOCK
	def_bool n
1271
# Enabled automatically (def_bool y) when the architecture opts in via
# ARCH_SUPPORTS_PER_VMA_LOCK and SMP/MMU are available.
config PER_VMA_LOCK
	def_bool y
	depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP
	help
	  Allow per-vma locking during page fault handling.

	  This feature allows locking each virtual memory area separately when
	  handling page faults instead of taking mmap_lock.
1280
# Invisible helper symbol; unavailable on STACK_GROWSUP architectures.
config LOCK_MM_AND_FIND_VMA
	bool
	depends on !STACK_GROWSUP

source "mm/damon/Kconfig"
1286
1287endmenu
1288