# SPDX-License-Identifier: GPL-2.0-only

menu "Memory Management options"

#
# For some reason microblaze and nios2 hard code SWAP=n. Hopefully we can
# add proper SWAP support to them, in which case this can be removed.
#
config ARCH_NO_SWAP
	bool

config ZPOOL
	bool

menuconfig SWAP
	bool "Support for paging of anonymous memory (swap)"
	depends on MMU && BLOCK && !ARCH_NO_SWAP
	default y
	help
	  This option allows you to choose whether you want to have support
	  for so called swap devices or swap files in your kernel that are
	  used to provide more virtual memory than the actual RAM present
	  in your computer. If unsure say Y.

config ZSWAP
	bool "Compressed cache for swap pages (EXPERIMENTAL)"
	depends on SWAP
	select FRONTSWAP
	select CRYPTO
	select ZPOOL
	help
	  A lightweight compressed cache for swap pages. It takes
	  pages that are in the process of being swapped out and attempts to
	  compress them into a dynamically allocated RAM-based memory pool.
	  This can result in a significant I/O reduction on the swap device
	  and, in the case where decompressing from RAM is faster than swap
	  device reads, can also improve workload performance.

	  This is marked experimental because it is a new feature (as of
	  v3.11) that interacts heavily with memory reclaim. While these
	  interactions don't cause any known issues on simple memory setups,
	  they have not been fully explored on the large set of potential
	  configurations and workloads that exist.

config ZSWAP_DEFAULT_ON
	bool "Enable the compressed cache for swap pages by default"
	depends on ZSWAP
	help
	  If selected, the compressed cache for swap pages will be enabled
	  at boot, otherwise it will be disabled.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.enabled=' option.

choice
	prompt "Default compressor"
	depends on ZSWAP
	default ZSWAP_COMPRESSOR_DEFAULT_LZO
	help
	  Selects the default compression algorithm for the compressed cache
	  for swap pages.

	  For an overview of what kind of performance can be expected from
	  a particular compression algorithm please refer to the benchmarks
	  available at the following LWN page:
	  https://lwn.net/Articles/751795/

	  If in doubt, select 'LZO'.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.compressor=' option.

config ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
	bool "Deflate"
	select CRYPTO_DEFLATE
	help
	  Use the Deflate algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZO
	bool "LZO"
	select CRYPTO_LZO
	help
	  Use the LZO algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_842
	bool "842"
	select CRYPTO_842
	help
	  Use the 842 algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZ4
	bool "LZ4"
	select CRYPTO_LZ4
	help
	  Use the LZ4 algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
	bool "LZ4HC"
	select CRYPTO_LZ4HC
	help
	  Use the LZ4HC algorithm as the default compression algorithm.

config ZSWAP_COMPRESSOR_DEFAULT_ZSTD
	bool "zstd"
	select CRYPTO_ZSTD
	help
	  Use the zstd algorithm as the default compression algorithm.
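
# An illustrative note on the zswap choices in this file: the boot-time
# defaults selected here can be overridden from the kernel command line using
# the module parameters named in the help texts above, e.g.:
#   zswap.enabled=1 zswap.compressor=zstd zswap.zpool=z3fold
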
endchoice

config ZSWAP_COMPRESSOR_DEFAULT
	string
	depends on ZSWAP
	default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
	default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
	default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
	default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
	default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
	default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
	default ""

choice
	prompt "Default allocator"
	depends on ZSWAP
	default ZSWAP_ZPOOL_DEFAULT_ZBUD
	help
	  Selects the default allocator for the compressed cache for
	  swap pages.
	  The default is 'zbud' for compatibility, however please do
	  read the description of each of the allocators below before
	  making the right choice.

	  The selection made here can be overridden by using the kernel
	  command line 'zswap.zpool=' option.

config ZSWAP_ZPOOL_DEFAULT_ZBUD
	bool "zbud"
	select ZBUD
	help
	  Use the zbud allocator as the default allocator.

config ZSWAP_ZPOOL_DEFAULT_Z3FOLD
	bool "z3fold"
	select Z3FOLD
	help
	  Use the z3fold allocator as the default allocator.

config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
	bool "zsmalloc"
	select ZSMALLOC
	help
	  Use the zsmalloc allocator as the default allocator.
endchoice

config ZSWAP_ZPOOL_DEFAULT
	string
	depends on ZSWAP
	default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
	default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD
	default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
	default ""

config ZBUD
	tristate "2:1 compression allocator (zbud)"
	depends on ZSWAP
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to two compressed pages per physical
	  page. While this design limits storage density, it has simple and
	  deterministic reclaim properties that make it preferable to a higher
	  density approach when reclaim will be used.

config Z3FOLD
	tristate "3:1 compression allocator (z3fold)"
	depends on ZSWAP
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to three compressed pages per physical
	  page. It is a ZBUD derivative so the simplicity and determinism are
	  still there.

config ZSMALLOC
	tristate
	prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
	depends on MMU
	help
	  zsmalloc is a slab-based memory allocator designed to store
	  pages of various compression levels efficiently. It achieves
	  the highest storage density with the least amount of fragmentation.

config ZSMALLOC_STAT
	bool "Export zsmalloc statistics"
	depends on ZSMALLOC
	select DEBUG_FS
	help
	  This option enables code in zsmalloc to collect various
	  statistics about what's happening in zsmalloc and exports that
	  information to userspace via debugfs.
	  If unsure, say N.

menu "SLAB allocator options"

choice
	prompt "Choose SLAB allocator"
	default SLUB
	help
	  This option allows you to select a slab allocator.

config SLAB
	bool "SLAB"
	depends on !PREEMPT_RT
	select HAVE_HARDENED_USERCOPY_ALLOCATOR
	help
	  The regular slab allocator that is established and known to work
	  well in all environments. It organizes cache hot objects in
	  per cpu and per node queues.

config SLUB
	bool "SLUB (Unqueued Allocator)"
	select HAVE_HARDENED_USERCOPY_ALLOCATOR
	help
	  SLUB is a slab allocator that minimizes cache line usage
	  instead of managing queues of cached objects (SLAB approach).
	  Per cpu caching is realized using slabs of objects instead
	  of queues of objects. SLUB can use memory efficiently
	  and has enhanced diagnostics. SLUB is the default choice for
	  a slab allocator.

config SLOB
	depends on EXPERT
	bool "SLOB (Simple Allocator)"
	depends on !PREEMPT_RT
	help
	  SLOB replaces the stock allocator with a drastically simpler
	  allocator. SLOB is generally more space efficient but
	  does not perform as well on large systems.

endchoice

config SLAB_MERGE_DEFAULT
	bool "Allow slab caches to be merged"
	default y
	depends on SLAB || SLUB
	help
	  For reduced kernel memory fragmentation, slab caches can be
	  merged when they share the same size and other characteristics.
	  This carries a risk of kernel heap overflows being able to
	  overwrite objects from merged caches (and more easily control
	  cache layout), which makes such heap attacks easier to exploit
	  by attackers. By keeping caches unmerged, these kinds of exploits
	  can usually only damage objects in the same cache. To disable
	  merging at runtime, "slab_nomerge" can be passed on the kernel
	  command line.

config SLAB_FREELIST_RANDOM
	bool "Randomize slab freelist"
	depends on SLAB || SLUB
	help
	  Randomizes the freelist order used on creating new pages. This
	  security feature reduces the predictability of the kernel slab
	  allocator against heap overflows.

config SLAB_FREELIST_HARDENED
	bool "Harden slab freelist metadata"
	depends on SLAB || SLUB
	help
	  Many kernel heap attacks try to target slab cache metadata and
	  other infrastructure. This option makes minor performance
	  sacrifices to harden the kernel slab allocator against common
	  freelist exploit methods. Some slab implementations have more
	  sanity-checking than others. This option is most effective with
	  CONFIG_SLUB.

config SLUB_STATS
	default n
	bool "Enable SLUB performance statistics"
	depends on SLUB && SYSFS
	help
	  SLUB statistics are useful to debug SLUB's allocation behavior in
	  order to find ways to optimize the allocator. This should never be
	  enabled for production use since keeping statistics slows down
	  the allocator by a few percentage points. The slabinfo command
	  supports the determination of the most active slabs to figure
	  out which slabs are relevant to a particular load.
	  Try running: slabinfo -DA

config SLUB_CPU_PARTIAL
	default y
	depends on SLUB && SMP
	bool "SLUB per cpu partial cache"
	help
	  Per cpu partial caches accelerate object allocation and freeing
	  that is local to a processor at the price of more indeterminism
	  in the latency of the free. On overflow these caches will be cleared
	  which requires the taking of locks that may cause latency spikes.
	  Typically one would choose no for a realtime system.

endmenu # SLAB allocator options

config SHUFFLE_PAGE_ALLOCATOR
	bool "Page allocator randomization"
	default SLAB_FREELIST_RANDOM && ACPI_NUMA
	help
	  Randomization of the page allocator improves the average
	  utilization of a direct-mapped memory-side-cache.
	  See section 5.2.27 Heterogeneous Memory Attribute Table (HMAT)
	  in the ACPI 6.2a specification for an example of how a platform
	  advertises the presence of a memory-side-cache. There are also
	  incidental security benefits as it reduces the predictability of
	  page allocations to complement SLAB_FREELIST_RANDOM, but the
	  default granularity of shuffling on the "MAX_ORDER - 1" i.e.,
	  10th order of pages is selected based on cache utilization
	  benefits on x86.

	  While the randomization improves cache utilization it may
	  negatively impact workloads on platforms without a cache. For
	  this reason, by default, the randomization is enabled only
	  after runtime detection of a direct-mapped memory-side-cache.
	  Otherwise, the randomization may be force enabled with the
	  'page_alloc.shuffle' kernel command line parameter.

	  Say Y if unsure.

config COMPAT_BRK
	bool "Disable heap randomization"
	default y
	help
	  Randomizing heap placement makes heap exploits harder, but it
	  also breaks ancient binaries (including anything libc5 based).
	  This option changes the bootup default to heap randomization
	  disabled, and can be overridden at runtime by setting
	  /proc/sys/kernel/randomize_va_space to 2.

	  On non-ancient distros (post-2000 ones) N is usually a safe choice.

config MMAP_ALLOW_UNINITIALIZED
	bool "Allow mmapped anonymous memory to be uninitialized"
	depends on EXPERT && !MMU
	default n
	help
	  Normally, and according to the Linux spec, anonymous memory obtained
	  from mmap() has its contents cleared before it is passed to
	  userspace. Enabling this config option allows you to request that
	  mmap() skip that if it is given the MAP_UNINITIALIZED flag, thus
	  providing a huge performance boost. If this option is not enabled,
	  then the flag will be ignored.

	  This is taken advantage of by uClibc's malloc(), and also by
	  ELF-FDPIC binfmt's brk and stack allocator.

	  Because of the obvious security issues, this option should only be
	  enabled on embedded devices where you control what is run in
	  userspace. Since that isn't generally a problem on no-MMU systems,
	  it is normally safe to say Y here.

	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.

config SELECT_MEMORY_MODEL
	def_bool y
	depends on ARCH_SELECT_MEMORY_MODEL

choice
	prompt "Memory model"
	depends on SELECT_MEMORY_MODEL
	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
	default FLATMEM_MANUAL
	help
	  This option allows you to change some of the ways that
	  Linux manages its memory internally. Most users will
	  only have one option here selected by the architecture
	  configuration. This is normal.

config FLATMEM_MANUAL
	bool "Flat Memory"
	depends on !ARCH_SPARSEMEM_ENABLE || ARCH_FLATMEM_ENABLE
	help
	  This option is best suited for non-NUMA systems with
	  flat address space. FLATMEM is the most efficient
	  system in terms of performance and resource consumption
	  and it is the best option for smaller systems.

	  For systems that have holes in their physical address
	  spaces and for features like NUMA and memory hotplug,
	  choose "Sparse Memory".

	  If unsure, choose this option (Flat Memory) over any other.

config SPARSEMEM_MANUAL
	bool "Sparse Memory"
	depends on ARCH_SPARSEMEM_ENABLE
	help
	  This will be the only option for some systems, including
	  memory hot-plug systems. This is normal.

	  This option provides efficient support for systems with
	  holes in their physical address space and allows memory
	  hot-plug and hot-remove.

	  If unsure, choose "Flat Memory" over this option.

endchoice

config SPARSEMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL

config FLATMEM
	def_bool y
	depends on !SPARSEMEM || FLATMEM_MANUAL

#
# SPARSEMEM_EXTREME (which is the default) does some bootmem
# allocations when sparse_init() is called. If this cannot
# be done on your architecture, select this option. However,
# statically allocating the mem_section[] array can potentially
# consume vast quantities of .bss, so be careful.
#
# This option will also potentially produce smaller runtime code
# with gcc 3.4 and later.
#
config SPARSEMEM_STATIC
	bool

#
# Architecture platforms which require a two level mem_section in SPARSEMEM
# must select this option. This is usually for architecture platforms with
# an extremely sparse physical address space.
#
config SPARSEMEM_EXTREME
	def_bool y
	depends on SPARSEMEM && !SPARSEMEM_STATIC

config SPARSEMEM_VMEMMAP_ENABLE
	bool

config SPARSEMEM_VMEMMAP
	bool "Sparse Memory virtual memmap"
	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
	default y
	help
	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
	  pfn_to_page and page_to_pfn operations. This is the most
	  efficient option when sufficient kernel resources are available.

config HAVE_MEMBLOCK_PHYS_MAP
	bool

config HAVE_FAST_GUP
	depends on MMU
	bool

# Don't discard allocated memory used to track "memory" and "reserved" memblocks
# after early boot, so it can still be used to test for validity of memory.
# Also, memblocks are updated with memory hot(un)plug.
config ARCH_KEEP_MEMBLOCK
	bool

# Keep arch NUMA mapping infrastructure post-init.
config NUMA_KEEP_MEMINFO
	bool

config MEMORY_ISOLATION
	bool

# IORESOURCE_SYSTEM_RAM regions in the kernel resource tree that are marked
# IORESOURCE_EXCLUSIVE cannot be mapped to user space, for example, via
# /dev/mem.
config EXCLUSIVE_SYSTEM_RAM
	def_bool y
	depends on !DEVMEM || STRICT_DEVMEM

#
# Only set this on architectures that have completely implemented the memory
# hotplug feature. If you are not sure, don't touch it.
#
config HAVE_BOOTMEM_INFO_NODE
	def_bool n

config ARCH_ENABLE_MEMORY_HOTPLUG
	bool

config ARCH_ENABLE_MEMORY_HOTREMOVE
	bool

# eventually, we can have this option just 'select SPARSEMEM'
menuconfig MEMORY_HOTPLUG
	bool "Memory hotplug"
	select MEMORY_ISOLATION
	depends on SPARSEMEM
	depends on ARCH_ENABLE_MEMORY_HOTPLUG
	depends on 64BIT
	select NUMA_KEEP_MEMINFO if NUMA

if MEMORY_HOTPLUG

config MEMORY_HOTPLUG_DEFAULT_ONLINE
	bool "Online the newly added memory blocks by default"
	depends on MEMORY_HOTPLUG
	help
	  This option sets the default policy for memory hotplug onlining
	  (/sys/devices/system/memory/auto_online_blocks), which determines
	  what happens to newly added memory regions. The policy setting
	  can always be changed at runtime.
	  See Documentation/admin-guide/mm/memory-hotplug.rst for more information.

	  Say Y here if you want all hot-plugged memory blocks to appear in
	  'online' state by default.
	  Say N here if you want the default policy to keep all hot-plugged
	  memory blocks in 'offline' state.

config MEMORY_HOTREMOVE
	bool "Allow for memory hot remove"
	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
	depends on MIGRATION

config MHP_MEMMAP_ON_MEMORY
	def_bool y
	depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP
	depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE

endif # MEMORY_HOTPLUG

# Heavily threaded applications may benefit from splitting the mm-wide
# page_table_lock, so that faults on different parts of the user address
# space can be handled with less contention: split it at this NR_CPUS.
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
# SPARC32 allocates multiple pte tables within a single page, and therefore
# a per-page lock leads to problems when multiple tables need to be locked
# at the same time (e.g. copy_page_range()).
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
	int
	default "999999" if !MMU
	default "999999" if ARM && !CPU_CACHE_VIPT
	default "999999" if PARISC && !PA20
	default "999999" if SPARC32
	default "4"

config ARCH_ENABLE_SPLIT_PMD_PTLOCK
	bool

#
# support for memory balloon
config MEMORY_BALLOON
	bool

#
# support for memory balloon compaction
config BALLOON_COMPACTION
	bool "Allow for balloon memory compaction/migration"
	def_bool y
	depends on COMPACTION && MEMORY_BALLOON
	help
	  Memory fragmentation introduced by ballooning might significantly
	  reduce the number of 2MB contiguous memory blocks that can be
	  used within a guest, thus imposing performance penalties associated
	  with the reduced number of transparent huge pages that could be used
	  by the guest workload. Allowing compaction & migration of memory
	  pages enlisted as being part of memory balloon devices avoids the
	  aforementioned scenario and helps improve memory defragmentation.

#
# support for memory compaction
config COMPACTION
	bool "Allow for memory compaction"
	def_bool y
	select MIGRATION
	depends on MMU
	help
	  Compaction is the only memory management component to form
	  high order (larger physically contiguous) memory blocks
	  reliably. The page allocator relies on compaction heavily and
	  the lack of the feature can lead to unexpected OOM killer
	  invocations for high order memory requests. You shouldn't
	  disable this option unless there really is a strong reason for
	  it and then we would be really interested to hear about that at
	  linux-mm@kvack.org.

#
# support for free page reporting
config PAGE_REPORTING
	bool "Free page reporting"
	def_bool n
	help
	  Free page reporting allows for the incremental acquisition of
	  free pages from the buddy allocator for the purpose of reporting
	  those pages to another entity, such as a hypervisor, so that the
	  memory can be freed within the host for other uses.
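
#
# Illustrative note for COMPACTION above: besides the kernel invoking it
# internally, compaction of all zones can be requested manually at runtime
# (assuming procfs is mounted in the usual place), e.g.:
#   echo 1 > /proc/sys/vm/compact_memory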
592 593# 594# support for page migration 595# 596config MIGRATION 597 bool "Page migration" 598 def_bool y 599 depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU 600 help 601 Allows the migration of the physical location of pages of processes 602 while the virtual addresses are not changed. This is useful in 603 two situations. The first is on NUMA systems to put pages nearer 604 to the processors accessing. The second is when allocating huge 605 pages as migration can relocate pages to satisfy a huge page 606 allocation instead of reclaiming. 607 608config DEVICE_MIGRATION 609 def_bool MIGRATION && ZONE_DEVICE 610 611config ARCH_ENABLE_HUGEPAGE_MIGRATION 612 bool 613 614config ARCH_ENABLE_THP_MIGRATION 615 bool 616 617config HUGETLB_PAGE_SIZE_VARIABLE 618 def_bool n 619 help 620 Allows the pageblock_order value to be dynamic instead of just standard 621 HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available 622 on a platform. 623 624 Note that the pageblock_order cannot exceed MAX_ORDER - 1 and will be 625 clamped down to MAX_ORDER - 1. 626 627config CONTIG_ALLOC 628 def_bool (MEMORY_ISOLATION && COMPACTION) || CMA 629 630config PHYS_ADDR_T_64BIT 631 def_bool 64BIT 632 633config BOUNCE 634 bool "Enable bounce buffers" 635 default y 636 depends on BLOCK && MMU && HIGHMEM 637 help 638 Enable bounce buffers for devices that cannot access the full range of 639 memory available to the CPU. Enabled by default when HIGHMEM is 640 selected, but you may say n to override this. 641 642config VIRT_TO_BUS 643 bool 644 help 645 An architecture should select this if it implements the 646 deprecated interface virt_to_bus(). All new architectures 647 should probably not select this. 648 649 650config MMU_NOTIFIER 651 bool 652 select SRCU 653 select INTERVAL_TREE 654 655config KSM 656 bool "Enable KSM for page merging" 657 depends on MMU 658 select XXHASH 659 help 660 Enable Kernel Samepage Merging: KSM periodically scans those areas 661 of an application's address space that an app has advised may be 662 mergeable. When it finds pages of identical content, it replaces 663 the many instances by a single page with that content, so 664 saving memory until one or another app needs to modify the content. 665 Recommended for use with KVM, or with other duplicative applications. 666 See Documentation/vm/ksm.rst for more information: KSM is inactive 667 until a program has madvised that an area is MADV_MERGEABLE, and 668 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). 669 670config DEFAULT_MMAP_MIN_ADDR 671 int "Low address space to protect from user allocation" 672 depends on MMU 673 default 4096 674 help 675 This is the portion of low virtual memory which should be protected 676 from userspace allocation. Keeping a user from writing to low pages 677 can help reduce the impact of kernel NULL pointer bugs. 678 679 For most ia64, ppc64 and x86 users with lots of address space 680 a value of 65536 is reasonable and should cause no problems. 681 On arm and other archs it should not be higher than 32768. 682 Programs which use vm86 functionality or have some need to map 683 this low address space will need CAP_SYS_RAWIO or disable this 684 protection by setting the value to 0. 685 686 This value can be changed after boot using the 687 /proc/sys/vm/mmap_min_addr tunable. 

config ARCH_SUPPORTS_MEMORY_FAILURE
	bool

config MEMORY_FAILURE
	depends on MMU
	depends on ARCH_SUPPORTS_MEMORY_FAILURE
	bool "Enable recovery from hardware memory errors"
	select MEMORY_ISOLATION
	select RAS
	help
	  Enables code to recover from some memory failures on systems
	  with MCA recovery. This allows a system to continue running
	  even when some of its memory has uncorrected errors. This requires
	  special hardware support and typically ECC memory.

config HWPOISON_INJECT
	tristate "HWPoison pages injector"
	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
	select PROC_PAGE_MONITOR

config NOMMU_INITIAL_TRIM_EXCESS
	int "Turn on mmap() excess space trimming before booting"
	depends on !MMU
	default 1
	help
	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
	  of memory on which to store mappings, but it can only ask the system
	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
	  more than it requires. To deal with this, mmap() is able to trim off
	  the excess and return it to the allocator.

	  If trimming is enabled, the excess is trimmed off and returned to the
	  system allocator, which can cause extra fragmentation, particularly
	  if there are a lot of transient processes.

	  If trimming is disabled, the excess is kept, but not used, which for
	  long-term mappings means that the space is wasted.

	  Trimming can be dynamically controlled through a sysctl option
	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
	  excess pages there must be before trimming should occur, or zero if
	  no trimming is to occur.

	  This option specifies the initial value of that sysctl. The default
	  of 1 says that all excess pages should be trimmed.

	  See Documentation/admin-guide/mm/nommu-mmap.rst for more information.

config ARCH_WANT_GENERAL_HUGETLB
	bool

config ARCH_WANTS_THP_SWAP
	def_bool n

menuconfig TRANSPARENT_HUGEPAGE
	bool "Transparent Hugepage Support"
	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT
	select COMPACTION
	select XARRAY_MULTI
	help
	  Transparent Hugepage Support allows the kernel to use huge pages
	  and huge TLB entries transparently for applications whenever
	  possible. This feature can improve computing performance for
	  certain applications by speeding up page faults during memory
	  allocation, by reducing the number of TLB misses and by speeding
	  up the pagetable walking.

	  If you are memory constrained on an embedded system, you may want
	  to say N.

if TRANSPARENT_HUGEPAGE

choice
	prompt "Transparent Hugepage Support sysfs defaults"
	depends on TRANSPARENT_HUGEPAGE
	default TRANSPARENT_HUGEPAGE_ALWAYS
	help
	  Selects the sysfs defaults for Transparent Hugepage Support.

	config TRANSPARENT_HUGEPAGE_ALWAYS
		bool "always"
		help
		  Enabling Transparent Hugepage always can increase the
		  memory footprint of applications without a guaranteed
		  benefit but it will work automatically for all applications.

	config TRANSPARENT_HUGEPAGE_MADVISE
		bool "madvise"
		help
		  Enabling Transparent Hugepage madvise will only provide a
		  performance improvement benefit to the applications using
		  madvise(MADV_HUGEPAGE) but it won't risk increasing the
		  memory footprint of applications without a guaranteed
		  benefit.
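
	# Illustrative note: whichever sysfs default is selected here, the
	# policy can still be changed at runtime (assuming sysfs is mounted
	# in the usual place), e.g.:
	#   echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
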
endchoice

config THP_SWAP
	def_bool y
	depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP
	help
	  Swap transparent huge pages in one piece, without splitting.
	  XXX: For now, the swap cluster backing a transparent huge page
	  will be split after swapout.

	  For selection by architectures with reasonable THP sizes.

config READ_ONLY_THP_FOR_FS
	bool "Read-only THP for filesystems (EXPERIMENTAL)"
	depends on TRANSPARENT_HUGEPAGE && SHMEM
	help
	  Allow khugepaged to put read-only file-backed pages in THP.

	  This is marked experimental because it is a new feature. Write
	  support of file THPs will be developed in the next few release
	  cycles.

endif # TRANSPARENT_HUGEPAGE

#
# UP and nommu archs use km based percpu allocator
#
config NEED_PER_CPU_KM
	depends on !SMP || !MMU
	bool
	default y

config NEED_PER_CPU_EMBED_FIRST_CHUNK
	bool

config NEED_PER_CPU_PAGE_FIRST_CHUNK
	bool

config USE_PERCPU_NUMA_NODE_ID
	bool

config HAVE_SETUP_PER_CPU_AREA
	bool

config FRONTSWAP
	bool

config CMA
	bool "Contiguous Memory Allocator"
	depends on MMU
	select MIGRATION
	select MEMORY_ISOLATION
	help
	  This enables the Contiguous Memory Allocator which allows other
	  subsystems to allocate big physically-contiguous blocks of memory.
	  CMA reserves a region of memory and allows only movable pages to
	  be allocated from it. This way, the kernel can use the memory for
	  pagecache and when a subsystem requests a contiguous area, the
	  allocated pages are migrated away to serve the contiguous request.

	  If unsure, say "n".

config CMA_DEBUG
	bool "CMA debug messages (DEVELOPMENT)"
	depends on DEBUG_KERNEL && CMA
	help
	  Turns on debug messages in CMA. This produces KERN_DEBUG
	  messages for every CMA call as well as various messages while
	  processing calls such as dma_alloc_from_contiguous().
	  This option does not affect warning and error messages.

config CMA_DEBUGFS
	bool "CMA debugfs interface"
	depends on CMA && DEBUG_FS
	help
	  Turns on the DebugFS interface for CMA.

config CMA_SYSFS
	bool "CMA information through sysfs interface"
	depends on CMA && SYSFS
	help
	  This option exposes some sysfs attributes to get information
	  from CMA.

config CMA_AREAS
	int "Maximum count of the CMA areas"
	depends on CMA
	default 19 if NUMA
	default 7
	help
	  CMA allows creating CMA areas for a particular purpose, mainly
	  for use as device-private areas. This parameter sets the maximum
	  number of CMA areas in the system.

	  If unsure, leave the default value "7" in UMA and "19" in NUMA.

config MEM_SOFT_DIRTY
	bool "Track memory changes"
	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
	select PROC_PAGE_MONITOR
	help
	  This option enables memory changes tracking by introducing a
	  soft-dirty bit on PTEs. This bit is set when someone writes
	  into a page, just like the regular dirty bit, but unlike the
	  latter it can be cleared by hand.

	  See Documentation/admin-guide/mm/soft-dirty.rst for more details.
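
#
# Illustrative usage of the soft-dirty tracking enabled above, using the
# procfs files described in Documentation/admin-guide/mm/soft-dirty.rst:
#   echo 4 > /proc/$PID/clear_refs    # clear the soft-dirty bits of a task
# then let the task run and read /proc/$PID/pagemap, checking bit 55 (the
# soft-dirty bit) to see which pages have since been written to.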

config GENERIC_EARLY_IOREMAP
	bool

config STACK_MAX_DEFAULT_SIZE_MB
	int "Default maximum user stack size for 32-bit processes (MB)"
	default 100
	range 8 2048
	depends on STACK_GROWSUP && (!64BIT || COMPAT)
	help
	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
	  user processes when the stack grows upwards (currently only on parisc
	  arch) when the RLIMIT_STACK hard limit is unlimited.

	  A sane initial value is 100 MB.

config DEFERRED_STRUCT_PAGE_INIT
	bool "Defer initialisation of struct pages to kthreads"
	depends on SPARSEMEM
	depends on !NEED_PER_CPU_KM
	depends on 64BIT
	select PADATA
	help
	  Ordinarily all struct pages are initialised during early boot in a
	  single thread. On very large machines this can take a considerable
	  amount of time. If this option is set, large machines will bring up
	  a subset of memmap at boot and then initialise the rest in parallel.
	  This has a potential performance impact on tasks running early in the
	  lifetime of the system until these kthreads finish the
	  initialisation.

config PAGE_IDLE_FLAG
	bool
	select PAGE_EXTENSION if !64BIT
	help
	  This adds PG_idle and PG_young flags to 'struct page'. PTE Accessed
	  bit writers can set the state of the bit in the flags so that PTE
	  Accessed bit readers may avoid disturbance.

config IDLE_PAGE_TRACKING
	bool "Enable idle page tracking"
	depends on SYSFS && MMU
	select PAGE_IDLE_FLAG
	help
	  This feature allows estimating the number of user pages that have
	  not been touched during a given period of time. This information can
	  be useful to tune memory cgroup limits and/or for job placement
	  within a compute cluster.

	  See Documentation/admin-guide/mm/idle_page_tracking.rst for
	  more details.

config ARCH_HAS_CACHE_LINE_SIZE
	bool

config ARCH_HAS_CURRENT_STACK_POINTER
	bool
	help
	  In support of HARDENED_USERCOPY performing stack variable lifetime
	  checking, an architecture-agnostic way to find the stack pointer
	  is needed. Once an architecture defines an unsigned long global
	  register alias named "current_stack_pointer", this config can be
	  selected.

config ARCH_HAS_VM_GET_PAGE_PROT
	bool

config ARCH_HAS_PTE_DEVMAP
	bool

config ARCH_HAS_ZONE_DMA_SET
	bool

config ZONE_DMA
	bool "Support DMA zone" if ARCH_HAS_ZONE_DMA_SET
	default y if ARM64 || X86

config ZONE_DMA32
	bool "Support DMA32 zone" if ARCH_HAS_ZONE_DMA_SET
	depends on !X86_32
	default y if ARM64

config ZONE_DEVICE
	bool "Device memory (pmem, HMM, etc...) hotplug support"
	depends on MEMORY_HOTPLUG
	depends on MEMORY_HOTREMOVE
	depends on SPARSEMEM_VMEMMAP
	depends on ARCH_HAS_PTE_DEVMAP
	select XARRAY_MULTI
	help
	  Device memory hotplug support allows for establishing pmem,
	  or other device driver discovered memory regions, in the
	  memmap. This allows pfn_to_page() lookups of otherwise
	  "device-physical" addresses, which is needed for using a DAX
	  mapping in an O_DIRECT operation, among other things.

	  If FS_DAX is enabled, then say Y.

#
# Helpers to mirror a range of the CPU page tables of a process into device
# page tables.
#
config HMM_MIRROR
	bool
	depends on MMU

config DEVICE_PRIVATE
	bool "Unaddressable device memory (GPU memory, ...)"
	depends on ZONE_DEVICE
	help
	  Allows creation of struct pages to represent unaddressable device
	  memory; i.e., memory that is only accessible from the device (or
	  group of devices). You likely also want to select HMM_MIRROR.

config VMAP_PFN
	bool

config ARCH_USES_HIGH_VMA_FLAGS
	bool

config ARCH_HAS_PKEYS
	bool

config VM_EVENT_COUNTERS
	default y
	bool "Enable VM event counters for /proc/vmstat" if EXPERT
	help
	  VM event counters are needed for event counts to be shown.
	  This option allows the disabling of the VM event counters
	  on EXPERT systems. /proc/vmstat will only show page counts
	  if VM event counters are disabled.

config PERCPU_STATS
	bool "Collect percpu memory statistics"
	help
	  This feature collects and exposes statistics via debugfs. The
	  information includes global and per chunk statistics, which can
	  be used to help understand percpu memory usage.

config GUP_TEST
	bool "Enable infrastructure for get_user_pages()-related unit tests"
	depends on DEBUG_FS
	help
	  Provides /sys/kernel/debug/gup_test, which in turn provides a way
	  to make ioctl calls that can launch kernel-based unit tests for
	  the get_user_pages*() and pin_user_pages*() family of API calls.

	  These tests include benchmark testing of the _fast variants of
	  get_user_pages*() and pin_user_pages*(), as well as smoke tests of
	  the non-_fast variants.

	  There is also a sub-test that allows running dump_page() on any
	  of up to eight pages (selected by command line args) within the
	  range of user-space addresses. These pages are either pinned via
	  pin_user_pages*(), or pinned via get_user_pages*(), as specified
	  by other command line arguments.

	  See tools/testing/selftests/vm/gup_test.c

comment "GUP_TEST needs to have DEBUG_FS enabled"
	depends on !GUP_TEST && !DEBUG_FS

config GUP_GET_PTE_LOW_HIGH
	bool

config ARCH_HAS_PTE_SPECIAL
	bool

#
# Some architectures require a special hugepage directory format that is
# required to support multiple hugepage sizes. For example a4fe3ce76
# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
# introduced it on powerpc. This allows for more flexible hugepage
# pagetable layouts.
#
config ARCH_HAS_HUGEPD
	bool

config MAPPING_DIRTY_HELPERS
	bool

config KMAP_LOCAL
	bool

config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
	bool

# struct io_mapping based helper. Selected by drivers that need them
config IO_MAPPING
	bool

config SECRETMEM
	def_bool ARCH_HAS_SET_DIRECT_MAP && !EMBEDDED

config ANON_VMA_NAME
	bool "Anonymous VMA name support"
	depends on PROC_FS && ADVISE_SYSCALLS && MMU
	help
	  Allow naming anonymous virtual memory areas.

	  This feature allows assigning names to virtual memory areas. Assigned
	  names can later be retrieved from /proc/pid/maps and /proc/pid/smaps
	  and help identify individual anonymous memory areas.
	  Assigning a name to an anonymous virtual memory area might prevent
	  that area from being merged with adjacent virtual memory areas due to
	  the difference in their names.
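
#
# Illustrative note for ANON_VMA_NAME above (the prctl constants are assumed
# from the feature's userspace interface and are not spelled out in this
# file): a process can name one of its anonymous mappings roughly like
#   prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, length, "my buffer");
# after which the name appears in /proc/pid/maps and /proc/pid/smaps.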

config USERFAULTFD
	bool "Enable userfaultfd() system call"
	depends on MMU
	help
	  Enable the userfaultfd() system call that allows userspace to
	  intercept and handle page faults in userland.

config HAVE_ARCH_USERFAULTFD_WP
	bool
	help
	  Arch has userfaultfd write protection support.

config HAVE_ARCH_USERFAULTFD_MINOR
	bool
	help
	  Arch has userfaultfd minor fault support.

config PTE_MARKER
	bool
	help
	  Allows creating marker PTEs for file-backed memory.

config PTE_MARKER_UFFD_WP
	bool "Userfaultfd write protection support for shmem/hugetlbfs"
	default y
	depends on HAVE_ARCH_USERFAULTFD_WP
	select PTE_MARKER
	help
	  Allows creating marker PTEs for userfaultfd write protection
	  purposes. It is required to enable userfaultfd write protection on
	  file-backed memory types like shmem and hugetlbfs.

source "mm/damon/Kconfig"

endmenu