config SELECT_MEMORY_MODEL
	def_bool y
	depends on ARCH_SELECT_MEMORY_MODEL

choice
	prompt "Memory model"
	depends on SELECT_MEMORY_MODEL
	default DISCONTIGMEM_MANUAL if ARCH_DISCONTIGMEM_DEFAULT
	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
	default FLATMEM_MANUAL

config FLATMEM_MANUAL
	bool "Flat Memory"
	depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
	help
	  This option allows you to change some of the ways that
	  Linux manages its memory internally.  Most users will
	  only have one option here: FLATMEM.  This is normal
	  and is the correct option.

	  Some users of more advanced features like NUMA and
	  memory hotplug may have different options here.
	  DISCONTIGMEM is a more mature, better tested system,
	  but is incompatible with memory hotplug and may suffer
	  decreased performance compared to SPARSEMEM.  If unsure
	  between "Sparse Memory" and "Discontiguous Memory",
	  choose "Discontiguous Memory".

	  If unsure, choose this option (Flat Memory) over any other.

config DISCONTIGMEM_MANUAL
	bool "Discontiguous Memory"
	depends on ARCH_DISCONTIGMEM_ENABLE
	help
	  This option provides enhanced support for discontiguous
	  memory systems, over FLATMEM.  These systems have holes
	  in their physical address spaces, and this option provides
	  more efficient handling of these holes.  However, the vast
	  majority of hardware has quite flat address spaces, and
	  can have degraded performance from the extra overhead that
	  this option imposes.

	  Many NUMA configurations will have this as the only option.

	  If unsure, choose "Flat Memory" over this option.

config SPARSEMEM_MANUAL
	bool "Sparse Memory"
	depends on ARCH_SPARSEMEM_ENABLE
	help
	  This will be the only option for some systems, including
	  memory hotplug systems.  This is normal.

	  For many other systems, this will be an alternative to
	  "Discontiguous Memory".  This option provides some potential
	  performance benefits, along with decreased code complexity,
	  but it is newer and more experimental.

	  If unsure, choose "Discontiguous Memory" or "Flat Memory"
	  over this option.

endchoice

config DISCONTIGMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL

config SPARSEMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL

config FLATMEM
	def_bool y
	depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL

config FLAT_NODE_MEM_MAP
	def_bool y
	depends on !SPARSEMEM

#
# Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
# to represent different areas of memory.  This variable allows
# those dependencies to exist individually.
#
config NEED_MULTIPLE_NODES
	def_bool y
	depends on DISCONTIGMEM || NUMA

config HAVE_MEMORY_PRESENT
	def_bool y
	depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM

#
# SPARSEMEM_EXTREME (which is the default) does some bootmem
# allocations when memory_present() is called.  If this cannot
# be done on your architecture, select this option.  However,
# statically allocating the mem_section[] array can potentially
# consume vast quantities of .bss, so be careful.
#
# This option will also potentially produce smaller runtime code
# with gcc 3.4 and later.
#
config SPARSEMEM_STATIC
	bool
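
# A minimal sketch of the arch-side opt-in described above (FOO_ARCH is a
# hypothetical example symbol; real architectures do this from their own
# Kconfig files):
#
#	config FOO_ARCH
#		def_bool y
#		select SPARSEMEM_STATIC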

#
# Architecture platforms which require a two level mem_section in SPARSEMEM
# must select this option. This is usually for architecture platforms with
# an extremely sparse physical address space.
#
config SPARSEMEM_EXTREME
	def_bool y
	depends on SPARSEMEM && !SPARSEMEM_STATIC

config SPARSEMEM_VMEMMAP_ENABLE
	bool

config SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	def_bool y
	depends on SPARSEMEM && X86_64

config SPARSEMEM_VMEMMAP
	bool "Sparse Memory virtual memmap"
	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
	default y
	help
	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
	  pfn_to_page and page_to_pfn operations.  This is the most
	  efficient option when sufficient kernel resources are available.

config HAVE_MEMBLOCK
	boolean

config HAVE_MEMBLOCK_NODE_MAP
	boolean

config ARCH_DISCARD_MEMBLOCK
	boolean

config NO_BOOTMEM
	boolean

config MEMORY_ISOLATION
	boolean

config MOVABLE_NODE
	boolean "Enable assigning a node which has only movable memory"
	depends on HAVE_MEMBLOCK
	depends on NO_BOOTMEM
	depends on X86_64
	depends on NUMA
	default n
	help
	  Allow a node to have only movable memory.  Pages used by the kernel,
	  such as direct mapping pages, cannot be migrated, so the corresponding
	  memory device cannot be hotplugged.  This option allows the following
	  two things:
	  - When the system is booting, a node full of hotpluggable memory can
	    be arranged to have only movable memory so that the whole node can
	    be hot-removed (this requires the movable_node boot option).
	  - After the system is up, users can online all the memory of a node
	    as movable memory so that the whole node can be hot-removed.

	  Users who do not use the memory hotplug feature are unaffected by
	  enabling this option, as long as they do not pass the movable_node
	  boot option and do not online memory as movable.

	  Say Y here if you want to hotplug a whole node.
	  Say N here if you want the kernel to use memory on all nodes evenly.


#
# Only set on architectures that have completely implemented the memory
# hotplug feature. If you are not sure, don't touch it.
#
config HAVE_BOOTMEM_INFO_NODE
	def_bool n

# eventually, we can have this option just 'select SPARSEMEM'
config MEMORY_HOTPLUG
	bool "Allow for memory hot-add"
	depends on SPARSEMEM || X86_64_ACPI_NUMA
	depends on ARCH_ENABLE_MEMORY_HOTPLUG
	depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390)
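
# A minimal sketch of how an architecture advertises hotplug support from its
# own Kconfig (illustrative only; the exact conditions are arch-specific, and
# FOO_ARCH_HAS_HOTPLUG is a hypothetical dependency):
#
#	config ARCH_ENABLE_MEMORY_HOTPLUG
#		def_bool y
#		depends on FOO_ARCH_HAS_HOTPLUG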

config MEMORY_HOTPLUG_SPARSE
	def_bool y
	depends on SPARSEMEM && MEMORY_HOTPLUG

config MEMORY_HOTREMOVE
	bool "Allow for memory hot remove"
	select MEMORY_ISOLATION
	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
	depends on MIGRATION

#
# If we have space for more page flags then we can enable additional
# optimizations and functionality.
#
# Regular Sparsemem takes page flag bits for the sectionid if it does not
# use a virtual memmap. Disable extended page flags for 32 bit platforms
# that require the use of a sectionid in the page flags.
#
config PAGEFLAGS_EXTENDED
	def_bool y
	depends on 64BIT || SPARSEMEM_VMEMMAP || !SPARSEMEM

# Heavily threaded applications may benefit from splitting the mm-wide
# page_table_lock, so that faults on different parts of the user address
# space can be handled with less contention: split it at this NR_CPUS.
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
	int
	default "999999" if ARM && !CPU_CACHE_VIPT
	default "999999" if PARISC && !PA20
	default "4"
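
# Illustrative example of how the defaults above resolve: a kernel built with
# NR_CPUS=2 keeps the single mm-wide page_table_lock, while one built with
# NR_CPUS=8 splits it, since the split kicks in once NR_CPUS reaches this
# value (4 by default); the "999999" defaults effectively disable the split
# on the platforms listed.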

config ARCH_ENABLE_SPLIT_PMD_PTLOCK
	boolean

#
# support for memory balloon compaction
config BALLOON_COMPACTION
	bool "Allow for balloon memory compaction/migration"
	def_bool y
	depends on COMPACTION && VIRTIO_BALLOON
	help
	  Memory fragmentation introduced by ballooning might significantly
	  reduce the number of 2MB contiguous memory blocks that can be used
	  within a guest, thus imposing performance penalties associated with
	  the reduced number of transparent huge pages that could be used by
	  the guest workload. Allowing compaction and migration of memory
	  pages enlisted as part of memory balloon devices avoids this
	  scenario and helps improve memory defragmentation.

#
# support for memory compaction
config COMPACTION
	bool "Allow for memory compaction"
	def_bool y
	select MIGRATION
	depends on MMU
	help
	  Allows the compaction of memory for the allocation of huge pages.

#
# support for page migration
#
config MIGRATION
	bool "Page migration"
	def_bool y
	depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
	help
	  Allows the migration of the physical location of pages of processes
	  while the virtual addresses are not changed. This is useful in
	  two situations. The first is on NUMA systems to put pages nearer
	  to the processors accessing them. The second is when allocating huge
	  pages as migration can relocate pages to satisfy a huge page
	  allocation instead of reclaiming.

config PHYS_ADDR_T_64BIT
	def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT

config ZONE_DMA_FLAG
	int
	default "0" if !ZONE_DMA
	default "1"

config BOUNCE
	bool "Enable bounce buffers"
	default y
	depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
	help
	  Enable bounce buffers for devices that cannot access
	  the full range of memory available to the CPU. Enabled
	  by default when ZONE_DMA or HIGHMEM is selected, but you
	  may say n to override this.

# On the 'tile' arch, USB OHCI needs the bounce pool since tilegx will often
# have more than 4GB of memory, but we don't currently use the IOTLB to present
# a 32-bit address to OHCI.  So we need to use a bounce pool instead.
#
# We also use the bounce pool to provide stable page writes for jbd.  jbd
# initiates buffer writeback without locking the page or setting PG_writeback,
# and fixing that behavior (a second time; jbd2 doesn't have this problem) is
# a major rework effort.  Instead, use the bounce buffer to snapshot pages
# (until jbd goes away).  The only jbd user is ext3.
config NEED_BOUNCE_POOL
	bool
	default y if (TILE && USB_OHCI_HCD) || (BLK_DEV_INTEGRITY && JBD)

config NR_QUICK
	int
	depends on QUICKLIST
	default "2" if AVR32
	default "1"

config VIRT_TO_BUS
	bool
	help
	  An architecture should select this if it implements the
	  deprecated interface virt_to_bus().  All new architectures
	  should probably not select this.

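# A minimal sketch of the arch-side select described in the help text above
# (FOO_ARCH is a hypothetical example symbol):
#
#	config FOO_ARCH
#		def_bool y
#		select VIRT_TO_BUS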

config MMU_NOTIFIER
	bool

config KSM
	bool "Enable KSM for page merging"
	depends on MMU
	help
	  Enable Kernel Samepage Merging: KSM periodically scans those areas
	  of an application's address space that an app has advised may be
	  mergeable.  When it finds pages of identical content, it replaces
	  the many instances with a single page containing that content,
	  saving memory until one or another app needs to modify the content.
	  Recommended for use with KVM, or with other duplicative applications.
	  See Documentation/vm/ksm.txt for more information: KSM is inactive
	  until a program has madvised that an area is MADV_MERGEABLE, and
	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).

config DEFAULT_MMAP_MIN_ADDR
	int "Low address space to protect from user allocation"
	depends on MMU
	default 4096
	help
	  This is the portion of low virtual memory which should be protected
	  from userspace allocation.  Keeping a user from writing to low pages
	  can help reduce the impact of kernel NULL pointer bugs.

	  For most ia64, ppc64 and x86 users with lots of address space,
	  a value of 65536 is reasonable and should cause no problems.
	  On arm and other archs it should not be higher than 32768.
	  Programs which use vm86 functionality or have some need to map
	  this low address space will need CAP_SYS_RAWIO or disable this
	  protection by setting the value to 0.

	  This value can be changed after boot using the
	  /proc/sys/vm/mmap_min_addr tunable.

config ARCH_SUPPORTS_MEMORY_FAILURE
	bool

config MEMORY_FAILURE
	depends on MMU
	depends on ARCH_SUPPORTS_MEMORY_FAILURE
	bool "Enable recovery from hardware memory errors"
	select MEMORY_ISOLATION
	help
	  Enables code to recover from some memory failures on systems
	  with MCA recovery. This allows a system to continue running
	  even when some of its memory has uncorrected errors. This requires
	  special hardware support and typically ECC memory.

config HWPOISON_INJECT
	tristate "HWPoison pages injector"
	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
	select PROC_PAGE_MONITOR

config NOMMU_INITIAL_TRIM_EXCESS
	int "Turn on mmap() excess space trimming before booting"
	depends on !MMU
	default 1
	help
	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
	  of memory on which to store mappings, but it can only ask the system
	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
	  more than it requires.  To deal with this, mmap() is able to trim off
	  the excess and return it to the allocator.

	  If trimming is enabled, the excess is trimmed off and returned to the
	  system allocator, which can cause extra fragmentation, particularly
	  if there are a lot of transient processes.

	  If trimming is disabled, the excess is kept, but not used, which for
	  long-term mappings means that the space is wasted.

	  Trimming can be dynamically controlled through a sysctl option
	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
	  excess pages there must be before trimming should occur, or zero if
	  no trimming is to occur.

	  This option specifies the initial value of this tunable.  The default
	  of 1 says that all excess pages should be trimmed.

	  See Documentation/nommu-mmap.txt for more information.

config TRANSPARENT_HUGEPAGE
	bool "Transparent Hugepage Support"
	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
	select COMPACTION
	help
	  Transparent Hugepage Support allows the kernel to use huge pages
	  and huge TLB entries transparently for applications whenever
	  possible.  This feature can improve computing performance for
	  certain applications by speeding up page faults during memory
	  allocation, by reducing the number of TLB misses and by speeding
	  up page table walking.

	  If you are memory constrained on an embedded system, you may want
	  to say N.

choice
	prompt "Transparent Hugepage Support sysfs defaults"
	depends on TRANSPARENT_HUGEPAGE
	default TRANSPARENT_HUGEPAGE_ALWAYS
	help
	  Selects the sysfs defaults for Transparent Hugepage Support.

	config TRANSPARENT_HUGEPAGE_ALWAYS
		bool "always"
	help
	  Enabling Transparent Hugepage always can increase the
	  memory footprint of applications without a guaranteed
	  benefit, but it will work automatically for all applications.

	config TRANSPARENT_HUGEPAGE_MADVISE
		bool "madvise"
	help
	  Enabling Transparent Hugepage madvise will only provide a
	  performance benefit to applications using
	  madvise(MADV_HUGEPAGE), but it won't risk increasing the
	  memory footprint of applications without a guaranteed
	  benefit.
endchoice

config CROSS_MEMORY_ATTACH
	bool "Cross Memory Support"
	depends on MMU
	default y
	help
	  Enabling this option adds the system calls process_vm_readv and
	  process_vm_writev which allow a process with the correct privileges
	  to directly read from or write to another process's address space.
	  See the man page for more details.

#
# UP and nommu archs use km based percpu allocator
#
config NEED_PER_CPU_KM
	depends on !SMP
	bool
	default y

config CLEANCACHE
	bool "Enable cleancache driver to cache clean pages if tmem is present"
	default n
	help
	  Cleancache can be thought of as a page-granularity victim cache
	  for clean pages that the kernel's pageframe replacement algorithm
	  (PFRA) would like to keep around, but can't since there isn't enough
	  memory.  So when the PFRA "evicts" a page, it first attempts to use
	  cleancache code to put the data contained in that page into
	  "transcendent memory", memory that is not directly accessible or
	  addressable by the kernel and is of unknown and possibly
	  time-varying size.  And when a cleancache-enabled
	  filesystem wishes to access a page in a file on disk, it first
	  checks cleancache to see if it already contains it; if it does,
	  the page is copied into the kernel and a disk access is avoided.
	  When a transcendent memory driver is available (such as zcache or
	  Xen transcendent memory), a significant I/O reduction
	  may be achieved.  When none is available, all cleancache calls
	  are reduced to a single pointer-compare-against-NULL resulting
	  in a negligible performance hit.

	  If unsure, say Y to enable cleancache.

config FRONTSWAP
	bool "Enable frontswap to cache swap pages if tmem is present"
	depends on SWAP
	default n
	help
	  Frontswap is so named because it can be thought of as the opposite
	  of a "backing" store for a swap device.  The data is stored into
	  "transcendent memory", memory that is not directly accessible or
	  addressable by the kernel and is of unknown and possibly
	  time-varying size.  When space in transcendent memory is available,
	  a significant swap I/O reduction may be achieved.  When none is
	  available, all frontswap calls are reduced to a single pointer-
	  compare-against-NULL resulting in a negligible performance hit
	  and swap data is stored as normal on the matching swap device.

	  If unsure, say Y to enable frontswap.

config CMA
	bool "Contiguous Memory Allocator"
	depends on HAVE_MEMBLOCK && MMU
	select MIGRATION
	select MEMORY_ISOLATION
	help
	  This enables the Contiguous Memory Allocator which allows other
	  subsystems to allocate big physically-contiguous blocks of memory.
	  CMA reserves a region of memory and allows only movable pages to
	  be allocated from it. This way, the kernel can use the memory for
	  pagecache, and when a subsystem requests a contiguous area, the
	  allocated pages are migrated away to serve the contiguous request.

	  If unsure, say "n".
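
# A minimal sketch of how another subsystem would hook into CMA from its own
# Kconfig (FOO_CAMERA is a hypothetical example symbol):
#
#	config FOO_CAMERA
#		tristate "Foo camera driver (needs large contiguous buffers)"
#		depends on CMA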

config CMA_DEBUG
	bool "CMA debug messages (DEVELOPMENT)"
	depends on DEBUG_KERNEL && CMA
	help
	  Turns on debug messages in CMA.  This produces KERN_DEBUG
	  messages for every CMA call as well as various messages while
	  processing calls such as dma_alloc_from_contiguous().
	  This option does not affect warning and error messages.

config ZBUD
	tristate
	default n
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to two compressed pages per physical
	  page.  While this design limits storage density, it has simple and
	  deterministic reclaim properties that make it preferable to a higher
	  density approach when reclaim will be used.

config ZSWAP
	bool "Compressed cache for swap pages (EXPERIMENTAL)"
	depends on FRONTSWAP && CRYPTO=y
	select CRYPTO_LZO
	select ZBUD
	default n
	help
	  A lightweight compressed cache for swap pages.  It takes
	  pages that are in the process of being swapped out and attempts to
	  compress them into a dynamically allocated RAM-based memory pool.
	  This can result in a significant I/O reduction on the swap device
	  and, in the case where decompressing from RAM is faster than swap
	  device reads, can also improve workload performance.

	  This is marked experimental because it is a new feature (as of
	  v3.11) that interacts heavily with memory reclaim.  While these
	  interactions don't cause any known issues on simple memory setups,
	  they have not been fully explored on the large set of potential
	  configurations and workloads that exist.

config MEM_SOFT_DIRTY
	bool "Track memory changes"
	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
	select PROC_PAGE_MONITOR
	help
	  This option enables memory changes tracking by introducing a
	  soft-dirty bit on pte-s. This bit is set when someone writes
	  into a page, just like the regular dirty bit, but unlike the
	  latter it can be cleared by hand.

	  See Documentation/vm/soft-dirty.txt for more details.

config ZSMALLOC
	bool "Memory allocator for compressed pages"
	depends on MMU
	default n
	help
	  zsmalloc is a slab-based memory allocator designed to store
	  compressed RAM pages.  zsmalloc uses virtual memory mapping
	  in order to reduce fragmentation.  However, this results in a
	  non-standard allocator interface where a handle, not a pointer, is
	  returned by an alloc().  This handle must be mapped in order to
	  access the allocated space.

config PGTABLE_MAPPING
	bool "Use page table mapping to access object in zsmalloc"
	depends on ZSMALLOC
	help
	  By default, zsmalloc uses a copy-based object mapping method to
	  access allocations that span two pages. However, if a particular
	  architecture (e.g., ARM) performs VM mapping faster than copying,
	  then you should select this. This causes zsmalloc to use page table
	  mapping rather than copying for object mapping.

	  You can check speed with the zsmalloc benchmark[1].
	  [1] https://github.com/spartacus06/zsmalloc