# mm/Kconfig (openbmc/linux, revision 3fc41476)

menu "Memory Management options"

config SELECT_MEMORY_MODEL
	def_bool y
	depends on ARCH_SELECT_MEMORY_MODEL

choice
	prompt "Memory model"
	depends on SELECT_MEMORY_MODEL
	default DISCONTIGMEM_MANUAL if ARCH_DISCONTIGMEM_DEFAULT
	default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
	default FLATMEM_MANUAL
	help
	  This option allows you to change some of the ways that
	  Linux manages its memory internally. Most users will
	  only have one option here selected by the architecture
	  configuration. This is normal.

config FLATMEM_MANUAL
	bool "Flat Memory"
	depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
	help
	  This option is best suited for non-NUMA systems with
	  a flat address space. FLATMEM is the most efficient
	  memory model in terms of performance and resource
	  consumption, and it is the best option for smaller systems.

	  For systems that have holes in their physical address
	  spaces and for features like NUMA and memory hotplug,
	  choose "Sparse Memory".

	  If unsure, choose this option (Flat Memory) over any other.

config DISCONTIGMEM_MANUAL
	bool "Discontiguous Memory"
	depends on ARCH_DISCONTIGMEM_ENABLE
	help
	  This option provides enhanced support for discontiguous
	  memory systems, over FLATMEM.  These systems have holes
	  in their physical address spaces, and this option provides
	  more efficient handling of these holes.

	  Although "Discontiguous Memory" is still used by several
	  architectures, it is considered deprecated in favor of
	  "Sparse Memory".

	  If unsure, choose "Sparse Memory" over this option.

config SPARSEMEM_MANUAL
	bool "Sparse Memory"
	depends on ARCH_SPARSEMEM_ENABLE
	help
	  This will be the only option for some systems, including
	  memory hot-plug systems.  This is normal.

	  This option provides efficient support for systems with
	  holes in their physical address space and allows memory
	  hot-plug and hot-remove.

	  If unsure, choose "Flat Memory" over this option.

endchoice

config DISCONTIGMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL

config SPARSEMEM
	def_bool y
	depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL

config FLATMEM
	def_bool y
	depends on (!DISCONTIGMEM && !SPARSEMEM) || FLATMEM_MANUAL

config FLAT_NODE_MEM_MAP
	def_bool y
	depends on !SPARSEMEM

#
# Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's
# to represent different areas of memory.  This variable allows
# those dependencies to exist individually.
#
config NEED_MULTIPLE_NODES
	def_bool y
	depends on DISCONTIGMEM || NUMA

config HAVE_MEMORY_PRESENT
	def_bool y
	depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM

#
# SPARSEMEM_EXTREME (which is the default) does some bootmem
# allocations when memory_present() is called.  If this cannot
# be done on your architecture, select this option.  However,
# statically allocating the mem_section[] array can potentially
# consume vast quantities of .bss, so be careful.
#
# This option will also potentially produce smaller runtime code
# with gcc 3.4 and later.
#
config SPARSEMEM_STATIC
	bool

#
# Architecture platforms which require a two level mem_section in SPARSEMEM
# must select this option. This is usually for architecture platforms with
# an extremely sparse physical address space.
#
config SPARSEMEM_EXTREME
	def_bool y
	depends on SPARSEMEM && !SPARSEMEM_STATIC

config SPARSEMEM_VMEMMAP_ENABLE
	bool

config SPARSEMEM_VMEMMAP
	bool "Sparse Memory virtual memmap"
	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
	default y
	help
	  SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
	  pfn_to_page and page_to_pfn operations.  This is the most
	  efficient option when sufficient kernel resources are available.

config HAVE_MEMBLOCK_NODE_MAP
	bool

config HAVE_MEMBLOCK_PHYS_MAP
	bool

config HAVE_GENERIC_GUP
	bool

config ARCH_KEEP_MEMBLOCK
	bool

config MEMORY_ISOLATION
	bool

#
# Only set this on architectures that have completely implemented the memory
# hotplug feature. If you are not sure, don't touch it.
#
config HAVE_BOOTMEM_INFO_NODE
	def_bool n

# eventually, we can have this option just 'select SPARSEMEM'
config MEMORY_HOTPLUG
	bool "Allow for memory hot-add"
	depends on SPARSEMEM || X86_64_ACPI_NUMA
	depends on ARCH_ENABLE_MEMORY_HOTPLUG

config MEMORY_HOTPLUG_SPARSE
	def_bool y
	depends on SPARSEMEM && MEMORY_HOTPLUG

config MEMORY_HOTPLUG_DEFAULT_ONLINE
	bool "Online the newly added memory blocks by default"
	depends on MEMORY_HOTPLUG
	help
	  This option sets the default memory hotplug onlining policy
	  (/sys/devices/system/memory/auto_online_blocks), which determines
	  what happens to newly added memory regions. The policy can always
	  be changed at runtime.
	  See Documentation/memory-hotplug.txt for more information.

	  Say Y here if you want all hot-plugged memory blocks to appear in
	  the 'online' state by default.
	  Say N here if you want the default policy to keep all hot-plugged
	  memory blocks in the 'offline' state.

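#
# As a rough illustration (not part of the Kconfig language), the onlining
# policy described above can be inspected and changed at runtime, and a
# specific block (the block number below is hypothetical) can be onlined
# by hand:
#
#   cat /sys/devices/system/memory/auto_online_blocks
#   echo online > /sys/devices/system/memory/auto_online_blocks
#   echo online > /sys/devices/system/memory/memory42/state
#
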
config MEMORY_HOTREMOVE
	bool "Allow for memory hot remove"
	select MEMORY_ISOLATION
	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
	depends on MIGRATION

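#
# Illustrative runtime hot-remove flow (the memory block number is
# hypothetical): offline a block through sysfs before the backing device
# or range is removed:
#
#   echo offline > /sys/devices/system/memory/memory32/state
#
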
# Heavily threaded applications may benefit from splitting the mm-wide
# page_table_lock, so that faults on different parts of the user address
# space can be handled with less contention: split it at this NR_CPUS.
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
	int
	default "999999" if !MMU
	default "999999" if ARM && !CPU_CACHE_VIPT
	default "999999" if PARISC && !PA20
	default "4"

config ARCH_ENABLE_SPLIT_PMD_PTLOCK
	bool

#
# support for memory balloon
config MEMORY_BALLOON
	bool

#
# support for memory balloon compaction
config BALLOON_COMPACTION
	bool "Allow for balloon memory compaction/migration"
	def_bool y
	depends on COMPACTION && MEMORY_BALLOON
	help
	  Memory fragmentation introduced by ballooning might significantly
	  reduce the number of 2MB contiguous memory blocks that can be used
	  within a guest, thus imposing performance penalties associated with
	  the reduced number of transparent huge pages that could be used by
	  the guest workload. Allowing compaction and migration of pages
	  enlisted as part of memory balloon devices avoids this scenario
	  and helps improve memory defragmentation.

#
# support for memory compaction
config COMPACTION
	bool "Allow for memory compaction"
	def_bool y
	select MIGRATION
	depends on MMU
	help
	  Compaction is the only memory management component to form
	  high order (larger physically contiguous) memory blocks
	  reliably. The page allocator relies on compaction heavily and
	  the lack of the feature can lead to unexpected OOM killer
	  invocations for high order memory requests. You shouldn't
	  disable this option unless there really is a strong reason for
	  it and then we would be really interested to hear about that at
	  linux-mm@kvack.org.

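#
# Illustrative manual trigger (a sysctl provided when COMPACTION=y):
# compact all memory zones from userspace with:
#
#   echo 1 > /proc/sys/vm/compact_memory
#
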
#
# support for page migration
#
config MIGRATION
	bool "Page migration"
	def_bool y
	depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
	help
	  Allows the migration of the physical location of pages of processes
	  while the virtual addresses are not changed. This is useful in
	  two situations. The first is on NUMA systems to put pages nearer
	  to the processors accessing them. The second is when allocating
	  huge pages, as migration can relocate pages to satisfy a huge page
	  allocation instead of reclaiming.

config ARCH_ENABLE_HUGEPAGE_MIGRATION
	bool

config ARCH_ENABLE_THP_MIGRATION
	bool

config CONTIG_ALLOC
	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA

config PHYS_ADDR_T_64BIT
	def_bool 64BIT

config BOUNCE
	bool "Enable bounce buffers"
	default y
	depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
	help
	  Enable bounce buffers for devices that cannot access
	  the full range of memory available to the CPU. Enabled
	  by default when ZONE_DMA or HIGHMEM is selected, but you
	  may say n to override this.

config NR_QUICK
	int
	depends on QUICKLIST
	default "1"

config VIRT_TO_BUS
	bool
	help
	  An architecture should select this if it implements the
	  deprecated interface virt_to_bus().  All new architectures
	  should probably not select this.

config MMU_NOTIFIER
	bool
	select SRCU

config KSM
	bool "Enable KSM for page merging"
	depends on MMU
	select XXHASH
	help
	  Enable Kernel Samepage Merging: KSM periodically scans those areas
	  of an application's address space that an app has advised may be
	  mergeable.  When it finds pages of identical content, it replaces
	  the many instances by a single page with that content, so
	  saving memory until one or another app needs to modify the content.
	  Recommended for use with KVM, or with other duplicative applications.
	  See Documentation/vm/ksm.rst for more information: KSM is inactive
	  until a program has madvised that an area is MADV_MERGEABLE, and
	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).

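#
# Illustrative usage (not part of the build): a process opts a region in
# with madvise(addr, length, MADV_MERGEABLE), and the merge daemon is
# started at runtime with:
#
#   echo 1 > /sys/kernel/mm/ksm/run
#
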
config DEFAULT_MMAP_MIN_ADDR
	int "Low address space to protect from user allocation"
	depends on MMU
	default 4096
	help
	  This is the portion of low virtual memory which should be protected
	  from userspace allocation.  Keeping a user from writing to low pages
	  can help reduce the impact of kernel NULL pointer bugs.

	  For most ia64, ppc64 and x86 users with lots of address space
	  a value of 65536 is reasonable and should cause no problems.
	  On arm and other archs it should not be higher than 32768.
	  Programs which use vm86 functionality or have some need to map
	  this low address space will need CAP_SYS_RAWIO, or this protection
	  can be disabled by setting the value to 0.

	  This value can be changed after boot using the
	  /proc/sys/vm/mmap_min_addr tunable.

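#
# Example (runtime equivalent of the build-time default above; the value
# is illustrative):
#
#   sysctl -w vm.mmap_min_addr=65536
#
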
config ARCH_SUPPORTS_MEMORY_FAILURE
	bool

config MEMORY_FAILURE
	depends on MMU
	depends on ARCH_SUPPORTS_MEMORY_FAILURE
	bool "Enable recovery from hardware memory errors"
	select MEMORY_ISOLATION
	select RAS
	help
	  Enables code to recover from some memory failures on systems
	  with MCA recovery. This allows a system to continue running
	  even when some of its memory has uncorrected errors. This requires
	  special hardware support and typically ECC memory.

config HWPOISON_INJECT
	tristate "HWPoison pages injector"
	depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
	select PROC_PAGE_MONITOR

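#
# Illustrative injection through the debugfs files created by the injector
# module (the PFN value is hypothetical; requires root and debugfs):
#
#   echo 0x19a00 > /sys/kernel/debug/hwpoison/corrupt-pfn
#
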
config NOMMU_INITIAL_TRIM_EXCESS
	int "Turn on mmap() excess space trimming before booting"
	depends on !MMU
	default 1
	help
	  The NOMMU mmap() frequently needs to allocate large contiguous chunks
	  of memory on which to store mappings, but it can only ask the system
	  allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
	  more than it requires.  To deal with this, mmap() is able to trim off
	  the excess and return it to the allocator.

	  If trimming is enabled, the excess is trimmed off and returned to the
	  system allocator, which can cause extra fragmentation, particularly
	  if there are a lot of transient processes.

	  If trimming is disabled, the excess is kept, but not used, which for
	  long-term mappings means that the space is wasted.

	  Trimming can be dynamically controlled through a sysctl option
	  (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
	  excess pages there must be before trimming should occur, or zero if
	  no trimming is to occur.

	  This option specifies the initial value of that sysctl.  The default
	  of 1 says that all excess pages should be trimmed.

	  See Documentation/nommu-mmap.txt for more information.

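#
# Example (values are illustrative) of adjusting the trimming threshold
# described above at runtime:
#
#   echo 0 > /proc/sys/vm/nr_trim_pages    # never trim excess pages
#   echo 1 > /proc/sys/vm/nr_trim_pages    # trim all excess pages
#
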
config TRANSPARENT_HUGEPAGE
	bool "Transparent Hugepage Support"
	depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
	select COMPACTION
	select XARRAY_MULTI
	help
	  Transparent Hugepage support allows the kernel to use huge pages
	  and huge TLB entries transparently for applications whenever
	  possible. This feature can improve computing performance for
	  certain applications by speeding up page faults during memory
	  allocation, by reducing the number of TLB misses and by speeding
	  up page-table walks.

	  If you are memory constrained on an embedded system, you may want
	  to say N.

choice
	prompt "Transparent Hugepage Support sysfs defaults"
	depends on TRANSPARENT_HUGEPAGE
	default TRANSPARENT_HUGEPAGE_ALWAYS
	help
	  Selects the sysfs defaults for Transparent Hugepage Support.

	config TRANSPARENT_HUGEPAGE_ALWAYS
		bool "always"
	help
	  Enabling Transparent Hugepage always can increase the memory
	  footprint of applications without a guaranteed benefit, but it
	  will work automatically for all applications.

	config TRANSPARENT_HUGEPAGE_MADVISE
		bool "madvise"
	help
	  Enabling Transparent Hugepage madvise will only provide a
	  performance benefit to applications that use
	  madvise(MADV_HUGEPAGE), but it won't risk increasing the memory
	  footprint of applications without a guaranteed benefit.

endchoice

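#
# The sysfs default chosen above can be overridden at runtime, e.g.
# (assuming CONFIG_TRANSPARENT_HUGEPAGE=y and sysfs mounted):
#
#   echo always  > /sys/kernel/mm/transparent_hugepage/enabled
#   echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
#   echo never   > /sys/kernel/mm/transparent_hugepage/enabled
#
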
config ARCH_WANTS_THP_SWAP
	def_bool n

config THP_SWAP
	def_bool y
	depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP
	help
	  Swap transparent huge pages in one piece, without splitting.
	  XXX: For now, the swap cluster backing a transparent huge page
	  will be split after swapout.

	  For selection by architectures with reasonable THP sizes.

config TRANSPARENT_HUGE_PAGECACHE
	def_bool y
	depends on TRANSPARENT_HUGEPAGE

#
# UP and nommu archs use km based percpu allocator
#
config NEED_PER_CPU_KM
	depends on !SMP
	bool
	default y

config CLEANCACHE
	bool "Enable cleancache driver to cache clean pages if tmem is present"
	help
	  Cleancache can be thought of as a page-granularity victim cache
	  for clean pages that the kernel's pageframe replacement algorithm
	  (PFRA) would like to keep around, but can't since there isn't enough
	  memory.  So when the PFRA "evicts" a page, it first attempts to use
	  cleancache code to put the data contained in that page into
	  "transcendent memory", memory that is not directly accessible or
	  addressable by the kernel and is of unknown and possibly
	  time-varying size.  And when a cleancache-enabled
	  filesystem wishes to access a page in a file on disk, it first
	  checks cleancache to see if it already contains it; if it does,
	  the page is copied into the kernel and a disk access is avoided.
	  When a transcendent memory driver is available (such as zcache or
	  Xen transcendent memory), a significant I/O reduction
	  may be achieved.  When none is available, all cleancache calls
	  are reduced to a single pointer-compare-against-NULL resulting
	  in a negligible performance hit.

	  If unsure, say Y to enable cleancache.

config FRONTSWAP
	bool "Enable frontswap to cache swap pages if tmem is present"
	depends on SWAP
	help
	  Frontswap is so named because it can be thought of as the opposite
	  of a "backing" store for a swap device.  The data is stored into
	  "transcendent memory", memory that is not directly accessible or
	  addressable by the kernel and is of unknown and possibly
	  time-varying size.  When space in transcendent memory is available,
	  a significant swap I/O reduction may be achieved.  When none is
	  available, all frontswap calls are reduced to a single pointer-
	  compare-against-NULL resulting in a negligible performance hit
	  and swap data is stored as normal on the matching swap device.

	  If unsure, say Y to enable frontswap.

config CMA
	bool "Contiguous Memory Allocator"
	depends on MMU
	select MIGRATION
	select MEMORY_ISOLATION
	help
	  This enables the Contiguous Memory Allocator, which allows other
	  subsystems to allocate big physically-contiguous blocks of memory.
	  CMA reserves a region of memory and allows only movable pages to
	  be allocated from it. This way, the kernel can use the memory for
	  pagecache, and when a subsystem requests a contiguous area, the
	  allocated pages are migrated away to serve the contiguous request.

	  If unsure, say "n".

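#
# Illustrative boot-time use (the size is hypothetical): when DMA_CMA is
# also enabled, the default global CMA area can be sized on the kernel
# command line with:
#
#   cma=64M
#
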
config CMA_DEBUG
	bool "CMA debug messages (DEVELOPMENT)"
	depends on DEBUG_KERNEL && CMA
	help
	  Turns on debug messages in CMA.  This produces KERN_DEBUG
	  messages for every CMA call as well as various messages while
	  processing calls such as dma_alloc_from_contiguous().
	  This option does not affect warning and error messages.

config CMA_DEBUGFS
	bool "CMA debugfs interface"
	depends on CMA && DEBUG_FS
	help
	  Turns on the DebugFS interface for CMA.

config CMA_AREAS
	int "Maximum count of the CMA areas"
	depends on CMA
	default 7
	help
	  CMA allows you to create CMA areas for a particular purpose, mainly
	  used as device private areas. This parameter sets the maximum
	  number of CMA areas in the system.

	  If unsure, leave the default value "7".

config MEM_SOFT_DIRTY
	bool "Track memory changes"
	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
	select PROC_PAGE_MONITOR
	help
	  This option enables memory change tracking by introducing a
	  soft-dirty bit on PTEs. This bit is set when someone writes into a
	  page, just like the regular dirty bit, but unlike the latter it can
	  be cleared by hand.

	  See Documentation/admin-guide/mm/soft-dirty.rst for more details.

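#
# Illustrative cycle for the soft-dirty interface (the PID is hypothetical):
# clear the soft-dirty bits of a task, let it run, then read bit 55 of its
# /proc/<pid>/pagemap entries to see which pages were written:
#
#   echo 4 > /proc/1234/clear_refs
#
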
config ZSWAP
	bool "Compressed cache for swap pages (EXPERIMENTAL)"
	depends on FRONTSWAP && CRYPTO=y
	select CRYPTO_LZO
	select ZPOOL
	help
	  A lightweight compressed cache for swap pages.  It takes
	  pages that are in the process of being swapped out and attempts to
	  compress them into a dynamically allocated RAM-based memory pool.
	  This can result in a significant I/O reduction on the swap device
	  and, in the case where decompressing from RAM is faster than swap
	  device reads, can also improve workload performance.

	  This is marked experimental because it is a new feature (as of
	  v3.11) that interacts heavily with memory reclaim.  While these
	  interactions don't cause any known issues on simple memory setups,
	  they have not been fully explored on the large set of potential
	  configurations and workloads that exist.

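#
# Example of enabling zswap at runtime via its module parameter, or at boot
# on the kernel command line (both are illustrative):
#
#   echo 1 > /sys/module/zswap/parameters/enabled
#   # or boot with: zswap.enabled=1
#
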
config ZPOOL
	tristate "Common API for compressed memory storage"
	help
	  Compressed memory storage API.  This allows using either zbud or
	  zsmalloc.

config ZBUD
	tristate "Low (Up to 2x) density storage for compressed pages"
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to two compressed pages per physical
	  page.  While this design limits storage density, it has simple and
	  deterministic reclaim properties that make it preferable to a higher
	  density approach when reclaim will be used.

config Z3FOLD
	tristate "Up to 3x density storage for compressed pages"
	depends on ZPOOL
	help
	  A special purpose allocator for storing compressed pages.
	  It is designed to store up to three compressed pages per physical
	  page. It is a ZBUD derivative so the simplicity and determinism are
	  still there.

config ZSMALLOC
	tristate "Memory allocator for compressed pages"
	depends on MMU
	help
	  zsmalloc is a slab-based memory allocator designed to store
	  compressed RAM pages.  zsmalloc uses virtual memory mapping
	  in order to reduce fragmentation.  However, this results in a
	  non-standard allocator interface where a handle, not a pointer, is
	  returned by an alloc().  This handle must be mapped in order to
	  access the allocated space.

config PGTABLE_MAPPING
	bool "Use page table mapping to access object in zsmalloc"
	depends on ZSMALLOC
	help
	  By default, zsmalloc uses a copy-based object mapping method to
	  access allocations that span two pages. However, if a particular
	  architecture (e.g., ARM) performs VM mapping faster than copying,
	  then you should select this. This causes zsmalloc to use page table
	  mapping rather than copying for object mapping.

	  You can check speed with the zsmalloc benchmark:
	  https://github.com/spartacus06/zsmapbench

config ZSMALLOC_STAT
	bool "Export zsmalloc statistics"
	depends on ZSMALLOC
	select DEBUG_FS
	help
	  This option enables code in zsmalloc to collect various
	  statistics about what's happening in zsmalloc and exports that
	  information to userspace via debugfs.
	  If unsure, say N.

config GENERIC_EARLY_IOREMAP
	bool

config MAX_STACK_SIZE_MB
	int "Maximum user stack size for 32-bit processes (MB)"
	default 80
	range 8 2048
	depends on STACK_GROWSUP && (!64BIT || COMPAT)
	help
	  This is the maximum stack size in Megabytes in the VM layout of 32-bit
	  user processes when the stack grows upwards (currently only on parisc
	  arch). The stack will be located at the highest memory address minus
	  the given value, unless the RLIMIT_STACK hard limit is changed to a
	  smaller value in which case that is used.

	  A sane initial value is 80 MB.

config DEFERRED_STRUCT_PAGE_INIT
	bool "Defer initialisation of struct pages to kthreads"
	depends on SPARSEMEM
	depends on !NEED_PER_CPU_KM
	depends on 64BIT
	help
	  Ordinarily all struct pages are initialised during early boot in a
	  single thread. On very large machines this can take a considerable
	  amount of time. If this option is set, large machines will bring up
	  a subset of the memmap at boot and then initialise the rest in
	  parallel by starting a one-off "pgdatinitX" kernel thread for each
	  node X. This has a potential performance impact on processes running
	  early in the lifetime of the system until these kthreads finish the
	  initialisation.

config IDLE_PAGE_TRACKING
	bool "Enable idle page tracking"
	depends on SYSFS && MMU
	select PAGE_EXTENSION if !64BIT
	help
	  This feature allows estimating the number of user pages that have
	  not been touched during a given period of time. This information can
	  be useful to tune memory cgroup limits and/or for job placement
	  within a compute cluster.

	  See Documentation/admin-guide/mm/idle_page_tracking.rst for
	  more details.

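#
# Illustrative raw access to the interface described above (the offset and
# the xxd formatting are only for demonstration; reads must be in 8-byte
# units):
#
#   dd if=/sys/kernel/mm/page_idle/bitmap bs=8 count=1 2>/dev/null | xxd
#
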
# arch_add_memory() comprehends device memory
config ARCH_HAS_ZONE_DEVICE
	bool

config ZONE_DEVICE
	bool "Device memory (pmem, HMM, etc...) hotplug support"
	depends on MEMORY_HOTPLUG
	depends on MEMORY_HOTREMOVE
	depends on SPARSEMEM_VMEMMAP
	depends on ARCH_HAS_ZONE_DEVICE
	select XARRAY_MULTI
	help
	  Device memory hotplug support allows for establishing pmem,
	  or other device driver discovered memory regions, in the
	  memmap. This allows pfn_to_page() lookups of otherwise
	  "device-physical" addresses which is needed for using a DAX
	  mapping in an O_DIRECT operation, among other things.

	  If FS_DAX is enabled, then say Y.

config ARCH_HAS_HMM_MIRROR
	bool
	default y
	depends on (X86_64 || PPC64)
	depends on MMU && 64BIT

config ARCH_HAS_HMM_DEVICE
	bool
	default y
	depends on (X86_64 || PPC64)
	depends on MEMORY_HOTPLUG
	depends on MEMORY_HOTREMOVE
	depends on SPARSEMEM_VMEMMAP
	depends on ARCH_HAS_ZONE_DEVICE
	select XARRAY_MULTI

config ARCH_HAS_HMM
	bool
	default y
	depends on (X86_64 || PPC64)
	depends on ZONE_DEVICE
	depends on MMU && 64BIT
	depends on MEMORY_HOTPLUG
	depends on MEMORY_HOTREMOVE
	depends on SPARSEMEM_VMEMMAP

config MIGRATE_VMA_HELPER
	bool

config DEV_PAGEMAP_OPS
	bool

config HMM
	bool
	select MMU_NOTIFIER
	select MIGRATE_VMA_HELPER

config HMM_MIRROR
	bool "HMM mirror CPU page table into a device page table"
	depends on ARCH_HAS_HMM
	select HMM
	help
	  Select HMM_MIRROR if you want to mirror a range of the CPU page
	  table of a process into a device page table. Here, mirror means
	  "keep synchronized". Prerequisites: the device must provide the
	  ability to write-protect its page tables (at PAGE_SIZE
	  granularity), and must be able to recover from the resulting
	  potential page faults.

config DEVICE_PRIVATE
	bool "Unaddressable device memory (GPU memory, ...)"
	depends on ARCH_HAS_HMM
	select HMM
	select DEV_PAGEMAP_OPS
	help
	  Allows creation of struct pages to represent unaddressable device
	  memory; i.e., memory that is only accessible from the device (or
	  group of devices). You likely also want to select HMM_MIRROR.

config DEVICE_PUBLIC
	bool "Addressable device memory (like GPU memory)"
	depends on ARCH_HAS_HMM
	select HMM
	select DEV_PAGEMAP_OPS
	help
	  Allows creation of struct pages to represent addressable device
	  memory; i.e., memory that is accessible from both the device and
	  the CPU.

config FRAME_VECTOR
	bool

config ARCH_USES_HIGH_VMA_FLAGS
	bool

config ARCH_HAS_PKEYS
	bool

config PERCPU_STATS
	bool "Collect percpu memory statistics"
	help
	  This feature collects and exposes statistics via debugfs. The
	  information includes global and per-chunk statistics, which can
	  be used to help understand percpu memory usage.

config GUP_BENCHMARK
	bool "Enable infrastructure for get_user_pages_fast() benchmarking"
	help
	  Provides /sys/kernel/debug/gup_benchmark that helps with testing
	  performance of get_user_pages_fast().

	  See tools/testing/selftests/vm/gup_benchmark.c

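#
# Illustrative use of the benchmark (paths are relative to the kernel tree;
# running it requires root and a mounted debugfs):
#
#   make -C tools/testing/selftests/vm
#   sudo ./tools/testing/selftests/vm/gup_benchmark
#
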
config ARCH_HAS_PTE_SPECIAL
	bool

endmenu