/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/node.h>
#include <linux/mmzone.h>
#include <linux/compaction.h>
/*
 * The order of these masks is important. Matching masks will be seen
 * first and the leftover flags will end up showing by themselves.
 *
 * For example, if we have GFP_KERNEL before GFP_USER we will get:
 *
 *  GFP_KERNEL|GFP_HARDWALL
 *
 * Thus, the masks with the most bits set go first.
 */

#define __def_gfpflag_names						\
	{(unsigned long)GFP_TRANSHUGE,		"GFP_TRANSHUGE"},	\
	{(unsigned long)GFP_TRANSHUGE_LIGHT,	"GFP_TRANSHUGE_LIGHT"}, \
	{(unsigned long)GFP_HIGHUSER_MOVABLE,	"GFP_HIGHUSER_MOVABLE"},\
	{(unsigned long)GFP_HIGHUSER,		"GFP_HIGHUSER"},	\
	{(unsigned long)GFP_USER,		"GFP_USER"},		\
	{(unsigned long)GFP_KERNEL_ACCOUNT,	"GFP_KERNEL_ACCOUNT"},	\
	{(unsigned long)GFP_KERNEL,		"GFP_KERNEL"},		\
	{(unsigned long)GFP_NOFS,		"GFP_NOFS"},		\
	{(unsigned long)GFP_ATOMIC,		"GFP_ATOMIC"},		\
	{(unsigned long)GFP_NOIO,		"GFP_NOIO"},		\
	{(unsigned long)GFP_NOWAIT,		"GFP_NOWAIT"},		\
	{(unsigned long)GFP_DMA,		"GFP_DMA"},		\
	{(unsigned long)__GFP_HIGHMEM,		"__GFP_HIGHMEM"},	\
	{(unsigned long)GFP_DMA32,		"GFP_DMA32"},		\
	{(unsigned long)__GFP_HIGH,		"__GFP_HIGH"},		\
	{(unsigned long)__GFP_ATOMIC,		"__GFP_ATOMIC"},	\
	{(unsigned long)__GFP_IO,		"__GFP_IO"},		\
	{(unsigned long)__GFP_FS,		"__GFP_FS"},		\
	{(unsigned long)__GFP_NOWARN,		"__GFP_NOWARN"},	\
	{(unsigned long)__GFP_RETRY_MAYFAIL,	"__GFP_RETRY_MAYFAIL"},	\
	{(unsigned long)__GFP_NOFAIL,		"__GFP_NOFAIL"},	\
	{(unsigned long)__GFP_NORETRY,		"__GFP_NORETRY"},	\
	{(unsigned long)__GFP_COMP,		"__GFP_COMP"},		\
	{(unsigned long)__GFP_ZERO,		"__GFP_ZERO"},		\
	{(unsigned long)__GFP_NOMEMALLOC,	"__GFP_NOMEMALLOC"},	\
	{(unsigned long)__GFP_MEMALLOC,		"__GFP_MEMALLOC"},	\
	{(unsigned long)__GFP_HARDWALL,		"__GFP_HARDWALL"},	\
	{(unsigned long)__GFP_THISNODE,		"__GFP_THISNODE"},	\
	{(unsigned long)__GFP_RECLAIMABLE,	"__GFP_RECLAIMABLE"},	\
	{(unsigned long)__GFP_MOVABLE,		"__GFP_MOVABLE"},	\
	{(unsigned long)__GFP_ACCOUNT,		"__GFP_ACCOUNT"},	\
	{(unsigned long)__GFP_WRITE,		"__GFP_WRITE"},		\
	{(unsigned long)__GFP_RECLAIM,		"__GFP_RECLAIM"},	\
	{(unsigned long)__GFP_DIRECT_RECLAIM,	"__GFP_DIRECT_RECLAIM"},\
	{(unsigned long)__GFP_KSWAPD_RECLAIM,	"__GFP_KSWAPD_RECLAIM"}\

#define show_gfp_flags(flags)						\
	(flags) ? __print_flags(flags, "|",				\
	__def_gfpflag_names						\
	) : "none"

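/*
 * Usage sketch (illustrative only, not part of this header; the __entry
 * field name is made up): a tracepoint typically decodes a gfp mask in its
 * TP_printk() with show_gfp_flags(), along the lines of
 *
 *	TP_printk("gfp_flags=%s", show_gfp_flags(__entry->gfp_flags))
 *
 * Bits matching a composite mask (e.g. GFP_KERNEL) print as that name and
 * any leftover bits print individually, joined by '|'.
 */
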
#ifdef CONFIG_MMU
#define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_MLOCK(flag,string)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
#define IF_HAVE_PG_UNCACHED(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_UNCACHED(flag,string)
#endif

#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_HWPOISON(flag,string)
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
#define IF_HAVE_PG_IDLE(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_IDLE(flag,string)
#endif

#ifdef CONFIG_64BIT
#define IF_HAVE_PG_ARCH_2(flag,string) ,{1UL << flag, string}
#else
#define IF_HAVE_PG_ARCH_2(flag,string)
#endif

#define __def_pageflag_names						\
	{1UL << PG_locked,		"locked"	},		\
	{1UL << PG_waiters,		"waiters"	},		\
	{1UL << PG_error,		"error"		},		\
	{1UL << PG_referenced,		"referenced"	},		\
	{1UL << PG_uptodate,		"uptodate"	},		\
	{1UL << PG_dirty,		"dirty"		},		\
	{1UL << PG_lru,			"lru"		},		\
	{1UL << PG_active,		"active"	},		\
	{1UL << PG_workingset,		"workingset"	},		\
	{1UL << PG_slab,		"slab"		},		\
	{1UL << PG_owner_priv_1,	"owner_priv_1"	},		\
	{1UL << PG_arch_1,		"arch_1"	},		\
	{1UL << PG_reserved,		"reserved"	},		\
	{1UL << PG_private,		"private"	},		\
	{1UL << PG_private_2,		"private_2"	},		\
	{1UL << PG_writeback,		"writeback"	},		\
	{1UL << PG_head,		"head"		},		\
	{1UL << PG_mappedtodisk,	"mappedtodisk"	},		\
	{1UL << PG_reclaim,		"reclaim"	},		\
	{1UL << PG_swapbacked,		"swapbacked"	},		\
	{1UL << PG_unevictable,		"unevictable"	}		\
IF_HAVE_PG_MLOCK(PG_mlocked,		"mlocked"	)		\
IF_HAVE_PG_UNCACHED(PG_uncached,	"uncached"	)		\
IF_HAVE_PG_HWPOISON(PG_hwpoison,	"hwpoison"	)		\
IF_HAVE_PG_IDLE(PG_young,		"young"		)		\
IF_HAVE_PG_IDLE(PG_idle,		"idle"		)		\
IF_HAVE_PG_ARCH_2(PG_arch_2,		"arch_2"	)

#define show_page_flags(flags)						\
	(flags) ? __print_flags(flags, "|",				\
	__def_pageflag_names						\
	) : "none"

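/*
 * Usage sketch (illustrative only; the event fields are made up): a
 * page-related trace event could decode a snapshot of page flags the same
 * way, e.g.
 *
 *	TP_printk("pfn=0x%lx flags=%s",
 *		  __entry->pfn, show_page_flags(__entry->flags))
 *
 * Note that the IF_HAVE_PG_*() helpers above carry their own leading
 * comma, so optional entries drop out of the table cleanly when the
 * corresponding config option is disabled.
 */
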
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT,     "pat"           }
#elif defined(CONFIG_PPC)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO,     "sao"           }
#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP,	"growsup"	}
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy"	}
#else
#define __VM_ARCH_SPECIFIC_1 {VM_ARCH_1,	"arch_1"	}
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define IF_HAVE_VM_SOFTDIRTY(flag,name) {flag, name },
#else
#define IF_HAVE_VM_SOFTDIRTY(flag,name)
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
# define IF_HAVE_UFFD_MINOR(flag, name) {flag, name},
#else
# define IF_HAVE_UFFD_MINOR(flag, name)
#endif

#define __def_vmaflag_names						\
	{VM_READ,			"read"		},		\
	{VM_WRITE,			"write"		},		\
	{VM_EXEC,			"exec"		},		\
	{VM_SHARED,			"shared"	},		\
	{VM_MAYREAD,			"mayread"	},		\
	{VM_MAYWRITE,			"maywrite"	},		\
	{VM_MAYEXEC,			"mayexec"	},		\
	{VM_MAYSHARE,			"mayshare"	},		\
	{VM_GROWSDOWN,			"growsdown"	},		\
	{VM_UFFD_MISSING,		"uffd_missing"	},		\
IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR,	"uffd_minor"	)		\
	{VM_PFNMAP,			"pfnmap"	},		\
	{VM_DENYWRITE,			"denywrite"	},		\
	{VM_UFFD_WP,			"uffd_wp"	},		\
	{VM_LOCKED,			"locked"	},		\
	{VM_IO,				"io"		},		\
	{VM_SEQ_READ,			"seqread"	},		\
	{VM_RAND_READ,			"randread"	},		\
	{VM_DONTCOPY,			"dontcopy"	},		\
	{VM_DONTEXPAND,			"dontexpand"	},		\
	{VM_LOCKONFAULT,		"lockonfault"	},		\
	{VM_ACCOUNT,			"account"	},		\
	{VM_NORESERVE,			"noreserve"	},		\
	{VM_HUGETLB,			"hugetlb"	},		\
	{VM_SYNC,			"sync"		},		\
	__VM_ARCH_SPECIFIC_1				,		\
	{VM_WIPEONFORK,			"wipeonfork"	},		\
	{VM_DONTDUMP,			"dontdump"	},		\
IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY,	"softdirty"	)		\
	{VM_MIXEDMAP,			"mixedmap"	},		\
	{VM_HUGEPAGE,			"hugepage"	},		\
	{VM_NOHUGEPAGE,			"nohugepage"	},		\
	{VM_MERGEABLE,			"mergeable"	}		\

#define show_vma_flags(flags)						\
	(flags) ? __print_flags(flags, "|",				\
	__def_vmaflag_names						\
	) : "none"

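/*
 * Usage sketch (illustrative only; the __entry field is made up): events
 * that record vma->vm_flags can decode them the same way, e.g.
 *
 *	TP_printk("vm_flags=%s", show_vma_flags(__entry->vm_flags))
 *
 * Unlike IF_HAVE_PG_*(), the optional VM_* entries expand with a trailing
 * comma, which is why they sit between unconditional entries rather than
 * at the end of the list.
 */
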
#ifdef CONFIG_COMPACTION
#define COMPACTION_STATUS					\
	EM( COMPACT_SKIPPED,		"skipped")		\
	EM( COMPACT_DEFERRED,		"deferred")		\
	EM( COMPACT_CONTINUE,		"continue")		\
	EM( COMPACT_SUCCESS,		"success")		\
	EM( COMPACT_PARTIAL_SKIPPED,	"partial_skipped")	\
	EM( COMPACT_COMPLETE,		"complete")		\
	EM( COMPACT_NO_SUITABLE_PAGE,	"no_suitable_page")	\
	EM( COMPACT_NOT_SUITABLE_ZONE,	"not_suitable_zone")	\
	EMe(COMPACT_CONTENDED,		"contended")

/* High-level compaction status feedback */
#define COMPACTION_FAILED	1
#define COMPACTION_WITHDRAWN	2
#define COMPACTION_PROGRESS	3

#define compact_result_to_feedback(result)	\
({						\
	enum compact_result __result = result;	\
	(compaction_failed(__result)) ? COMPACTION_FAILED : \
		(compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \
})

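/*
 * Usage sketch (illustrative only; the __entry field is made up):
 * compact_result_to_feedback() collapses the detailed enum compact_result
 * into the three COMPACTION_* feedback values above, which a trace event
 * can print symbolically via the COMPACTION_FEEDBACK table below, e.g.
 *
 *	__print_symbolic(compact_result_to_feedback(__entry->ret),
 *			 COMPACTION_FEEDBACK)
 *
 * yielding "failed", "withdrawn" or "progress".
 */
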
#define COMPACTION_FEEDBACK		\
	EM(COMPACTION_FAILED,		"failed")	\
	EM(COMPACTION_WITHDRAWN,	"withdrawn")	\
	EMe(COMPACTION_PROGRESS,	"progress")

#define COMPACTION_PRIORITY						\
	EM(COMPACT_PRIO_SYNC_FULL,	"COMPACT_PRIO_SYNC_FULL")	\
	EM(COMPACT_PRIO_SYNC_LIGHT,	"COMPACT_PRIO_SYNC_LIGHT")	\
	EMe(COMPACT_PRIO_ASYNC,		"COMPACT_PRIO_ASYNC")
#else
#define COMPACTION_STATUS
#define COMPACTION_PRIORITY
#define COMPACTION_FEEDBACK
#endif

#ifdef CONFIG_ZONE_DMA
#define IFDEF_ZONE_DMA(X) X
#else
#define IFDEF_ZONE_DMA(X)
#endif

#ifdef CONFIG_ZONE_DMA32
#define IFDEF_ZONE_DMA32(X) X
#else
#define IFDEF_ZONE_DMA32(X)
#endif

#ifdef CONFIG_HIGHMEM
#define IFDEF_ZONE_HIGHMEM(X) X
#else
#define IFDEF_ZONE_HIGHMEM(X)
#endif

#define ZONE_TYPE						\
	IFDEF_ZONE_DMA(		EM (ZONE_DMA,	 "DMA"))	\
	IFDEF_ZONE_DMA32(	EM (ZONE_DMA32,	 "DMA32"))	\
				EM (ZONE_NORMAL, "Normal")	\
	IFDEF_ZONE_HIGHMEM(	EM (ZONE_HIGHMEM,"HighMem"))	\
				EMe(ZONE_MOVABLE,"Movable")

#define LRU_NAMES		\
		EM (LRU_INACTIVE_ANON, "inactive_anon") \
		EM (LRU_ACTIVE_ANON, "active_anon") \
		EM (LRU_INACTIVE_FILE, "inactive_file") \
		EM (LRU_ACTIVE_FILE, "active_file") \
		EMe(LRU_UNEVICTABLE, "unevictable")

/*
 * First define the enums in the above macros to be exported to userspace
 * via TRACE_DEFINE_ENUM().
 */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

COMPACTION_STATUS
COMPACTION_PRIORITY
/* COMPACTION_FEEDBACK values are #defines, not enums, so they are not needed here. */
ZONE_TYPE
LRU_NAMES

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a, b)	{a, b},
#define EMe(a, b)	{a, b}

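/*
 * Usage sketch (illustrative only; the __entry field is made up) of the
 * two-pass EM()/EMe() pattern: the first pass above emits
 * TRACE_DEFINE_ENUM() so userspace tools can resolve the enum values used
 * in format strings, and the second pass turns the very same tables into
 * {value, "name"} pairs suitable for __print_symbolic(), e.g.
 *
 *	TP_printk("status=%s",
 *		  __print_symbolic(__entry->status, COMPACTION_STATUS))
 *
 * which prints "skipped", "deferred", etc. instead of raw numbers.
 */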