/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 struct kmem_cache *s,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	unsigned long,	gfp_flags	)
		__field(	bool,		accounted	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		/*
		 * The allocation is charged to a memory cgroup when kernel
		 * memcg accounting is built in and either the caller passed
		 * __GFP_ACCOUNT or the cache was created with SLAB_ACCOUNT.
		 */
		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
					  ((gfp_flags & __GFP_ACCOUNT) ||
					  (s && s->flags & SLAB_ACCOUNT)) : false;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->accounted ? "true" : "false")
);

DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);
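
/*
 * Illustrative only; the callers live in mm/slab.c and mm/slub.c, not
 * here. At this revision the allocators fire these events roughly as:
 *
 *	trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags);
 *	trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size, s->size,
 *			       gfpflags);
 *
 * They can be enabled at runtime through
 * /sys/kernel/tracing/events/kmem/<event>/enable and read via trace_pipe.
 */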

DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 struct kmem_cache *s,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		node		)
		__field(	bool,		accounted	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
		/* Same accounting rule as kmem_alloc above. */
		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
					  ((gfp_flags & __GFP_ACCOUNT) ||
					  (s && s->flags & SLAB_ACCOUNT)) : false;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node,
		__entry->accounted ? "true" : "false")
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);
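
/*
 * A minimal sketch of the NUMA-aware variants, which carry the same
 * payload plus the target node, e.g.:
 *
 *	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, gfpflags, node);
 *
 * node == NUMA_NO_NODE (-1) means the caller expressed no preference.
 */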

TRACE_EVENT(kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%pS ptr=%p",
		  (void *)__entry->call_site, __entry->ptr)
);
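
/*
 * kfree() traces before its ZERO_OR_NULL_PTR() early return, so ptr may
 * be NULL or ZERO_SIZE_PTR here. Illustrative caller:
 *
 *	trace_kfree(_RET_IP_, objp);
 */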

TRACE_EVENT(kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr, const char *name),

	TP_ARGS(call_site, ptr, name),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__string(	name,	name	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__assign_str(name, name);
	),

	TP_printk("call_site=%pS ptr=%p name=%s",
		  (void *)__entry->call_site, __entry->ptr, __get_str(name))
);
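
/*
 * Unlike the allocation events, this one records the cache by name:
 * __string()/__assign_str() copy the string into the ring buffer, so
 * the event stays readable even after the cache is destroyed.
 * Illustrative caller:
 *
 *	trace_kmem_cache_free(_RET_IP_, x, s->name);
 */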

TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=0x%lx order=%u",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);

TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page),

	TP_ARGS(page),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
	),

	TP_printk("page=%p pfn=0x%lx order=0",
			pfn_to_page(__entry->pfn),
			__entry->pfn)
);
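
/*
 * Both free events store the pfn rather than the struct page pointer:
 * the pfn is compact and stable in the ring buffer, and TP_printk()
 * recovers the pointer with pfn_to_page() only when the log is read.
 */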

TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		/* -1UL marks a failed allocation, where page is NULL. */
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);
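
/*
 * A successful allocation produces a line roughly like this
 * (illustrative values; %p hashes the pointer):
 *
 *   mm_page_alloc: page=00000000a1b2c3d4 pfn=0x12345 order=0
 *		    migratetype=1 gfp_flags=GFP_KERNEL
 *
 * A failed allocation fires the event too, via the -1UL sentinel above.
 */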

DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
		__field(	int,		percpu_refill	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
		__entry->percpu_refill	= percpu_refill;
	),

	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->percpu_refill)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype,
		 int percpu_refill),

	TP_ARGS(page, order, migratetype, percpu_refill)
);
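
/*
 * percpu_refill is assumed here to distinguish pages pulled under the
 * zone lock to refill a per-CPU free list (rmqueue_bulk() at this
 * revision) from pages handed straight back to the caller.
 */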

TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order, __entry->migratetype)
);
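
/*
 * Fired once per page as the per-CPU lists are drained back into the
 * buddy allocator (free_pcppages_bulk() at this revision).
 */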

TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
		__field(	int,		change_ownership	)
	),

	TP_fast_assign(
		__entry->pfn			= page_to_pfn(page);
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
		__entry->change_ownership	= (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);
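
/*
 * Worked example, assuming the common pageblock_order of 9 (2MiB
 * pageblocks with 4KiB pages): a fallback of order 3 prints
 * fragmenting=1 because 3 < 9, i.e. the stolen pageblock is now split
 * between migratetypes; change_ownership=1 instead means the whole
 * pageblock was converted to the allocation's migratetype.
 */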

/*
 * Required for uniquely and securely identifying the mm in the rss_stat
 * tracepoint.
 */
#ifndef __PTR_TO_HASHVAL
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
	int ret;
	unsigned long hashval;

	ret = ptr_to_hashval(ptr, &hashval);
	if (ret)
		return 0;

	/* The hashed value is only 32-bit */
	return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif
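
/*
 * ptr_to_hashval() uses the same per-boot key as printk's %p hashing,
 * so mm_id is stable for a given mm within one boot without exposing
 * the raw kernel pointer.
 */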

#define TRACE_MM_PAGES		\
	EM(MM_FILEPAGES)	\
	EM(MM_ANONPAGES)	\
	EM(MM_SWAPENTS)		\
	EMe(MM_SHMEMPAGES)

#undef EM
#undef EMe

#define EM(a)	TRACE_DEFINE_ENUM(a);
#define EMe(a)	TRACE_DEFINE_ENUM(a);

TRACE_MM_PAGES

#undef EM
#undef EMe

#define EM(a)	{ a, #a },
#define EMe(a)	{ a, #a }
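
/*
 * The EM()/EMe() list expands twice: the first pass above emits
 * TRACE_DEFINE_ENUM(MM_FILEPAGES); etc. so user-space tooling can
 * resolve the enum values, and the second pass builds the
 * { MM_FILEPAGES, "MM_FILEPAGES" } pairs that __print_symbolic()
 * consumes in rss_stat below.
 */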

TRACE_EVENT(rss_stat,

	TP_PROTO(struct mm_struct *mm,
		int member,
		long count),

	TP_ARGS(mm, member, count),

	TP_STRUCT__entry(
		__field(unsigned int, mm_id)
		__field(unsigned int, curr)
		__field(int, member)
		__field(long, size)
	),

	TP_fast_assign(
		__entry->mm_id = mm_ptr_to_hash(mm);
		__entry->curr = !!(current->mm == mm);
		__entry->member = member;
		__entry->size = (count << PAGE_SHIFT);
	),

	TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
		__entry->mm_id,
		__entry->curr,
		__print_symbolic(__entry->member, TRACE_MM_PAGES),
		__entry->size)
	);
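
/*
 * size is reported in bytes: count is the updated counter value in
 * pages, so with the common PAGE_SHIFT of 12 a count of 3 is logged as
 * size=12288B.
 */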
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>