/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Tracepoint definitions for the kmem subsystem: slab allocation/free
 * events (kmalloc, kmem_cache_alloc, kfree, ...), page allocator events
 * (mm_page_alloc, mm_page_free, ...), and the rss_stat accounting event.
 *
 * This header is processed multiple times by <trace/define_trace.h> with
 * different definitions of the TRACE_EVENT family of macros, which is why
 * the include guard below also admits TRACE_HEADER_MULTI_READ.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

/*
 * Common event class for node-agnostic slab allocations: records the
 * caller, the returned object pointer, the requested vs. actually
 * allocated sizes, and the GFP flags used.
 */
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	/* %pS resolves call_site to a symbol name at print time. */
	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);

DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

/*
 * Same as kmem_alloc, but for the *_node allocation variants that take
 * an explicit NUMA node; the requested node is recorded as well.
 */
DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

/* Fired on kfree(); records the caller and the object being freed. */
TRACE_EVENT(kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
	),

	TP_printk("call_site=%pS ptr=%p",
		  (void *)__entry->call_site, __entry->ptr)
);

/*
 * Fired on kmem_cache_free(); additionally records the cache name as a
 * string copied into the ring buffer (__string/__assign_str), since the
 * cache may be destroyed before the event is read.
 */
TRACE_EVENT(kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr, const char *name),

	TP_ARGS(call_site, ptr, name),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__string(	name,	name	)
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
		__assign_str(name, name);
	),

	TP_printk("call_site=%pS ptr=%p name=%s",
		  (void *)__entry->call_site, __entry->ptr, __get_str(name))
);

/*
 * Page allocator events below store the pfn rather than the struct page
 * pointer: the pfn is stable, and pfn_to_page() is applied only at
 * print time to show the page pointer for convenience.
 */
TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=0x%lx order=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);

/* Free via the batched (pagevec) path; always order-0 pages. */
TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page),

	TP_ARGS(page),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
	),

	TP_printk("page=%p pfn=0x%lx order=0",
			pfn_to_page(__entry->pfn),
			__entry->pfn)
);

TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		/* page may be NULL on allocation failure; -1UL is the sentinel. */
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->gfp_flags	= gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);

DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	/* Order-0 allocations refill the per-cpu lists, hence percpu_refill. */
	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);

TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d",
		pfn_to_page(__entry->pfn), __entry->pfn,
		__entry->order, __entry->migratetype)
);

/*
 * Fired when an allocation falls back to a different migratetype,
 * which can fragment a pageblock; change_ownership notes whether the
 * pageblock was converted to the allocation's migratetype.
 */
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
		__field(	int,		change_ownership	)
	),

	TP_fast_assign(
		__entry->pfn			= page_to_pfn(page);
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
		__entry->change_ownership	= (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	/* fragmenting: fallback came from inside a pageblock, splitting it. */
	TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);

/*
 * Required for uniquely and securely identifying mm in rss_stat tracepoint.
 *
 * Hashes the mm_struct pointer so userspace can correlate events per-mm
 * without the trace leaking raw kernel addresses; returns 0 if hashing
 * is not yet possible (ptr_to_hashval failure).
 */
#ifndef __PTR_TO_HASHVAL
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
	int ret;
	unsigned long hashval;

	ret = ptr_to_hashval(ptr, &hashval);
	if (ret)
		return 0;

	/* The hashed value is only 32-bit */
	return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif

/*
 * EM/EMe pattern: the list below is expanded twice — first with
 * TRACE_DEFINE_ENUM() so the enum values are exported to userspace
 * tooling, then as { value, "name" } pairs for __print_symbolic().
 */
#define TRACE_MM_PAGES		\
	EM(MM_FILEPAGES)	\
	EM(MM_ANONPAGES)	\
	EM(MM_SWAPENTS)		\
	EMe(MM_SHMEMPAGES)

#undef EM
#undef EMe

#define EM(a)	TRACE_DEFINE_ENUM(a);
#define EMe(a)	TRACE_DEFINE_ENUM(a);

TRACE_MM_PAGES

#undef EM
#undef EMe

#define EM(a)	{ a, #a },
#define EMe(a)	{ a, #a }

/*
 * Emitted when a per-mm RSS counter changes; size is reported in bytes
 * (count is in pages, converted via PAGE_SHIFT). curr flags whether the
 * mm belongs to the current task.
 */
TRACE_EVENT(rss_stat,

	TP_PROTO(struct mm_struct *mm,
		int member,
		long count),

	TP_ARGS(mm, member, count),

	TP_STRUCT__entry(
		__field(unsigned int, mm_id)
		__field(unsigned int, curr)
		__field(int, member)
		__field(long, size)
	),

	TP_fast_assign(
		__entry->mm_id = mm_ptr_to_hash(mm);
		__entry->curr = !!(current->mm == mm);
		__entry->member = member;
		__entry->size = (count << PAGE_SHIFT);
	),

	TP_printk("mm_id=%u curr=%d type=%s size=%ldB",
		__entry->mm_id,
		__entry->curr,
		__print_symbolic(__entry->member, TRACE_MM_PAGES),
		__entry->size)
);
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>