/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

/*
 * Event class shared by the kmalloc and kmem_cache_alloc tracepoints.
 * Records the allocation call site, the returned object pointer, the
 * requested vs. actually-allocated sizes, and the GFP flags used.
 */
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	/* %pS symbolizes the call site; show_gfp_flags() (from mmflags.h)
	 * renders the gfp mask as human-readable flag names. */
	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);

DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

/*
 * NUMA-aware variant of kmem_alloc: identical payload plus the node id
 * the allocation was requested on.
 */
DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

/*
 * Event class shared by kfree and kmem_cache_free: just the caller and
 * the object being released.
 */
DECLARE_EVENT_CLASS(kmem_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%pS ptr=%p",
		(void *)__entry->call_site, __entry->ptr)
);

DEFINE_EVENT(kmem_free, kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

DEFINE_EVENT(kmem_free, kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

/*
 * Page-level free. The pfn is recorded rather than the struct page
 * pointer; TP_printk reconstructs the pointer with pfn_to_page() at
 * output time.
 */
TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=%lu order=%d",
			pfn_to_page(__entry->pfn),
			__entry->pfn,
			__entry->order)
);

/* Batched (pagevec) free path; always order-0, so the format hardcodes it. */
TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page),

	TP_ARGS(page),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
	),

	TP_fast_assign(
		__entry->pfn		= page_to_pfn(page);
	),

	TP_printk("page=%p pfn=%lu order=0",
			pfn_to_page(__entry->pfn),
			__entry->pfn)
);

/*
 * Page allocation. A failed allocation (page == NULL) is recorded with
 * the sentinel pfn -1UL, and TP_printk prints page=NULL pfn=0 for it.
 */
TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->gfp_flags	= gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);

/*
 * Class for per-zone / per-cpu page events. percpu_refill is derived at
 * print time: order-0 allocations are the ones that refill the per-cpu
 * lists, hence "percpu_refill=%d" is printed as (order == 0).
 */
DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);

/*
 * Page returned from a per-cpu list to the buddy allocator.
 * NOTE(review): fast_assign tolerates page == NULL (stores pfn -1UL) but,
 * unlike mm_page_alloc above, TP_printk passes the pfn to pfn_to_page()
 * unconditionally — confirm callers never pass NULL here.
 */
TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
		pfn_to_page(__entry->pfn), __entry->pfn,
		__entry->order, __entry->migratetype)
);

/*
 * Fired when an allocation falls back to a different migratetype.
 * change_ownership records whether the pageblock was converted to the
 * allocation's migratetype; "fragmenting" is derived at print time
 * (fallback smaller than a whole pageblock implies mixing within it).
 */
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		int alloc_order, int fallback_order,
		int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field(	unsigned long,	pfn			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
		__field(	int,		change_ownership	)
	),

	TP_fast_assign(
		__entry->pfn			= page_to_pfn(page);
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
		__entry->change_ownership	= (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);

/*
 * Required for uniquely and securely identifying mm in rss_stat tracepoint.
 */
#ifndef __PTR_TO_HASHVAL
/*
 * Hash an mm_struct pointer so the trace stream never exposes a raw
 * kernel address. Returns 0 if ptr_to_hashval() fails (e.g. before the
 * siphash key is ready); the guard macro keeps this helper defined only
 * once across the multiple inclusions this header goes through.
 */
static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
{
	int ret;
	unsigned long hashval;

	ret = ptr_to_hashval(ptr, &hashval);
	if (ret)
		return 0;

	/* The hashed value is only 32-bit */
	return (unsigned int)hashval;
}
#define __PTR_TO_HASHVAL
#endif

/*
 * RSS counter change for an mm. curr flags whether the mm belongs to the
 * current task; size is the counter converted from pages to bytes
 * (count << PAGE_SHIFT).
 */
TRACE_EVENT(rss_stat,

	TP_PROTO(struct mm_struct *mm,
		int member,
		long count),

	TP_ARGS(mm, member, count),

	TP_STRUCT__entry(
		__field(unsigned int, mm_id)
		__field(unsigned int, curr)
		__field(int, member)
		__field(long, size)
	),

	TP_fast_assign(
		__entry->mm_id = mm_ptr_to_hash(mm);
		__entry->curr = !!(current->mm == mm);
		__entry->member = member;
		__entry->size = (count << PAGE_SHIFT);
	),

	TP_printk("mm_id=%u curr=%d member=%d size=%ldB",
		__entry->mm_id,
		__entry->curr,
		__entry->member,
		__entry->size)
);
#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>