/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>

/*
 * Common fields for slab allocation events: the allocation call site,
 * the object returned, the requested and actually allocated sizes, and
 * the GFP flags used.
 */
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
		__field( const void *, ptr )
		__field( size_t, bytes_req )
		__field( size_t, bytes_alloc )
		__field( gfp_t, gfp_flags )
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
		__entry->bytes_req = bytes_req;
		__entry->bytes_alloc = bytes_alloc;
		__entry->gfp_flags = gfp_flags;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);

DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

/* As kmem_alloc, but also records the NUMA node the allocation targeted. */
DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
		__field( const void *, ptr )
		__field( size_t, bytes_req )
		__field( size_t, bytes_alloc )
		__field( gfp_t, gfp_flags )
		__field( int, node )
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
		__entry->bytes_req = bytes_req;
		__entry->bytes_alloc = bytes_alloc;
		__entry->gfp_flags = gfp_flags;
		__entry->node = node;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

/* Free events only need the call site and the pointer being freed. */
DECLARE_EVENT_CLASS(kmem_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field( unsigned long, call_site )
		__field( const void *, ptr )
	),

	TP_fast_assign(
		__entry->call_site = call_site;
		__entry->ptr = ptr;
	),

	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);

DEFINE_EVENT(kmem_free, kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

DEFINE_EVENT(kmem_free, kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

/*
 * Page allocator events store the pfn rather than the struct page pointer
 * and convert back with pfn_to_page() only when the event is printed.
 */
TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
	),

	TP_fast_assign(
		__entry->pfn = page_to_pfn(page);
		__entry->order = order;
	),

	TP_printk("page=%p pfn=%lu order=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->order)
);

TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page, int cold),

	TP_ARGS(page, cold),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( int, cold )
	),

	TP_fast_assign(
		__entry->pfn = page_to_pfn(page);
		__entry->cold = cold;
	),

	TP_printk("page=%p pfn=%lu order=0 cold=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->cold)
);

TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
		 gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
		__field( gfp_t, gfp_flags )
		__field( int, migratetype )
	),

	TP_fast_assign(
		__entry->pfn = page ? page_to_pfn(page) : -1UL;
		__entry->order = order;
		__entry->gfp_flags = gfp_flags;
		__entry->migratetype = migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);

DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
		__field( int, migratetype )
	),

	TP_fast_assign(
		__entry->pfn = page ? page_to_pfn(page) : -1UL;
		__entry->order = order;
		__entry->migratetype = migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
		__entry->pfn != -1UL ? __entry->pfn : 0,
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);

TRACE_EVENT(mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( unsigned int, order )
		__field( int, migratetype )
	),

	TP_fast_assign(
		__entry->pfn = page ? page_to_pfn(page) : -1UL;
		__entry->order = order;
		__entry->migratetype = migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
		pfn_to_page(__entry->pfn), __entry->pfn,
		__entry->order, __entry->migratetype)
);

/*
 * Records allocations that fall back to a pageblock of a different
 * migratetype, which can cause external fragmentation.
 */
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
		 int alloc_order, int fallback_order,
		 int alloc_migratetype, int fallback_migratetype),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype),

	TP_STRUCT__entry(
		__field( unsigned long, pfn )
		__field( int, alloc_order )
		__field( int, fallback_order )
		__field( int, alloc_migratetype )
		__field( int, fallback_migratetype )
		__field( int, change_ownership )
	),

	TP_fast_assign(
		__entry->pfn = page_to_pfn(page);
		__entry->alloc_order = alloc_order;
		__entry->fallback_order = fallback_order;
		__entry->alloc_migratetype = alloc_migratetype;
		__entry->fallback_migratetype = fallback_migratetype;
		__entry->change_ownership = (alloc_migratetype ==
					get_pageblock_migratetype(page));
	),

	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		pfn_to_page(__entry->pfn),
		__entry->pfn,
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);

#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
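
/*
 * Illustrative usage sketch: each TRACE_EVENT()/DEFINE_EVENT() above
 * generates a trace_<name>() helper that the slab and page allocators
 * call at the matching point, with variable names here purely for
 * illustration, e.g.:
 *
 *	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 *	trace_kfree(_RET_IP_, objp);
 *
 * The events can then be enabled from userspace through tracefs, e.g.
 * "echo 1 > /sys/kernel/tracing/events/kmem/kmalloc/enable", and the
 * TP_printk() output read back from trace_pipe.
 */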