/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct trace_event_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/trace_events.h>

#ifndef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
#endif

#define __app__(x, y) str__##x##y
#define __app(x, y) __app__(x, y)

#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)

#define TRACE_MAKE_SYSTEM_STR()				\
	static const char TRACE_SYSTEM_STRING[] =	\
		__stringify(TRACE_SYSTEM)

TRACE_MAKE_SYSTEM_STR();

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)				\
	static struct trace_eval_map __used __initdata	\
	__##TRACE_SYSTEM##_##a =			\
	{						\
		.system = TRACE_SYSTEM_STRING,		\
		.eval_string = #a,			\
		.eval_value = a				\
	};						\
	static struct trace_eval_map __used		\
	__attribute__((section("_ftrace_eval_map")))	\
	*TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a

#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)				\
	static struct trace_eval_map __used __initdata	\
	__##TRACE_SYSTEM##_##a =			\
	{						\
		.system = TRACE_SYSTEM_STRING,		\
		.eval_string = "sizeof(" #a ")",	\
		.eval_value = sizeof(a)			\
	};						\
	static struct trace_eval_map __used		\
	__attribute__((section("_ftrace_eval_map")))	\
	*TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)			type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __field_struct
#define __field_struct(type, item)		type	item;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)		type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 __data_loc_##item;

#undef __string
#define __string(item, src)			__dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits)		__dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
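/*
 * Illustrative sketch (the event and field names here are made up for
 * this example, not part of the kernel): given a hypothetical tracepoint
 * defined as
 *
 *	TRACE_EVENT(sample_event,
 *		TP_PROTO(int foo, const char *bar),
 *		TP_ARGS(foo, bar),
 *		TP_STRUCT__entry(
 *			__field(	int,	foo	)
 *			__string(	bar,	bar	)
 *		),
 *		TP_fast_assign(
 *			__entry->foo = foo;
 *			__assign_str(bar, bar);
 *		),
 *		TP_printk("foo=%d bar=%s", __entry->foo, __get_str(bar))
 *	);
 *
 * this stage expands TP_STRUCT__entry() into:
 *
 *	struct trace_event_raw_sample_event {
 *		struct trace_entry	ent;
 *		int			foo;
 *		u32			__data_loc_bar;
 *		char			__data[0];
 *	};
 *
 * The stages below reuse this same hypothetical event in their examples.
 */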
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct trace_event_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct trace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct trace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct,	\
		assign, print, reg, unreg)			\
	TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)				\
	__TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)			\
	__TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct trace_event_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * used to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)

#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct trace_event_data_offsets_##call {			\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
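/*
 * Continuing the hypothetical sample_event from the stage 1 example:
 * only the __string() field is dynamic, so this stage produces
 *
 *	struct trace_event_data_offsets_sample_event {
 *		u32	bar;
 *	};
 *
 * where "bar" holds the offset of the string data from the beginning
 * of the event in its low 16 bits and the data length in its high
 * 16 bits, matching the encoding described above.
 */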
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct trace_event_raw_<call> *field;	<-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field)						\
	({								\
		void *__bitmask = __get_dynamic_array(field);		\
		unsigned int __bitmask_size;				\
		__bitmask_size = __get_dynamic_array_len(field);	\
		trace_print_bitmask_seq(p, __bitmask, __bitmask_size);	\
	})

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		trace_print_flags_seq(p, delim, flag, __flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		trace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_flags_u64
#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_flags_u64(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags_u64 __flags[] =	\
			{ flag_array, { -1, NULL } };			\
		trace_print_flags_seq_u64(p, delim, flag, __flags);	\
	})

#define __print_symbolic_u64(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags_u64 symbols[] =	\
			{ symbol_array, { -1, NULL } };			\
		trace_print_symbols_seq_u64(p, value, symbols);		\
	})
#else
#define __print_flags_u64(flag, delim, flag_array...)			\
	__print_flags(flag, delim, flag_array)

#define __print_symbolic_u64(value, symbol_array...)			\
	__print_symbolic(value, symbol_array)
#endif
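/*
 * Example use of the helpers above, assuming a hypothetical TP_printk:
 *
 *	TP_printk("state=%s",
 *		  __print_symbolic(__entry->state,
 *				   { 0, "IDLE" },
 *				   { 1, "RUNNING" },
 *				   { 2, "BLOCKED" }))
 *
 * Inside the generated output function this becomes a statement
 * expression containing a static trace_print_flags table terminated by
 * { -1, NULL }, plus a call to trace_print_symbols_seq(), which yields
 * the matching name (or the raw value in hex if nothing matches).
 */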
#undef __print_hex
#define __print_hex(buf, buf_len)					\
	trace_print_hex_seq(p, buf, buf_len, false)

#undef __print_hex_str
#define __print_hex_str(buf, buf_len)					\
	trace_print_hex_seq(p, buf, buf_len, true)

#undef __print_array
#define __print_array(array, count, el_size)				\
	({								\
		BUILD_BUG_ON(el_size != 1 && el_size != 2 &&		\
			     el_size != 4 && el_size != 8);		\
		trace_print_array_seq(p, array, count, el_size);	\
	})

#undef __print_hex_dump
#define __print_hex_dump(prefix_str, prefix_type,			\
			 rowsize, groupsize, buf, len, ascii)		\
	trace_print_hex_dump_seq(p, prefix_str, prefix_type,		\
				 rowsize, groupsize, buf, len, ascii)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
			struct trace_event *trace_event)		\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
	struct trace_event_raw_##call *field;				\
	int ret;							\
									\
	field = (typeof(field))iter->ent;				\
									\
	ret = trace_raw_output_prep(iter, trace_event);			\
	if (ret != TRACE_TYPE_HANDLED)					\
		return ret;						\
									\
	trace_seq_printf(s, print);					\
									\
	return trace_handle_return(s);					\
}									\
static struct trace_event_functions trace_event_type_funcs_##call = {	\
	.trace			= trace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
			struct trace_event *event)			\
{									\
	struct trace_event_raw_##template *field;			\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	return trace_output_call(iter, #call, print);			\
}									\
static struct trace_event_functions trace_event_type_funcs_##call = {	\
	.trace			= trace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(_type, _item, _filter_type) {			\
	.type = #_type, .name = #_item,					\
	.size = sizeof(_type), .align = __alignof__(_type),		\
	.is_signed = is_signed_type(_type), .filter_type = _filter_type },

#undef __field_struct_ext
#define __field_struct_ext(_type, _item, _filter_type) {		\
	.type = #_type, .name = #_item,					\
	.size = sizeof(_type), .align = __alignof__(_type),		\
	.is_signed = 0, .filter_type = _filter_type },

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __field_struct
#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(_type, _item, _len) {					\
	.type = #_type"["__stringify(_len)"]", .name = #_item,		\
	.size = sizeof(_type[_len]), .align = __alignof__(_type),	\
	.is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },

#undef __dynamic_array
#define __dynamic_array(_type, _item, _len) {				\
	.type = "__data_loc " #_type "[]", .name = #_item,		\
	.size = 4, .align = 4,						\
	.is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static struct trace_event_fields trace_event_fields_##call[] = {	\
	tstruct								\
	{} };

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
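/*
 * For the hypothetical sample_event above, the field macros in this
 * stage would generate roughly:
 *
 *	static struct trace_event_fields trace_event_fields_sample_event[] = {
 *		{ .type = "int", .name = "foo",
 *		  .size = sizeof(int), .align = __alignof__(int),
 *		  .is_signed = is_signed_type(int), .filter_type = FILTER_OTHER },
 *		{ .type = "__data_loc char[]", .name = "bar",
 *		  .size = 4, .align = 4,
 *		  .is_signed = is_signed_type(char), .filter_type = FILTER_OTHER },
 *		{}
 *	};
 *
 * This table is what the tracing core uses to describe the event layout
 * to user space (e.g. in the event's "format" file).
 */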
/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__item_length = (len) * sizeof(type);				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= __item_length << 16;			\
	__data_size += __item_length;

#undef __string
#define __string(item, src) __dynamic_array(char, item,		\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * nr_bits bits (for a cpumask, num_possible_cpus() bits).
 */
#define __bitmask_size_in_bytes_raw(nr_bits)	\
	(((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)			\
	((__bitmask_size_in_bytes_raw(nr_bits) +		\
	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * nr_bits bits, padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)				\
	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int trace_event_get_offsets_##call(		\
	struct trace_event_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	int __maybe_unused __item_length;				\
	struct trace_event_raw_##call __maybe_unused *entry;		\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
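/*
 * Worked example of the size helpers above, assuming nr_bits == 8 on a
 * 64-bit kernel (BITS_PER_LONG == 64, so BITS_PER_LONG / 8 == 8):
 *
 *	__bitmask_size_in_bytes_raw(8)	== (8 + 7) / 8		== 1
 *	__bitmask_size_in_longs(8)	== (1 + (8 - 1)) / 8	== 1
 *	__bitmask_size_in_bytes(8)	== 1 * 8		== 8
 *
 * i.e. a single byte of mask data is padded out to one full long
 * (8 bytes) before being stored in the ring buffer.
 */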
/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct trace_event_call event_<call>;
 *
 * static void trace_event_raw_event_<call>(void *__data, proto)
 * {
 *	struct trace_event_file *trace_file = __data;
 *	struct trace_event_call *event_call = trace_file->event_call;
 *	struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
 *	unsigned long eflags = trace_file->flags;
 *	enum event_trigger_type __tt = ETT_NONE;
 *	struct ring_buffer_event *event;
 *	struct trace_event_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
 *		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 *			event_triggers_call(trace_file, NULL);
 *		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 *		__tt = event_triggers_call(trace_file, entry);
 *
 *	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
 *		     &trace_file->flags))
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (!filter_check_discard(trace_file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *	if (__tt)
 *		event_triggers_post_call(trace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= trace_raw_output_<call>, <-- stage 3
 * };
 *
 * static char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct trace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.fields_array		= trace_event_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= trace_event_raw_event_##call,
 *	.reg			= trace_event_reg,
 * };
 *
 * static struct trace_event_call event_<call> = {
 *	.class			= event_class_<template>,
 *	{
 *		.tp			= &__tracepoint_<call>,
 *	},
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct trace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */
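/*
 * Sketch of what this stage emits for the hypothetical sample_event
 * probe (details elided; see DECLARE_EVENT_CLASS() below for the real
 * macro body, which uses the trace_event_buffer helpers):
 *
 *	static notrace void
 *	trace_event_raw_event_sample_event(void *__data, int foo,
 *					   const char *bar)
 *	{
 *		...
 *		entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					sizeof(*entry) + __data_size);
 *		...
 *		entry->__data_loc_bar = __data_offsets.bar;	<-- __string()
 *		entry->foo = foo;				<-- TP_fast_assign
 *		__assign_str(bar, bar);				<-- TP_fast_assign
 *		trace_event_buffer_commit(&fbuffer);
 *	}
 */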
#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)				\
	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
trace_event_raw_event_##call(void *__data, proto)			\
{									\
	struct trace_event_file *trace_file = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_buffer fbuffer;				\
	struct trace_event_raw_##call *entry;				\
	int __data_size;						\
									\
	if (trace_trigger_soft_disabled(trace_file))			\
		return;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
	entry = trace_event_buffer_reserve(&fbuffer, trace_file,	\
				 sizeof(*entry) + __data_size);		\
									\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	trace_event_buffer_commit(&fbuffer);				\
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(trace_event_raw_event_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __print_hex_str
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef __print_array
#undef __print_hex_dump

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
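/*
 * With the TP_printk() redefinition above, the hypothetical sample_event
 * print format becomes the literal string
 *
 *	"\"foo=%d bar=%s\", REC->foo, __get_str(bar)"
 *
 * (__entry is REC at this point), which is what user space sees as the
 * "print fmt:" line of the event's "format" file.
 */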
"\"" fmt "\", " __stringify(args) 742 743 #undef DECLARE_EVENT_CLASS 744 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 745 _TRACE_PERF_PROTO(call, PARAMS(proto)); \ 746 static char print_fmt_##call[] = print; \ 747 static struct trace_event_class __used __refdata event_class_##call = { \ 748 .system = TRACE_SYSTEM_STRING, \ 749 .fields_array = trace_event_fields_##call, \ 750 .fields = LIST_HEAD_INIT(event_class_##call.fields),\ 751 .raw_init = trace_event_raw_init, \ 752 .probe = trace_event_raw_event_##call, \ 753 .reg = trace_event_reg, \ 754 _TRACE_PERF_INIT(call) \ 755 }; 756 757 #undef DEFINE_EVENT 758 #define DEFINE_EVENT(template, call, proto, args) \ 759 \ 760 static struct trace_event_call __used event_##call = { \ 761 .class = &event_class_##template, \ 762 { \ 763 .tp = &__tracepoint_##call, \ 764 }, \ 765 .event.funcs = &trace_event_type_funcs_##template, \ 766 .print_fmt = print_fmt_##template, \ 767 .flags = TRACE_EVENT_FL_TRACEPOINT, \ 768 }; \ 769 static struct trace_event_call __used \ 770 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 771 772 #undef DEFINE_EVENT_PRINT 773 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ 774 \ 775 static char print_fmt_##call[] = print; \ 776 \ 777 static struct trace_event_call __used event_##call = { \ 778 .class = &event_class_##template, \ 779 { \ 780 .tp = &__tracepoint_##call, \ 781 }, \ 782 .event.funcs = &trace_event_type_funcs_##call, \ 783 .print_fmt = print_fmt_##call, \ 784 .flags = TRACE_EVENT_FL_TRACEPOINT, \ 785 }; \ 786 static struct trace_event_call __used \ 787 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 788 789 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 790