/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct trace_event_raw_<call> {
 *	struct trace_entry ent;
 *	<type> <item>;
 *	<type2> <item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/trace_events.h>

#ifndef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
#endif

#define __app__(x, y) str__##x##y
#define __app(x, y) __app__(x, y)

#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)

#define TRACE_MAKE_SYSTEM_STR()					\
	static const char TRACE_SYSTEM_STRING[] =		\
		__stringify(TRACE_SYSTEM)

TRACE_MAKE_SYSTEM_STR();

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)					\
	static struct trace_eval_map __used __initdata		\
	__##TRACE_SYSTEM##_##a =				\
	{							\
		.system = TRACE_SYSTEM_STRING,			\
		.eval_string = #a,				\
		.eval_value = a					\
	};							\
	static struct trace_eval_map __used			\
	__attribute__((section("_ftrace_eval_map")))		\
	*TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a

#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)					\
	static struct trace_eval_map __used __initdata		\
	__##TRACE_SYSTEM##_##a =				\
	{							\
		.system = TRACE_SYSTEM_STRING,			\
		.eval_string = "sizeof(" #a ")",		\
		.eval_value = sizeof(a)				\
	};							\
	static struct trace_eval_map __used			\
	__attribute__((section("_ftrace_eval_map")))		\
	*TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a

/*
 * DECLARE_EVENT_CLASS can be used to add generic function handlers
 * for events. That is, if all events have the same parameters and
 * just have distinct tracepoints. Each tracepoint can then be defined
 * with DEFINE_EVENT, which will map the DECLARE_EVENT_CLASS to the
 * tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __field_struct
#define __field_struct(type, item)	type	item;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
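/*
 * For illustration only (the event and fields below are made up for this
 * sketch and are reused in the illustrative comments later in this file):
 * given a hypothetical tracepoint such as
 *
 *	TRACE_EVENT(sample_event,
 *		TP_PROTO(int cpu, const char *name),
 *		TP_ARGS(cpu, name),
 *		TP_STRUCT__entry(
 *			__field(int, cpu)
 *			__string(name, name)
 *		),
 *		...);
 *
 * the DECLARE_EVENT_CLASS override below produces roughly:
 *
 *	struct trace_event_raw_sample_event {
 *		struct trace_entry	ent;
 *		int			cpu;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 */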
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct trace_event_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct trace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct trace_event_call __used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct,	\
		assign, print, reg, unreg)			\
	TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)		\
	__TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)	\
	__TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct trace_event_data_offsets_<call> {
 *	u32 <item1>;
 *	u32 <item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro creates each u32 <item>; this records
 * the offset of each array's data from the beginning of the event.
 * The size of the array is also encoded, in the upper 16 bits of <item>.
 */

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)

#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct trace_event_data_offsets_##call {			\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
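/*
 * For illustration, continuing the hypothetical sample_event sketched in
 * stage 1: only dynamic entries survive this stage, so the generated
 * structure is roughly
 *
 *	struct trace_event_data_offsets_sample_event {
 *		u32 name;
 *	};
 *
 * __field(int, cpu) contributes nothing here; only __dynamic_array(),
 * __string() and __bitmask() reserve a u32 slot.
 */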
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct trace_event_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field)						\
	({								\
		void *__bitmask = __get_dynamic_array(field);		\
		unsigned int __bitmask_size;				\
		__bitmask_size = __get_dynamic_array_len(field);	\
		trace_print_bitmask_seq(p, __bitmask, __bitmask_size);	\
	})

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		trace_print_flags_seq(p, delim, flag, __flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		trace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_flags_u64
#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_flags_u64(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags_u64 __flags[] =	\
			{ flag_array, { -1, NULL } };			\
		trace_print_flags_seq_u64(p, delim, flag, __flags);	\
	})

#define __print_symbolic_u64(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags_u64 symbols[] =	\
			{ symbol_array, { -1, NULL } };			\
		trace_print_symbols_seq_u64(p, value, symbols);		\
	})
#else
#define __print_flags_u64(flag, delim, flag_array...)			\
	__print_flags(flag, delim, flag_array)

#define __print_symbolic_u64(value, symbol_array...)			\
	__print_symbolic(value, symbol_array)
#endif
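/*
 * Worked example of the __data_loc packing used by __get_dynamic_array()
 * and __get_dynamic_array_len() above (values made up): if
 * __entry->__data_loc_name == 0x0005001c, the "name" data starts 0x1c
 * bytes from the beginning of the entry (low 16 bits) and is 5 bytes
 * long, including the terminating NUL (upper 16 bits).
 */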
#undef __print_hex
#define __print_hex(buf, buf_len)		\
	trace_print_hex_seq(p, buf, buf_len, false)

#undef __print_hex_str
#define __print_hex_str(buf, buf_len)		\
	trace_print_hex_seq(p, buf, buf_len, true)

#undef __print_array
#define __print_array(array, count, el_size)				\
	({								\
		BUILD_BUG_ON(el_size != 1 && el_size != 2 &&		\
			     el_size != 4 && el_size != 8);		\
		trace_print_array_seq(p, array, count, el_size);	\
	})

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
			struct trace_event *trace_event)		\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
	struct trace_event_raw_##call *field;				\
	int ret;							\
									\
	field = (typeof(field))iter->ent;				\
									\
	ret = trace_raw_output_prep(iter, trace_event);			\
	if (ret != TRACE_TYPE_HANDLED)					\
		return ret;						\
									\
	trace_seq_printf(s, print);					\
									\
	return trace_handle_return(s);					\
}									\
static struct trace_event_functions trace_event_type_funcs_##call = {	\
	.trace			= trace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
			struct trace_event *event)			\
{									\
	struct trace_event_raw_##template *field;			\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	return trace_output_call(iter, #call, print);			\
}									\
static struct trace_event_functions trace_event_type_funcs_##call = {	\
	.trace			= trace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)			\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 0, filter_type);			\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __field_struct
#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	do {								\
		char *type_str = #type"["__stringify(len)"]";		\
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
		ret = trace_define_field(event_call, type_str, #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
		if (ret)						\
			return ret;					\
	} while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_define_field(event_call, "__data_loc " #type "[]",	\
				 #item,					\
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item),	\
				 is_signed_type(type), FILTER_OTHER);
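/*
 * For illustration, with the hypothetical sample_event from stage 1,
 * __field(int, cpu) now expands to roughly:
 *
 *	ret = trace_define_field(event_call, "int", "cpu",
 *				 offsetof(typeof(field), cpu),
 *				 sizeof(field.cpu),
 *				 is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 *
 * which registers the field with the event filtering code.
 */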
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace __init						\
trace_event_define_fields_##call(struct trace_event_call *event_call)	\
{									\
	struct trace_event_raw_##call field;				\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__item_length = (len) * sizeof(type);				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= __item_length << 16;			\
	__data_size += __item_length;

#undef __string
#define __string(item, src) __dynamic_array(char, item,			\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * nr_bits bits.
 */
#define __bitmask_size_in_bytes_raw(nr_bits)	\
	(((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)			\
	((__bitmask_size_in_bytes_raw(nr_bits) +		\
	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * nr_bits bits, padded out to the nearest long. This is what is
 * saved in the buffer, just to be consistent. E.g. nr_bits == 36
 * needs 5 bytes raw, which a 64-bit kernel pads to one long (8 bytes).
 */
#define __bitmask_size_in_bytes(nr_bits)				\
	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int trace_event_get_offsets_##call(		\
	struct trace_event_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	int __maybe_unused __item_length;				\
	struct trace_event_raw_##call __maybe_unused *entry;		\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
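/*
 * For illustration, trace_event_get_offsets_sample_event() for the
 * hypothetical event above boils down to:
 *
 *	__item_length = (strlen(name ? (const char *)name : "(null)") + 1)
 *			* sizeof(char);
 *	__data_offsets->name = __data_size +
 *			       offsetof(typeof(*entry), __data);
 *	__data_offsets->name |= __item_length << 16;
 *	__data_size += __item_length;
 *	return __data_size;
 *
 * i.e. it computes the packed offset/length word for each dynamic entry
 * and the total number of dynamic bytes to reserve.
 */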
549 * 550 * Override the macros in <trace/trace_events.h> to include the following: 551 * 552 * For those macros defined with TRACE_EVENT: 553 * 554 * static struct trace_event_call event_<call>; 555 * 556 * static void trace_event_raw_event_<call>(void *__data, proto) 557 * { 558 * struct trace_event_file *trace_file = __data; 559 * struct trace_event_call *event_call = trace_file->event_call; 560 * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets; 561 * unsigned long eflags = trace_file->flags; 562 * enum event_trigger_type __tt = ETT_NONE; 563 * struct ring_buffer_event *event; 564 * struct trace_event_raw_<call> *entry; <-- defined in stage 1 565 * struct ring_buffer *buffer; 566 * unsigned long irq_flags; 567 * int __data_size; 568 * int pc; 569 * 570 * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { 571 * if (eflags & EVENT_FILE_FL_TRIGGER_MODE) 572 * event_triggers_call(trace_file, NULL); 573 * if (eflags & EVENT_FILE_FL_SOFT_DISABLED) 574 * return; 575 * } 576 * 577 * local_save_flags(irq_flags); 578 * pc = preempt_count(); 579 * 580 * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args); 581 * 582 * event = trace_event_buffer_lock_reserve(&buffer, trace_file, 583 * event_<call>->event.type, 584 * sizeof(*entry) + __data_size, 585 * irq_flags, pc); 586 * if (!event) 587 * return; 588 * entry = ring_buffer_event_data(event); 589 * 590 * { <assign>; } <-- Here we assign the entries by the __field and 591 * __array macros. 592 * 593 * if (eflags & EVENT_FILE_FL_TRIGGER_COND) 594 * __tt = event_triggers_call(trace_file, entry); 595 * 596 * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, 597 * &trace_file->flags)) 598 * ring_buffer_discard_commit(buffer, event); 599 * else if (!filter_check_discard(trace_file, entry, buffer, event)) 600 * trace_buffer_unlock_commit(buffer, event, irq_flags, pc); 601 * 602 * if (__tt) 603 * event_triggers_post_call(trace_file, __tt); 604 * } 605 * 606 * static struct trace_event ftrace_event_type_<call> = { 607 * .trace = trace_raw_output_<call>, <-- stage 2 608 * }; 609 * 610 * static char print_fmt_<call>[] = <TP_printk>; 611 * 612 * static struct trace_event_class __used event_class_<template> = { 613 * .system = "<system>", 614 * .define_fields = trace_event_define_fields_<call>, 615 * .fields = LIST_HEAD_INIT(event_class_##call.fields), 616 * .raw_init = trace_event_raw_init, 617 * .probe = trace_event_raw_event_##call, 618 * .reg = trace_event_reg, 619 * }; 620 * 621 * static struct trace_event_call event_<call> = { 622 * .class = event_class_<template>, 623 * { 624 * .tp = &__tracepoint_<call>, 625 * }, 626 * .event = &ftrace_event_type_<call>, 627 * .print_fmt = print_fmt_<call>, 628 * .flags = TRACE_EVENT_FL_TRACEPOINT, 629 * }; 630 * // its only safe to use pointers when doing linker tricks to 631 * // create an array. 
#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)			\
	static notrace void				\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)				\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)			\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)					\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)			\
	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
trace_event_raw_event_##call(void *__data, proto)			\
{									\
	struct trace_event_file *trace_file = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_buffer fbuffer;				\
	struct trace_event_raw_##call *entry;				\
	int __data_size;						\
									\
	if (trace_trigger_soft_disabled(trace_file))			\
		return;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
	entry = trace_event_buffer_reserve(&fbuffer, trace_file,	\
				 sizeof(*entry) + __data_size);		\
									\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	trace_event_buffer_commit(&fbuffer);				\
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(trace_event_raw_event_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __print_hex_str
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef __print_array

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static char print_fmt_##call[] = print;					\
static struct trace_event_class __used __refdata event_class_##call = { \
	.system			= TRACE_SYSTEM_STRING,			\
	.define_fields		= trace_event_define_fields_##call,	\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,		\
	.probe			= trace_event_raw_event_##call,		\
	.reg			= trace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct trace_event_call __used event_##call = {			\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &trace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct trace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static char print_fmt_##call[] = print;					\
									\
static struct trace_event_call __used event_##call = {			\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &trace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct trace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)