/*
 * trace_events_hist - trace event hist triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/rculist.h>
#include <linux/tracefs.h>

#include "tracing_map.h"
#include "trace.h"

/* Event system name under which all synthetic events are registered */
#define SYNTH_SYSTEM		"synthetic"
/* Max number of fields a synthetic event definition may carry */
#define SYNTH_FIELDS_MAX	16

#define STR_VAR_LEN_MAX		32 /* must be multiple of sizeof(u64) */

struct hist_field;

/*
 * Per-field value resolver: given a hist_field and the current event
 * record (plus its map element and ring buffer event), produce the u64
 * value the histogram code works with.
 */
typedef u64 (*hist_field_fn_t) (struct hist_field *field,
				struct tracing_map_elt *elt,
				struct ring_buffer_event *rbe,
				void *event);

/* An expression has at most two operands (binary ops) */
#define HIST_FIELD_OPERANDS_MAX	2
#define HIST_FIELDS_MAX		(TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
#define HIST_ACTIONS_MAX	8

/* Operator of an expression hist_field (HIST_FIELD_FL_EXPR) */
enum field_op_id {
	FIELD_OP_NONE,
	FIELD_OP_PLUS,
	FIELD_OP_MINUS,
	FIELD_OP_UNARY_MINUS,
};

/* Identity of a histogram variable: name + owning trigger + slot index */
struct hist_var {
	char				*name;
	struct hist_trigger_data	*hist_data;
	unsigned int			idx;
};

/*
 * One histogram field: a key, value, variable, variable reference or
 * expression node.  Expression nodes link their children via operands[].
 */
struct hist_field {
	struct ftrace_event_field	*field;
	unsigned long			flags;
	hist_field_fn_t			fn;	/* value resolver for this field */
	unsigned int			size;
	unsigned int			offset;
	unsigned int                    is_signed;
	const char			*type;
	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
	struct hist_trigger_data	*hist_data;
	struct hist_var			var;	/* valid if FL_VAR/FL_VAR_REF */
	enum field_op_id		operator;
	char				*system;
	char				*event_name;
	char				*name;
	unsigned int			var_idx;
	unsigned int			var_ref_idx;
	bool                            read_once;
};

/* Resolver for fields with no backing value (e.g. stacktrace keys) */
static u64 hist_field_none(struct hist_field *field,
			   struct tracing_map_elt *elt,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	return 0;
}

/* Hitcount pseudo-field: each event contributes exactly 1 */
static u64 hist_field_counter(struct hist_field *field,
			      struct tracing_map_elt *elt,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	return 1;
}

/* Fixed-size string field: value is the address of the in-record chars */
static u64 hist_field_string(struct hist_field *hist_field,
			     struct tracing_map_elt *elt,
			     struct ring_buffer_event *rbe,
			     void *event)
{
	char *addr = (char *)(event + hist_field->field->offset);

	return (u64)(unsigned long)addr;
}

/*
 * __data_loc (dynamic) string field: the u32 item packs the string's
 * offset within the record in its low 16 bits.
 */
static u64 hist_field_dynstring(struct hist_field *hist_field,
				struct tracing_map_elt *elt,
				struct ring_buffer_event *rbe,
				void *event)
{
	u32 str_item = *(u32 *)(event + hist_field->field->offset);
	int str_loc = str_item & 0xffff;
	char *addr = (char *)(event + str_loc);

	return (u64)(unsigned long)addr;
}

/* Pointer-to-string field: dereference once to get the string address */
static u64 hist_field_pstring(struct hist_field *hist_field,
			      struct tracing_map_elt *elt,
			      struct ring_buffer_event *rbe,
			      void *event)
{
	char **addr = (char **)(event + hist_field->field->offset);

	return (u64)(unsigned long)*addr;
}

/*
 * .log2 modifier: bucket the operand's value by power of two.
 * roundup_pow_of_two() makes this ceil(log2(val)).
 */
static u64 hist_field_log2(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	u64 val = operand->fn(operand, elt, rbe, event);

	return (u64) ilog2(roundup_pow_of_two(val));
}

/* Expression node: operand0 + operand1 */
static u64 hist_field_plus(struct hist_field *hist_field,
			   struct tracing_map_elt *elt,
			   struct ring_buffer_event *rbe,
			   void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = operand1->fn(operand1, elt, rbe, event);
	u64 val2 = operand2->fn(operand2, elt, rbe, event);

	return val1 + val2;
}

/* Expression node: operand0 - operand1 */
static u64 hist_field_minus(struct hist_field *hist_field,
			    struct tracing_map_elt *elt,
			    struct ring_buffer_event *rbe,
			    void *event)
{
	struct hist_field *operand1 = hist_field->operands[0];
	struct hist_field *operand2 = hist_field->operands[1];

	u64 val1 = operand1->fn(operand1, elt, rbe, event);
	u64 val2 = operand2->fn(operand2, elt, rbe, event);

	return val1 - val2;
}

/* Expression node: arithmetic negation of the single operand */
static u64 hist_field_unary_minus(struct hist_field *hist_field,
				  struct tracing_map_elt *elt,
				  struct ring_buffer_event *rbe,
				  void *event)
{
	struct hist_field *operand = hist_field->operands[0];

	s64 sval = (s64)operand->fn(operand, elt, rbe, event);
	u64 val = (u64)-sval;

	return val;
}

/*
 * Generate a typed record-field resolver: read a <type> at the field's
 * offset and widen it to u64.
 *
 * NOTE(review): the (unsigned long) intermediate cast would truncate
 * 64-bit field values on 32-bit architectures — confirm against the
 * upstream definition before relying on s64/u64 fields there.
 */
#define DEFINE_HIST_FIELD_FN(type)					\
	static u64 hist_field_##type(struct hist_field *hist_field,	\
				     struct tracing_map_elt *elt,	\
				     struct ring_buffer_event *rbe,	\
				     void *event)			\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)(unsigned long)*addr;				\
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);

/* Iterate all fields; vals occupy [0, n_vals), keys [n_vals, n_fields) */
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5

/* The hitcount val is always field slot 0 */
#define HITCOUNT_IDX 0
#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)

214 enum hist_field_flags { 215 HIST_FIELD_FL_HITCOUNT = 1 << 0, 216 HIST_FIELD_FL_KEY = 1 << 1, 217 HIST_FIELD_FL_STRING = 1 << 2, 218 HIST_FIELD_FL_HEX = 1 << 3, 219 HIST_FIELD_FL_SYM = 1 << 4, 220 HIST_FIELD_FL_SYM_OFFSET = 1 << 5, 221 HIST_FIELD_FL_EXECNAME = 1 << 6, 222 HIST_FIELD_FL_SYSCALL = 1 << 7, 223 HIST_FIELD_FL_STACKTRACE = 1 << 8, 224 HIST_FIELD_FL_LOG2 = 1 << 9, 225 HIST_FIELD_FL_TIMESTAMP = 1 << 10, 226 HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11, 227 HIST_FIELD_FL_VAR = 1 << 12, 228 HIST_FIELD_FL_EXPR = 1 << 13, 229 HIST_FIELD_FL_VAR_REF = 1 << 14, 230 HIST_FIELD_FL_CPU = 1 << 15, 231 HIST_FIELD_FL_ALIAS = 1 << 16, 232 }; 233 234 struct var_defs { 235 unsigned int n_vars; 236 char *name[TRACING_MAP_VARS_MAX]; 237 char *expr[TRACING_MAP_VARS_MAX]; 238 }; 239 240 struct hist_trigger_attrs { 241 char *keys_str; 242 char *vals_str; 243 char *sort_key_str; 244 char *name; 245 char *clock; 246 bool pause; 247 bool cont; 248 bool clear; 249 bool ts_in_usecs; 250 unsigned int map_bits; 251 252 char *assignment_str[TRACING_MAP_VARS_MAX]; 253 unsigned int n_assignments; 254 255 char *action_str[HIST_ACTIONS_MAX]; 256 unsigned int n_actions; 257 258 struct var_defs var_defs; 259 }; 260 261 struct field_var { 262 struct hist_field *var; 263 struct hist_field *val; 264 }; 265 266 struct field_var_hist { 267 struct hist_trigger_data *hist_data; 268 char *cmd; 269 }; 270 271 struct hist_trigger_data { 272 struct hist_field *fields[HIST_FIELDS_MAX]; 273 unsigned int n_vals; 274 unsigned int n_keys; 275 unsigned int n_fields; 276 unsigned int n_vars; 277 unsigned int key_size; 278 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX]; 279 unsigned int n_sort_keys; 280 struct trace_event_file *event_file; 281 struct hist_trigger_attrs *attrs; 282 struct tracing_map *map; 283 bool enable_timestamps; 284 bool remove; 285 struct hist_field *var_refs[TRACING_MAP_VARS_MAX]; 286 unsigned int n_var_refs; 287 288 struct action_data *actions[HIST_ACTIONS_MAX]; 289 
unsigned int n_actions; 290 291 struct hist_field *synth_var_refs[SYNTH_FIELDS_MAX]; 292 unsigned int n_synth_var_refs; 293 struct field_var *field_vars[SYNTH_FIELDS_MAX]; 294 unsigned int n_field_vars; 295 unsigned int n_field_var_str; 296 struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX]; 297 unsigned int n_field_var_hists; 298 299 struct field_var *max_vars[SYNTH_FIELDS_MAX]; 300 unsigned int n_max_vars; 301 unsigned int n_max_var_str; 302 }; 303 304 struct synth_field { 305 char *type; 306 char *name; 307 size_t size; 308 bool is_signed; 309 bool is_string; 310 }; 311 312 struct synth_event { 313 struct list_head list; 314 int ref; 315 char *name; 316 struct synth_field **fields; 317 unsigned int n_fields; 318 unsigned int n_u64; 319 struct trace_event_class class; 320 struct trace_event_call call; 321 struct tracepoint *tp; 322 }; 323 324 struct action_data; 325 326 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data, 327 struct tracing_map_elt *elt, void *rec, 328 struct ring_buffer_event *rbe, 329 struct action_data *data, u64 *var_ref_vals); 330 331 struct action_data { 332 action_fn_t fn; 333 unsigned int n_params; 334 char *params[SYNTH_FIELDS_MAX]; 335 336 union { 337 struct { 338 unsigned int var_ref_idx; 339 char *match_event; 340 char *match_event_system; 341 char *synth_event_name; 342 struct synth_event *synth_event; 343 } onmatch; 344 345 struct { 346 char *var_str; 347 char *fn_name; 348 unsigned int max_var_ref_idx; 349 struct hist_field *max_var; 350 struct hist_field *var; 351 } onmax; 352 }; 353 }; 354 355 356 static char last_hist_cmd[MAX_FILTER_STR_VAL]; 357 static char hist_err_str[MAX_FILTER_STR_VAL]; 358 359 static void last_cmd_set(char *str) 360 { 361 if (!str) 362 return; 363 364 strncpy(last_hist_cmd, str, MAX_FILTER_STR_VAL - 1); 365 } 366 367 static void hist_err(char *str, char *var) 368 { 369 int maxlen = MAX_FILTER_STR_VAL - 1; 370 371 if (!str) 372 return; 373 374 if (strlen(hist_err_str)) 375 return; 376 377 
if (!var) 378 var = ""; 379 380 if (strlen(hist_err_str) + strlen(str) + strlen(var) > maxlen) 381 return; 382 383 strcat(hist_err_str, str); 384 strcat(hist_err_str, var); 385 } 386 387 static void hist_err_event(char *str, char *system, char *event, char *var) 388 { 389 char err[MAX_FILTER_STR_VAL]; 390 391 if (system && var) 392 snprintf(err, MAX_FILTER_STR_VAL, "%s.%s.%s", system, event, var); 393 else if (system) 394 snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event); 395 else 396 strncpy(err, var, MAX_FILTER_STR_VAL); 397 398 hist_err(str, err); 399 } 400 401 static void hist_err_clear(void) 402 { 403 hist_err_str[0] = '\0'; 404 } 405 406 static bool have_hist_err(void) 407 { 408 if (strlen(hist_err_str)) 409 return true; 410 411 return false; 412 } 413 414 static LIST_HEAD(synth_event_list); 415 static DEFINE_MUTEX(synth_event_mutex); 416 417 struct synth_trace_event { 418 struct trace_entry ent; 419 u64 fields[]; 420 }; 421 422 static int synth_event_define_fields(struct trace_event_call *call) 423 { 424 struct synth_trace_event trace; 425 int offset = offsetof(typeof(trace), fields); 426 struct synth_event *event = call->data; 427 unsigned int i, size, n_u64; 428 char *name, *type; 429 bool is_signed; 430 int ret = 0; 431 432 for (i = 0, n_u64 = 0; i < event->n_fields; i++) { 433 size = event->fields[i]->size; 434 is_signed = event->fields[i]->is_signed; 435 type = event->fields[i]->type; 436 name = event->fields[i]->name; 437 ret = trace_define_field(call, type, name, offset, size, 438 is_signed, FILTER_OTHER); 439 if (ret) 440 break; 441 442 if (event->fields[i]->is_string) { 443 offset += STR_VAR_LEN_MAX; 444 n_u64 += STR_VAR_LEN_MAX / sizeof(u64); 445 } else { 446 offset += sizeof(u64); 447 n_u64++; 448 } 449 } 450 451 event->n_u64 = n_u64; 452 453 return ret; 454 } 455 456 static bool synth_field_signed(char *type) 457 { 458 if (strncmp(type, "u", 1) == 0) 459 return false; 460 461 return true; 462 } 463 464 static int 
synth_field_is_string(char *type) 465 { 466 if (strstr(type, "char[") != NULL) 467 return true; 468 469 return false; 470 } 471 472 static int synth_field_string_size(char *type) 473 { 474 char buf[4], *end, *start; 475 unsigned int len; 476 int size, err; 477 478 start = strstr(type, "char["); 479 if (start == NULL) 480 return -EINVAL; 481 start += strlen("char["); 482 483 end = strchr(type, ']'); 484 if (!end || end < start) 485 return -EINVAL; 486 487 len = end - start; 488 if (len > 3) 489 return -EINVAL; 490 491 strncpy(buf, start, len); 492 buf[len] = '\0'; 493 494 err = kstrtouint(buf, 0, &size); 495 if (err) 496 return err; 497 498 if (size > STR_VAR_LEN_MAX) 499 return -EINVAL; 500 501 return size; 502 } 503 504 static int synth_field_size(char *type) 505 { 506 int size = 0; 507 508 if (strcmp(type, "s64") == 0) 509 size = sizeof(s64); 510 else if (strcmp(type, "u64") == 0) 511 size = sizeof(u64); 512 else if (strcmp(type, "s32") == 0) 513 size = sizeof(s32); 514 else if (strcmp(type, "u32") == 0) 515 size = sizeof(u32); 516 else if (strcmp(type, "s16") == 0) 517 size = sizeof(s16); 518 else if (strcmp(type, "u16") == 0) 519 size = sizeof(u16); 520 else if (strcmp(type, "s8") == 0) 521 size = sizeof(s8); 522 else if (strcmp(type, "u8") == 0) 523 size = sizeof(u8); 524 else if (strcmp(type, "char") == 0) 525 size = sizeof(char); 526 else if (strcmp(type, "unsigned char") == 0) 527 size = sizeof(unsigned char); 528 else if (strcmp(type, "int") == 0) 529 size = sizeof(int); 530 else if (strcmp(type, "unsigned int") == 0) 531 size = sizeof(unsigned int); 532 else if (strcmp(type, "long") == 0) 533 size = sizeof(long); 534 else if (strcmp(type, "unsigned long") == 0) 535 size = sizeof(unsigned long); 536 else if (strcmp(type, "pid_t") == 0) 537 size = sizeof(pid_t); 538 else if (synth_field_is_string(type)) 539 size = synth_field_string_size(type); 540 541 return size; 542 } 543 544 static const char *synth_field_fmt(char *type) 545 { 546 const char *fmt = 
"%llu"; 547 548 if (strcmp(type, "s64") == 0) 549 fmt = "%lld"; 550 else if (strcmp(type, "u64") == 0) 551 fmt = "%llu"; 552 else if (strcmp(type, "s32") == 0) 553 fmt = "%d"; 554 else if (strcmp(type, "u32") == 0) 555 fmt = "%u"; 556 else if (strcmp(type, "s16") == 0) 557 fmt = "%d"; 558 else if (strcmp(type, "u16") == 0) 559 fmt = "%u"; 560 else if (strcmp(type, "s8") == 0) 561 fmt = "%d"; 562 else if (strcmp(type, "u8") == 0) 563 fmt = "%u"; 564 else if (strcmp(type, "char") == 0) 565 fmt = "%d"; 566 else if (strcmp(type, "unsigned char") == 0) 567 fmt = "%u"; 568 else if (strcmp(type, "int") == 0) 569 fmt = "%d"; 570 else if (strcmp(type, "unsigned int") == 0) 571 fmt = "%u"; 572 else if (strcmp(type, "long") == 0) 573 fmt = "%ld"; 574 else if (strcmp(type, "unsigned long") == 0) 575 fmt = "%lu"; 576 else if (strcmp(type, "pid_t") == 0) 577 fmt = "%d"; 578 else if (synth_field_is_string(type)) 579 fmt = "%s"; 580 581 return fmt; 582 } 583 584 static enum print_line_t print_synth_event(struct trace_iterator *iter, 585 int flags, 586 struct trace_event *event) 587 { 588 struct trace_array *tr = iter->tr; 589 struct trace_seq *s = &iter->seq; 590 struct synth_trace_event *entry; 591 struct synth_event *se; 592 unsigned int i, n_u64; 593 char print_fmt[32]; 594 const char *fmt; 595 596 entry = (struct synth_trace_event *)iter->ent; 597 se = container_of(event, struct synth_event, call.event); 598 599 trace_seq_printf(s, "%s: ", se->name); 600 601 for (i = 0, n_u64 = 0; i < se->n_fields; i++) { 602 if (trace_seq_has_overflowed(s)) 603 goto end; 604 605 fmt = synth_field_fmt(se->fields[i]->type); 606 607 /* parameter types */ 608 if (tr->trace_flags & TRACE_ITER_VERBOSE) 609 trace_seq_printf(s, "%s ", fmt); 610 611 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt); 612 613 /* parameter values */ 614 if (se->fields[i]->is_string) { 615 trace_seq_printf(s, print_fmt, se->fields[i]->name, 616 (char *)&entry->fields[n_u64], 617 i == se->n_fields - 1 ? 
"" : " "); 618 n_u64 += STR_VAR_LEN_MAX / sizeof(u64); 619 } else { 620 trace_seq_printf(s, print_fmt, se->fields[i]->name, 621 entry->fields[n_u64], 622 i == se->n_fields - 1 ? "" : " "); 623 n_u64++; 624 } 625 } 626 end: 627 trace_seq_putc(s, '\n'); 628 629 return trace_handle_return(s); 630 } 631 632 static struct trace_event_functions synth_event_funcs = { 633 .trace = print_synth_event 634 }; 635 636 static notrace void trace_event_raw_event_synth(void *__data, 637 u64 *var_ref_vals, 638 unsigned int var_ref_idx) 639 { 640 struct trace_event_file *trace_file = __data; 641 struct synth_trace_event *entry; 642 struct trace_event_buffer fbuffer; 643 struct ring_buffer *buffer; 644 struct synth_event *event; 645 unsigned int i, n_u64; 646 int fields_size = 0; 647 648 event = trace_file->event_call->data; 649 650 if (trace_trigger_soft_disabled(trace_file)) 651 return; 652 653 fields_size = event->n_u64 * sizeof(u64); 654 655 /* 656 * Avoid ring buffer recursion detection, as this event 657 * is being performed within another event. 
658 */ 659 buffer = trace_file->tr->trace_buffer.buffer; 660 ring_buffer_nest_start(buffer); 661 662 entry = trace_event_buffer_reserve(&fbuffer, trace_file, 663 sizeof(*entry) + fields_size); 664 if (!entry) 665 goto out; 666 667 for (i = 0, n_u64 = 0; i < event->n_fields; i++) { 668 if (event->fields[i]->is_string) { 669 char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i]; 670 char *str_field = (char *)&entry->fields[n_u64]; 671 672 strscpy(str_field, str_val, STR_VAR_LEN_MAX); 673 n_u64 += STR_VAR_LEN_MAX / sizeof(u64); 674 } else { 675 entry->fields[n_u64] = var_ref_vals[var_ref_idx + i]; 676 n_u64++; 677 } 678 } 679 680 trace_event_buffer_commit(&fbuffer); 681 out: 682 ring_buffer_nest_end(buffer); 683 } 684 685 static void free_synth_event_print_fmt(struct trace_event_call *call) 686 { 687 if (call) { 688 kfree(call->print_fmt); 689 call->print_fmt = NULL; 690 } 691 } 692 693 static int __set_synth_event_print_fmt(struct synth_event *event, 694 char *buf, int len) 695 { 696 const char *fmt; 697 int pos = 0; 698 int i; 699 700 /* When len=0, we just calculate the needed length */ 701 #define LEN_OR_ZERO (len ? len - pos : 0) 702 703 pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); 704 for (i = 0; i < event->n_fields; i++) { 705 fmt = synth_field_fmt(event->fields[i]->type); 706 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s", 707 event->fields[i]->name, fmt, 708 i == event->n_fields - 1 ? 
"" : ", "); 709 } 710 pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); 711 712 for (i = 0; i < event->n_fields; i++) { 713 pos += snprintf(buf + pos, LEN_OR_ZERO, 714 ", REC->%s", event->fields[i]->name); 715 } 716 717 #undef LEN_OR_ZERO 718 719 /* return the length of print_fmt */ 720 return pos; 721 } 722 723 static int set_synth_event_print_fmt(struct trace_event_call *call) 724 { 725 struct synth_event *event = call->data; 726 char *print_fmt; 727 int len; 728 729 /* First: called with 0 length to calculate the needed length */ 730 len = __set_synth_event_print_fmt(event, NULL, 0); 731 732 print_fmt = kmalloc(len + 1, GFP_KERNEL); 733 if (!print_fmt) 734 return -ENOMEM; 735 736 /* Second: actually write the @print_fmt */ 737 __set_synth_event_print_fmt(event, print_fmt, len + 1); 738 call->print_fmt = print_fmt; 739 740 return 0; 741 } 742 743 static void free_synth_field(struct synth_field *field) 744 { 745 kfree(field->type); 746 kfree(field->name); 747 kfree(field); 748 } 749 750 static struct synth_field *parse_synth_field(char *field_type, 751 char *field_name) 752 { 753 struct synth_field *field; 754 int len, ret = 0; 755 char *array; 756 757 if (field_type[0] == ';') 758 field_type++; 759 760 len = strlen(field_name); 761 if (field_name[len - 1] == ';') 762 field_name[len - 1] = '\0'; 763 764 field = kzalloc(sizeof(*field), GFP_KERNEL); 765 if (!field) 766 return ERR_PTR(-ENOMEM); 767 768 len = strlen(field_type) + 1; 769 array = strchr(field_name, '['); 770 if (array) 771 len += strlen(array); 772 field->type = kzalloc(len, GFP_KERNEL); 773 if (!field->type) { 774 ret = -ENOMEM; 775 goto free; 776 } 777 strcat(field->type, field_type); 778 if (array) { 779 strcat(field->type, array); 780 *array = '\0'; 781 } 782 783 field->size = synth_field_size(field->type); 784 if (!field->size) { 785 ret = -EINVAL; 786 goto free; 787 } 788 789 if (synth_field_is_string(field->type)) 790 field->is_string = true; 791 792 field->is_signed = 
synth_field_signed(field->type); 793 794 field->name = kstrdup(field_name, GFP_KERNEL); 795 if (!field->name) { 796 ret = -ENOMEM; 797 goto free; 798 } 799 out: 800 return field; 801 free: 802 free_synth_field(field); 803 field = ERR_PTR(ret); 804 goto out; 805 } 806 807 static void free_synth_tracepoint(struct tracepoint *tp) 808 { 809 if (!tp) 810 return; 811 812 kfree(tp->name); 813 kfree(tp); 814 } 815 816 static struct tracepoint *alloc_synth_tracepoint(char *name) 817 { 818 struct tracepoint *tp; 819 820 tp = kzalloc(sizeof(*tp), GFP_KERNEL); 821 if (!tp) 822 return ERR_PTR(-ENOMEM); 823 824 tp->name = kstrdup(name, GFP_KERNEL); 825 if (!tp->name) { 826 kfree(tp); 827 return ERR_PTR(-ENOMEM); 828 } 829 830 return tp; 831 } 832 833 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals, 834 unsigned int var_ref_idx); 835 836 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals, 837 unsigned int var_ref_idx) 838 { 839 struct tracepoint *tp = event->tp; 840 841 if (unlikely(atomic_read(&tp->key.enabled) > 0)) { 842 struct tracepoint_func *probe_func_ptr; 843 synth_probe_func_t probe_func; 844 void *__data; 845 846 if (!(cpu_online(raw_smp_processor_id()))) 847 return; 848 849 probe_func_ptr = rcu_dereference_sched((tp)->funcs); 850 if (probe_func_ptr) { 851 do { 852 probe_func = probe_func_ptr->func; 853 __data = probe_func_ptr->data; 854 probe_func(__data, var_ref_vals, var_ref_idx); 855 } while ((++probe_func_ptr)->func); 856 } 857 } 858 } 859 860 static struct synth_event *find_synth_event(const char *name) 861 { 862 struct synth_event *event; 863 864 list_for_each_entry(event, &synth_event_list, list) { 865 if (strcmp(event->name, name) == 0) 866 return event; 867 } 868 869 return NULL; 870 } 871 872 static int register_synth_event(struct synth_event *event) 873 { 874 struct trace_event_call *call = &event->call; 875 int ret = 0; 876 877 event->call.class = &event->class; 878 event->class.system = kstrdup(SYNTH_SYSTEM, 
GFP_KERNEL); 879 if (!event->class.system) { 880 ret = -ENOMEM; 881 goto out; 882 } 883 884 event->tp = alloc_synth_tracepoint(event->name); 885 if (IS_ERR(event->tp)) { 886 ret = PTR_ERR(event->tp); 887 event->tp = NULL; 888 goto out; 889 } 890 891 INIT_LIST_HEAD(&call->class->fields); 892 call->event.funcs = &synth_event_funcs; 893 call->class->define_fields = synth_event_define_fields; 894 895 ret = register_trace_event(&call->event); 896 if (!ret) { 897 ret = -ENODEV; 898 goto out; 899 } 900 call->flags = TRACE_EVENT_FL_TRACEPOINT; 901 call->class->reg = trace_event_reg; 902 call->class->probe = trace_event_raw_event_synth; 903 call->data = event; 904 call->tp = event->tp; 905 906 ret = trace_add_event_call(call); 907 if (ret) { 908 pr_warn("Failed to register synthetic event: %s\n", 909 trace_event_name(call)); 910 goto err; 911 } 912 913 ret = set_synth_event_print_fmt(call); 914 if (ret < 0) { 915 trace_remove_event_call(call); 916 goto err; 917 } 918 out: 919 return ret; 920 err: 921 unregister_trace_event(&call->event); 922 goto out; 923 } 924 925 static int unregister_synth_event(struct synth_event *event) 926 { 927 struct trace_event_call *call = &event->call; 928 int ret; 929 930 ret = trace_remove_event_call(call); 931 932 return ret; 933 } 934 935 static void free_synth_event(struct synth_event *event) 936 { 937 unsigned int i; 938 939 if (!event) 940 return; 941 942 for (i = 0; i < event->n_fields; i++) 943 free_synth_field(event->fields[i]); 944 945 kfree(event->fields); 946 kfree(event->name); 947 kfree(event->class.system); 948 free_synth_tracepoint(event->tp); 949 free_synth_event_print_fmt(&event->call); 950 kfree(event); 951 } 952 953 static struct synth_event *alloc_synth_event(char *event_name, int n_fields, 954 struct synth_field **fields) 955 { 956 struct synth_event *event; 957 unsigned int i; 958 959 event = kzalloc(sizeof(*event), GFP_KERNEL); 960 if (!event) { 961 event = ERR_PTR(-ENOMEM); 962 goto out; 963 } 964 965 event->name = 
kstrdup(event_name, GFP_KERNEL); 966 if (!event->name) { 967 kfree(event); 968 event = ERR_PTR(-ENOMEM); 969 goto out; 970 } 971 972 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL); 973 if (!event->fields) { 974 free_synth_event(event); 975 event = ERR_PTR(-ENOMEM); 976 goto out; 977 } 978 979 for (i = 0; i < n_fields; i++) 980 event->fields[i] = fields[i]; 981 982 event->n_fields = n_fields; 983 out: 984 return event; 985 } 986 987 static void action_trace(struct hist_trigger_data *hist_data, 988 struct tracing_map_elt *elt, void *rec, 989 struct ring_buffer_event *rbe, 990 struct action_data *data, u64 *var_ref_vals) 991 { 992 struct synth_event *event = data->onmatch.synth_event; 993 994 trace_synth(event, var_ref_vals, data->onmatch.var_ref_idx); 995 } 996 997 struct hist_var_data { 998 struct list_head list; 999 struct hist_trigger_data *hist_data; 1000 }; 1001 1002 static void add_or_delete_synth_event(struct synth_event *event, int delete) 1003 { 1004 if (delete) 1005 free_synth_event(event); 1006 else { 1007 mutex_lock(&synth_event_mutex); 1008 if (!find_synth_event(event->name)) 1009 list_add(&event->list, &synth_event_list); 1010 else 1011 free_synth_event(event); 1012 mutex_unlock(&synth_event_mutex); 1013 } 1014 } 1015 1016 static int create_synth_event(int argc, char **argv) 1017 { 1018 struct synth_field *field, *fields[SYNTH_FIELDS_MAX]; 1019 struct synth_event *event = NULL; 1020 bool delete_event = false; 1021 int i, n_fields = 0, ret = 0; 1022 char *name; 1023 1024 mutex_lock(&synth_event_mutex); 1025 1026 /* 1027 * Argument syntax: 1028 * - Add synthetic event: <event_name> field[;field] ... 1029 * - Remove synthetic event: !<event_name> field[;field] ... 
1030 * where 'field' = type field_name 1031 */ 1032 if (argc < 1) { 1033 ret = -EINVAL; 1034 goto out; 1035 } 1036 1037 name = argv[0]; 1038 if (name[0] == '!') { 1039 delete_event = true; 1040 name++; 1041 } 1042 1043 event = find_synth_event(name); 1044 if (event) { 1045 if (delete_event) { 1046 if (event->ref) { 1047 event = NULL; 1048 ret = -EBUSY; 1049 goto out; 1050 } 1051 list_del(&event->list); 1052 goto out; 1053 } 1054 event = NULL; 1055 ret = -EEXIST; 1056 goto out; 1057 } else if (delete_event) 1058 goto out; 1059 1060 if (argc < 2) { 1061 ret = -EINVAL; 1062 goto out; 1063 } 1064 1065 for (i = 1; i < argc - 1; i++) { 1066 if (strcmp(argv[i], ";") == 0) 1067 continue; 1068 if (n_fields == SYNTH_FIELDS_MAX) { 1069 ret = -EINVAL; 1070 goto err; 1071 } 1072 1073 field = parse_synth_field(argv[i], argv[i + 1]); 1074 if (IS_ERR(field)) { 1075 ret = PTR_ERR(field); 1076 goto err; 1077 } 1078 fields[n_fields] = field; 1079 i++; n_fields++; 1080 } 1081 1082 if (i < argc) { 1083 ret = -EINVAL; 1084 goto err; 1085 } 1086 1087 event = alloc_synth_event(name, n_fields, fields); 1088 if (IS_ERR(event)) { 1089 ret = PTR_ERR(event); 1090 event = NULL; 1091 goto err; 1092 } 1093 out: 1094 mutex_unlock(&synth_event_mutex); 1095 1096 if (event) { 1097 if (delete_event) { 1098 ret = unregister_synth_event(event); 1099 add_or_delete_synth_event(event, !ret); 1100 } else { 1101 ret = register_synth_event(event); 1102 add_or_delete_synth_event(event, ret); 1103 } 1104 } 1105 1106 return ret; 1107 err: 1108 mutex_unlock(&synth_event_mutex); 1109 1110 for (i = 0; i < n_fields; i++) 1111 free_synth_field(fields[i]); 1112 free_synth_event(event); 1113 1114 return ret; 1115 } 1116 1117 static int release_all_synth_events(void) 1118 { 1119 struct list_head release_events; 1120 struct synth_event *event, *e; 1121 int ret = 0; 1122 1123 INIT_LIST_HEAD(&release_events); 1124 1125 mutex_lock(&synth_event_mutex); 1126 1127 list_for_each_entry(event, &synth_event_list, list) { 1128 if 
(event->ref) { 1129 mutex_unlock(&synth_event_mutex); 1130 return -EBUSY; 1131 } 1132 } 1133 1134 list_splice_init(&event->list, &release_events); 1135 1136 mutex_unlock(&synth_event_mutex); 1137 1138 list_for_each_entry_safe(event, e, &release_events, list) { 1139 list_del(&event->list); 1140 1141 ret = unregister_synth_event(event); 1142 add_or_delete_synth_event(event, !ret); 1143 } 1144 1145 return ret; 1146 } 1147 1148 1149 static void *synth_events_seq_start(struct seq_file *m, loff_t *pos) 1150 { 1151 mutex_lock(&synth_event_mutex); 1152 1153 return seq_list_start(&synth_event_list, *pos); 1154 } 1155 1156 static void *synth_events_seq_next(struct seq_file *m, void *v, loff_t *pos) 1157 { 1158 return seq_list_next(v, &synth_event_list, pos); 1159 } 1160 1161 static void synth_events_seq_stop(struct seq_file *m, void *v) 1162 { 1163 mutex_unlock(&synth_event_mutex); 1164 } 1165 1166 static int synth_events_seq_show(struct seq_file *m, void *v) 1167 { 1168 struct synth_field *field; 1169 struct synth_event *event = v; 1170 unsigned int i; 1171 1172 seq_printf(m, "%s\t", event->name); 1173 1174 for (i = 0; i < event->n_fields; i++) { 1175 field = event->fields[i]; 1176 1177 /* parameter values */ 1178 seq_printf(m, "%s %s%s", field->type, field->name, 1179 i == event->n_fields - 1 ? 
"" : "; "); 1180 } 1181 1182 seq_putc(m, '\n'); 1183 1184 return 0; 1185 } 1186 1187 static const struct seq_operations synth_events_seq_op = { 1188 .start = synth_events_seq_start, 1189 .next = synth_events_seq_next, 1190 .stop = synth_events_seq_stop, 1191 .show = synth_events_seq_show 1192 }; 1193 1194 static int synth_events_open(struct inode *inode, struct file *file) 1195 { 1196 int ret; 1197 1198 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 1199 ret = release_all_synth_events(); 1200 if (ret < 0) 1201 return ret; 1202 } 1203 1204 return seq_open(file, &synth_events_seq_op); 1205 } 1206 1207 static ssize_t synth_events_write(struct file *file, 1208 const char __user *buffer, 1209 size_t count, loff_t *ppos) 1210 { 1211 return trace_parse_run_command(file, buffer, count, ppos, 1212 create_synth_event); 1213 } 1214 1215 static const struct file_operations synth_events_fops = { 1216 .open = synth_events_open, 1217 .write = synth_events_write, 1218 .read = seq_read, 1219 .llseek = seq_lseek, 1220 .release = seq_release, 1221 }; 1222 1223 static u64 hist_field_timestamp(struct hist_field *hist_field, 1224 struct tracing_map_elt *elt, 1225 struct ring_buffer_event *rbe, 1226 void *event) 1227 { 1228 struct hist_trigger_data *hist_data = hist_field->hist_data; 1229 struct trace_array *tr = hist_data->event_file->tr; 1230 1231 u64 ts = ring_buffer_event_time_stamp(rbe); 1232 1233 if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr)) 1234 ts = ns2usecs(ts); 1235 1236 return ts; 1237 } 1238 1239 static u64 hist_field_cpu(struct hist_field *hist_field, 1240 struct tracing_map_elt *elt, 1241 struct ring_buffer_event *rbe, 1242 void *event) 1243 { 1244 int cpu = smp_processor_id(); 1245 1246 return cpu; 1247 } 1248 1249 static struct hist_field * 1250 check_field_for_var_ref(struct hist_field *hist_field, 1251 struct hist_trigger_data *var_data, 1252 unsigned int var_idx) 1253 { 1254 struct hist_field *found = NULL; 1255 1256 if (hist_field && 
hist_field->flags & HIST_FIELD_FL_VAR_REF) { 1257 if (hist_field->var.idx == var_idx && 1258 hist_field->var.hist_data == var_data) { 1259 found = hist_field; 1260 } 1261 } 1262 1263 return found; 1264 } 1265 1266 static struct hist_field * 1267 check_field_for_var_refs(struct hist_trigger_data *hist_data, 1268 struct hist_field *hist_field, 1269 struct hist_trigger_data *var_data, 1270 unsigned int var_idx, 1271 unsigned int level) 1272 { 1273 struct hist_field *found = NULL; 1274 unsigned int i; 1275 1276 if (level > 3) 1277 return found; 1278 1279 if (!hist_field) 1280 return found; 1281 1282 found = check_field_for_var_ref(hist_field, var_data, var_idx); 1283 if (found) 1284 return found; 1285 1286 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) { 1287 struct hist_field *operand; 1288 1289 operand = hist_field->operands[i]; 1290 found = check_field_for_var_refs(hist_data, operand, var_data, 1291 var_idx, level + 1); 1292 if (found) 1293 return found; 1294 } 1295 1296 return found; 1297 } 1298 1299 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data, 1300 struct hist_trigger_data *var_data, 1301 unsigned int var_idx) 1302 { 1303 struct hist_field *hist_field, *found = NULL; 1304 unsigned int i; 1305 1306 for_each_hist_field(i, hist_data) { 1307 hist_field = hist_data->fields[i]; 1308 found = check_field_for_var_refs(hist_data, hist_field, 1309 var_data, var_idx, 0); 1310 if (found) 1311 return found; 1312 } 1313 1314 for (i = 0; i < hist_data->n_synth_var_refs; i++) { 1315 hist_field = hist_data->synth_var_refs[i]; 1316 found = check_field_for_var_refs(hist_data, hist_field, 1317 var_data, var_idx, 0); 1318 if (found) 1319 return found; 1320 } 1321 1322 return found; 1323 } 1324 1325 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data, 1326 unsigned int var_idx) 1327 { 1328 struct trace_array *tr = hist_data->event_file->tr; 1329 struct hist_field *found = NULL; 1330 struct hist_var_data *var_data; 1331 1332 
list_for_each_entry(var_data, &tr->hist_vars, list) { 1333 if (var_data->hist_data == hist_data) 1334 continue; 1335 found = find_var_ref(var_data->hist_data, hist_data, var_idx); 1336 if (found) 1337 break; 1338 } 1339 1340 return found; 1341 } 1342 1343 static bool check_var_refs(struct hist_trigger_data *hist_data) 1344 { 1345 struct hist_field *field; 1346 bool found = false; 1347 int i; 1348 1349 for_each_hist_field(i, hist_data) { 1350 field = hist_data->fields[i]; 1351 if (field && field->flags & HIST_FIELD_FL_VAR) { 1352 if (find_any_var_ref(hist_data, field->var.idx)) { 1353 found = true; 1354 break; 1355 } 1356 } 1357 } 1358 1359 return found; 1360 } 1361 1362 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data) 1363 { 1364 struct trace_array *tr = hist_data->event_file->tr; 1365 struct hist_var_data *var_data, *found = NULL; 1366 1367 list_for_each_entry(var_data, &tr->hist_vars, list) { 1368 if (var_data->hist_data == hist_data) { 1369 found = var_data; 1370 break; 1371 } 1372 } 1373 1374 return found; 1375 } 1376 1377 static bool field_has_hist_vars(struct hist_field *hist_field, 1378 unsigned int level) 1379 { 1380 int i; 1381 1382 if (level > 3) 1383 return false; 1384 1385 if (!hist_field) 1386 return false; 1387 1388 if (hist_field->flags & HIST_FIELD_FL_VAR || 1389 hist_field->flags & HIST_FIELD_FL_VAR_REF) 1390 return true; 1391 1392 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) { 1393 struct hist_field *operand; 1394 1395 operand = hist_field->operands[i]; 1396 if (field_has_hist_vars(operand, level + 1)) 1397 return true; 1398 } 1399 1400 return false; 1401 } 1402 1403 static bool has_hist_vars(struct hist_trigger_data *hist_data) 1404 { 1405 struct hist_field *hist_field; 1406 int i; 1407 1408 for_each_hist_field(i, hist_data) { 1409 hist_field = hist_data->fields[i]; 1410 if (field_has_hist_vars(hist_field, 0)) 1411 return true; 1412 } 1413 1414 return false; 1415 } 1416 1417 static int save_hist_vars(struct 
hist_trigger_data *hist_data) 1418 { 1419 struct trace_array *tr = hist_data->event_file->tr; 1420 struct hist_var_data *var_data; 1421 1422 var_data = find_hist_vars(hist_data); 1423 if (var_data) 1424 return 0; 1425 1426 if (trace_array_get(tr) < 0) 1427 return -ENODEV; 1428 1429 var_data = kzalloc(sizeof(*var_data), GFP_KERNEL); 1430 if (!var_data) { 1431 trace_array_put(tr); 1432 return -ENOMEM; 1433 } 1434 1435 var_data->hist_data = hist_data; 1436 list_add(&var_data->list, &tr->hist_vars); 1437 1438 return 0; 1439 } 1440 1441 static void remove_hist_vars(struct hist_trigger_data *hist_data) 1442 { 1443 struct trace_array *tr = hist_data->event_file->tr; 1444 struct hist_var_data *var_data; 1445 1446 var_data = find_hist_vars(hist_data); 1447 if (!var_data) 1448 return; 1449 1450 if (WARN_ON(check_var_refs(hist_data))) 1451 return; 1452 1453 list_del(&var_data->list); 1454 1455 kfree(var_data); 1456 1457 trace_array_put(tr); 1458 } 1459 1460 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data, 1461 const char *var_name) 1462 { 1463 struct hist_field *hist_field, *found = NULL; 1464 int i; 1465 1466 for_each_hist_field(i, hist_data) { 1467 hist_field = hist_data->fields[i]; 1468 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR && 1469 strcmp(hist_field->var.name, var_name) == 0) { 1470 found = hist_field; 1471 break; 1472 } 1473 } 1474 1475 return found; 1476 } 1477 1478 static struct hist_field *find_var(struct hist_trigger_data *hist_data, 1479 struct trace_event_file *file, 1480 const char *var_name) 1481 { 1482 struct hist_trigger_data *test_data; 1483 struct event_trigger_data *test; 1484 struct hist_field *hist_field; 1485 1486 hist_field = find_var_field(hist_data, var_name); 1487 if (hist_field) 1488 return hist_field; 1489 1490 list_for_each_entry_rcu(test, &file->triggers, list) { 1491 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 1492 test_data = test->private_data; 1493 hist_field = find_var_field(test_data, 
var_name); 1494 if (hist_field) 1495 return hist_field; 1496 } 1497 } 1498 1499 return NULL; 1500 } 1501 1502 static struct trace_event_file *find_var_file(struct trace_array *tr, 1503 char *system, 1504 char *event_name, 1505 char *var_name) 1506 { 1507 struct hist_trigger_data *var_hist_data; 1508 struct hist_var_data *var_data; 1509 struct trace_event_file *file, *found = NULL; 1510 1511 if (system) 1512 return find_event_file(tr, system, event_name); 1513 1514 list_for_each_entry(var_data, &tr->hist_vars, list) { 1515 var_hist_data = var_data->hist_data; 1516 file = var_hist_data->event_file; 1517 if (file == found) 1518 continue; 1519 1520 if (find_var_field(var_hist_data, var_name)) { 1521 if (found) { 1522 hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system, event_name, var_name); 1523 return NULL; 1524 } 1525 1526 found = file; 1527 } 1528 } 1529 1530 return found; 1531 } 1532 1533 static struct hist_field *find_file_var(struct trace_event_file *file, 1534 const char *var_name) 1535 { 1536 struct hist_trigger_data *test_data; 1537 struct event_trigger_data *test; 1538 struct hist_field *hist_field; 1539 1540 list_for_each_entry_rcu(test, &file->triggers, list) { 1541 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 1542 test_data = test->private_data; 1543 hist_field = find_var_field(test_data, var_name); 1544 if (hist_field) 1545 return hist_field; 1546 } 1547 } 1548 1549 return NULL; 1550 } 1551 1552 static struct hist_field * 1553 find_match_var(struct hist_trigger_data *hist_data, char *var_name) 1554 { 1555 struct trace_array *tr = hist_data->event_file->tr; 1556 struct hist_field *hist_field, *found = NULL; 1557 struct trace_event_file *file; 1558 unsigned int i; 1559 1560 for (i = 0; i < hist_data->n_actions; i++) { 1561 struct action_data *data = hist_data->actions[i]; 1562 1563 if (data->fn == action_trace) { 1564 char *system = data->onmatch.match_event_system; 1565 char 
*event_name = data->onmatch.match_event; 1566 1567 file = find_var_file(tr, system, event_name, var_name); 1568 if (!file) 1569 continue; 1570 hist_field = find_file_var(file, var_name); 1571 if (hist_field) { 1572 if (found) { 1573 hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system, event_name, var_name); 1574 return ERR_PTR(-EINVAL); 1575 } 1576 1577 found = hist_field; 1578 } 1579 } 1580 } 1581 return found; 1582 } 1583 1584 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data, 1585 char *system, 1586 char *event_name, 1587 char *var_name) 1588 { 1589 struct trace_array *tr = hist_data->event_file->tr; 1590 struct hist_field *hist_field = NULL; 1591 struct trace_event_file *file; 1592 1593 if (!system || !event_name) { 1594 hist_field = find_match_var(hist_data, var_name); 1595 if (IS_ERR(hist_field)) 1596 return NULL; 1597 if (hist_field) 1598 return hist_field; 1599 } 1600 1601 file = find_var_file(tr, system, event_name, var_name); 1602 if (!file) 1603 return NULL; 1604 1605 hist_field = find_file_var(file, var_name); 1606 1607 return hist_field; 1608 } 1609 1610 struct hist_elt_data { 1611 char *comm; 1612 u64 *var_ref_vals; 1613 char *field_var_str[SYNTH_FIELDS_MAX]; 1614 }; 1615 1616 static u64 hist_field_var_ref(struct hist_field *hist_field, 1617 struct tracing_map_elt *elt, 1618 struct ring_buffer_event *rbe, 1619 void *event) 1620 { 1621 struct hist_elt_data *elt_data; 1622 u64 var_val = 0; 1623 1624 elt_data = elt->private_data; 1625 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx]; 1626 1627 return var_val; 1628 } 1629 1630 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key, 1631 u64 *var_ref_vals, bool self) 1632 { 1633 struct hist_trigger_data *var_data; 1634 struct tracing_map_elt *var_elt; 1635 struct hist_field *hist_field; 1636 unsigned int i, var_idx; 1637 bool resolved = true; 1638 u64 var_val = 0; 1639 1640 for (i = 0; 
i < hist_data->n_var_refs; i++) { 1641 hist_field = hist_data->var_refs[i]; 1642 var_idx = hist_field->var.idx; 1643 var_data = hist_field->var.hist_data; 1644 1645 if (var_data == NULL) { 1646 resolved = false; 1647 break; 1648 } 1649 1650 if ((self && var_data != hist_data) || 1651 (!self && var_data == hist_data)) 1652 continue; 1653 1654 var_elt = tracing_map_lookup(var_data->map, key); 1655 if (!var_elt) { 1656 resolved = false; 1657 break; 1658 } 1659 1660 if (!tracing_map_var_set(var_elt, var_idx)) { 1661 resolved = false; 1662 break; 1663 } 1664 1665 if (self || !hist_field->read_once) 1666 var_val = tracing_map_read_var(var_elt, var_idx); 1667 else 1668 var_val = tracing_map_read_var_once(var_elt, var_idx); 1669 1670 var_ref_vals[i] = var_val; 1671 } 1672 1673 return resolved; 1674 } 1675 1676 static const char *hist_field_name(struct hist_field *field, 1677 unsigned int level) 1678 { 1679 const char *field_name = ""; 1680 1681 if (level > 1) 1682 return field_name; 1683 1684 if (field->field) 1685 field_name = field->field->name; 1686 else if (field->flags & HIST_FIELD_FL_LOG2 || 1687 field->flags & HIST_FIELD_FL_ALIAS) 1688 field_name = hist_field_name(field->operands[0], ++level); 1689 else if (field->flags & HIST_FIELD_FL_CPU) 1690 field_name = "cpu"; 1691 else if (field->flags & HIST_FIELD_FL_EXPR || 1692 field->flags & HIST_FIELD_FL_VAR_REF) { 1693 if (field->system) { 1694 static char full_name[MAX_FILTER_STR_VAL]; 1695 1696 strcat(full_name, field->system); 1697 strcat(full_name, "."); 1698 strcat(full_name, field->event_name); 1699 strcat(full_name, "."); 1700 strcat(full_name, field->name); 1701 field_name = full_name; 1702 } else 1703 field_name = field->name; 1704 } else if (field->flags & HIST_FIELD_FL_TIMESTAMP) 1705 field_name = "common_timestamp"; 1706 1707 if (field_name == NULL) 1708 field_name = ""; 1709 1710 return field_name; 1711 } 1712 1713 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed) 1714 { 1715 
hist_field_fn_t fn = NULL; 1716 1717 switch (field_size) { 1718 case 8: 1719 if (field_is_signed) 1720 fn = hist_field_s64; 1721 else 1722 fn = hist_field_u64; 1723 break; 1724 case 4: 1725 if (field_is_signed) 1726 fn = hist_field_s32; 1727 else 1728 fn = hist_field_u32; 1729 break; 1730 case 2: 1731 if (field_is_signed) 1732 fn = hist_field_s16; 1733 else 1734 fn = hist_field_u16; 1735 break; 1736 case 1: 1737 if (field_is_signed) 1738 fn = hist_field_s8; 1739 else 1740 fn = hist_field_u8; 1741 break; 1742 } 1743 1744 return fn; 1745 } 1746 1747 static int parse_map_size(char *str) 1748 { 1749 unsigned long size, map_bits; 1750 int ret; 1751 1752 strsep(&str, "="); 1753 if (!str) { 1754 ret = -EINVAL; 1755 goto out; 1756 } 1757 1758 ret = kstrtoul(str, 0, &size); 1759 if (ret) 1760 goto out; 1761 1762 map_bits = ilog2(roundup_pow_of_two(size)); 1763 if (map_bits < TRACING_MAP_BITS_MIN || 1764 map_bits > TRACING_MAP_BITS_MAX) 1765 ret = -EINVAL; 1766 else 1767 ret = map_bits; 1768 out: 1769 return ret; 1770 } 1771 1772 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs) 1773 { 1774 unsigned int i; 1775 1776 if (!attrs) 1777 return; 1778 1779 for (i = 0; i < attrs->n_assignments; i++) 1780 kfree(attrs->assignment_str[i]); 1781 1782 for (i = 0; i < attrs->n_actions; i++) 1783 kfree(attrs->action_str[i]); 1784 1785 kfree(attrs->name); 1786 kfree(attrs->sort_key_str); 1787 kfree(attrs->keys_str); 1788 kfree(attrs->vals_str); 1789 kfree(attrs->clock); 1790 kfree(attrs); 1791 } 1792 1793 static int parse_action(char *str, struct hist_trigger_attrs *attrs) 1794 { 1795 int ret = -EINVAL; 1796 1797 if (attrs->n_actions >= HIST_ACTIONS_MAX) 1798 return ret; 1799 1800 if ((strncmp(str, "onmatch(", strlen("onmatch(")) == 0) || 1801 (strncmp(str, "onmax(", strlen("onmax(")) == 0)) { 1802 attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL); 1803 if (!attrs->action_str[attrs->n_actions]) { 1804 ret = -ENOMEM; 1805 return ret; 1806 } 1807 
attrs->n_actions++; 1808 ret = 0; 1809 } 1810 1811 return ret; 1812 } 1813 1814 static int parse_assignment(char *str, struct hist_trigger_attrs *attrs) 1815 { 1816 int ret = 0; 1817 1818 if ((strncmp(str, "key=", strlen("key=")) == 0) || 1819 (strncmp(str, "keys=", strlen("keys=")) == 0)) { 1820 attrs->keys_str = kstrdup(str, GFP_KERNEL); 1821 if (!attrs->keys_str) { 1822 ret = -ENOMEM; 1823 goto out; 1824 } 1825 } else if ((strncmp(str, "val=", strlen("val=")) == 0) || 1826 (strncmp(str, "vals=", strlen("vals=")) == 0) || 1827 (strncmp(str, "values=", strlen("values=")) == 0)) { 1828 attrs->vals_str = kstrdup(str, GFP_KERNEL); 1829 if (!attrs->vals_str) { 1830 ret = -ENOMEM; 1831 goto out; 1832 } 1833 } else if (strncmp(str, "sort=", strlen("sort=")) == 0) { 1834 attrs->sort_key_str = kstrdup(str, GFP_KERNEL); 1835 if (!attrs->sort_key_str) { 1836 ret = -ENOMEM; 1837 goto out; 1838 } 1839 } else if (strncmp(str, "name=", strlen("name=")) == 0) { 1840 attrs->name = kstrdup(str, GFP_KERNEL); 1841 if (!attrs->name) { 1842 ret = -ENOMEM; 1843 goto out; 1844 } 1845 } else if (strncmp(str, "clock=", strlen("clock=")) == 0) { 1846 strsep(&str, "="); 1847 if (!str) { 1848 ret = -EINVAL; 1849 goto out; 1850 } 1851 1852 str = strstrip(str); 1853 attrs->clock = kstrdup(str, GFP_KERNEL); 1854 if (!attrs->clock) { 1855 ret = -ENOMEM; 1856 goto out; 1857 } 1858 } else if (strncmp(str, "size=", strlen("size=")) == 0) { 1859 int map_bits = parse_map_size(str); 1860 1861 if (map_bits < 0) { 1862 ret = map_bits; 1863 goto out; 1864 } 1865 attrs->map_bits = map_bits; 1866 } else { 1867 char *assignment; 1868 1869 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) { 1870 hist_err("Too many variables defined: ", str); 1871 ret = -EINVAL; 1872 goto out; 1873 } 1874 1875 assignment = kstrdup(str, GFP_KERNEL); 1876 if (!assignment) { 1877 ret = -ENOMEM; 1878 goto out; 1879 } 1880 1881 attrs->assignment_str[attrs->n_assignments++] = assignment; 1882 } 1883 out: 1884 return ret; 1885 } 
1886 1887 static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str) 1888 { 1889 struct hist_trigger_attrs *attrs; 1890 int ret = 0; 1891 1892 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 1893 if (!attrs) 1894 return ERR_PTR(-ENOMEM); 1895 1896 while (trigger_str) { 1897 char *str = strsep(&trigger_str, ":"); 1898 1899 if (strchr(str, '=')) { 1900 ret = parse_assignment(str, attrs); 1901 if (ret) 1902 goto free; 1903 } else if (strcmp(str, "pause") == 0) 1904 attrs->pause = true; 1905 else if ((strcmp(str, "cont") == 0) || 1906 (strcmp(str, "continue") == 0)) 1907 attrs->cont = true; 1908 else if (strcmp(str, "clear") == 0) 1909 attrs->clear = true; 1910 else { 1911 ret = parse_action(str, attrs); 1912 if (ret) 1913 goto free; 1914 } 1915 } 1916 1917 if (!attrs->keys_str) { 1918 ret = -EINVAL; 1919 goto free; 1920 } 1921 1922 if (!attrs->clock) { 1923 attrs->clock = kstrdup("global", GFP_KERNEL); 1924 if (!attrs->clock) { 1925 ret = -ENOMEM; 1926 goto free; 1927 } 1928 } 1929 1930 return attrs; 1931 free: 1932 destroy_hist_trigger_attrs(attrs); 1933 1934 return ERR_PTR(ret); 1935 } 1936 1937 static inline void save_comm(char *comm, struct task_struct *task) 1938 { 1939 if (!task->pid) { 1940 strcpy(comm, "<idle>"); 1941 return; 1942 } 1943 1944 if (WARN_ON_ONCE(task->pid < 0)) { 1945 strcpy(comm, "<XXX>"); 1946 return; 1947 } 1948 1949 memcpy(comm, task->comm, TASK_COMM_LEN); 1950 } 1951 1952 static void hist_elt_data_free(struct hist_elt_data *elt_data) 1953 { 1954 unsigned int i; 1955 1956 for (i = 0; i < SYNTH_FIELDS_MAX; i++) 1957 kfree(elt_data->field_var_str[i]); 1958 1959 kfree(elt_data->comm); 1960 kfree(elt_data); 1961 } 1962 1963 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt) 1964 { 1965 struct hist_elt_data *elt_data = elt->private_data; 1966 1967 hist_elt_data_free(elt_data); 1968 } 1969 1970 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt) 1971 { 1972 struct hist_trigger_data *hist_data = 
elt->map->private_data; 1973 unsigned int size = TASK_COMM_LEN; 1974 struct hist_elt_data *elt_data; 1975 struct hist_field *key_field; 1976 unsigned int i, n_str; 1977 1978 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); 1979 if (!elt_data) 1980 return -ENOMEM; 1981 1982 for_each_hist_key_field(i, hist_data) { 1983 key_field = hist_data->fields[i]; 1984 1985 if (key_field->flags & HIST_FIELD_FL_EXECNAME) { 1986 elt_data->comm = kzalloc(size, GFP_KERNEL); 1987 if (!elt_data->comm) { 1988 kfree(elt_data); 1989 return -ENOMEM; 1990 } 1991 break; 1992 } 1993 } 1994 1995 n_str = hist_data->n_field_var_str + hist_data->n_max_var_str; 1996 1997 size = STR_VAR_LEN_MAX; 1998 1999 for (i = 0; i < n_str; i++) { 2000 elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL); 2001 if (!elt_data->field_var_str[i]) { 2002 hist_elt_data_free(elt_data); 2003 return -ENOMEM; 2004 } 2005 } 2006 2007 elt->private_data = elt_data; 2008 2009 return 0; 2010 } 2011 2012 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt) 2013 { 2014 struct hist_elt_data *elt_data = elt->private_data; 2015 2016 if (elt_data->comm) 2017 save_comm(elt_data->comm, current); 2018 } 2019 2020 static const struct tracing_map_ops hist_trigger_elt_data_ops = { 2021 .elt_alloc = hist_trigger_elt_data_alloc, 2022 .elt_free = hist_trigger_elt_data_free, 2023 .elt_init = hist_trigger_elt_data_init, 2024 }; 2025 2026 static const char *get_hist_field_flags(struct hist_field *hist_field) 2027 { 2028 const char *flags_str = NULL; 2029 2030 if (hist_field->flags & HIST_FIELD_FL_HEX) 2031 flags_str = "hex"; 2032 else if (hist_field->flags & HIST_FIELD_FL_SYM) 2033 flags_str = "sym"; 2034 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET) 2035 flags_str = "sym-offset"; 2036 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME) 2037 flags_str = "execname"; 2038 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL) 2039 flags_str = "syscall"; 2040 else if (hist_field->flags & HIST_FIELD_FL_LOG2) 2041 
flags_str = "log2"; 2042 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS) 2043 flags_str = "usecs"; 2044 2045 return flags_str; 2046 } 2047 2048 static void expr_field_str(struct hist_field *field, char *expr) 2049 { 2050 if (field->flags & HIST_FIELD_FL_VAR_REF) 2051 strcat(expr, "$"); 2052 2053 strcat(expr, hist_field_name(field, 0)); 2054 2055 if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) { 2056 const char *flags_str = get_hist_field_flags(field); 2057 2058 if (flags_str) { 2059 strcat(expr, "."); 2060 strcat(expr, flags_str); 2061 } 2062 } 2063 } 2064 2065 static char *expr_str(struct hist_field *field, unsigned int level) 2066 { 2067 char *expr; 2068 2069 if (level > 1) 2070 return NULL; 2071 2072 expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 2073 if (!expr) 2074 return NULL; 2075 2076 if (!field->operands[0]) { 2077 expr_field_str(field, expr); 2078 return expr; 2079 } 2080 2081 if (field->operator == FIELD_OP_UNARY_MINUS) { 2082 char *subexpr; 2083 2084 strcat(expr, "-("); 2085 subexpr = expr_str(field->operands[0], ++level); 2086 if (!subexpr) { 2087 kfree(expr); 2088 return NULL; 2089 } 2090 strcat(expr, subexpr); 2091 strcat(expr, ")"); 2092 2093 kfree(subexpr); 2094 2095 return expr; 2096 } 2097 2098 expr_field_str(field->operands[0], expr); 2099 2100 switch (field->operator) { 2101 case FIELD_OP_MINUS: 2102 strcat(expr, "-"); 2103 break; 2104 case FIELD_OP_PLUS: 2105 strcat(expr, "+"); 2106 break; 2107 default: 2108 kfree(expr); 2109 return NULL; 2110 } 2111 2112 expr_field_str(field->operands[1], expr); 2113 2114 return expr; 2115 } 2116 2117 static int contains_operator(char *str) 2118 { 2119 enum field_op_id field_op = FIELD_OP_NONE; 2120 char *op; 2121 2122 op = strpbrk(str, "+-"); 2123 if (!op) 2124 return FIELD_OP_NONE; 2125 2126 switch (*op) { 2127 case '-': 2128 if (*str == '-') 2129 field_op = FIELD_OP_UNARY_MINUS; 2130 else 2131 field_op = FIELD_OP_MINUS; 2132 break; 2133 case '+': 2134 field_op = FIELD_OP_PLUS; 
2135 break; 2136 default: 2137 break; 2138 } 2139 2140 return field_op; 2141 } 2142 2143 static void destroy_hist_field(struct hist_field *hist_field, 2144 unsigned int level) 2145 { 2146 unsigned int i; 2147 2148 if (level > 3) 2149 return; 2150 2151 if (!hist_field) 2152 return; 2153 2154 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) 2155 destroy_hist_field(hist_field->operands[i], level + 1); 2156 2157 kfree(hist_field->var.name); 2158 kfree(hist_field->name); 2159 kfree(hist_field->type); 2160 2161 kfree(hist_field); 2162 } 2163 2164 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, 2165 struct ftrace_event_field *field, 2166 unsigned long flags, 2167 char *var_name) 2168 { 2169 struct hist_field *hist_field; 2170 2171 if (field && is_function_field(field)) 2172 return NULL; 2173 2174 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL); 2175 if (!hist_field) 2176 return NULL; 2177 2178 hist_field->hist_data = hist_data; 2179 2180 if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS) 2181 goto out; /* caller will populate */ 2182 2183 if (flags & HIST_FIELD_FL_VAR_REF) { 2184 hist_field->fn = hist_field_var_ref; 2185 goto out; 2186 } 2187 2188 if (flags & HIST_FIELD_FL_HITCOUNT) { 2189 hist_field->fn = hist_field_counter; 2190 hist_field->size = sizeof(u64); 2191 hist_field->type = kstrdup("u64", GFP_KERNEL); 2192 if (!hist_field->type) 2193 goto free; 2194 goto out; 2195 } 2196 2197 if (flags & HIST_FIELD_FL_STACKTRACE) { 2198 hist_field->fn = hist_field_none; 2199 goto out; 2200 } 2201 2202 if (flags & HIST_FIELD_FL_LOG2) { 2203 unsigned long fl = flags & ~HIST_FIELD_FL_LOG2; 2204 hist_field->fn = hist_field_log2; 2205 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL); 2206 hist_field->size = hist_field->operands[0]->size; 2207 hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL); 2208 if (!hist_field->type) 2209 goto free; 2210 goto out; 2211 } 2212 2213 if (flags & 
HIST_FIELD_FL_TIMESTAMP) { 2214 hist_field->fn = hist_field_timestamp; 2215 hist_field->size = sizeof(u64); 2216 hist_field->type = kstrdup("u64", GFP_KERNEL); 2217 if (!hist_field->type) 2218 goto free; 2219 goto out; 2220 } 2221 2222 if (flags & HIST_FIELD_FL_CPU) { 2223 hist_field->fn = hist_field_cpu; 2224 hist_field->size = sizeof(int); 2225 hist_field->type = kstrdup("unsigned int", GFP_KERNEL); 2226 if (!hist_field->type) 2227 goto free; 2228 goto out; 2229 } 2230 2231 if (WARN_ON_ONCE(!field)) 2232 goto out; 2233 2234 if (is_string_field(field)) { 2235 flags |= HIST_FIELD_FL_STRING; 2236 2237 hist_field->size = MAX_FILTER_STR_VAL; 2238 hist_field->type = kstrdup(field->type, GFP_KERNEL); 2239 if (!hist_field->type) 2240 goto free; 2241 2242 if (field->filter_type == FILTER_STATIC_STRING) 2243 hist_field->fn = hist_field_string; 2244 else if (field->filter_type == FILTER_DYN_STRING) 2245 hist_field->fn = hist_field_dynstring; 2246 else 2247 hist_field->fn = hist_field_pstring; 2248 } else { 2249 hist_field->size = field->size; 2250 hist_field->is_signed = field->is_signed; 2251 hist_field->type = kstrdup(field->type, GFP_KERNEL); 2252 if (!hist_field->type) 2253 goto free; 2254 2255 hist_field->fn = select_value_fn(field->size, 2256 field->is_signed); 2257 if (!hist_field->fn) { 2258 destroy_hist_field(hist_field, 0); 2259 return NULL; 2260 } 2261 } 2262 out: 2263 hist_field->field = field; 2264 hist_field->flags = flags; 2265 2266 if (var_name) { 2267 hist_field->var.name = kstrdup(var_name, GFP_KERNEL); 2268 if (!hist_field->var.name) 2269 goto free; 2270 } 2271 2272 return hist_field; 2273 free: 2274 destroy_hist_field(hist_field, 0); 2275 return NULL; 2276 } 2277 2278 static void destroy_hist_fields(struct hist_trigger_data *hist_data) 2279 { 2280 unsigned int i; 2281 2282 for (i = 0; i < HIST_FIELDS_MAX; i++) { 2283 if (hist_data->fields[i]) { 2284 destroy_hist_field(hist_data->fields[i], 0); 2285 hist_data->fields[i] = NULL; 2286 } 2287 } 2288 } 2289 
2290 static int init_var_ref(struct hist_field *ref_field, 2291 struct hist_field *var_field, 2292 char *system, char *event_name) 2293 { 2294 int err = 0; 2295 2296 ref_field->var.idx = var_field->var.idx; 2297 ref_field->var.hist_data = var_field->hist_data; 2298 ref_field->size = var_field->size; 2299 ref_field->is_signed = var_field->is_signed; 2300 ref_field->flags |= var_field->flags & 2301 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2302 2303 if (system) { 2304 ref_field->system = kstrdup(system, GFP_KERNEL); 2305 if (!ref_field->system) 2306 return -ENOMEM; 2307 } 2308 2309 if (event_name) { 2310 ref_field->event_name = kstrdup(event_name, GFP_KERNEL); 2311 if (!ref_field->event_name) { 2312 err = -ENOMEM; 2313 goto free; 2314 } 2315 } 2316 2317 if (var_field->var.name) { 2318 ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL); 2319 if (!ref_field->name) { 2320 err = -ENOMEM; 2321 goto free; 2322 } 2323 } else if (var_field->name) { 2324 ref_field->name = kstrdup(var_field->name, GFP_KERNEL); 2325 if (!ref_field->name) { 2326 err = -ENOMEM; 2327 goto free; 2328 } 2329 } 2330 2331 ref_field->type = kstrdup(var_field->type, GFP_KERNEL); 2332 if (!ref_field->type) { 2333 err = -ENOMEM; 2334 goto free; 2335 } 2336 out: 2337 return err; 2338 free: 2339 kfree(ref_field->system); 2340 kfree(ref_field->event_name); 2341 kfree(ref_field->name); 2342 2343 goto out; 2344 } 2345 2346 static struct hist_field *create_var_ref(struct hist_field *var_field, 2347 char *system, char *event_name) 2348 { 2349 unsigned long flags = HIST_FIELD_FL_VAR_REF; 2350 struct hist_field *ref_field; 2351 2352 ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL); 2353 if (ref_field) { 2354 if (init_var_ref(ref_field, var_field, system, event_name)) { 2355 destroy_hist_field(ref_field, 0); 2356 return NULL; 2357 } 2358 } 2359 2360 return ref_field; 2361 } 2362 2363 static bool is_var_ref(char *var_name) 2364 { 2365 if (!var_name || strlen(var_name) < 
2 || var_name[0] != '$') 2366 return false; 2367 2368 return true; 2369 } 2370 2371 static char *field_name_from_var(struct hist_trigger_data *hist_data, 2372 char *var_name) 2373 { 2374 char *name, *field; 2375 unsigned int i; 2376 2377 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { 2378 name = hist_data->attrs->var_defs.name[i]; 2379 2380 if (strcmp(var_name, name) == 0) { 2381 field = hist_data->attrs->var_defs.expr[i]; 2382 if (contains_operator(field) || is_var_ref(field)) 2383 continue; 2384 return field; 2385 } 2386 } 2387 2388 return NULL; 2389 } 2390 2391 static char *local_field_var_ref(struct hist_trigger_data *hist_data, 2392 char *system, char *event_name, 2393 char *var_name) 2394 { 2395 struct trace_event_call *call; 2396 2397 if (system && event_name) { 2398 call = hist_data->event_file->event_call; 2399 2400 if (strcmp(system, call->class->system) != 0) 2401 return NULL; 2402 2403 if (strcmp(event_name, trace_event_name(call)) != 0) 2404 return NULL; 2405 } 2406 2407 if (!!system != !!event_name) 2408 return NULL; 2409 2410 if (!is_var_ref(var_name)) 2411 return NULL; 2412 2413 var_name++; 2414 2415 return field_name_from_var(hist_data, var_name); 2416 } 2417 2418 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data, 2419 char *system, char *event_name, 2420 char *var_name) 2421 { 2422 struct hist_field *var_field = NULL, *ref_field = NULL; 2423 2424 if (!is_var_ref(var_name)) 2425 return NULL; 2426 2427 var_name++; 2428 2429 var_field = find_event_var(hist_data, system, event_name, var_name); 2430 if (var_field) 2431 ref_field = create_var_ref(var_field, system, event_name); 2432 2433 if (!ref_field) 2434 hist_err_event("Couldn't find variable: $", 2435 system, event_name, var_name); 2436 2437 return ref_field; 2438 } 2439 2440 static struct ftrace_event_field * 2441 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, 2442 char *field_str, unsigned long *flags) 2443 { 2444 struct 
ftrace_event_field *field = NULL;
	char *field_name, *modifier, *str;

	/* Work on a writable copy - strsep() below modifies the string */
	modifier = str = kstrdup(field_str, GFP_KERNEL);
	if (!modifier)
		return ERR_PTR(-ENOMEM);

	/* Split "field.modifier"; field_name keeps the part before '.' */
	field_name = strsep(&modifier, ".");
	if (modifier) {
		if (strcmp(modifier, "hex") == 0)
			*flags |= HIST_FIELD_FL_HEX;
		else if (strcmp(modifier, "sym") == 0)
			*flags |= HIST_FIELD_FL_SYM;
		else if (strcmp(modifier, "sym-offset") == 0)
			*flags |= HIST_FIELD_FL_SYM_OFFSET;
		else if ((strcmp(modifier, "execname") == 0) &&
			 (strcmp(field_name, "common_pid") == 0))
			/* .execname is only meaningful on common_pid */
			*flags |= HIST_FIELD_FL_EXECNAME;
		else if (strcmp(modifier, "syscall") == 0)
			*flags |= HIST_FIELD_FL_SYSCALL;
		else if (strcmp(modifier, "log2") == 0)
			*flags |= HIST_FIELD_FL_LOG2;
		else if (strcmp(modifier, "usecs") == 0)
			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
		else {
			/* unknown modifier */
			field = ERR_PTR(-EINVAL);
			goto out;
		}
	}

	if (strcmp(field_name, "common_timestamp") == 0) {
		/*
		 * Synthesized timestamp "field" - there's no underlying
		 * event field, so leave field NULL and just set flags.
		 */
		*flags |= HIST_FIELD_FL_TIMESTAMP;
		hist_data->enable_timestamps = true;
		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
			hist_data->attrs->ts_in_usecs = true;
	} else if (strcmp(field_name, "cpu") == 0)
		*flags |= HIST_FIELD_FL_CPU;
	else {
		field = trace_find_event_field(file->event_call, field_name);
		if (!field || !field->size) {
			field = ERR_PTR(-EINVAL);
			goto out;
		}
	}
 out:
	kfree(str);

	return field;
}

/*
 * create_alias - wrap a variable reference under a new (alias) variable
 * name.  The alias inherits the reference's fn and keeps the reference
 * as operands[0].  Returns NULL on failure; in that case the alias is
 * destroyed but @var_ref is left to the caller.
 */
static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
				       struct hist_field *var_ref,
				       char *var_name)
{
	struct hist_field *alias = NULL;
	unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;

	alias = create_hist_field(hist_data, NULL, flags, var_name);
	if (!alias)
		return NULL;

	alias->fn = var_ref->fn;
	alias->operands[0] = var_ref;

	if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
		destroy_hist_field(alias, 0);
		return NULL;
	}

	return alias;
}

/*
 * parse_atom - parse a single operand of an expression: either a
 * variable reference (optionally "system.event."-qualified) or an
 * event field name with optional modifier.  Successfully resolved
 * variable references are registered in hist_data->var_refs[] so they
 * can be resolved at event time.
 */
static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file, char *str,
				     unsigned long *flags, char *var_name)
{
	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
	struct ftrace_event_field *field = NULL;
	struct hist_field *hist_field = NULL;
	int ret = 0;

	/*
	 * Two dots mean a fully qualified reference:
	 * "system.event.var" - split it into its three parts.
	 */
	s = strchr(str, '.');
	if (s) {
		s = strchr(++s, '.');
		if (s) {
			ref_system = strsep(&str, ".");
			if (!str) {
				ret = -EINVAL;
				goto out;
			}
			ref_event = strsep(&str, ".");
			if (!str) {
				ret = -EINVAL;
				goto out;
			}
			ref_var = str;
		}
	}

	/*
	 * Only treat it as a variable reference if it isn't shadowed by
	 * a local field variable of this histogram.
	 */
	s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
	if (!s) {
		hist_field = parse_var_ref(hist_data, ref_system, ref_event, ref_var);
		if (hist_field) {
			/* register the ref for event-time resolution */
			hist_data->var_refs[hist_data->n_var_refs] = hist_field;
			hist_field->var_ref_idx = hist_data->n_var_refs++;
			if (var_name) {
				/* "name=$var" - alias the reference */
				hist_field = create_alias(hist_data, hist_field, var_name);
				if (!hist_field) {
					ret = -ENOMEM;
					goto out;
				}
			}
			return hist_field;
		}
	} else
		str = s;

	/* Not a variable reference - must be an event field */
	field = parse_field(hist_data, file, str, flags);
	if (IS_ERR(field)) {
		ret = PTR_ERR(field);
		goto out;
	}

	hist_field = create_hist_field(hist_data, field, *flags, var_name);
	if (!hist_field) {
		ret = -ENOMEM;
		goto out;
	}

	return hist_field;
 out:
	return ERR_PTR(ret);
}

static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file,
				     char *str, unsigned long flags,
				     char *var_name, unsigned int level);

/*
 * parse_unary - parse a unary minus subexpression.  Only the explicit
 * parenthesized form -(xxx) is supported.
 */
static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
				      struct trace_event_file *file,
				      char *str, unsigned long flags,
				      char *var_name, unsigned int level)
{
	struct hist_field *operand1, *expr = NULL;
	unsigned long operand_flags;
	int ret = 0;
	char *s;

	/* we support only -(xxx) i.e. explicit parens required */

	if (level > 3) {
		hist_err("Too many subexpressions (3 max): ", str);
		ret = -EINVAL;
		goto free;
	}

	str++; /* skip leading '-' */

	s = strchr(str, '(');
	if (s)
		str++;	/* skip the '(' assumed to follow the '-' directly */
	else {
		ret = -EINVAL;
		goto free;
	}

	s = strrchr(str, ')');
	if (s)
		*s = '\0';	/* terminate the subexpression at ')' */
	else {
		ret = -EINVAL; /* no closing ')' */
		goto free;
	}

	flags |= HIST_FIELD_FL_EXPR;
	expr = create_hist_field(hist_data, NULL, flags, var_name);
	if (!expr) {
		ret = -ENOMEM;
		goto free;
	}

	operand_flags = 0;
	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
	if (IS_ERR(operand1)) {
		ret = PTR_ERR(operand1);
		goto free;
	}

	/* propagate timestamp flags so unit handling matches the operand */
	expr->flags |= operand1->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
	expr->fn = hist_field_unary_minus;
	expr->operands[0] = operand1;
	expr->operator = FIELD_OP_UNARY_MINUS;
	expr->name = expr_str(expr, 0);
	expr->type = kstrdup(operand1->type, GFP_KERNEL);
	if (!expr->type) {
		ret = -ENOMEM;
		goto free;	/* operand1 is attached; freeing expr frees it too */
	}

	return expr;
 free:
	destroy_hist_field(expr, 0);
	return ERR_PTR(ret);
}

/*
 * check_expr_operands - verify both operands of a binary expression use
 * the same timestamp units (you can't mix nsecs and usecs).
 */
static int check_expr_operands(struct hist_field *operand1,
			       struct hist_field *operand2)
{
	unsigned long operand1_flags = operand1->flags;
	unsigned long operand2_flags = operand2->flags;

	/*
	 * For variable references and aliases, check the flags of the
	 * variable they actually point to.
	 */
	if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
	    (operand1_flags & HIST_FIELD_FL_ALIAS)) {
		struct hist_field *var;

		var = find_var_field(operand1->var.hist_data, operand1->name);
		if (!var)
			return -EINVAL;
		operand1_flags =
var->flags; 2665 } 2666 2667 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) || 2668 (operand2_flags & HIST_FIELD_FL_ALIAS)) { 2669 struct hist_field *var; 2670 2671 var = find_var_field(operand2->var.hist_data, operand2->name); 2672 if (!var) 2673 return -EINVAL; 2674 operand2_flags = var->flags; 2675 } 2676 2677 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) != 2678 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) { 2679 hist_err("Timestamp units in expression don't match", NULL); 2680 return -EINVAL; 2681 } 2682 2683 return 0; 2684 } 2685 2686 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, 2687 struct trace_event_file *file, 2688 char *str, unsigned long flags, 2689 char *var_name, unsigned int level) 2690 { 2691 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL; 2692 unsigned long operand_flags; 2693 int field_op, ret = -EINVAL; 2694 char *sep, *operand1_str; 2695 2696 if (level > 3) { 2697 hist_err("Too many subexpressions (3 max): ", str); 2698 return ERR_PTR(-EINVAL); 2699 } 2700 2701 field_op = contains_operator(str); 2702 2703 if (field_op == FIELD_OP_NONE) 2704 return parse_atom(hist_data, file, str, &flags, var_name); 2705 2706 if (field_op == FIELD_OP_UNARY_MINUS) 2707 return parse_unary(hist_data, file, str, flags, var_name, ++level); 2708 2709 switch (field_op) { 2710 case FIELD_OP_MINUS: 2711 sep = "-"; 2712 break; 2713 case FIELD_OP_PLUS: 2714 sep = "+"; 2715 break; 2716 default: 2717 goto free; 2718 } 2719 2720 operand1_str = strsep(&str, sep); 2721 if (!operand1_str || !str) 2722 goto free; 2723 2724 operand_flags = 0; 2725 operand1 = parse_atom(hist_data, file, operand1_str, 2726 &operand_flags, NULL); 2727 if (IS_ERR(operand1)) { 2728 ret = PTR_ERR(operand1); 2729 operand1 = NULL; 2730 goto free; 2731 } 2732 2733 /* rest of string could be another expression e.g. 
b+c in a+b+c */ 2734 operand_flags = 0; 2735 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level); 2736 if (IS_ERR(operand2)) { 2737 ret = PTR_ERR(operand2); 2738 operand2 = NULL; 2739 goto free; 2740 } 2741 2742 ret = check_expr_operands(operand1, operand2); 2743 if (ret) 2744 goto free; 2745 2746 flags |= HIST_FIELD_FL_EXPR; 2747 2748 flags |= operand1->flags & 2749 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2750 2751 expr = create_hist_field(hist_data, NULL, flags, var_name); 2752 if (!expr) { 2753 ret = -ENOMEM; 2754 goto free; 2755 } 2756 2757 operand1->read_once = true; 2758 operand2->read_once = true; 2759 2760 expr->operands[0] = operand1; 2761 expr->operands[1] = operand2; 2762 expr->operator = field_op; 2763 expr->name = expr_str(expr, 0); 2764 expr->type = kstrdup(operand1->type, GFP_KERNEL); 2765 if (!expr->type) { 2766 ret = -ENOMEM; 2767 goto free; 2768 } 2769 2770 switch (field_op) { 2771 case FIELD_OP_MINUS: 2772 expr->fn = hist_field_minus; 2773 break; 2774 case FIELD_OP_PLUS: 2775 expr->fn = hist_field_plus; 2776 break; 2777 default: 2778 ret = -EINVAL; 2779 goto free; 2780 } 2781 2782 return expr; 2783 free: 2784 destroy_hist_field(operand1, 0); 2785 destroy_hist_field(operand2, 0); 2786 destroy_hist_field(expr, 0); 2787 2788 return ERR_PTR(ret); 2789 } 2790 2791 static char *find_trigger_filter(struct hist_trigger_data *hist_data, 2792 struct trace_event_file *file) 2793 { 2794 struct event_trigger_data *test; 2795 2796 list_for_each_entry_rcu(test, &file->triggers, list) { 2797 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 2798 if (test->private_data == hist_data) 2799 return test->filter_str; 2800 } 2801 } 2802 2803 return NULL; 2804 } 2805 2806 static struct event_command trigger_hist_cmd; 2807 static int event_hist_trigger_func(struct event_command *cmd_ops, 2808 struct trace_event_file *file, 2809 char *glob, char *cmd, char *param); 2810 2811 static bool compatible_keys(struct hist_trigger_data 
*target_hist_data,
			    struct hist_trigger_data *hist_data,
			    unsigned int n_keys)
{
	struct hist_field *target_hist_field, *hist_field;
	unsigned int n, i, j;

	/* Both histograms must have the same number of key fields */
	if (hist_data->n_fields - hist_data->n_vals != n_keys)
		return false;

	/* Key fields are laid out immediately after the value fields */
	i = hist_data->n_vals;
	j = target_hist_data->n_vals;

	for (n = 0; n < n_keys; n++) {
		hist_field = hist_data->fields[i + n];
		target_hist_field = target_hist_data->fields[j + n];

		/* Keys match only if type, size and signedness all agree */
		if (strcmp(hist_field->type, target_hist_field->type) != 0)
			return false;
		if (hist_field->size != target_hist_field->size)
			return false;
		if (hist_field->is_signed != target_hist_field->is_signed)
			return false;
	}

	return true;
}

/*
 * find_compatible_hist - find a registered hist trigger on @file whose
 * keys are compatible with @target_hist_data's (see compatible_keys()).
 *
 * NOTE(review): walks file->triggers with list_for_each_entry_rcu();
 * presumably called with event_mutex held - confirm locking context.
 */
static struct hist_trigger_data *
find_compatible_hist(struct hist_trigger_data *target_hist_data,
		     struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data;
	struct event_trigger_data *test;
	unsigned int n_keys;

	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;

			if (compatible_keys(target_hist_data, hist_data, n_keys))
				return hist_data;
		}
	}

	return NULL;
}

/* Look up the trace_event_file for system/event; ERR_PTR(-EINVAL) if absent */
static struct trace_event_file *event_file(struct trace_array *tr,
					   char *system, char *event_name)
{
	struct trace_event_file *file;

	file = find_event_file(tr, system, event_name);
	if (!file)
		return ERR_PTR(-EINVAL);

	return file;
}

/*
 * find_synthetic_field_var - look up the "synthetic_<field_name>"
 * variable created by create_field_var_hist() for the given field.
 */
static struct hist_field *
find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
			 char *system, char *event_name, char *field_name)
{
	struct hist_field *event_var;
	char *synthetic_name;

	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!synthetic_name)
		return ERR_PTR(-ENOMEM);

	strcpy(synthetic_name, "synthetic_");
	strcat(synthetic_name, field_name);

	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);

	kfree(synthetic_name);

	return event_var;
}

/**
 * create_field_var_hist - Automatically create a histogram and var for a field
 * @target_hist_data: The target hist trigger
 * @subsys_name: Optional subsystem name
 * @event_name: Optional event name
 * @field_name: The name of the field (and the resulting variable)
 *
 * Hist trigger actions fetch data from variables, not directly from
 * events. However, for convenience, users are allowed to directly
 * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 *
 * If a user specifies a field on an event that isn't the event the
 * histogram currently being defined (the target event histogram), the
 * only way that can be accomplished is if a new hist trigger is
 * created and the field variable defined on that.
 *
 * This function creates a new histogram compatible with the target
 * event (meaning a histogram with the same key as the target
 * histogram), and creates a variable for the specified field, but
 * with 'synthetic_' prepended to the variable name in order to avoid
 * collision with normal field variables.
 *
 * Return: The variable created for the field.
 */
static struct hist_field *
create_field_var_hist(struct hist_trigger_data *target_hist_data,
		      char *subsys_name, char *event_name, char *field_name)
{
	struct trace_array *tr = target_hist_data->event_file->tr;
	struct hist_field *event_var = ERR_PTR(-EINVAL);
	struct hist_trigger_data *hist_data;
	unsigned int i, n, first = true;
	struct field_var_hist *var_hist;
	struct trace_event_file *file;
	struct hist_field *key_field;
	char *saved_filter;
	char *cmd;
	int ret;

	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
		hist_err_event("onmatch: Too many field variables defined: ",
			       subsys_name, event_name, field_name);
		return ERR_PTR(-EINVAL);
	}

	file = event_file(tr, subsys_name, event_name);

	if (IS_ERR(file)) {
		hist_err_event("onmatch: Event file not found: ",
			       subsys_name, event_name, field_name);
		ret = PTR_ERR(file);
		return ERR_PTR(ret);
	}

	/*
	 * Look for a histogram compatible with target. We'll use the
	 * found histogram specification to create a new matching
	 * histogram with our variable on it. target_hist_data is not
	 * yet a registered histogram so we can't use that.
	 */
	hist_data = find_compatible_hist(target_hist_data, file);
	if (!hist_data) {
		hist_err_event("onmatch: Matching event histogram not found: ",
			       subsys_name, event_name, field_name);
		return ERR_PTR(-EINVAL);
	}

	/* See if a synthetic field variable has already been created */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (!IS_ERR_OR_NULL(event_var))
		return event_var;

	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
	if (!var_hist)
		return ERR_PTR(-ENOMEM);

	/* Build a "hist:..." command string describing the new histogram */
	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!cmd) {
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Use the same keys as the compatible histogram */
	strcat(cmd, "keys=");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];
		if (!first)
			strcat(cmd, ",");
		strcat(cmd, key_field->field->name);
		first = false;
	}

	/* Create the synthetic field variable specification */
	strcat(cmd, ":synthetic_");
	strcat(cmd, field_name);
	strcat(cmd, "=");
	strcat(cmd, field_name);

	/* Use the same filter as the compatible histogram */
	saved_filter = find_trigger_filter(hist_data, file);
	if (saved_filter) {
		strcat(cmd, " if ");
		strcat(cmd, saved_filter);
	}

	/* Keep a copy of the command so the trigger can be removed later */
	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
	if (!var_hist->cmd) {
		kfree(cmd);
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Save the compatible histogram information */
	var_hist->hist_data = hist_data;

	/* Create the new histogram with our variable */
	ret = event_hist_trigger_func(&trigger_hist_cmd, file,
				      "", "hist", cmd);
	if (ret) {
		kfree(cmd);
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err_event("onmatch: Couldn't create histogram for field: ",
			       subsys_name, event_name, field_name);
		return ERR_PTR(ret);
	}

	kfree(cmd);

	/* If we can't find the variable, something went wrong */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (IS_ERR_OR_NULL(event_var)) {
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err_event("onmatch: Couldn't find synthetic variable: ",
			       subsys_name, event_name, field_name);
		return ERR_PTR(-EINVAL);
	}

	n = target_hist_data->n_field_var_hists;
	target_hist_data->field_var_hists[n] = var_hist;
	target_hist_data->n_field_var_hists++;

	return event_var;
}

/*
 * find_target_event_var - find variable @var_name on the hist trigger
 * currently being defined.  If @subsys_name/@event_name are given they
 * must name the target event itself, otherwise NULL is returned.
 */
static struct hist_field *
find_target_event_var(struct hist_trigger_data *hist_data,
		      char *subsys_name, char *event_name, char *var_name)
{
	struct trace_event_file *file = hist_data->event_file;
	struct hist_field *hist_field = NULL;

	if (subsys_name) {
		struct trace_event_call *call;

		if (!event_name)
			return NULL;

		call = file->event_call;

		if (strcmp(subsys_name, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}

	hist_field = find_var_field(hist_data, var_name);

	return hist_field;
}

/*
 * __update_field_vars - evaluate each field variable against the current
 * event and store the result in the entry's variable slot.  String values
 * are copied into per-entry storage starting at @field_var_str_start.
 */
static inline void __update_field_vars(struct tracing_map_elt *elt,
				       struct ring_buffer_event *rbe,
				       void *rec,
				       struct field_var **field_vars,
				       unsigned int n_field_vars,
				       unsigned int field_var_str_start)
{
	struct hist_elt_data *elt_data = elt->private_data;
	unsigned int i, j, var_idx;
	u64 var_val;

	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
		struct field_var *field_var = field_vars[i];
		struct hist_field *var = field_var->var;
		struct hist_field *val = field_var->val;

		var_val = val->fn(val, elt, rbe, rec);
		var_idx = var->var.idx;

		if (val->flags &
HIST_FIELD_FL_STRING) {
			char *str = elt_data->field_var_str[j++];
			char *val_str = (char *)(uintptr_t)var_val;

			/* Copy into per-entry storage; the event string is transient */
			strscpy(str, val_str, STR_VAR_LEN_MAX);
			var_val = (u64)(uintptr_t)str;
		}
		tracing_map_set_var(elt, var_idx, var_val);
	}
}

/* Update all field variables of this hist trigger from the current event */
static void update_field_vars(struct hist_trigger_data *hist_data,
			      struct tracing_map_elt *elt,
			      struct ring_buffer_event *rbe,
			      void *rec)
{
	__update_field_vars(elt, rbe, rec, hist_data->field_vars,
			    hist_data->n_field_vars, 0);
}

/*
 * Update the onmax() save() variables; their string storage follows the
 * field-variable strings, hence the n_field_var_str start offset.
 */
static void update_max_vars(struct hist_trigger_data *hist_data,
			    struct tracing_map_elt *elt,
			    struct ring_buffer_event *rbe,
			    void *rec)
{
	__update_field_vars(elt, rbe, rec, hist_data->max_vars,
			    hist_data->n_max_vars, hist_data->n_field_var_str);
}

/*
 * create_var - allocate a hist_field representing a named variable and
 * reserve a tracing_map variable slot for it.  Returns ERR_PTR on
 * failure (including the name already being defined).
 */
static struct hist_field *create_var(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file,
				     char *name, int size, const char *type)
{
	struct hist_field *var;
	int idx;

	/* A variable may only be defined once (unless we're removing) */
	if (find_var(hist_data, file, name) && !hist_data->remove) {
		var = ERR_PTR(-EINVAL);
		goto out;
	}

	var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
	if (!var) {
		var = ERR_PTR(-ENOMEM);
		goto out;
	}

	idx = tracing_map_add_var(hist_data->map);
	if (idx < 0) {
		kfree(var);
		var = ERR_PTR(-EINVAL);
		goto out;
	}

	var->flags = HIST_FIELD_FL_VAR;
	var->var.idx = idx;
	var->var.hist_data = var->hist_data = hist_data;
	var->size = size;
	var->var.name = kstrdup(name, GFP_KERNEL);
	var->type = kstrdup(type, GFP_KERNEL);
	if (!var->var.name || !var->type) {
		kfree(var->var.name);
		kfree(var->type);
		kfree(var);
		var = ERR_PTR(-ENOMEM);
	}
 out:
	return var;
}

/*
 * create_field_var - create a variable based on an event field, pairing
 * the parsed field (val) with a new same-named variable (var).
 *
 * NOTE(review): the error paths free val/var with plain kfree(); these
 * hist_fields own kstrdup'd type/name strings, so destroy_hist_field()
 * looks like the more thorough teardown - confirm whether the strings
 * leak here.
 */
static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
					  struct trace_event_file *file,
					  char *field_name)
{
	struct hist_field *val = NULL, *var = NULL;
	unsigned long flags = HIST_FIELD_FL_VAR;
	struct field_var *field_var;
	int ret = 0;

	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
		hist_err("Too many field variables defined: ", field_name);
		ret = -EINVAL;
		goto err;
	}

	val = parse_atom(hist_data, file, field_name, &flags, NULL);
	if (IS_ERR(val)) {
		hist_err("Couldn't parse field variable: ", field_name);
		ret = PTR_ERR(val);
		goto err;
	}

	var = create_var(hist_data, file, field_name, val->size, val->type);
	if (IS_ERR(var)) {
		hist_err("Couldn't create or find variable: ", field_name);
		kfree(val);
		ret = PTR_ERR(var);
		goto err;
	}

	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
	if (!field_var) {
		kfree(val);
		kfree(var);
		ret = -ENOMEM;
		goto err;
	}

	field_var->var = var;
	field_var->val = val;
 out:
	return field_var;
 err:
	field_var = ERR_PTR(ret);
	goto out;
}

/**
 * create_target_field_var - Automatically create a variable for a field
 * @target_hist_data: The target hist trigger
 * @subsys_name: Optional subsystem name
 * @event_name: Optional event name
 * @var_name: The name of the field (and the resulting variable)
 *
 * Hist trigger actions fetch data from variables, not directly from
 * events. However, for convenience, users are allowed to directly
 * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 *
 * This function creates a field variable with the name var_name on
 * the hist trigger currently being defined on the target event. If
 * subsys_name and event_name are specified, this function simply
 * verifies that they do in fact match the target event subsystem and
 * event name.
 *
 * Return: The variable created for the field.
 */
static struct field_var *
create_target_field_var(struct hist_trigger_data *target_hist_data,
			char *subsys_name, char *event_name, char *var_name)
{
	struct trace_event_file *file = target_hist_data->event_file;

	if (subsys_name) {
		struct trace_event_call *call;

		if (!event_name)
			return NULL;

		call = file->event_call;

		/* Qualifiers, if given, must name the target event itself */
		if (strcmp(subsys_name, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}

	return create_field_var(target_hist_data, file, var_name);
}

/* Display the max value and saved variables for one histogram entry */
static void onmax_print(struct seq_file *m,
			struct hist_trigger_data *hist_data,
			struct tracing_map_elt *elt,
			struct action_data *data)
{
	unsigned int i, save_var_idx, max_idx = data->onmax.max_var->var.idx;

	seq_printf(m, "\n\tmax: %10llu", tracing_map_read_var(elt, max_idx));

	for (i = 0; i < hist_data->n_max_vars; i++) {
		struct hist_field *save_val = hist_data->max_vars[i]->val;
		struct hist_field *save_var = hist_data->max_vars[i]->var;
		u64 val;

		save_var_idx = save_var->var.idx;

		val = tracing_map_read_var(elt, save_var_idx);

		if (save_val->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, " %s: %-32s", save_var->var.name,
				   (char *)(uintptr_t)(val));
		} else
			seq_printf(m, " %s: %10llu", save_var->var.name, val);
	}
}

/*
 * onmax_save - action handler: if the tracked variable exceeds the
 * current max for this entry, record the new max and snapshot the
 * save() variables.
 */
static void onmax_save(struct hist_trigger_data *hist_data,
		       struct tracing_map_elt *elt, void *rec,
		       struct ring_buffer_event *rbe,
		       struct action_data *data, u64 *var_ref_vals)
{
	unsigned int max_idx = data->onmax.max_var->var.idx;
	unsigned int max_var_ref_idx = data->onmax.max_var_ref_idx;

	u64 var_val, max_val;

	var_val = var_ref_vals[max_var_ref_idx];
	max_val = tracing_map_read_var(elt,
max_idx);

	if (var_val <= max_val)
		return;

	tracing_map_set_var(elt, max_idx, var_val);

	update_max_vars(hist_data, elt, rbe, rec);
}

/* Free everything an onmax() action owns */
static void onmax_destroy(struct action_data *data)
{
	unsigned int i;

	destroy_hist_field(data->onmax.max_var, 0);
	destroy_hist_field(data->onmax.var, 0);

	kfree(data->onmax.var_str);
	kfree(data->onmax.fn_name);

	for (i = 0; i < data->n_params; i++)
		kfree(data->params[i]);

	kfree(data);
}

/*
 * onmax_create - wire up a parsed onmax() action: resolve the tracked
 * variable, create a reference to it plus an internal "max" variable,
 * and create a field variable for each save() parameter.
 */
static int onmax_create(struct hist_trigger_data *hist_data,
			struct action_data *data)
{
	struct trace_event_file *file = hist_data->event_file;
	struct hist_field *var_field, *ref_field, *max_var;
	unsigned int var_ref_idx = hist_data->n_var_refs;
	struct field_var *field_var;
	char *onmax_var_str, *param;
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	/* onmax() takes a variable reference, e.g. onmax($wakeup_lat) */
	onmax_var_str = data->onmax.var_str;
	if (onmax_var_str[0] != '$') {
		hist_err("onmax: For onmax(x), x must be a variable: ", onmax_var_str);
		return -EINVAL;
	}
	onmax_var_str++;

	var_field = find_target_event_var(hist_data, NULL, NULL, onmax_var_str);
	if (!var_field) {
		hist_err("onmax: Couldn't find onmax variable: ", onmax_var_str);
		return -EINVAL;
	}

	flags = HIST_FIELD_FL_VAR_REF;
	ref_field = create_hist_field(hist_data, NULL, flags, NULL);
	if (!ref_field)
		return -ENOMEM;

	if (init_var_ref(ref_field, var_field, NULL, NULL)) {
		destroy_hist_field(ref_field, 0);
		ret = -ENOMEM;
		goto out;
	}
	/* register the ref; its index matches var_ref_idx captured above */
	hist_data->var_refs[hist_data->n_var_refs] = ref_field;
	ref_field->var_ref_idx = hist_data->n_var_refs++;
	data->onmax.var = ref_field;

	data->fn = onmax_save;
	data->onmax.max_var_ref_idx = var_ref_idx;
	max_var = create_var(hist_data, file, "max", sizeof(u64), "u64");
	if (IS_ERR(max_var)) {
		hist_err("onmax: Couldn't create onmax variable: ", "max");
		ret = PTR_ERR(max_var);
		goto out;
	}
	data->onmax.max_var = max_var;

	/* Create a field variable for each save() parameter */
	for (i = 0; i < data->n_params; i++) {
		param = kstrdup(data->params[i], GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto out;
		}

		field_var = create_target_field_var(hist_data, NULL, NULL, param);
		if (IS_ERR(field_var)) {
			hist_err("onmax: Couldn't create field variable: ", param);
			ret = PTR_ERR(field_var);
			kfree(param);
			goto out;
		}

		hist_data->max_vars[hist_data->n_max_vars++] = field_var;
		if (field_var->val->flags & HIST_FIELD_FL_STRING)
			hist_data->n_max_var_str++;

		kfree(param);
	}
 out:
	return ret;
}

/*
 * parse_action_params - split a comma-separated action param list and
 * stash duplicated, stripped copies in data->params[].
 *
 * NOTE(review): hitting SYNTH_FIELDS_MAX silently stops parsing and
 * returns 0 - presumably intentional best-effort, but verify excess
 * params shouldn't be reported as an error.
 */
static int parse_action_params(char *params, struct action_data *data)
{
	char *param, *saved_param;
	int ret = 0;

	while (params) {
		if (data->n_params >= SYNTH_FIELDS_MAX)
			goto out;

		param = strsep(&params, ",");
		if (!param) {
			ret = -EINVAL;
			goto out;
		}

		param = strstrip(param);
		if (strlen(param) < 2) {
			hist_err("Invalid action param: ", param);
			ret = -EINVAL;
			goto out;
		}

		saved_param = kstrdup(param, GFP_KERNEL);
		if (!saved_param) {
			ret = -ENOMEM;
			goto out;
		}

		data->params[data->n_params++] = saved_param;
	}
 out:
	return ret;
}

/*
 * onmax_parse - parse the tail of "onmax($var).save(params)" (str points
 * just past "onmax(") into a new action_data.
 */
static struct action_data *onmax_parse(char *str)
{
	char *onmax_fn_name, *onmax_var_str;
	struct action_data *data;
	int ret = -EINVAL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	onmax_var_str = strsep(&str, ")");
	if (!onmax_var_str || !str) {
		ret = -EINVAL;
		goto free;
	}

	data->onmax.var_str = kstrdup(onmax_var_str, GFP_KERNEL);
	if (!data->onmax.var_str) {
		ret = -ENOMEM;
		goto free;
	}

	strsep(&str, ".");
	if (!str)
		goto free;

	onmax_fn_name = strsep(&str, "(");
	if (!onmax_fn_name || !str)
		goto free;

	/* only the save() handler is currently supported */
	if (strncmp(onmax_fn_name, "save", strlen("save")) == 0) {
		char *params = strsep(&str, ")");

		if (!params) {
			ret = -EINVAL;
			goto free;
		}

		ret = parse_action_params(params, data);
		if (ret)
			goto free;
	} else
		goto free;

	data->onmax.fn_name = kstrdup(onmax_fn_name, GFP_KERNEL);
	if (!data->onmax.fn_name) {
		ret = -ENOMEM;
		goto free;
	}
 out:
	return data;
 free:
	onmax_destroy(data);
	data = ERR_PTR(ret);
	goto out;
}

/* Free an onmatch() action and drop its synthetic event reference */
static void onmatch_destroy(struct action_data *data)
{
	unsigned int i;

	mutex_lock(&synth_event_mutex);

	kfree(data->onmatch.match_event);
	kfree(data->onmatch.match_event_system);
	kfree(data->onmatch.synth_event_name);

	for (i = 0; i < data->n_params; i++)
		kfree(data->params[i]);

	/* drop the ref taken in onmatch_create(), if any */
	if (data->onmatch.synth_event)
		data->onmatch.synth_event->ref--;

	kfree(data);

	mutex_unlock(&synth_event_mutex);
}

static void destroy_field_var(struct field_var *field_var)
{
	if (!field_var)
		return;

	destroy_hist_field(field_var->var, 0);
	destroy_hist_field(field_var->val, 0);

	kfree(field_var);
}

static void destroy_field_vars(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_field_vars; i++)
		destroy_field_var(hist_data->field_vars[i]);
}

/* Record a field variable; string vars need per-entry string storage */
static void save_field_var(struct hist_trigger_data *hist_data,
			   struct field_var *field_var)
{
	hist_data->field_vars[hist_data->n_field_vars++] = field_var;

	if (field_var->val->flags & HIST_FIELD_FL_STRING)
		hist_data->n_field_var_str++;
}


static void destroy_synth_var_refs(struct
hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_synth_var_refs; i++)
		destroy_hist_field(hist_data->synth_var_refs[i], 0);
}

/*
 * save_synth_var_ref - record a variable reference used to supply a
 * synthetic event field, and register it for event-time resolution.
 */
static void save_synth_var_ref(struct hist_trigger_data *hist_data,
			       struct hist_field *var_ref)
{
	hist_data->synth_var_refs[hist_data->n_synth_var_refs++] = var_ref;

	hist_data->var_refs[hist_data->n_var_refs] = var_ref;
	var_ref->var_ref_idx = hist_data->n_var_refs++;
}

/* Verify a param's type matches the synthetic event field at field_pos */
static int check_synth_field(struct synth_event *event,
			     struct hist_field *hist_field,
			     unsigned int field_pos)
{
	struct synth_field *field;

	if (field_pos >= event->n_fields)
		return -EINVAL;

	field = event->fields[field_pos];

	if (strcmp(field->type, hist_field->type) != 0)
		return -EINVAL;

	return 0;
}

/*
 * onmatch_find_var - resolve a $var onmatch() param: first against the
 * target event's histogram, then against the onmatch() match event.
 */
static struct hist_field *
onmatch_find_var(struct hist_trigger_data *hist_data, struct action_data *data,
		 char *system, char *event, char *var)
{
	struct hist_field *hist_field;

	var++; /* skip '$' */

	hist_field = find_target_event_var(hist_data, system, event, var);
	if (!hist_field) {
		if (!system) {
			system = data->onmatch.match_event_system;
			event = data->onmatch.match_event;
		}

		hist_field = find_event_var(hist_data, system, event, var);
	}

	if (!hist_field)
		hist_err_event("onmatch: Couldn't find onmatch param: $", system, event, var);

	return hist_field;
}

/*
 * onmatch_create_field_var - turn a bare field name used as an
 * onmatch() param into a variable, creating a helper histogram on the
 * other event if the field doesn't live on the target event.
 */
static struct hist_field *
onmatch_create_field_var(struct hist_trigger_data *hist_data,
			 struct action_data *data, char *system,
			 char *event, char *var)
{
	struct hist_field *hist_field = NULL;
	struct field_var *field_var;

	/*
	 * First try to create a field var on the target event (the
	 * currently being defined).  This will create a variable for
	 * unqualified fields on the target event, or if qualified,
	 * target fields that have qualified names matching the target.
	 */
	field_var = create_target_field_var(hist_data, system, event, var);

	if (field_var && !IS_ERR(field_var)) {
		save_field_var(hist_data, field_var);
		hist_field = field_var->var;
	} else {
		field_var = NULL;
		/*
		 * If no explicit system.event is specified, default to
		 * looking for fields on the onmatch(system.event.xxx)
		 * event.
		 */
		if (!system) {
			system = data->onmatch.match_event_system;
			event = data->onmatch.match_event;
		}

		/*
		 * At this point, we're looking at a field on another
		 * event.  Because we can't modify a hist trigger on
		 * another event to add a variable for a field, we need
		 * to create a new trigger on that event and create the
		 * variable at the same time.
		 */
		hist_field = create_field_var_hist(hist_data, system, event, var);
		if (IS_ERR(hist_field))
			goto free;
	}
 out:
	return hist_field;
 free:
	destroy_field_var(field_var);
	hist_field = NULL;
	goto out;
}

/*
 * onmatch_create - resolve an onmatch() action: look up the synthetic
 * event (taking a reference on it), resolve or create a variable for
 * each param, and type-check the params against the synthetic event's
 * fields.  The reference is dropped again on error.
 */
static int onmatch_create(struct hist_trigger_data *hist_data,
			  struct trace_event_file *file,
			  struct action_data *data)
{
	char *event_name, *param, *system = NULL;
	struct hist_field *hist_field, *var_ref;
	unsigned int i, var_ref_idx;
	unsigned int field_pos = 0;
	struct synth_event *event;
	int ret = 0;

	mutex_lock(&synth_event_mutex);
	event = find_synth_event(data->onmatch.synth_event_name);
	if (!event) {
		hist_err("onmatch: Couldn't find synthetic event: ", data->onmatch.synth_event_name);
		mutex_unlock(&synth_event_mutex);
		return -EINVAL;
	}
	/* pin the synthetic event while the trigger uses it */
	event->ref++;
	mutex_unlock(&synth_event_mutex);

	var_ref_idx = hist_data->n_var_refs;

	for (i = 0; i < data->n_params; i++) {
		char *p;

		p = param = kstrdup(data->params[i], GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		/* Split an optional "system.event." qualifier off the param */
		system = strsep(&param, ".");
		if (!param) {
			param = (char *)system;
			system = event_name = NULL;
		} else {
			event_name = strsep(&param, ".");
			if (!param) {
				kfree(p);
				ret = -EINVAL;
				goto err;
			}
		}

		if (param[0] == '$')
			hist_field = onmatch_find_var(hist_data, data, system,
						      event_name, param);
		else
			hist_field = onmatch_create_field_var(hist_data, data,
							      system,
							      event_name,
							      param);

		if (!hist_field) {
			kfree(p);
			ret = -EINVAL;
			goto err;
		}

		if (check_synth_field(event, hist_field, field_pos) == 0) {
			var_ref = create_var_ref(hist_field, system, event_name);
			if (!var_ref) {
				kfree(p);
				ret = -ENOMEM;
				goto err;
			}

			save_synth_var_ref(hist_data, var_ref);
			field_pos++;
			kfree(p);
			continue;
		}

		hist_err_event("onmatch: Param type doesn't match synthetic event field type: ",
			       system, event_name, param);
		kfree(p);
		ret = -EINVAL;
		goto err;
	}

	if (field_pos != event->n_fields) {
		hist_err("onmatch: Param count doesn't match synthetic event field count: ", event->name);
		ret = -EINVAL;
		goto err;
	}

	data->fn = action_trace;
	data->onmatch.synth_event = event;
	data->onmatch.var_ref_idx = var_ref_idx;
 out:
	return ret;
 err:
	mutex_lock(&synth_event_mutex);
	event->ref--;
	mutex_unlock(&synth_event_mutex);

	goto out;
}

/*
 * onmatch_parse - parse the tail of
 * "onmatch(system.event).synth_event(params)" into a new action_data.
 */
static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
{
	char *match_event, *match_event_system;
	char *synth_event_name, *params;
	struct action_data *data;
	int ret = -EINVAL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);
	/* str points just past "onmatch("; pull out "system.event" */
	match_event = strsep(&str, ")");
	if (!match_event || !str) {
		hist_err("onmatch: Missing closing paren: ", match_event);
		goto free;
	}

	match_event_system = strsep(&match_event, ".");
	if (!match_event) {
		hist_err("onmatch: Missing subsystem for match event: ", match_event_system);
		goto free;
	}

	/* verify the match event actually exists before copying names */
	if (IS_ERR(event_file(tr, match_event_system, match_event))) {
		hist_err_event("onmatch: Invalid subsystem or event name: ",
			       match_event_system, match_event, NULL);
		goto free;
	}

	data->onmatch.match_event = kstrdup(match_event, GFP_KERNEL);
	if (!data->onmatch.match_event) {
		ret = -ENOMEM;
		goto free;
	}

	data->onmatch.match_event_system = kstrdup(match_event_system, GFP_KERNEL);
	if (!data->onmatch.match_event_system) {
		ret = -ENOMEM;
		goto free;
	}

	/* skip the '.' between onmatch(...) and the synth event name */
	strsep(&str, ".");
	if (!str) {
		hist_err("onmatch: Missing . after onmatch(): ", str);
		goto free;
	}

	synth_event_name = strsep(&str, "(");
	if (!synth_event_name || !str) {
		hist_err("onmatch: Missing opening paramlist paren: ", synth_event_name);
		goto free;
	}

	data->onmatch.synth_event_name = kstrdup(synth_event_name, GFP_KERNEL);
	if (!data->onmatch.synth_event_name) {
		ret = -ENOMEM;
		goto free;
	}

	/* everything between the parens is the comma-separated param list */
	params = strsep(&str, ")");
	if (!params || !str || (str && strlen(str))) {
		hist_err("onmatch: Missing closing paramlist paren: ", params);
		goto free;
	}

	ret = parse_action_params(params, data);
	if (ret)
		goto free;
 out:
	return data;
 free:
	onmatch_destroy(data);
	data = ERR_PTR(ret);
	goto out;
}

/*
 * Create the implicit "hitcount" value field that every hist trigger
 * has at HITCOUNT_IDX.
 */
static int create_hitcount_val(struct hist_trigger_data *hist_data)
{
	hist_data->fields[HITCOUNT_IDX] =
		create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
	if (!hist_data->fields[HITCOUNT_IDX])
		return -ENOMEM;

	hist_data->n_vals++;
	hist_data->n_fields++;

	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
		return -EINVAL;

	return 0;
}

/*
 * Common helper for creating a value or variable field from an
 * expression string; bumps the val/field counts on success.
 */
static int __create_val_field(struct hist_trigger_data *hist_data,
			      unsigned int val_idx,
			      struct trace_event_file *file,
			      char *var_name, char *field_str,
			      unsigned long flags)
{
	struct hist_field *hist_field;
	int ret = 0;

	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
	if (IS_ERR(hist_field)) {
		ret = PTR_ERR(hist_field);
		goto out;
	}

	hist_data->fields[val_idx] = hist_field;

	++hist_data->n_vals;
	++hist_data->n_fields;

	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
		ret = -EINVAL;
 out:
	return ret;
}

/* Create a plain (non-variable) value field at val_idx. */
static int create_val_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *field_str)
{
	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
		return -EINVAL;

	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
}

/*
 * Create a named variable field (var_name=expr).  Redefinition is
 * rejected unless this trigger is being removed.
 */
static int create_var_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *var_name, char *expr_str)
{
	unsigned long flags = 0;

	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
		return -EINVAL;

	if (find_var(hist_data, file, var_name) && !hist_data->remove) {
		hist_err("Variable already defined: ", var_name);
		return -EINVAL;
	}

	flags |= HIST_FIELD_FL_VAR;
	hist_data->n_vars++;
	if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
		return -EINVAL;

	return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
}

/*
 * Create all value fields from the trigger's vals= string.  "hitcount"
 * is always created first (index 0) and skipped if also listed.
 */
static int create_val_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	char *fields_str, *field_str;
	unsigned int i, j = 1;
	int ret;

	ret = create_hitcount_val(hist_data);
	if (ret)
		goto out;

	fields_str = hist_data->attrs->vals_str;
	if (!fields_str)
		goto out;

	/* skip the "vals=" prefix */
	strsep(&fields_str, "=");
	if (!fields_str)
		goto out;

	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
		     j < TRACING_MAP_VALS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;

		if (strcmp(field_str, "hitcount") == 0)
			continue;

		ret = create_val_field(hist_data, j++, file, field_str);
		if (ret)
			goto out;
	}

	/* leftover text is only OK if it's a trailing "hitcount" */
	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
		ret = -EINVAL;
 out:
	return ret;
}

/*
 * Create one key field from its spec string ("stacktrace" or an
 * expression).  Returns the u64-aligned key size on success, negative
 * error otherwise.
 */
static int create_key_field(struct hist_trigger_data *hist_data,
			    unsigned int key_idx,
			    unsigned int key_offset,
			    struct trace_event_file *file,
			    char *field_str)
{
	struct hist_field *hist_field = NULL;

	unsigned long flags = 0;
	unsigned int key_size;
	int ret = 0;

	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
		return -EINVAL;

	flags |= HIST_FIELD_FL_KEY;

	if (strcmp(field_str, "stacktrace") == 0) {
		flags |= HIST_FIELD_FL_STACKTRACE;
		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
		hist_field = create_hist_field(hist_data, NULL, flags, NULL);
	} else {
		hist_field = parse_expr(hist_data, file, field_str, flags,
					NULL, 0);
		if (IS_ERR(hist_field)) {
			ret = PTR_ERR(hist_field);
			goto out;
		}

		if (hist_field->flags & HIST_FIELD_FL_VAR_REF) {
			hist_err("Using variable references as keys not supported: ", field_str);
			destroy_hist_field(hist_field, 0);
			ret = -EINVAL;
			goto out;
		}

		key_size = hist_field->size;
	}

	hist_data->fields[key_idx] = hist_field;

	key_size = ALIGN(key_size, sizeof(u64));
	hist_data->fields[key_idx]->size = key_size;
	hist_data->fields[key_idx]->offset = key_offset;

	hist_data->key_size += key_size;

	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
		ret = -EINVAL;
		goto out;
	}

	hist_data->n_keys++;
	hist_data->n_fields++;

	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
		return -EINVAL;

	ret = key_size;
 out:
	return ret;
}

/*
 * Create all key fields from the trigger's keys= string; key fields
 * are placed after the value fields in hist_data->fields[].
 */
static int create_key_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
	char *fields_str, *field_str;
	int ret = -EINVAL;

	fields_str = hist_data->attrs->keys_str;
	if (!fields_str)
		goto out;

	strsep(&fields_str, "=");
	if (!fields_str)
		goto out;

	for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;
		ret = create_key_field(hist_data, i, key_offset,
				       file, field_str);
		if (ret < 0)
			goto out;
		key_offset += ret;
	}
	if (fields_str) {
		/* more keys listed than TRACING_MAP_KEYS_MAX */
		ret = -EINVAL;
		goto out;
	}
	ret = 0;
 out:
	return ret;
}

/*
 * Create a variable field for each previously parsed var definition,
 * appended after the existing value fields.
 */
static int create_var_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	unsigned int i, j = hist_data->n_vals;
	int ret = 0;

	unsigned int n_vars = hist_data->attrs->var_defs.n_vars;

	for (i = 0; i < n_vars; i++) {
		char *var_name = hist_data->attrs->var_defs.name[i];
		char *expr = hist_data->attrs->var_defs.expr[i];

		ret = create_var_field(hist_data, j++, file, var_name, expr);
		if (ret)
			goto out;
	}
 out:
	return ret;
}

/* Free the name/expr strings saved by parse_var_defs(). */
static void free_var_defs(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
		kfree(hist_data->attrs->var_defs.name[i]);
		kfree(hist_data->attrs->var_defs.expr[i]);
	}

	hist_data->attrs->var_defs.n_vars = 0;
}

/*
 * Split each assignment string ("a=expr,b=expr,...") into per-variable
 * name/expr copies in attrs->var_defs.  Everything is freed via
 * free_var_defs() on error.
 */
static int parse_var_defs(struct hist_trigger_data *hist_data)
{
	char *s, *str, *var_name, *field_str;
	unsigned int i, j, n_vars = 0;
	int ret = 0;

	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
		str = hist_data->attrs->assignment_str[i];
		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
			field_str = strsep(&str, ",");
			if (!field_str)
				break;

			var_name = strsep(&field_str, "=");
			if (!var_name || !field_str) {
				hist_err("Malformed assignment: ", var_name);
				ret = -EINVAL;
				goto free;
			}

			if (n_vars == TRACING_MAP_VARS_MAX) {
				hist_err("Too many variables defined: ", var_name);
				ret = -EINVAL;
				goto free;
			}

			s = kstrdup(var_name, GFP_KERNEL);
			if (!s) {
				ret = -ENOMEM;
				goto free;
			}
			hist_data->attrs->var_defs.name[n_vars] = s;

			s = kstrdup(field_str, GFP_KERNEL);
			if (!s) {
				kfree(hist_data->attrs->var_defs.name[n_vars]);
				ret = -ENOMEM;
				goto free;
			}
			hist_data->attrs->var_defs.expr[n_vars++] = s;

			hist_data->attrs->var_defs.n_vars = n_vars;
		}
	}

	return ret;
 free:
	free_var_defs(hist_data);

	return ret;
}

/*
 * Create all hist fields (vals, vars, keys) from the parsed trigger
 * attributes.  The temporary var definition strings are always freed
 * before returning.
 */
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);
	if (ret)
		goto out;

	ret = create_val_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_var_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_key_fields(hist_data, file);
	if (ret)
		goto out;
 out:
	free_var_defs(hist_data);

	return ret;
}

/*
 * Map a sort modifier to a flag: 1 for "descending", 0 for "ascending"
 * or no modifier, -EINVAL for anything else.
 */
static int is_descending(const char *str)
{
	if (!str)
		return 0;

	if (strcmp(str, "descending") == 0)
		return 1;

	if (strcmp(str, "ascending") == 0)
		return 0;

	return -EINVAL;
}

/*
 * Parse the sort= string into hist_data->sort_keys[].  Sort field
 * indices are relative to the tracing_map, so variable fields (which
 * have no map slot) are skipped when counting.
 */
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
	char *fields_str = hist_data->attrs->sort_key_str;
	struct tracing_map_sort_key *sort_key;
	int descending, ret = 0;
	unsigned int i, j, k;

	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */

	if (!fields_str)
		goto out;

	strsep(&fields_str, "=");
	if (!fields_str) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
		struct hist_field *hist_field;
		char *field_str, *field_name;
		const char *test_name;

		sort_key = &hist_data->sort_keys[i];

		field_str = strsep(&fields_str, ",");
		if (!field_str) {
			if (i == 0)
				ret = -EINVAL;
			break;
		}

		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
			ret = -EINVAL;
			break;
		}

		field_name = strsep(&field_str, ".");
		if (!field_name) {
			ret = -EINVAL;
			break;
		}

		if (strcmp(field_name, "hitcount") == 0) {
			descending = is_descending(field_str);
			if (descending < 0) {
				ret = descending;
				break;
			}
			sort_key->descending = descending;
			continue;
		}

		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
			unsigned int idx;

			hist_field = hist_data->fields[j];
			if (hist_field->flags & HIST_FIELD_FL_VAR)
				continue;

			idx = k++;

			test_name = hist_field_name(hist_field, 0);

			if (strcmp(field_name, test_name) == 0) {
				sort_key->field_idx = idx;
				descending = is_descending(field_str);
				if (descending < 0) {
					ret = descending;
					goto out;
				}
				sort_key->descending = descending;
				break;
			}
		}
		if (j == hist_data->n_fields) {
			ret = -EINVAL;
			break;
		}
	}

	hist_data->n_sort_keys = i;
 out:
	return ret;
}

/* Tear down all actions; dispatch on fn to the matching destructor. */
static void destroy_actions(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->fn == action_trace)
			onmatch_destroy(data);
		else if (data->fn == onmax_save)
			onmax_destroy(data);
		else
			kfree(data);
	}
}

/*
 * First-pass parse of the trigger's action strings (onmatch()/onmax())
 * into action_data entries; full resolution happens later in
 * create_actions().
 */
static int parse_actions(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct action_data *data;
	unsigned int i;
	int ret = 0;
	char *str;

	for (i = 0; i < hist_data->attrs->n_actions; i++) {
		str = hist_data->attrs->action_str[i];

		if (strncmp(str, "onmatch(", strlen("onmatch(")) == 0) {
			char *action_str = str + strlen("onmatch(");

			data = onmatch_parse(tr, action_str);
			if (IS_ERR(data)) {
				ret = PTR_ERR(data);
				break;
			}
			data->fn = action_trace;
		} else if (strncmp(str, "onmax(", strlen("onmax(")) == 0) {
			char *action_str = str + strlen("onmax(");

			data = onmax_parse(action_str);
			if (IS_ERR(data)) {
				ret = PTR_ERR(data);
				break;
			}
			data->fn = onmax_save;
		} else {
			ret = -EINVAL;
			break;
		}

		hist_data->actions[hist_data->n_actions++] = data;
	}

	return ret;
}

/* Second-pass action creation, after all fields/variables exist. */
static int create_actions(struct hist_trigger_data *hist_data,
			  struct trace_event_file *file)
{
	struct action_data *data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < hist_data->attrs->n_actions; i++) {
		data = hist_data->actions[i];

		if (data->fn == action_trace) {
			ret = onmatch_create(hist_data, file, data);
			if (ret)
				return ret;
		} else if (data->fn == onmax_save) {
			ret = onmax_create(hist_data, data);
			if (ret)
				return ret;
		}
	}

	return ret;
}

/* Print per-entry action output (currently only onmax values). */
static void print_actions(struct seq_file *m,
			  struct hist_trigger_data *hist_data,
			  struct tracing_map_elt *elt)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->fn == onmax_save)
			onmax_print(m, hist_data, elt, data);
	}
}

/* Reconstruct the ":onmax(var).fn(vars)" spec for trigger info output. */
static void print_onmax_spec(struct seq_file *m,
			     struct hist_trigger_data *hist_data,
			     struct action_data *data)
{
	unsigned int i;

	seq_puts(m, ":onmax(");
	seq_printf(m, "%s", data->onmax.var_str);
	seq_printf(m, ").%s(", data->onmax.fn_name);

	for (i = 0; i < hist_data->n_max_vars; i++) {
		seq_printf(m, "%s", hist_data->max_vars[i]->var->var.name);
		if (i < hist_data->n_max_vars - 1)
			seq_puts(m, ",");
	}
	seq_puts(m, ")");
}

/* Reconstruct the ":onmatch(sys.event).synth(params)" spec. */
static void print_onmatch_spec(struct seq_file *m,
			       struct hist_trigger_data *hist_data,
			       struct action_data *data)
{
	unsigned int i;

	seq_printf(m, ":onmatch(%s.%s).", data->onmatch.match_event_system,
		   data->onmatch.match_event);

	seq_printf(m, "%s(", data->onmatch.synth_event->name);

	for (i = 0; i < data->n_params; i++) {
		if (i)
			seq_puts(m, ",");
		seq_printf(m, "%s", data->params[i]);
	}

	seq_puts(m, ")");
}

/*
 * Compare the action lists of two hist triggers for equality: same
 * count, same fn, same params, and same per-action strings.
 */
static bool actions_match(struct hist_trigger_data *hist_data,
			  struct hist_trigger_data *hist_data_test)
{
	unsigned int i, j;

	if (hist_data->n_actions != hist_data_test->n_actions)
		return false;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];
		struct action_data *data_test = hist_data_test->actions[i];

		if (data->fn != data_test->fn)
			return false;

		if (data->n_params != data_test->n_params)
			return false;

		for (j = 0; j < data->n_params; j++) {
			if (strcmp(data->params[j], data_test->params[j]) != 0)
				return false;
		}

		if (data->fn == action_trace) {
			if (strcmp(data->onmatch.synth_event_name,
				   data_test->onmatch.synth_event_name) != 0)
				return false;
			if (strcmp(data->onmatch.match_event_system,
				   data_test->onmatch.match_event_system) != 0)
				return false;
			if (strcmp(data->onmatch.match_event,
				   data_test->onmatch.match_event) != 0)
				return false;
		} else if (data->fn == onmax_save) {
			if (strcmp(data->onmax.var_str,
				   data_test->onmax.var_str) != 0)
				return false;
			if (strcmp(data->onmax.fn_name,
				   data_test->onmax.fn_name) != 0)
				return false;
		}
	}

	return true;
}


/* Print every action spec for the trigger info line. */
static void print_actions_spec(struct seq_file *m,
			       struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->fn == action_trace)
			print_onmatch_spec(m, hist_data, data);
		else if (data->fn == onmax_save)
			print_onmax_spec(m, hist_data, data);
	}
}

/* Free the saved field-var hist trigger commands and their wrappers. */
static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_field_var_hists; i++) {
		kfree(hist_data->field_var_hists[i]->cmd);
		kfree(hist_data->field_var_hists[i]);
	}
}

/* Full teardown of a hist_trigger_data and everything it owns. */
static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
	if (!hist_data)
		return;

	destroy_hist_trigger_attrs(hist_data->attrs);
	destroy_hist_fields(hist_data);
	tracing_map_destroy(hist_data->map);

	destroy_actions(hist_data);
	destroy_field_vars(hist_data);
	destroy_field_var_hists(hist_data);
	destroy_synth_var_refs(hist_data);

	kfree(hist_data);
}

/*
 * Register each hist field with the tracing_map: keys get a compare
 * fn, non-var vals get a sum slot, and vars get a var slot (recorded
 * back into the field's var.idx).
 */
static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
{
	struct tracing_map *map = hist_data->map;
	struct ftrace_event_field *field;
	struct hist_field *hist_field;
	int i, idx = 0;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field->flags & HIST_FIELD_FL_KEY) {
			tracing_map_cmp_fn_t cmp_fn;

			field = hist_field->field;

			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
				cmp_fn = tracing_map_cmp_none;
			else if (!field)
				cmp_fn = tracing_map_cmp_num(hist_field->size,
							     hist_field->is_signed);
			else if (is_string_field(field))
				cmp_fn = tracing_map_cmp_string;
			else
				cmp_fn = tracing_map_cmp_num(field->size,
							     field->is_signed);
			idx = tracing_map_add_key_field(map,
							hist_field->offset,
							cmp_fn);
		} else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
			idx = tracing_map_add_sum_field(map);

		if (idx < 0)
			return idx;

		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			idx = tracing_map_add_var(map);
			if (idx < 0)
				return idx;
			hist_field->var.idx = idx;
			hist_field->var.hist_data = hist_data;
		}
	}

	return 0;
}

/*
 * Allocate and fully initialize a hist_trigger_data from parsed attrs:
 * actions, fields, sort keys, then the backing tracing_map.  On
 * failure, attrs is detached (caller still owns it) and everything
 * else is destroyed.
 */
static struct hist_trigger_data *
create_hist_data(unsigned int map_bits,
		 struct hist_trigger_attrs *attrs,
		 struct trace_event_file *file,
		 bool remove)
{
	const struct tracing_map_ops *map_ops = NULL;
	struct hist_trigger_data *hist_data;
	int ret = 0;

	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
	if (!hist_data)
		return ERR_PTR(-ENOMEM);

	hist_data->attrs = attrs;
	hist_data->remove = remove;
	hist_data->event_file = file;

	ret = parse_actions(hist_data);
	if (ret)
		goto free;

	ret = create_hist_fields(hist_data, file);
	if (ret)
		goto free;

	ret = create_sort_keys(hist_data);
	if (ret)
		goto free;

	map_ops = &hist_trigger_elt_data_ops;
	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
					    map_ops, hist_data);
	if (IS_ERR(hist_data->map)) {
		ret = PTR_ERR(hist_data->map);
		hist_data->map = NULL;
		goto free;
	}

	ret = create_tracing_map_fields(hist_data);
	if (ret)
		goto free;
 out:
	return hist_data;
 free:
	/* attrs still belongs to the caller on failure */
	hist_data->attrs = NULL;

	destroy_hist_data(hist_data);

	hist_data = ERR_PTR(ret);

	goto out;
}

/*
 * Per-hit update of a map element: evaluate each value field into its
 * sum or variable slot, set variables defined on key fields, and
 * update any field variables.
 */
static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
				    struct tracing_map_elt *elt, void *rec,
				    struct ring_buffer_event *rbe,
				    u64 *var_ref_vals)
{
	struct hist_elt_data *elt_data;
	struct hist_field *hist_field;
	unsigned int i, var_idx;
	u64 hist_val;

	elt_data = elt->private_data;
	elt_data->var_ref_vals = var_ref_vals;

	for_each_hist_val_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		hist_val = hist_field->fn(hist_field, elt, rbe, rec);
		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			var_idx = hist_field->var.idx;
			tracing_map_set_var(elt, var_idx, hist_val);
			continue;
		}
		tracing_map_update_sum(elt, i, hist_val);
	}

	for_each_hist_key_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			hist_val = hist_field->fn(hist_field, elt, rbe, rec);
			var_idx = hist_field->var.idx;
			tracing_map_set_var(elt, var_idx, hist_val);
		}
	}

	update_field_vars(hist_data, elt, rbe, rec);
}

/*
 * Copy one key field's bytes into the compound key at its offset.
 * String keys get their real length (per filter type), clamped to
 * leave room for NUL termination.
 */
static inline void add_to_key(char *compound_key, void *key,
			      struct hist_field *key_field, void *rec)
{
	size_t size = key_field->size;

	if (key_field->flags & HIST_FIELD_FL_STRING) {
		struct ftrace_event_field *field;

		field = key_field->field;
		if (field->filter_type == FILTER_DYN_STRING)
			size = *(u32 *)(rec + field->offset) >> 16;
		else if (field->filter_type == FILTER_PTR_STRING)
			size = strlen(key);
		else if (field->filter_type == FILTER_STATIC_STRING)
			size = field->size;

		/* ensure NULL-termination */
		if (size > key_field->size - 1)
			size = key_field->size - 1;
	}

	memcpy(compound_key + key_field->offset, key, size);
}

/* Run every action attached to the trigger for this hit. */
static void
hist_trigger_actions(struct hist_trigger_data *hist_data,
		     struct tracing_map_elt *elt, void *rec,
		     struct ring_buffer_event *rbe, u64 *var_ref_vals)
{
	struct action_data *data;
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		data = hist_data->actions[i];
		data->fn(hist_data, elt, rec, rbe, data, var_ref_vals);
	}
}

/*
 * The hist trigger hot path: build the (possibly compound) key from
 * the event record, insert/update the map element, then resolve var
 * refs and fire actions.
 */
static void event_hist_trigger(struct event_trigger_data *data, void *rec,
			       struct ring_buffer_event *rbe)
{
	struct hist_trigger_data *hist_data = data->private_data;
	bool use_compound_key = (hist_data->n_keys > 1);
	unsigned long entries[HIST_STACKTRACE_DEPTH];
	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
	char compound_key[HIST_KEY_SIZE_MAX];
	struct tracing_map_elt *elt = NULL;
	struct stack_trace stacktrace;
	struct hist_field *key_field;
	u64 field_contents;
	void *key = NULL;
	unsigned int i;

	memset(compound_key, 0, hist_data->key_size);

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
			stacktrace.entries = entries;
			stacktrace.nr_entries = 0;
			stacktrace.skip = HIST_STACKTRACE_SKIP;

			memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
			save_stack_trace(&stacktrace);

			key = entries;
		} else {
			field_contents = key_field->fn(key_field, elt, rbe, rec);
			if (key_field->flags & HIST_FIELD_FL_STRING) {
				key = (void *)(unsigned long)field_contents;
				/* string keys always go through the compound-key path */
				use_compound_key = true;
			} else
				key = (void *)&field_contents;
		}

		if (use_compound_key)
			add_to_key(compound_key, key, key_field, rec);
	}

	if (use_compound_key)
		key = compound_key;

	if (hist_data->n_var_refs &&
	    !resolve_var_refs(hist_data, key, var_ref_vals, false))
		return;

	elt = tracing_map_insert(hist_data->map, key);
	if (!elt)
		return;

	hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);

	if (resolve_var_refs(hist_data, key, var_ref_vals, true))
		hist_trigger_actions(hist_data, elt, rec, rbe, var_ref_vals);
}

/*
 * Print a saved stacktrace key, one symbolized frame per line,
 * stopping at the ULONG_MAX terminator.
 */
static void hist_trigger_stacktrace_print(struct seq_file *m,
					  unsigned long *stacktrace_entries,
					  unsigned int max_entries)
{
	char str[KSYM_SYMBOL_LEN];
	unsigned int spaces = 8;
	unsigned int i;

	for (i = 0; i < max_entries; i++) {
		if (stacktrace_entries[i] == ULONG_MAX)
			return;

		seq_printf(m, "%*c", 1 + spaces, ' ');
		sprint_symbol(str, stacktrace_entries[i]);
		seq_printf(m, "%s\n", str);
	}
}

/*
 * Print one histogram entry: each key field formatted per its flags
 * (hex, sym, execname, syscall, stacktrace, log2, string, numeric),
 * then hitcount, the remaining sums, and any action output.
 */
static void
hist_trigger_entry_print(struct seq_file *m,
			 struct hist_trigger_data *hist_data, void *key,
			 struct tracing_map_elt *elt)
{
	struct hist_field *key_field;
	char str[KSYM_SYMBOL_LEN];
	bool multiline = false;
	const char *field_name;
	unsigned int i;
	u64 uval;

	seq_puts(m, "{ ");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (i > hist_data->n_vals)
			seq_puts(m, ", ");

		field_name = hist_field_name(key_field, 0);

		if (key_field->flags & HIST_FIELD_FL_HEX) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %llx", field_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol_no_offset(str, uval);
			seq_printf(m, "%s: [%llx] %-45s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
			uval = *(u64 *)(key + key_field->offset);
			sprint_symbol(str, uval);
			seq_printf(m, "%s: [%llx] %-55s", field_name,
				   uval, str);
		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			struct hist_elt_data *elt_data = elt->private_data;
			char *comm;

			if (WARN_ON_ONCE(!elt_data))
				return;

			comm = elt_data->comm;

			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %-16s[%10llu]", field_name,
				   comm, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
			const char *syscall_name;

			uval = *(u64 *)(key + key_field->offset);
			syscall_name = get_syscall_name(uval);
			if (!syscall_name)
				syscall_name = "unknown_syscall";

			seq_printf(m, "%s: %-30s[%3llu]", field_name,
				   syscall_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			seq_puts(m, "stacktrace:\n");
			hist_trigger_stacktrace_print(m,
						      key + key_field->offset,
						      HIST_STACKTRACE_DEPTH);
			multiline = true;
		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
				   *(u64 *)(key + key_field->offset));
		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, "%s: %-50s", field_name,
				   (char *)(key + key_field->offset));
		} else {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %10llu", field_name, uval);
		}
	}

	if (!multiline)
		seq_puts(m, " ");

	seq_puts(m, "}");

	seq_printf(m, " hitcount: %10llu",
		   tracing_map_read_sum(elt, HITCOUNT_IDX));

	for (i = 1; i < hist_data->n_vals; i++) {
		field_name = hist_field_name(hist_data->fields[i], 0);

		/* vars and bare expressions aren't printed as sums */
		if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
		    hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
			continue;

		if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
			seq_printf(m, "  %s: %10llx", field_name,
				   tracing_map_read_sum(elt, i));
		} else {
			seq_printf(m, "  %s: %10llu", field_name,
				   tracing_map_read_sum(elt, i));
		}
	}

	print_actions(m, hist_data, elt);

	seq_puts(m, "\n");
}

/*
 * Sort the map entries per the trigger's sort keys and print each.
 * Returns the entry count or a negative error from the sort.
 */
static int print_entries(struct seq_file *m,
			 struct hist_trigger_data *hist_data)
{
	struct tracing_map_sort_entry **sort_entries = NULL;
	struct tracing_map *map = hist_data->map;
	int i, n_entries;

	n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
					     hist_data->n_sort_keys,
					     &sort_entries);
	if (n_entries < 0)
		return n_entries;

	for (i = 0; i < n_entries; i++)
		hist_trigger_entry_print(m, hist_data,
					 sort_entries[i]->key,
					 sort_entries[i]->elt);

	tracing_map_destroy_sort_entries(sort_entries, n_entries);

	return n_entries;
}

/* Print one trigger's header, entries, and totals to the seq_file. */
static void hist_trigger_show(struct seq_file *m,
			      struct event_trigger_data *data, int n)
{
	struct hist_trigger_data *hist_data;
	int n_entries;

	if (n > 0)
		seq_puts(m, "\n\n");

	seq_puts(m, "# event histogram\n#\n# trigger info: ");
	data->ops->print(m, data->ops, data);
	seq_puts(m, "#\n\n");

	hist_data = data->private_data;
	n_entries = print_entries(m, hist_data);
	if (n_entries < 0)
		n_entries = 0;

	seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
		   (u64)atomic64_read(&hist_data->map->hits),
		   n_entries, (u64)atomic64_read(&hist_data->map->drops));
}

/*
 * seq_file show handler for the event's "hist" file: show every hist
 * trigger attached to the event, plus any recorded hist error.
 */
static int hist_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct trace_event_file *event_file;
	int n = 0, ret = 0;

	mutex_lock(&event_mutex);

	event_file = event_file_data(m->private);
	if (unlikely(!event_file)) {
		ret = -ENODEV;
		goto out_unlock;
	}

	list_for_each_entry_rcu(data, &event_file->triggers, list) {
		if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
			hist_trigger_show(m, data, n++);
	}

	if (have_hist_err()) {
		seq_printf(m, "\nERROR: %s\n", hist_err_str);
		seq_printf(m, "  Last command: %s\n", last_hist_cmd);
	}

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}

static int event_hist_open(struct inode *inode, struct file *file)
{
	return single_open(file, hist_show, file);
}

const struct file_operations event_hist_fops = {
	.open = event_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * Print a single hist field for trigger-info output, including any
 * "var=" prefix and the '$' marker for variable refs/aliases.
 */
static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
{
	const char *field_name = hist_field_name(hist_field, 0);

	if (hist_field->var.name)
		seq_printf(m, "%s=", hist_field->var.name);

	if (hist_field->flags & HIST_FIELD_FL_CPU)
		seq_puts(m, "cpu");
	else if (field_name) {
		if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
		    hist_field->flags & HIST_FIELD_FL_ALIAS)
			seq_putc(m, '$');
		seq_printf(m, "%s", field_name);
	} else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
		seq_puts(m, "common_timestamp");
}

/*
 * Print the full "hist:keys=...:vals=...:sort=..." trigger-info line
 * reconstructing the command that created this trigger.
 */
static int event_hist_trigger_print(struct seq_file *m,
				    struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct hist_field *field;
	bool have_var = false;
	unsigned int i;

	seq_puts(m, "hist:");

	if (data->name)
		seq_printf(m, "%s:", data->name);

	seq_puts(m, "keys=");

	for_each_hist_key_field(i, hist_data) {
		field = hist_data->fields[i];

		if (i > hist_data->n_vals)
			seq_puts(m, ",");

		if (field->flags & HIST_FIELD_FL_STACKTRACE)
			seq_puts(m, "stacktrace");
		else
			hist_field_print(m, field);
	}

	seq_puts(m, ":vals=");

	for_each_hist_val_field(i, hist_data) {
		field = hist_data->fields[i];
		if (field->flags & HIST_FIELD_FL_VAR) {
			have_var = true;
			continue;
		}

		if (i == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			seq_puts(m, ",");
			hist_field_print(m, field);
		}
	}

	if (have_var) {
		unsigned int n = 0;

		seq_puts(m, ":");

		for_each_hist_val_field(i, hist_data) {
			field = hist_data->fields[i];

			if (field->flags & HIST_FIELD_FL_VAR) {
				if (n++)
					seq_puts(m, ",");
				hist_field_print(m, field);
			}
		}
	}

	seq_puts(m, ":sort=");

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		struct tracing_map_sort_key *sort_key;
		unsigned int idx, first_key_idx;

		/* skip VAR vals */
		first_key_idx = hist_data->n_vals - hist_data->n_vars;

		sort_key = &hist_data->sort_keys[i];
		idx = sort_key->field_idx;

		if (WARN_ON(idx >= HIST_FIELDS_MAX))
			return -EINVAL;

		if (i > 0)
			seq_puts(m, ",");

		if (idx == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			/* sort idx is a map index; re-add skipped vars */
			if (idx >= first_key_idx)
				idx += hist_data->n_vars;
			hist_field_print(m, hist_data->fields[idx]);
		}

		if (sort_key->descending)
			seq_puts(m, ".descending");
	}
	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
	if (hist_data->enable_timestamps)
		seq_printf(m, ":clock=%s", hist_data->attrs->clock);

	print_actions_spec(m, hist_data);

	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);

	if (data->paused)
		seq_puts(m, " [paused]");
	else
		seq_puts(m, " [active]");

	seq_putc(m, '\n');

	return 0;
}

/*
 * Trigger init: register the trigger under its name on first ref,
 * then take a reference.
 */
static int event_hist_trigger_init(struct event_trigger_ops *ops,
				   struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (!data->ref && hist_data->attrs->name)
		save_named_trigger(hist_data->attrs->name, data);

	data->ref++;

	return 0;
}

/*
 * Remove the auto-created hist triggers backing field variables by
 * replaying each saved command with a '!' prefix.
 */
static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
{
	struct trace_event_file *file;
	unsigned int i;
	char *cmd;
	int ret;

	for (i = 0; i < hist_data->n_field_var_hists; i++) {
		file = hist_data->field_var_hists[i]->hist_data->event_file;
		cmd = hist_data->field_var_hists[i]->cmd;
		ret = event_hist_trigger_func(&trigger_hist_cmd, file,
					      "!hist", "hist", cmd);
	}
}

/*
 * Trigger free: drop a reference; on the last one, unregister the
 * name and tear down all trigger and hist state.
 */
static void event_hist_trigger_free(struct event_trigger_ops *ops,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		if (data->name)
			del_named_trigger(data);

		trigger_data_free(data);

		remove_hist_vars(hist_data);

		unregister_field_var_hists(hist_data);

		destroy_hist_data(hist_data);
	}
}

static struct event_trigger_ops event_hist_trigger_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_init,
	.free			= event_hist_trigger_free,
};

/*
 * Init for a trigger that shares a named trigger's data: ref this
 * instance, register it, and init the underlying named data.
 */
static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
					 struct event_trigger_data *data)
{
	data->ref++;

	save_named_trigger(data->named_data->name, data);

	event_hist_trigger_init(ops, data->named_data);

	return 0;
}

/* Free counterpart to event_hist_trigger_named_init(). */
static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
					  struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	event_hist_trigger_free(ops, data->named_data);

	data->ref--;
	if (!data->ref) {
		del_named_trigger(data);
		trigger_data_free(data);
	}
}

static struct event_trigger_ops event_hist_trigger_named_ops = {
	.func			= event_hist_trigger,
	.print			= event_hist_trigger_print,
	.init			= event_hist_trigger_named_init,
	.free			= event_hist_trigger_named_free,
};

static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
							    char *param)
{
	return &event_hist_trigger_ops;
}

/*
 * Clear a trigger's accumulated data, pausing a named trigger (and
 * waiting out in-flight users) around the map clear.
 */
static void hist_clear(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (data->name)
		pause_named_trigger(data);

	synchronize_sched();

	tracing_map_clear(hist_data->map);

	if (data->name)
		unpause_named_trigger(data);
}

/*
 * Two event fields are compatible if they're the same field or agree
 * on name, type, size, and signedness.
 */
static bool compatible_field(struct ftrace_event_field *field,
			     struct ftrace_event_field *test_field)
{
	if (field == test_field)
		return true;
	if (field == NULL || test_field == NULL)
		return false;
	if (strcmp(field->name, test_field->name) != 0)
		return false;
	if (strcmp(field->type, test_field->type) != 0)
		return false;
	if (field->size != test_field->size)
		return false;
	if (field->is_signed != test_field->is_signed)
		return false;

	return true;
}

static bool hist_trigger_match(struct event_trigger_data *data,
			       struct event_trigger_data *data_test,
			       struct event_trigger_data *named_data,
			       bool ignore_filter)
{
	struct tracing_map_sort_key *sort_key, *sort_key_test;
	struct hist_trigger_data *hist_data, *hist_data_test;
	struct hist_field *key_field, *key_field_test;
	unsigned int i;

	if (named_data && (named_data != data_test) &&
	    (named_data != data_test->named_data))
		return false;

	if (!named_data && is_named_trigger(data_test))
		return false;

	hist_data = data->private_data;
	hist_data_test =
data_test->private_data; 5178 5179 if (hist_data->n_vals != hist_data_test->n_vals || 5180 hist_data->n_fields != hist_data_test->n_fields || 5181 hist_data->n_sort_keys != hist_data_test->n_sort_keys) 5182 return false; 5183 5184 if (!ignore_filter) { 5185 if ((data->filter_str && !data_test->filter_str) || 5186 (!data->filter_str && data_test->filter_str)) 5187 return false; 5188 } 5189 5190 for_each_hist_field(i, hist_data) { 5191 key_field = hist_data->fields[i]; 5192 key_field_test = hist_data_test->fields[i]; 5193 5194 if (key_field->flags != key_field_test->flags) 5195 return false; 5196 if (!compatible_field(key_field->field, key_field_test->field)) 5197 return false; 5198 if (key_field->offset != key_field_test->offset) 5199 return false; 5200 if (key_field->size != key_field_test->size) 5201 return false; 5202 if (key_field->is_signed != key_field_test->is_signed) 5203 return false; 5204 if (!!key_field->var.name != !!key_field_test->var.name) 5205 return false; 5206 if (key_field->var.name && 5207 strcmp(key_field->var.name, key_field_test->var.name) != 0) 5208 return false; 5209 } 5210 5211 for (i = 0; i < hist_data->n_sort_keys; i++) { 5212 sort_key = &hist_data->sort_keys[i]; 5213 sort_key_test = &hist_data_test->sort_keys[i]; 5214 5215 if (sort_key->field_idx != sort_key_test->field_idx || 5216 sort_key->descending != sort_key_test->descending) 5217 return false; 5218 } 5219 5220 if (!ignore_filter && data->filter_str && 5221 (strcmp(data->filter_str, data_test->filter_str) != 0)) 5222 return false; 5223 5224 if (!actions_match(hist_data, hist_data_test)) 5225 return false; 5226 5227 return true; 5228 } 5229 5230 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops, 5231 struct event_trigger_data *data, 5232 struct trace_event_file *file) 5233 { 5234 struct hist_trigger_data *hist_data = data->private_data; 5235 struct event_trigger_data *test, *named_data = NULL; 5236 int ret = 0; 5237 5238 if (hist_data->attrs->name) { 5239 
named_data = find_named_trigger(hist_data->attrs->name); 5240 if (named_data) { 5241 if (!hist_trigger_match(data, named_data, named_data, 5242 true)) { 5243 hist_err("Named hist trigger doesn't match existing named trigger (includes variables): ", hist_data->attrs->name); 5244 ret = -EINVAL; 5245 goto out; 5246 } 5247 } 5248 } 5249 5250 if (hist_data->attrs->name && !named_data) 5251 goto new; 5252 5253 list_for_each_entry_rcu(test, &file->triggers, list) { 5254 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5255 if (!hist_trigger_match(data, test, named_data, false)) 5256 continue; 5257 if (hist_data->attrs->pause) 5258 test->paused = true; 5259 else if (hist_data->attrs->cont) 5260 test->paused = false; 5261 else if (hist_data->attrs->clear) 5262 hist_clear(test); 5263 else { 5264 hist_err("Hist trigger already exists", NULL); 5265 ret = -EEXIST; 5266 } 5267 goto out; 5268 } 5269 } 5270 new: 5271 if (hist_data->attrs->cont || hist_data->attrs->clear) { 5272 hist_err("Can't clear or continue a nonexistent hist trigger", NULL); 5273 ret = -ENOENT; 5274 goto out; 5275 } 5276 5277 if (hist_data->attrs->pause) 5278 data->paused = true; 5279 5280 if (named_data) { 5281 data->private_data = named_data->private_data; 5282 set_named_trigger_data(data, named_data); 5283 data->ops = &event_hist_trigger_named_ops; 5284 } 5285 5286 if (data->ops->init) { 5287 ret = data->ops->init(data->ops, data); 5288 if (ret < 0) 5289 goto out; 5290 } 5291 5292 if (hist_data->enable_timestamps) { 5293 char *clock = hist_data->attrs->clock; 5294 5295 ret = tracing_set_clock(file->tr, hist_data->attrs->clock); 5296 if (ret) { 5297 hist_err("Couldn't set trace_clock: ", clock); 5298 goto out; 5299 } 5300 5301 tracing_set_time_stamp_abs(file->tr, true); 5302 } 5303 5304 if (named_data) 5305 destroy_hist_data(hist_data); 5306 5307 ret++; 5308 out: 5309 return ret; 5310 } 5311 5312 static int hist_trigger_enable(struct event_trigger_data *data, 5313 struct trace_event_file *file) 5314 { 
5315 int ret = 0; 5316 5317 list_add_tail_rcu(&data->list, &file->triggers); 5318 5319 update_cond_flag(file); 5320 5321 if (trace_event_trigger_enable_disable(file, 1) < 0) { 5322 list_del_rcu(&data->list); 5323 update_cond_flag(file); 5324 ret--; 5325 } 5326 5327 return ret; 5328 } 5329 5330 static bool have_hist_trigger_match(struct event_trigger_data *data, 5331 struct trace_event_file *file) 5332 { 5333 struct hist_trigger_data *hist_data = data->private_data; 5334 struct event_trigger_data *test, *named_data = NULL; 5335 bool match = false; 5336 5337 if (hist_data->attrs->name) 5338 named_data = find_named_trigger(hist_data->attrs->name); 5339 5340 list_for_each_entry_rcu(test, &file->triggers, list) { 5341 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5342 if (hist_trigger_match(data, test, named_data, false)) { 5343 match = true; 5344 break; 5345 } 5346 } 5347 } 5348 5349 return match; 5350 } 5351 5352 static bool hist_trigger_check_refs(struct event_trigger_data *data, 5353 struct trace_event_file *file) 5354 { 5355 struct hist_trigger_data *hist_data = data->private_data; 5356 struct event_trigger_data *test, *named_data = NULL; 5357 5358 if (hist_data->attrs->name) 5359 named_data = find_named_trigger(hist_data->attrs->name); 5360 5361 list_for_each_entry_rcu(test, &file->triggers, list) { 5362 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5363 if (!hist_trigger_match(data, test, named_data, false)) 5364 continue; 5365 hist_data = test->private_data; 5366 if (check_var_refs(hist_data)) 5367 return true; 5368 break; 5369 } 5370 } 5371 5372 return false; 5373 } 5374 5375 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops, 5376 struct event_trigger_data *data, 5377 struct trace_event_file *file) 5378 { 5379 struct hist_trigger_data *hist_data = data->private_data; 5380 struct event_trigger_data *test, *named_data = NULL; 5381 bool unregistered = false; 5382 5383 if (hist_data->attrs->name) 5384 named_data = 
find_named_trigger(hist_data->attrs->name); 5385 5386 list_for_each_entry_rcu(test, &file->triggers, list) { 5387 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5388 if (!hist_trigger_match(data, test, named_data, false)) 5389 continue; 5390 unregistered = true; 5391 list_del_rcu(&test->list); 5392 trace_event_trigger_enable_disable(file, 0); 5393 update_cond_flag(file); 5394 break; 5395 } 5396 } 5397 5398 if (unregistered && test->ops->free) 5399 test->ops->free(test->ops, test); 5400 5401 if (hist_data->enable_timestamps) { 5402 if (!hist_data->remove || unregistered) 5403 tracing_set_time_stamp_abs(file->tr, false); 5404 } 5405 } 5406 5407 static bool hist_file_check_refs(struct trace_event_file *file) 5408 { 5409 struct hist_trigger_data *hist_data; 5410 struct event_trigger_data *test; 5411 5412 list_for_each_entry_rcu(test, &file->triggers, list) { 5413 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5414 hist_data = test->private_data; 5415 if (check_var_refs(hist_data)) 5416 return true; 5417 } 5418 } 5419 5420 return false; 5421 } 5422 5423 static void hist_unreg_all(struct trace_event_file *file) 5424 { 5425 struct event_trigger_data *test, *n; 5426 struct hist_trigger_data *hist_data; 5427 struct synth_event *se; 5428 const char *se_name; 5429 5430 if (hist_file_check_refs(file)) 5431 return; 5432 5433 list_for_each_entry_safe(test, n, &file->triggers, list) { 5434 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5435 hist_data = test->private_data; 5436 list_del_rcu(&test->list); 5437 trace_event_trigger_enable_disable(file, 0); 5438 5439 mutex_lock(&synth_event_mutex); 5440 se_name = trace_event_name(file->event_call); 5441 se = find_synth_event(se_name); 5442 if (se) 5443 se->ref--; 5444 mutex_unlock(&synth_event_mutex); 5445 5446 update_cond_flag(file); 5447 if (hist_data->enable_timestamps) 5448 tracing_set_time_stamp_abs(file->tr, false); 5449 if (test->ops->free) 5450 test->ops->free(test->ops, test); 5451 } 5452 } 5453 } 5454 5455 
/*
 * Top-level handler for the 'hist' trigger command: parse the command
 * string (splitting off an optional " if <filter>" clause), build the
 * hist_trigger_data, and register — or, with a '!' glob, unregister —
 * the trigger on @file.  Returns 0 on success, negative error on
 * failure.  Called with event_mutex held.
 */
static int event_hist_trigger_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct event_trigger_ops *trigger_ops;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;
	bool remove = false;
	char *trigger, *p;
	int ret = 0;

	/* remember the command for error reporting, and reset stale errors */
	if (glob && strlen(glob)) {
		last_cmd_set(param);
		hist_err_clear();
	}

	if (!param)
		return -EINVAL;

	/*
	 * NOTE(review): glob is dereferenced unconditionally here though
	 * it was NULL-checked just above — callers apparently always pass
	 * a non-NULL glob; confirm against event_trigger callers.
	 */
	if (glob[0] == '!')
		remove = true;

	/*
	 * separate the trigger from the filter (k:v [if filter])
	 * allowing for whitespace in the trigger
	 */
	p = trigger = param;
	do {
		p = strstr(p, "if");
		if (!p)
			break;
		if (p == param)
			return -EINVAL;
		/* "if" must be preceded by whitespace to count as the keyword */
		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
			p++;
			continue;
		}
		/* and must have room for, and be followed by, whitespace */
		if (p >= param + strlen(param) - strlen("if") - 1)
			return -EINVAL;
		if (*(p + strlen("if")) != ' ' && *(p + strlen("if")) != '\t') {
			p++;
			continue;
		}
		break;
	} while (p);

	if (!p)
		param = NULL;
	else {
		/* terminate the trigger part and strip both halves */
		*(p - 1) = '\0';
		param = strstrip(p);
		trigger = strstrip(trigger);
	}

	attrs = parse_hist_trigger_attrs(trigger);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data) {
		ret = -ENOMEM;
		goto out_free;
	}

	/* -1 == unlimited count (hist triggers have no count limit) */
	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;

	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	trigger_data->private_data = hist_data;

	/* if param is non-empty, it's supposed to be a filter */
	if (param && cmd_ops->set_filter) {
		ret = cmd_ops->set_filter(param, trigger_data, file);
		if (ret < 0)
			goto out_free;
	}

	if (remove) {
		if (!have_hist_trigger_match(trigger_data, file))
			goto out_free;

		/* can't remove a trigger whose variables are still referenced */
		if (hist_trigger_check_refs(trigger_data, file)) {
			ret = -EBUSY;
			goto out_free;
		}

		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);

		/* drop the synthetic-event ref this trigger held, if any */
		mutex_lock(&synth_event_mutex);
		se_name = trace_event_name(file->event_call);
		se = find_synth_event(se_name);
		if (se)
			se->ref--;
		mutex_unlock(&synth_event_mutex);

		ret = 0;
		goto out_free;
	}

	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		if (!(attrs->pause || attrs->cont || attrs->clear))
			ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;

	/* a named sharer reuses the canonical trigger's map/actions */
	if (get_named_trigger_data(trigger_data))
		goto enable;

	if (has_hist_vars(hist_data))
		save_hist_vars(hist_data);

	ret = create_actions(hist_data, file);
	if (ret)
		goto out_unreg;

	ret = tracing_map_init(hist_data->map);
	if (ret)
		goto out_unreg;
 enable:
	ret = hist_trigger_enable(trigger_data, file);
	if (ret)
		goto out_unreg;

	/* take a ref on the synthetic event this trigger's event names, if any */
	mutex_lock(&synth_event_mutex);
	se_name = trace_event_name(file->event_call);
	se = find_synth_event(se_name);
	if (se)
		se->ref++;
	mutex_unlock(&synth_event_mutex);

	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	if (ret == 0)
		hist_err_clear();

	return ret;
 out_unreg:
	cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);

	remove_hist_vars(hist_data);

	kfree(trigger_data);

	destroy_hist_data(hist_data);
	goto out;
}

static struct event_command trigger_hist_cmd = {
	.name = "hist",
	.trigger_type = ETT_EVENT_HIST,
	.flags = EVENT_CMD_FL_NEEDS_REC,
	.func = event_hist_trigger_func,
	.reg = hist_register_trigger,
	.unreg = hist_unregister_trigger,
	.unreg_all = hist_unreg_all,
	.get_trigger_ops = event_hist_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

/* register the 'hist' trigger command at boot */
__init int register_trigger_hist_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_cmd);
	WARN_ON(ret < 0);

	return ret;
}

/*
 * Trigger callback for enable_hist/disable_hist: pause or unpause
 * every hist trigger on the target event file.
 */
static void
hist_enable_trigger(struct event_trigger_data *data, void *rec,
		    struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (enable_data->enable)
				test->paused = false;
			else
				test->paused = true;
		}
	}
}

/* counted variant: fires only while data->count > 0 (-1 == unlimited) */
static void
hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
			  struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	hist_enable_trigger(data, rec, event);
}

static struct event_trigger_ops hist_enable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

/* disable variants share the same funcs; enable/disable is in private_data */
static struct event_trigger_ops hist_disable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

/* pick counted vs uncounted ops based on cmd name and presence of a count param */
static struct event_trigger_ops *
hist_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

	enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);

	if (enable)
		ops = param ? &hist_enable_count_trigger_ops :
			&hist_enable_trigger_ops;
	else
		ops = param ? &hist_disable_count_trigger_ops :
			&hist_disable_trigger_ops;

	return ops;
}

/* remove all enable_hist/disable_hist triggers from @file */
static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}

static struct event_command trigger_hist_enable_cmd = {
	.name = ENABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_hist_disable_cmd = {
	.name = DISABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}

/* register both commands; roll back the first if the second fails */
__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;
}

/* create the 'synthetic_events' tracefs file at boot */
static __init int trace_events_hist_init(void)
{
	struct dentry *entry = NULL;
	struct dentry *d_tracer;
	int err = 0;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer)) {
		err = PTR_ERR(d_tracer);
		goto err;
	}

	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
				    NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	/*
	 * NOTE(review): this message is also printed when
	 * tracing_init_dentry() itself failed, which is slightly
	 * misleading — only the tracefs_create_file() failure is
	 * really about the 'synthetic_events' entry.
	 */
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_hist_init);