1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * trace_events_hist - trace event hist triggers 4 * 5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com> 6 */ 7 8 #include <linux/module.h> 9 #include <linux/kallsyms.h> 10 #include <linux/mutex.h> 11 #include <linux/slab.h> 12 #include <linux/stacktrace.h> 13 #include <linux/rculist.h> 14 #include <linux/tracefs.h> 15 16 #include "tracing_map.h" 17 #include "trace.h" 18 #include "trace_dynevent.h" 19 20 #define SYNTH_SYSTEM "synthetic" 21 #define SYNTH_FIELDS_MAX 16 22 23 #define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */ 24 25 #define ERRORS \ 26 C(NONE, "No error"), \ 27 C(DUPLICATE_VAR, "Variable already defined"), \ 28 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \ 29 C(TOO_MANY_VARS, "Too many variables defined"), \ 30 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \ 31 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \ 32 C(TRIGGER_EEXIST, "Hist trigger already exists"), \ 33 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \ 34 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \ 35 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \ 36 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \ 37 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \ 38 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \ 39 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \ 40 C(HIST_NOT_FOUND, "Matching event histogram not found"), \ 41 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \ 42 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \ 43 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \ 44 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \ 45 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \ 46 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \ 47 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \ 48 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \ 49 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \ 50 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \ 51 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \ 52 C(TOO_MANY_PARAMS, "Too many action params"), \ 53 C(PARAM_NOT_FOUND, "Couldn't find param"), \ 54 C(INVALID_PARAM, "Invalid action param"), \ 55 C(ACTION_NOT_FOUND, "No action found"), \ 56 C(NO_SAVE_PARAMS, "No params found for save()"), \ 57 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \ 58 C(ACTION_MISMATCH, "Handler doesn't support action"), \ 59 C(NO_CLOSING_PAREN, "No closing paren found"), \ 60 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \ 61 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \ 62 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \ 63 C(VAR_NOT_FOUND, "Couldn't find variable"), \ 64 C(FIELD_NOT_FOUND, "Couldn't find field"), 65 66 #undef C 67 #define C(a, b) HIST_ERR_##a 68 69 enum { ERRORS }; 70 71 #undef C 72 #define C(a, b) b 73 74 static const char *err_text[] = { ERRORS }; 75 76 struct hist_field; 77 78 typedef u64 (*hist_field_fn_t) (struct hist_field *field, 79 struct tracing_map_elt *elt, 80 struct ring_buffer_event *rbe, 81 void *event); 82 83 #define HIST_FIELD_OPERANDS_MAX 2 84 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX) 85 #define 
HIST_ACTIONS_MAX 8 86 87 enum field_op_id { 88 FIELD_OP_NONE, 89 FIELD_OP_PLUS, 90 FIELD_OP_MINUS, 91 FIELD_OP_UNARY_MINUS, 92 }; 93 94 /* 95 * A hist_var (histogram variable) contains variable information for 96 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF 97 * flag set. A hist_var has a variable name e.g. ts0, and is 98 * associated with a given histogram trigger, as specified by 99 * hist_data. The hist_var idx is the unique index assigned to the 100 * variable by the hist trigger's tracing_map. The idx is what is 101 * used to set a variable's value and, by a variable reference, to 102 * retrieve it. 103 */ 104 struct hist_var { 105 char *name; 106 struct hist_trigger_data *hist_data; 107 unsigned int idx; 108 }; 109 110 struct hist_field { 111 struct ftrace_event_field *field; 112 unsigned long flags; 113 hist_field_fn_t fn; 114 unsigned int size; 115 unsigned int offset; 116 unsigned int is_signed; 117 const char *type; 118 struct hist_field *operands[HIST_FIELD_OPERANDS_MAX]; 119 struct hist_trigger_data *hist_data; 120 121 /* 122 * Variable fields contain variable-specific info in var. 123 */ 124 struct hist_var var; 125 enum field_op_id operator; 126 char *system; 127 char *event_name; 128 129 /* 130 * The name field is used for EXPR and VAR_REF fields. VAR 131 * fields contain the variable name in var.name. 132 */ 133 char *name; 134 135 /* 136 * When a histogram trigger is hit, if it has any references 137 * to variables, the values of those variables are collected 138 * into a var_ref_vals array by resolve_var_refs(). The 139 * current value of each variable is read from the tracing_map 140 * using the hist field's hist_var.idx and entered into the 141 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx]. 142 */ 143 unsigned int var_ref_idx; 144 bool read_once; 145 }; 146 147 static u64 hist_field_none(struct hist_field *field, 148 struct tracing_map_elt *elt, 149 struct ring_buffer_event *rbe, 150 void *event) 151 { 152 return 0; 153 } 154 155 static u64 hist_field_counter(struct hist_field *field, 156 struct tracing_map_elt *elt, 157 struct ring_buffer_event *rbe, 158 void *event) 159 { 160 return 1; 161 } 162 163 static u64 hist_field_string(struct hist_field *hist_field, 164 struct tracing_map_elt *elt, 165 struct ring_buffer_event *rbe, 166 void *event) 167 { 168 char *addr = (char *)(event + hist_field->field->offset); 169 170 return (u64)(unsigned long)addr; 171 } 172 173 static u64 hist_field_dynstring(struct hist_field *hist_field, 174 struct tracing_map_elt *elt, 175 struct ring_buffer_event *rbe, 176 void *event) 177 { 178 u32 str_item = *(u32 *)(event + hist_field->field->offset); 179 int str_loc = str_item & 0xffff; 180 char *addr = (char *)(event + str_loc); 181 182 return (u64)(unsigned long)addr; 183 } 184 185 static u64 hist_field_pstring(struct hist_field *hist_field, 186 struct tracing_map_elt *elt, 187 struct ring_buffer_event *rbe, 188 void *event) 189 { 190 char **addr = (char **)(event + hist_field->field->offset); 191 192 return (u64)(unsigned long)*addr; 193 } 194 195 static u64 hist_field_log2(struct hist_field *hist_field, 196 struct tracing_map_elt *elt, 197 struct ring_buffer_event *rbe, 198 void *event) 199 { 200 struct hist_field *operand = hist_field->operands[0]; 201 202 u64 val = operand->fn(operand, elt, rbe, event); 203 204 return (u64) ilog2(roundup_pow_of_two(val)); 205 } 206 207 static u64 hist_field_plus(struct hist_field *hist_field, 208 struct tracing_map_elt *elt, 209 struct ring_buffer_event *rbe, 210 void *event) 
211 { 212 struct hist_field *operand1 = hist_field->operands[0]; 213 struct hist_field *operand2 = hist_field->operands[1]; 214 215 u64 val1 = operand1->fn(operand1, elt, rbe, event); 216 u64 val2 = operand2->fn(operand2, elt, rbe, event); 217 218 return val1 + val2; 219 } 220 221 static u64 hist_field_minus(struct hist_field *hist_field, 222 struct tracing_map_elt *elt, 223 struct ring_buffer_event *rbe, 224 void *event) 225 { 226 struct hist_field *operand1 = hist_field->operands[0]; 227 struct hist_field *operand2 = hist_field->operands[1]; 228 229 u64 val1 = operand1->fn(operand1, elt, rbe, event); 230 u64 val2 = operand2->fn(operand2, elt, rbe, event); 231 232 return val1 - val2; 233 } 234 235 static u64 hist_field_unary_minus(struct hist_field *hist_field, 236 struct tracing_map_elt *elt, 237 struct ring_buffer_event *rbe, 238 void *event) 239 { 240 struct hist_field *operand = hist_field->operands[0]; 241 242 s64 sval = (s64)operand->fn(operand, elt, rbe, event); 243 u64 val = (u64)-sval; 244 245 return val; 246 } 247 248 #define DEFINE_HIST_FIELD_FN(type) \ 249 static u64 hist_field_##type(struct hist_field *hist_field, \ 250 struct tracing_map_elt *elt, \ 251 struct ring_buffer_event *rbe, \ 252 void *event) \ 253 { \ 254 type *addr = (type *)(event + hist_field->field->offset); \ 255 \ 256 return (u64)(unsigned long)*addr; \ 257 } 258 259 DEFINE_HIST_FIELD_FN(s64); 260 DEFINE_HIST_FIELD_FN(u64); 261 DEFINE_HIST_FIELD_FN(s32); 262 DEFINE_HIST_FIELD_FN(u32); 263 DEFINE_HIST_FIELD_FN(s16); 264 DEFINE_HIST_FIELD_FN(u16); 265 DEFINE_HIST_FIELD_FN(s8); 266 DEFINE_HIST_FIELD_FN(u8); 267 268 #define for_each_hist_field(i, hist_data) \ 269 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++) 270 271 #define for_each_hist_val_field(i, hist_data) \ 272 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++) 273 274 #define for_each_hist_key_field(i, hist_data) \ 275 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++) 276 277 #define HIST_STACKTRACE_DEPTH 16 278 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long)) 279 #define HIST_STACKTRACE_SKIP 5 280 281 #define HITCOUNT_IDX 0 282 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE) 283 284 enum hist_field_flags { 285 HIST_FIELD_FL_HITCOUNT = 1 << 0, 286 HIST_FIELD_FL_KEY = 1 << 1, 287 HIST_FIELD_FL_STRING = 1 << 2, 288 HIST_FIELD_FL_HEX = 1 << 3, 289 HIST_FIELD_FL_SYM = 1 << 4, 290 HIST_FIELD_FL_SYM_OFFSET = 1 << 5, 291 HIST_FIELD_FL_EXECNAME = 1 << 6, 292 HIST_FIELD_FL_SYSCALL = 1 << 7, 293 HIST_FIELD_FL_STACKTRACE = 1 << 8, 294 HIST_FIELD_FL_LOG2 = 1 << 9, 295 HIST_FIELD_FL_TIMESTAMP = 1 << 10, 296 HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11, 297 HIST_FIELD_FL_VAR = 1 << 12, 298 HIST_FIELD_FL_EXPR = 1 << 13, 299 HIST_FIELD_FL_VAR_REF = 1 << 14, 300 HIST_FIELD_FL_CPU = 1 << 15, 301 HIST_FIELD_FL_ALIAS = 1 << 16, 302 }; 303 304 struct var_defs { 305 unsigned int n_vars; 306 char *name[TRACING_MAP_VARS_MAX]; 307 char *expr[TRACING_MAP_VARS_MAX]; 308 }; 309 310 struct hist_trigger_attrs { 311 char *keys_str; 312 char *vals_str; 313 char *sort_key_str; 314 char *name; 315 char *clock; 316 bool pause; 317 bool cont; 318 bool clear; 319 bool ts_in_usecs; 320 unsigned int map_bits; 321 322 char *assignment_str[TRACING_MAP_VARS_MAX]; 323 unsigned int n_assignments; 324 325 char *action_str[HIST_ACTIONS_MAX]; 326 unsigned int n_actions; 327 328 struct var_defs var_defs; 329 }; 330 331 struct field_var { 332 struct hist_field *var; 333 struct hist_field *val; 334 }; 335 336 struct field_var_hist { 337 
struct hist_trigger_data *hist_data; 338 char *cmd; 339 }; 340 341 struct hist_trigger_data { 342 struct hist_field *fields[HIST_FIELDS_MAX]; 343 unsigned int n_vals; 344 unsigned int n_keys; 345 unsigned int n_fields; 346 unsigned int n_vars; 347 unsigned int key_size; 348 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX]; 349 unsigned int n_sort_keys; 350 struct trace_event_file *event_file; 351 struct hist_trigger_attrs *attrs; 352 struct tracing_map *map; 353 bool enable_timestamps; 354 bool remove; 355 struct hist_field *var_refs[TRACING_MAP_VARS_MAX]; 356 unsigned int n_var_refs; 357 358 struct action_data *actions[HIST_ACTIONS_MAX]; 359 unsigned int n_actions; 360 361 struct field_var *field_vars[SYNTH_FIELDS_MAX]; 362 unsigned int n_field_vars; 363 unsigned int n_field_var_str; 364 struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX]; 365 unsigned int n_field_var_hists; 366 367 struct field_var *save_vars[SYNTH_FIELDS_MAX]; 368 unsigned int n_save_vars; 369 unsigned int n_save_var_str; 370 }; 371 372 static int synth_event_create(int argc, const char **argv); 373 static int synth_event_show(struct seq_file *m, struct dyn_event *ev); 374 static int synth_event_release(struct dyn_event *ev); 375 static bool synth_event_is_busy(struct dyn_event *ev); 376 static bool synth_event_match(const char *system, const char *event, 377 struct dyn_event *ev); 378 379 static struct dyn_event_operations synth_event_ops = { 380 .create = synth_event_create, 381 .show = synth_event_show, 382 .is_busy = synth_event_is_busy, 383 .free = synth_event_release, 384 .match = synth_event_match, 385 }; 386 387 struct synth_field { 388 char *type; 389 char *name; 390 size_t size; 391 bool is_signed; 392 bool is_string; 393 }; 394 395 struct synth_event { 396 struct dyn_event devent; 397 int ref; 398 char *name; 399 struct synth_field **fields; 400 unsigned int n_fields; 401 unsigned int n_u64; 402 struct trace_event_class class; 403 struct trace_event_call call; 404 struct tracepoint *tp; 405 }; 406 407 static bool is_synth_event(struct dyn_event *ev) 408 { 409 return ev->ops == &synth_event_ops; 410 } 411 412 static struct synth_event *to_synth_event(struct dyn_event *ev) 413 { 414 return container_of(ev, struct synth_event, devent); 415 } 416 417 static bool synth_event_is_busy(struct dyn_event *ev) 418 { 419 struct synth_event *event = to_synth_event(ev); 420 421 return event->ref != 0; 422 } 423 424 static bool synth_event_match(const char *system, const char *event, 425 struct dyn_event *ev) 426 { 427 struct synth_event *sev = to_synth_event(ev); 428 429 return strcmp(sev->name, event) == 0 && 430 (!system || strcmp(system, SYNTH_SYSTEM) == 0); 431 } 432 433 struct action_data; 434 435 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data, 436 struct tracing_map_elt *elt, void *rec, 437 struct ring_buffer_event *rbe, void *key, 438 struct action_data *data, u64 *var_ref_vals); 439 440 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val); 441 442 enum handler_id { 443 HANDLER_ONMATCH = 1, 444 HANDLER_ONMAX, 445 HANDLER_ONCHANGE, 446 }; 447 448 enum action_id { 449 ACTION_SAVE = 1, 450 ACTION_TRACE, 451 ACTION_SNAPSHOT, 452 }; 453 454 struct action_data { 455 enum handler_id handler; 456 enum action_id action; 457 char *action_name; 458 action_fn_t fn; 459 460 unsigned int n_params; 461 char *params[SYNTH_FIELDS_MAX]; 462 463 /* 464 * When a histogram trigger is hit, the values of any 465 * references to variables, including variables being passed 466 * as 
parameters to synthetic events, are collected into a 467 * var_ref_vals array. This var_ref_idx is the index of the 468 * first param in the array to be passed to the synthetic 469 * event invocation. 470 */ 471 unsigned int var_ref_idx; 472 struct synth_event *synth_event; 473 bool use_trace_keyword; 474 char *synth_event_name; 475 476 union { 477 struct { 478 char *event; 479 char *event_system; 480 } match_data; 481 482 struct { 483 /* 484 * var_str contains the $-unstripped variable 485 * name referenced by var_ref, and used when 486 * printing the action. Because var_ref 487 * creation is deferred to create_actions(), 488 * we need a per-action way to save it until 489 * then, thus var_str. 490 */ 491 char *var_str; 492 493 /* 494 * var_ref refers to the variable being 495 * tracked e.g onmax($var). 496 */ 497 struct hist_field *var_ref; 498 499 /* 500 * track_var contains the 'invisible' tracking 501 * variable created to keep the current 502 * e.g. max value. 503 */ 504 struct hist_field *track_var; 505 506 check_track_val_fn_t check_val; 507 action_fn_t save_data; 508 } track_data; 509 }; 510 }; 511 512 struct track_data { 513 u64 track_val; 514 bool updated; 515 516 unsigned int key_len; 517 void *key; 518 struct tracing_map_elt elt; 519 520 struct action_data *action_data; 521 struct hist_trigger_data *hist_data; 522 }; 523 524 struct hist_elt_data { 525 char *comm; 526 u64 *var_ref_vals; 527 char *field_var_str[SYNTH_FIELDS_MAX]; 528 }; 529 530 struct snapshot_context { 531 struct tracing_map_elt *elt; 532 void *key; 533 }; 534 535 static void track_data_free(struct track_data *track_data) 536 { 537 struct hist_elt_data *elt_data; 538 539 if (!track_data) 540 return; 541 542 kfree(track_data->key); 543 544 elt_data = track_data->elt.private_data; 545 if (elt_data) { 546 kfree(elt_data->comm); 547 kfree(elt_data); 548 } 549 550 kfree(track_data); 551 } 552 553 static struct track_data *track_data_alloc(unsigned int key_len, 554 struct action_data *action_data, 555 struct hist_trigger_data *hist_data) 556 { 557 struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL); 558 struct hist_elt_data *elt_data; 559 560 if (!data) 561 return ERR_PTR(-ENOMEM); 562 563 data->key = kzalloc(key_len, GFP_KERNEL); 564 if (!data->key) { 565 track_data_free(data); 566 return ERR_PTR(-ENOMEM); 567 } 568 569 data->key_len = key_len; 570 data->action_data = action_data; 571 data->hist_data = hist_data; 572 573 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); 574 if (!elt_data) { 575 track_data_free(data); 576 return ERR_PTR(-ENOMEM); 577 } 578 data->elt.private_data = elt_data; 579 580 elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL); 581 if (!elt_data->comm) { 582 track_data_free(data); 583 return ERR_PTR(-ENOMEM); 584 } 585 586 return data; 587 } 588 589 static char last_cmd[MAX_FILTER_STR_VAL]; 590 static char last_cmd_loc[MAX_FILTER_STR_VAL]; 591 592 static int errpos(char *str) 593 { 594 return err_pos(last_cmd, str); 595 } 596 597 static void last_cmd_set(struct trace_event_file *file, char *str) 598 { 599 const char *system = NULL, *name = NULL; 600 struct trace_event_call *call; 601 602 if (!str) 603 return; 604 605 strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1); 606 607 if (file) { 608 call = file->event_call; 609 610 system = call->class->system; 611 if (system) { 612 name = trace_event_name(call); 613 if (!name) 614 system = NULL; 615 } 616 } 617 618 if (system) 619 snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name); 620 } 621 622 static void hist_err(struct 
trace_array *tr, u8 err_type, u8 err_pos) 623 { 624 tracing_log_err(tr, last_cmd_loc, last_cmd, err_text, 625 err_type, err_pos); 626 } 627 628 static void hist_err_clear(void) 629 { 630 last_cmd[0] = '\0'; 631 last_cmd_loc[0] = '\0'; 632 } 633 634 struct synth_trace_event { 635 struct trace_entry ent; 636 u64 fields[]; 637 }; 638 639 static int synth_event_define_fields(struct trace_event_call *call) 640 { 641 struct synth_trace_event trace; 642 int offset = offsetof(typeof(trace), fields); 643 struct synth_event *event = call->data; 644 unsigned int i, size, n_u64; 645 char *name, *type; 646 bool is_signed; 647 int ret = 0; 648 649 for (i = 0, n_u64 = 0; i < event->n_fields; i++) { 650 size = event->fields[i]->size; 651 is_signed = event->fields[i]->is_signed; 652 type = event->fields[i]->type; 653 name = event->fields[i]->name; 654 ret = trace_define_field(call, type, name, offset, size, 655 is_signed, FILTER_OTHER); 656 if (ret) 657 break; 658 659 if (event->fields[i]->is_string) { 660 offset += STR_VAR_LEN_MAX; 661 n_u64 += STR_VAR_LEN_MAX / sizeof(u64); 662 } else { 663 offset += sizeof(u64); 664 n_u64++; 665 } 666 } 667 668 event->n_u64 = n_u64; 669 670 return ret; 671 } 672 673 static bool synth_field_signed(char *type) 674 { 675 if (str_has_prefix(type, "u")) 676 return false; 677 678 return true; 679 } 680 681 static int synth_field_is_string(char *type) 682 { 683 if (strstr(type, "char[") != NULL) 684 return true; 685 686 return false; 687 } 688 689 static int synth_field_string_size(char *type) 690 { 691 char buf[4], *end, *start; 692 unsigned int len; 693 int size, err; 694 695 start = strstr(type, "char["); 696 if (start == NULL) 697 return -EINVAL; 698 start += sizeof("char[") - 1; 699 700 end = strchr(type, ']'); 701 if (!end || end < start) 702 return -EINVAL; 703 704 len = end - start; 705 if (len > 3) 706 return -EINVAL; 707 708 strncpy(buf, start, len); 709 buf[len] = '\0'; 710 711 err = kstrtouint(buf, 0, &size); 712 if (err) 713 return err; 714 715 if (size > STR_VAR_LEN_MAX) 716 return -EINVAL; 717 718 return size; 719 } 720 721 static int synth_field_size(char *type) 722 { 723 int size = 0; 724 725 if (strcmp(type, "s64") == 0) 726 size = sizeof(s64); 727 else if (strcmp(type, "u64") == 0) 728 size = sizeof(u64); 729 else if (strcmp(type, "s32") == 0) 730 size = sizeof(s32); 731 else if (strcmp(type, "u32") == 0) 732 size = sizeof(u32); 733 else if (strcmp(type, "s16") == 0) 734 size = sizeof(s16); 735 else if (strcmp(type, "u16") == 0) 736 size = sizeof(u16); 737 else if (strcmp(type, "s8") == 0) 738 size = sizeof(s8); 739 else if (strcmp(type, "u8") == 0) 740 size = sizeof(u8); 741 else if (strcmp(type, "char") == 0) 742 size = sizeof(char); 743 else if (strcmp(type, "unsigned char") == 0) 744 size = sizeof(unsigned char); 745 else if (strcmp(type, "int") == 0) 746 size = sizeof(int); 747 else if (strcmp(type, "unsigned int") == 0) 748 size = sizeof(unsigned int); 749 else if (strcmp(type, "long") == 0) 750 size = sizeof(long); 751 else if (strcmp(type, "unsigned long") == 0) 752 size = sizeof(unsigned long); 753 else if (strcmp(type, "pid_t") == 0) 754 size = sizeof(pid_t); 755 else if (synth_field_is_string(type)) 756 size = synth_field_string_size(type); 757 758 return size; 759 } 760 761 static const char *synth_field_fmt(char *type) 762 { 763 const char *fmt = "%llu"; 764 765 if (strcmp(type, "s64") == 0) 766 fmt = "%lld"; 767 else if (strcmp(type, "u64") == 0) 768 fmt = "%llu"; 769 else if (strcmp(type, "s32") == 0) 770 fmt = "%d"; 771 else if (strcmp(type, 
"u32") == 0) 772 fmt = "%u"; 773 else if (strcmp(type, "s16") == 0) 774 fmt = "%d"; 775 else if (strcmp(type, "u16") == 0) 776 fmt = "%u"; 777 else if (strcmp(type, "s8") == 0) 778 fmt = "%d"; 779 else if (strcmp(type, "u8") == 0) 780 fmt = "%u"; 781 else if (strcmp(type, "char") == 0) 782 fmt = "%d"; 783 else if (strcmp(type, "unsigned char") == 0) 784 fmt = "%u"; 785 else if (strcmp(type, "int") == 0) 786 fmt = "%d"; 787 else if (strcmp(type, "unsigned int") == 0) 788 fmt = "%u"; 789 else if (strcmp(type, "long") == 0) 790 fmt = "%ld"; 791 else if (strcmp(type, "unsigned long") == 0) 792 fmt = "%lu"; 793 else if (strcmp(type, "pid_t") == 0) 794 fmt = "%d"; 795 else if (synth_field_is_string(type)) 796 fmt = "%s"; 797 798 return fmt; 799 } 800 801 static enum print_line_t print_synth_event(struct trace_iterator *iter, 802 int flags, 803 struct trace_event *event) 804 { 805 struct trace_array *tr = iter->tr; 806 struct trace_seq *s = &iter->seq; 807 struct synth_trace_event *entry; 808 struct synth_event *se; 809 unsigned int i, n_u64; 810 char print_fmt[32]; 811 const char *fmt; 812 813 entry = (struct synth_trace_event *)iter->ent; 814 se = container_of(event, struct synth_event, call.event); 815 816 trace_seq_printf(s, "%s: ", se->name); 817 818 for (i = 0, n_u64 = 0; i < se->n_fields; i++) { 819 if (trace_seq_has_overflowed(s)) 820 goto end; 821 822 fmt = synth_field_fmt(se->fields[i]->type); 823 824 /* parameter types */ 825 if (tr->trace_flags & TRACE_ITER_VERBOSE) 826 trace_seq_printf(s, "%s ", fmt); 827 828 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt); 829 830 /* parameter values */ 831 if (se->fields[i]->is_string) { 832 trace_seq_printf(s, print_fmt, se->fields[i]->name, 833 (char *)&entry->fields[n_u64], 834 i == se->n_fields - 1 ? "" : " "); 835 n_u64 += STR_VAR_LEN_MAX / sizeof(u64); 836 } else { 837 trace_seq_printf(s, print_fmt, se->fields[i]->name, 838 entry->fields[n_u64], 839 i == se->n_fields - 1 ? "" : " "); 840 n_u64++; 841 } 842 } 843 end: 844 trace_seq_putc(s, '\n'); 845 846 return trace_handle_return(s); 847 } 848 849 static struct trace_event_functions synth_event_funcs = { 850 .trace = print_synth_event 851 }; 852 853 static notrace void trace_event_raw_event_synth(void *__data, 854 u64 *var_ref_vals, 855 unsigned int var_ref_idx) 856 { 857 struct trace_event_file *trace_file = __data; 858 struct synth_trace_event *entry; 859 struct trace_event_buffer fbuffer; 860 struct ring_buffer *buffer; 861 struct synth_event *event; 862 unsigned int i, n_u64; 863 int fields_size = 0; 864 865 event = trace_file->event_call->data; 866 867 if (trace_trigger_soft_disabled(trace_file)) 868 return; 869 870 fields_size = event->n_u64 * sizeof(u64); 871 872 /* 873 * Avoid ring buffer recursion detection, as this event 874 * is being performed within another event. 
875 */ 876 buffer = trace_file->tr->trace_buffer.buffer; 877 ring_buffer_nest_start(buffer); 878 879 entry = trace_event_buffer_reserve(&fbuffer, trace_file, 880 sizeof(*entry) + fields_size); 881 if (!entry) 882 goto out; 883 884 for (i = 0, n_u64 = 0; i < event->n_fields; i++) { 885 if (event->fields[i]->is_string) { 886 char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i]; 887 char *str_field = (char *)&entry->fields[n_u64]; 888 889 strscpy(str_field, str_val, STR_VAR_LEN_MAX); 890 n_u64 += STR_VAR_LEN_MAX / sizeof(u64); 891 } else { 892 entry->fields[n_u64] = var_ref_vals[var_ref_idx + i]; 893 n_u64++; 894 } 895 } 896 897 trace_event_buffer_commit(&fbuffer); 898 out: 899 ring_buffer_nest_end(buffer); 900 } 901 902 static void free_synth_event_print_fmt(struct trace_event_call *call) 903 { 904 if (call) { 905 kfree(call->print_fmt); 906 call->print_fmt = NULL; 907 } 908 } 909 910 static int __set_synth_event_print_fmt(struct synth_event *event, 911 char *buf, int len) 912 { 913 const char *fmt; 914 int pos = 0; 915 int i; 916 917 /* When len=0, we just calculate the needed length */ 918 #define LEN_OR_ZERO (len ? len - pos : 0) 919 920 pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); 921 for (i = 0; i < event->n_fields; i++) { 922 fmt = synth_field_fmt(event->fields[i]->type); 923 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s", 924 event->fields[i]->name, fmt, 925 i == event->n_fields - 1 ? "" : ", "); 926 } 927 pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); 928 929 for (i = 0; i < event->n_fields; i++) { 930 pos += snprintf(buf + pos, LEN_OR_ZERO, 931 ", REC->%s", event->fields[i]->name); 932 } 933 934 #undef LEN_OR_ZERO 935 936 /* return the length of print_fmt */ 937 return pos; 938 } 939 940 static int set_synth_event_print_fmt(struct trace_event_call *call) 941 { 942 struct synth_event *event = call->data; 943 char *print_fmt; 944 int len; 945 946 /* First: called with 0 length to calculate the needed length */ 947 len = __set_synth_event_print_fmt(event, NULL, 0); 948 949 print_fmt = kmalloc(len + 1, GFP_KERNEL); 950 if (!print_fmt) 951 return -ENOMEM; 952 953 /* Second: actually write the @print_fmt */ 954 __set_synth_event_print_fmt(event, print_fmt, len + 1); 955 call->print_fmt = print_fmt; 956 957 return 0; 958 } 959 960 static void free_synth_field(struct synth_field *field) 961 { 962 kfree(field->type); 963 kfree(field->name); 964 kfree(field); 965 } 966 967 static struct synth_field *parse_synth_field(int argc, const char **argv, 968 int *consumed) 969 { 970 struct synth_field *field; 971 const char *prefix = NULL, *field_type = argv[0], *field_name, *array; 972 int len, ret = 0; 973 974 if (field_type[0] == ';') 975 field_type++; 976 977 if (!strcmp(field_type, "unsigned")) { 978 if (argc < 3) 979 return ERR_PTR(-EINVAL); 980 prefix = "unsigned "; 981 field_type = argv[1]; 982 field_name = argv[2]; 983 *consumed = 3; 984 } else { 985 field_name = argv[1]; 986 *consumed = 2; 987 } 988 989 field = kzalloc(sizeof(*field), GFP_KERNEL); 990 if (!field) 991 return ERR_PTR(-ENOMEM); 992 993 len = strlen(field_name); 994 array = strchr(field_name, '['); 995 if (array) 996 len -= strlen(array); 997 else if (field_name[len - 1] == ';') 998 len--; 999 1000 field->name = kmemdup_nul(field_name, len, GFP_KERNEL); 1001 if (!field->name) { 1002 ret = -ENOMEM; 1003 goto free; 1004 } 1005 1006 if (field_type[0] == ';') 1007 field_type++; 1008 len = strlen(field_type) + 1; 1009 if (array) 1010 len += strlen(array); 1011 if (prefix) 1012 len += strlen(prefix); 1013 1014 field->type 
= kzalloc(len, GFP_KERNEL); 1015 if (!field->type) { 1016 ret = -ENOMEM; 1017 goto free; 1018 } 1019 if (prefix) 1020 strcat(field->type, prefix); 1021 strcat(field->type, field_type); 1022 if (array) { 1023 strcat(field->type, array); 1024 if (field->type[len - 1] == ';') 1025 field->type[len - 1] = '\0'; 1026 } 1027 1028 field->size = synth_field_size(field->type); 1029 if (!field->size) { 1030 ret = -EINVAL; 1031 goto free; 1032 } 1033 1034 if (synth_field_is_string(field->type)) 1035 field->is_string = true; 1036 1037 field->is_signed = synth_field_signed(field->type); 1038 1039 out: 1040 return field; 1041 free: 1042 free_synth_field(field); 1043 field = ERR_PTR(ret); 1044 goto out; 1045 } 1046 1047 static void free_synth_tracepoint(struct tracepoint *tp) 1048 { 1049 if (!tp) 1050 return; 1051 1052 kfree(tp->name); 1053 kfree(tp); 1054 } 1055 1056 static struct tracepoint *alloc_synth_tracepoint(char *name) 1057 { 1058 struct tracepoint *tp; 1059 1060 tp = kzalloc(sizeof(*tp), GFP_KERNEL); 1061 if (!tp) 1062 return ERR_PTR(-ENOMEM); 1063 1064 tp->name = kstrdup(name, GFP_KERNEL); 1065 if (!tp->name) { 1066 kfree(tp); 1067 return ERR_PTR(-ENOMEM); 1068 } 1069 1070 return tp; 1071 } 1072 1073 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals, 1074 unsigned int var_ref_idx); 1075 1076 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals, 1077 unsigned int var_ref_idx) 1078 { 1079 struct tracepoint *tp = event->tp; 1080 1081 if (unlikely(atomic_read(&tp->key.enabled) > 0)) { 1082 struct tracepoint_func *probe_func_ptr; 1083 synth_probe_func_t probe_func; 1084 void *__data; 1085 1086 if (!(cpu_online(raw_smp_processor_id()))) 1087 return; 1088 1089 probe_func_ptr = rcu_dereference_sched((tp)->funcs); 1090 if (probe_func_ptr) { 1091 do { 1092 probe_func = probe_func_ptr->func; 1093 __data = probe_func_ptr->data; 1094 probe_func(__data, var_ref_vals, var_ref_idx); 1095 } while ((++probe_func_ptr)->func); 1096 } 1097 } 1098 } 1099 1100 static struct synth_event *find_synth_event(const char *name) 1101 { 1102 struct dyn_event *pos; 1103 struct synth_event *event; 1104 1105 for_each_dyn_event(pos) { 1106 if (!is_synth_event(pos)) 1107 continue; 1108 event = to_synth_event(pos); 1109 if (strcmp(event->name, name) == 0) 1110 return event; 1111 } 1112 1113 return NULL; 1114 } 1115 1116 static int register_synth_event(struct synth_event *event) 1117 { 1118 struct trace_event_call *call = &event->call; 1119 int ret = 0; 1120 1121 event->call.class = &event->class; 1122 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL); 1123 if (!event->class.system) { 1124 ret = -ENOMEM; 1125 goto out; 1126 } 1127 1128 event->tp = alloc_synth_tracepoint(event->name); 1129 if (IS_ERR(event->tp)) { 1130 ret = PTR_ERR(event->tp); 1131 event->tp = NULL; 1132 goto out; 1133 } 1134 1135 INIT_LIST_HEAD(&call->class->fields); 1136 call->event.funcs = &synth_event_funcs; 1137 call->class->define_fields = synth_event_define_fields; 1138 1139 ret = register_trace_event(&call->event); 1140 if (!ret) { 1141 ret = -ENODEV; 1142 goto out; 1143 } 1144 call->flags = TRACE_EVENT_FL_TRACEPOINT; 1145 call->class->reg = trace_event_reg; 1146 call->class->probe = trace_event_raw_event_synth; 1147 call->data = event; 1148 call->tp = event->tp; 1149 1150 ret = trace_add_event_call(call); 1151 if (ret) { 1152 pr_warn("Failed to register synthetic event: %s\n", 1153 trace_event_name(call)); 1154 goto err; 1155 } 1156 1157 ret = set_synth_event_print_fmt(call); 1158 if (ret < 0) { 1159 
trace_remove_event_call(call); 1160 goto err; 1161 } 1162 out: 1163 return ret; 1164 err: 1165 unregister_trace_event(&call->event); 1166 goto out; 1167 } 1168 1169 static int unregister_synth_event(struct synth_event *event) 1170 { 1171 struct trace_event_call *call = &event->call; 1172 int ret; 1173 1174 ret = trace_remove_event_call(call); 1175 1176 return ret; 1177 } 1178 1179 static void free_synth_event(struct synth_event *event) 1180 { 1181 unsigned int i; 1182 1183 if (!event) 1184 return; 1185 1186 for (i = 0; i < event->n_fields; i++) 1187 free_synth_field(event->fields[i]); 1188 1189 kfree(event->fields); 1190 kfree(event->name); 1191 kfree(event->class.system); 1192 free_synth_tracepoint(event->tp); 1193 free_synth_event_print_fmt(&event->call); 1194 kfree(event); 1195 } 1196 1197 static struct synth_event *alloc_synth_event(const char *name, int n_fields, 1198 struct synth_field **fields) 1199 { 1200 struct synth_event *event; 1201 unsigned int i; 1202 1203 event = kzalloc(sizeof(*event), GFP_KERNEL); 1204 if (!event) { 1205 event = ERR_PTR(-ENOMEM); 1206 goto out; 1207 } 1208 1209 event->name = kstrdup(name, GFP_KERNEL); 1210 if (!event->name) { 1211 kfree(event); 1212 event = ERR_PTR(-ENOMEM); 1213 goto out; 1214 } 1215 1216 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL); 1217 if (!event->fields) { 1218 free_synth_event(event); 1219 event = ERR_PTR(-ENOMEM); 1220 goto out; 1221 } 1222 1223 dyn_event_init(&event->devent, &synth_event_ops); 1224 1225 for (i = 0; i < n_fields; i++) 1226 event->fields[i] = fields[i]; 1227 1228 event->n_fields = n_fields; 1229 out: 1230 return event; 1231 } 1232 1233 static void action_trace(struct hist_trigger_data *hist_data, 1234 struct tracing_map_elt *elt, void *rec, 1235 struct ring_buffer_event *rbe, void *key, 1236 struct action_data *data, u64 *var_ref_vals) 1237 { 1238 struct synth_event *event = data->synth_event; 1239 1240 trace_synth(event, var_ref_vals, data->var_ref_idx); 1241 } 1242 1243 struct hist_var_data { 1244 struct list_head list; 1245 struct hist_trigger_data *hist_data; 1246 }; 1247 1248 static int __create_synth_event(int argc, const char *name, const char **argv) 1249 { 1250 struct synth_field *field, *fields[SYNTH_FIELDS_MAX]; 1251 struct synth_event *event = NULL; 1252 int i, consumed = 0, n_fields = 0, ret = 0; 1253 1254 /* 1255 * Argument syntax: 1256 * - Add synthetic event: <event_name> field[;field] ... 1257 * - Remove synthetic event: !<event_name> field[;field] ... 
1258 * where 'field' = type field_name 1259 */ 1260 1261 if (name[0] == '\0' || argc < 1) 1262 return -EINVAL; 1263 1264 mutex_lock(&event_mutex); 1265 1266 event = find_synth_event(name); 1267 if (event) { 1268 ret = -EEXIST; 1269 goto out; 1270 } 1271 1272 for (i = 0; i < argc - 1; i++) { 1273 if (strcmp(argv[i], ";") == 0) 1274 continue; 1275 if (n_fields == SYNTH_FIELDS_MAX) { 1276 ret = -EINVAL; 1277 goto err; 1278 } 1279 1280 field = parse_synth_field(argc - i, &argv[i], &consumed); 1281 if (IS_ERR(field)) { 1282 ret = PTR_ERR(field); 1283 goto err; 1284 } 1285 fields[n_fields++] = field; 1286 i += consumed - 1; 1287 } 1288 1289 if (i < argc && strcmp(argv[i], ";") != 0) { 1290 ret = -EINVAL; 1291 goto err; 1292 } 1293 1294 event = alloc_synth_event(name, n_fields, fields); 1295 if (IS_ERR(event)) { 1296 ret = PTR_ERR(event); 1297 event = NULL; 1298 goto err; 1299 } 1300 ret = register_synth_event(event); 1301 if (!ret) 1302 dyn_event_add(&event->devent); 1303 else 1304 free_synth_event(event); 1305 out: 1306 mutex_unlock(&event_mutex); 1307 1308 return ret; 1309 err: 1310 for (i = 0; i < n_fields; i++) 1311 free_synth_field(fields[i]); 1312 1313 goto out; 1314 } 1315 1316 static int create_or_delete_synth_event(int argc, char **argv) 1317 { 1318 const char *name = argv[0]; 1319 struct synth_event *event = NULL; 1320 int ret; 1321 1322 /* trace_run_command() ensures argc != 0 */ 1323 if (name[0] == '!') { 1324 mutex_lock(&event_mutex); 1325 event = find_synth_event(name + 1); 1326 if (event) { 1327 if (event->ref) 1328 ret = -EBUSY; 1329 else { 1330 ret = unregister_synth_event(event); 1331 if (!ret) { 1332 dyn_event_remove(&event->devent); 1333 free_synth_event(event); 1334 } 1335 } 1336 } else 1337 ret = -ENOENT; 1338 mutex_unlock(&event_mutex); 1339 return ret; 1340 } 1341 1342 ret = __create_synth_event(argc - 1, name, (const char **)argv + 1); 1343 return ret == -ECANCELED ? -EINVAL : ret; 1344 } 1345 1346 static int synth_event_create(int argc, const char **argv) 1347 { 1348 const char *name = argv[0]; 1349 int len; 1350 1351 if (name[0] != 's' || name[1] != ':') 1352 return -ECANCELED; 1353 name += 2; 1354 1355 /* This interface accepts group name prefix */ 1356 if (strchr(name, '/')) { 1357 len = str_has_prefix(name, SYNTH_SYSTEM "/"); 1358 if (len == 0) 1359 return -EINVAL; 1360 name += len; 1361 } 1362 return __create_synth_event(argc - 1, name, argv + 1); 1363 } 1364 1365 static int synth_event_release(struct dyn_event *ev) 1366 { 1367 struct synth_event *event = to_synth_event(ev); 1368 int ret; 1369 1370 if (event->ref) 1371 return -EBUSY; 1372 1373 ret = unregister_synth_event(event); 1374 if (ret) 1375 return ret; 1376 1377 dyn_event_remove(ev); 1378 free_synth_event(event); 1379 return 0; 1380 } 1381 1382 static int __synth_event_show(struct seq_file *m, struct synth_event *event) 1383 { 1384 struct synth_field *field; 1385 unsigned int i; 1386 1387 seq_printf(m, "%s\t", event->name); 1388 1389 for (i = 0; i < event->n_fields; i++) { 1390 field = event->fields[i]; 1391 1392 /* parameter values */ 1393 seq_printf(m, "%s %s%s", field->type, field->name, 1394 i == event->n_fields - 1 ? 
"" : "; "); 1395 } 1396 1397 seq_putc(m, '\n'); 1398 1399 return 0; 1400 } 1401 1402 static int synth_event_show(struct seq_file *m, struct dyn_event *ev) 1403 { 1404 struct synth_event *event = to_synth_event(ev); 1405 1406 seq_printf(m, "s:%s/", event->class.system); 1407 1408 return __synth_event_show(m, event); 1409 } 1410 1411 static int synth_events_seq_show(struct seq_file *m, void *v) 1412 { 1413 struct dyn_event *ev = v; 1414 1415 if (!is_synth_event(ev)) 1416 return 0; 1417 1418 return __synth_event_show(m, to_synth_event(ev)); 1419 } 1420 1421 static const struct seq_operations synth_events_seq_op = { 1422 .start = dyn_event_seq_start, 1423 .next = dyn_event_seq_next, 1424 .stop = dyn_event_seq_stop, 1425 .show = synth_events_seq_show, 1426 }; 1427 1428 static int synth_events_open(struct inode *inode, struct file *file) 1429 { 1430 int ret; 1431 1432 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 1433 ret = dyn_events_release_all(&synth_event_ops); 1434 if (ret < 0) 1435 return ret; 1436 } 1437 1438 return seq_open(file, &synth_events_seq_op); 1439 } 1440 1441 static ssize_t synth_events_write(struct file *file, 1442 const char __user *buffer, 1443 size_t count, loff_t *ppos) 1444 { 1445 return trace_parse_run_command(file, buffer, count, ppos, 1446 create_or_delete_synth_event); 1447 } 1448 1449 static const struct file_operations synth_events_fops = { 1450 .open = synth_events_open, 1451 .write = synth_events_write, 1452 .read = seq_read, 1453 .llseek = seq_lseek, 1454 .release = seq_release, 1455 }; 1456 1457 static u64 hist_field_timestamp(struct hist_field *hist_field, 1458 struct tracing_map_elt *elt, 1459 struct ring_buffer_event *rbe, 1460 void *event) 1461 { 1462 struct hist_trigger_data *hist_data = hist_field->hist_data; 1463 struct trace_array *tr = hist_data->event_file->tr; 1464 1465 u64 ts = ring_buffer_event_time_stamp(rbe); 1466 1467 if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr)) 1468 ts = ns2usecs(ts); 1469 1470 return ts; 1471 } 1472 1473 static u64 hist_field_cpu(struct hist_field *hist_field, 1474 struct tracing_map_elt *elt, 1475 struct ring_buffer_event *rbe, 1476 void *event) 1477 { 1478 int cpu = smp_processor_id(); 1479 1480 return cpu; 1481 } 1482 1483 /** 1484 * check_field_for_var_ref - Check if a VAR_REF field references a variable 1485 * @hist_field: The VAR_REF field to check 1486 * @var_data: The hist trigger that owns the variable 1487 * @var_idx: The trigger variable identifier 1488 * 1489 * Check the given VAR_REF field to see whether or not it references 1490 * the given variable associated with the given trigger. 
1491 * 1492 * Return: The VAR_REF field if it does reference the variable, NULL if not 1493 */ 1494 static struct hist_field * 1495 check_field_for_var_ref(struct hist_field *hist_field, 1496 struct hist_trigger_data *var_data, 1497 unsigned int var_idx) 1498 { 1499 WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF)); 1500 1501 if (hist_field && hist_field->var.idx == var_idx && 1502 hist_field->var.hist_data == var_data) 1503 return hist_field; 1504 1505 return NULL; 1506 } 1507 1508 /** 1509 * find_var_ref - Check if a trigger has a reference to a trigger variable 1510 * @hist_data: The hist trigger that might have a reference to the variable 1511 * @var_data: The hist trigger that owns the variable 1512 * @var_idx: The trigger variable identifier 1513 * 1514 * Check the list of var_refs[] on the first hist trigger to see 1515 * whether any of them are references to the variable on the second 1516 * trigger. 1517 * 1518 * Return: The VAR_REF field referencing the variable if so, NULL if not 1519 */ 1520 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data, 1521 struct hist_trigger_data *var_data, 1522 unsigned int var_idx) 1523 { 1524 struct hist_field *hist_field; 1525 unsigned int i; 1526 1527 for (i = 0; i < hist_data->n_var_refs; i++) { 1528 hist_field = hist_data->var_refs[i]; 1529 if (check_field_for_var_ref(hist_field, var_data, var_idx)) 1530 return hist_field; 1531 } 1532 1533 return NULL; 1534 } 1535 1536 /** 1537 * find_any_var_ref - Check if there is a reference to a given trigger variable 1538 * @hist_data: The hist trigger 1539 * @var_idx: The trigger variable identifier 1540 * 1541 * Check to see whether the given variable is currently referenced by 1542 * any other trigger. 1543 * 1544 * The trigger the variable is defined on is explicitly excluded - the 1545 * assumption being that a self-reference doesn't prevent a trigger 1546 * from being removed. 1547 * 1548 * Return: The VAR_REF field referencing the variable if so, NULL if not 1549 */ 1550 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data, 1551 unsigned int var_idx) 1552 { 1553 struct trace_array *tr = hist_data->event_file->tr; 1554 struct hist_field *found = NULL; 1555 struct hist_var_data *var_data; 1556 1557 list_for_each_entry(var_data, &tr->hist_vars, list) { 1558 if (var_data->hist_data == hist_data) 1559 continue; 1560 found = find_var_ref(var_data->hist_data, hist_data, var_idx); 1561 if (found) 1562 break; 1563 } 1564 1565 return found; 1566 } 1567 1568 /** 1569 * check_var_refs - Check if there is a reference to any of trigger's variables 1570 * @hist_data: The hist trigger 1571 * 1572 * A trigger can define one or more variables. If any one of them is 1573 * currently referenced by any other trigger, this function will 1574 * determine that. 1575 1576 * Typically used to determine whether or not a trigger can be removed 1577 * - if there are any references to a trigger's variables, it cannot. 
1578 * 1579 * Return: True if there is a reference to any of trigger's variables 1580 */ 1581 static bool check_var_refs(struct hist_trigger_data *hist_data) 1582 { 1583 struct hist_field *field; 1584 bool found = false; 1585 int i; 1586 1587 for_each_hist_field(i, hist_data) { 1588 field = hist_data->fields[i]; 1589 if (field && field->flags & HIST_FIELD_FL_VAR) { 1590 if (find_any_var_ref(hist_data, field->var.idx)) { 1591 found = true; 1592 break; 1593 } 1594 } 1595 } 1596 1597 return found; 1598 } 1599 1600 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data) 1601 { 1602 struct trace_array *tr = hist_data->event_file->tr; 1603 struct hist_var_data *var_data, *found = NULL; 1604 1605 list_for_each_entry(var_data, &tr->hist_vars, list) { 1606 if (var_data->hist_data == hist_data) { 1607 found = var_data; 1608 break; 1609 } 1610 } 1611 1612 return found; 1613 } 1614 1615 static bool field_has_hist_vars(struct hist_field *hist_field, 1616 unsigned int level) 1617 { 1618 int i; 1619 1620 if (level > 3) 1621 return false; 1622 1623 if (!hist_field) 1624 return false; 1625 1626 if (hist_field->flags & HIST_FIELD_FL_VAR || 1627 hist_field->flags & HIST_FIELD_FL_VAR_REF) 1628 return true; 1629 1630 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) { 1631 struct hist_field *operand; 1632 1633 operand = hist_field->operands[i]; 1634 if (field_has_hist_vars(operand, level + 1)) 1635 return true; 1636 } 1637 1638 return false; 1639 } 1640 1641 static bool has_hist_vars(struct hist_trigger_data *hist_data) 1642 { 1643 struct hist_field *hist_field; 1644 int i; 1645 1646 for_each_hist_field(i, hist_data) { 1647 hist_field = hist_data->fields[i]; 1648 if (field_has_hist_vars(hist_field, 0)) 1649 return true; 1650 } 1651 1652 return false; 1653 } 1654 1655 static int save_hist_vars(struct hist_trigger_data *hist_data) 1656 { 1657 struct trace_array *tr = hist_data->event_file->tr; 1658 struct hist_var_data *var_data; 1659 1660 var_data = find_hist_vars(hist_data); 1661 if (var_data) 1662 return 0; 1663 1664 if (trace_array_get(tr) < 0) 1665 return -ENODEV; 1666 1667 var_data = kzalloc(sizeof(*var_data), GFP_KERNEL); 1668 if (!var_data) { 1669 trace_array_put(tr); 1670 return -ENOMEM; 1671 } 1672 1673 var_data->hist_data = hist_data; 1674 list_add(&var_data->list, &tr->hist_vars); 1675 1676 return 0; 1677 } 1678 1679 static void remove_hist_vars(struct hist_trigger_data *hist_data) 1680 { 1681 struct trace_array *tr = hist_data->event_file->tr; 1682 struct hist_var_data *var_data; 1683 1684 var_data = find_hist_vars(hist_data); 1685 if (!var_data) 1686 return; 1687 1688 if (WARN_ON(check_var_refs(hist_data))) 1689 return; 1690 1691 list_del(&var_data->list); 1692 1693 kfree(var_data); 1694 1695 trace_array_put(tr); 1696 } 1697 1698 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data, 1699 const char *var_name) 1700 { 1701 struct hist_field *hist_field, *found = NULL; 1702 int i; 1703 1704 for_each_hist_field(i, hist_data) { 1705 hist_field = hist_data->fields[i]; 1706 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR && 1707 strcmp(hist_field->var.name, var_name) == 0) { 1708 found = hist_field; 1709 break; 1710 } 1711 } 1712 1713 return found; 1714 } 1715 1716 static struct hist_field *find_var(struct hist_trigger_data *hist_data, 1717 struct trace_event_file *file, 1718 const char *var_name) 1719 { 1720 struct hist_trigger_data *test_data; 1721 struct event_trigger_data *test; 1722 struct hist_field *hist_field; 1723 1724 hist_field = 
find_var_field(hist_data, var_name); 1725 if (hist_field) 1726 return hist_field; 1727 1728 list_for_each_entry_rcu(test, &file->triggers, list) { 1729 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 1730 test_data = test->private_data; 1731 hist_field = find_var_field(test_data, var_name); 1732 if (hist_field) 1733 return hist_field; 1734 } 1735 } 1736 1737 return NULL; 1738 } 1739 1740 static struct trace_event_file *find_var_file(struct trace_array *tr, 1741 char *system, 1742 char *event_name, 1743 char *var_name) 1744 { 1745 struct hist_trigger_data *var_hist_data; 1746 struct hist_var_data *var_data; 1747 struct trace_event_file *file, *found = NULL; 1748 1749 if (system) 1750 return find_event_file(tr, system, event_name); 1751 1752 list_for_each_entry(var_data, &tr->hist_vars, list) { 1753 var_hist_data = var_data->hist_data; 1754 file = var_hist_data->event_file; 1755 if (file == found) 1756 continue; 1757 1758 if (find_var_field(var_hist_data, var_name)) { 1759 if (found) { 1760 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name)); 1761 return NULL; 1762 } 1763 1764 found = file; 1765 } 1766 } 1767 1768 return found; 1769 } 1770 1771 static struct hist_field *find_file_var(struct trace_event_file *file, 1772 const char *var_name) 1773 { 1774 struct hist_trigger_data *test_data; 1775 struct event_trigger_data *test; 1776 struct hist_field *hist_field; 1777 1778 list_for_each_entry_rcu(test, &file->triggers, list) { 1779 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 1780 test_data = test->private_data; 1781 hist_field = find_var_field(test_data, var_name); 1782 if (hist_field) 1783 return hist_field; 1784 } 1785 } 1786 1787 return NULL; 1788 } 1789 1790 static struct hist_field * 1791 find_match_var(struct hist_trigger_data *hist_data, char *var_name) 1792 { 1793 struct trace_array *tr = hist_data->event_file->tr; 1794 struct hist_field *hist_field, *found = NULL; 1795 struct trace_event_file *file; 1796 unsigned int i; 1797 1798 for (i = 0; i < hist_data->n_actions; i++) { 1799 struct action_data *data = hist_data->actions[i]; 1800 1801 if (data->handler == HANDLER_ONMATCH) { 1802 char *system = data->match_data.event_system; 1803 char *event_name = data->match_data.event; 1804 1805 file = find_var_file(tr, system, event_name, var_name); 1806 if (!file) 1807 continue; 1808 hist_field = find_file_var(file, var_name); 1809 if (hist_field) { 1810 if (found) { 1811 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, 1812 errpos(var_name)); 1813 return ERR_PTR(-EINVAL); 1814 } 1815 1816 found = hist_field; 1817 } 1818 } 1819 } 1820 return found; 1821 } 1822 1823 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data, 1824 char *system, 1825 char *event_name, 1826 char *var_name) 1827 { 1828 struct trace_array *tr = hist_data->event_file->tr; 1829 struct hist_field *hist_field = NULL; 1830 struct trace_event_file *file; 1831 1832 if (!system || !event_name) { 1833 hist_field = find_match_var(hist_data, var_name); 1834 if (IS_ERR(hist_field)) 1835 return NULL; 1836 if (hist_field) 1837 return hist_field; 1838 } 1839 1840 file = find_var_file(tr, system, event_name, var_name); 1841 if (!file) 1842 return NULL; 1843 1844 hist_field = find_file_var(file, var_name); 1845 1846 return hist_field; 1847 } 1848 1849 static u64 hist_field_var_ref(struct hist_field *hist_field, 1850 struct tracing_map_elt *elt, 1851 struct ring_buffer_event *rbe, 1852 void *event) 1853 { 1854 struct hist_elt_data *elt_data; 1855 u64 var_val = 0; 1856 1857 if (WARN_ON_ONCE(!elt)) 1858 return 
var_val; 1859 1860 elt_data = elt->private_data; 1861 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx]; 1862 1863 return var_val; 1864 } 1865 1866 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key, 1867 u64 *var_ref_vals, bool self) 1868 { 1869 struct hist_trigger_data *var_data; 1870 struct tracing_map_elt *var_elt; 1871 struct hist_field *hist_field; 1872 unsigned int i, var_idx; 1873 bool resolved = true; 1874 u64 var_val = 0; 1875 1876 for (i = 0; i < hist_data->n_var_refs; i++) { 1877 hist_field = hist_data->var_refs[i]; 1878 var_idx = hist_field->var.idx; 1879 var_data = hist_field->var.hist_data; 1880 1881 if (var_data == NULL) { 1882 resolved = false; 1883 break; 1884 } 1885 1886 if ((self && var_data != hist_data) || 1887 (!self && var_data == hist_data)) 1888 continue; 1889 1890 var_elt = tracing_map_lookup(var_data->map, key); 1891 if (!var_elt) { 1892 resolved = false; 1893 break; 1894 } 1895 1896 if (!tracing_map_var_set(var_elt, var_idx)) { 1897 resolved = false; 1898 break; 1899 } 1900 1901 if (self || !hist_field->read_once) 1902 var_val = tracing_map_read_var(var_elt, var_idx); 1903 else 1904 var_val = tracing_map_read_var_once(var_elt, var_idx); 1905 1906 var_ref_vals[i] = var_val; 1907 } 1908 1909 return resolved; 1910 } 1911 1912 static const char *hist_field_name(struct hist_field *field, 1913 unsigned int level) 1914 { 1915 const char *field_name = ""; 1916 1917 if (level > 1) 1918 return field_name; 1919 1920 if (field->field) 1921 field_name = field->field->name; 1922 else if (field->flags & HIST_FIELD_FL_LOG2 || 1923 field->flags & HIST_FIELD_FL_ALIAS) 1924 field_name = hist_field_name(field->operands[0], ++level); 1925 else if (field->flags & HIST_FIELD_FL_CPU) 1926 field_name = "cpu"; 1927 else if (field->flags & HIST_FIELD_FL_EXPR || 1928 field->flags & HIST_FIELD_FL_VAR_REF) { 1929 if (field->system) { 1930 static char full_name[MAX_FILTER_STR_VAL]; 1931 1932 strcat(full_name, field->system); 1933 strcat(full_name, "."); 1934 strcat(full_name, field->event_name); 1935 strcat(full_name, "."); 1936 strcat(full_name, field->name); 1937 field_name = full_name; 1938 } else 1939 field_name = field->name; 1940 } else if (field->flags & HIST_FIELD_FL_TIMESTAMP) 1941 field_name = "common_timestamp"; 1942 1943 if (field_name == NULL) 1944 field_name = ""; 1945 1946 return field_name; 1947 } 1948 1949 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed) 1950 { 1951 hist_field_fn_t fn = NULL; 1952 1953 switch (field_size) { 1954 case 8: 1955 if (field_is_signed) 1956 fn = hist_field_s64; 1957 else 1958 fn = hist_field_u64; 1959 break; 1960 case 4: 1961 if (field_is_signed) 1962 fn = hist_field_s32; 1963 else 1964 fn = hist_field_u32; 1965 break; 1966 case 2: 1967 if (field_is_signed) 1968 fn = hist_field_s16; 1969 else 1970 fn = hist_field_u16; 1971 break; 1972 case 1: 1973 if (field_is_signed) 1974 fn = hist_field_s8; 1975 else 1976 fn = hist_field_u8; 1977 break; 1978 } 1979 1980 return fn; 1981 } 1982 1983 static int parse_map_size(char *str) 1984 { 1985 unsigned long size, map_bits; 1986 int ret; 1987 1988 strsep(&str, "="); 1989 if (!str) { 1990 ret = -EINVAL; 1991 goto out; 1992 } 1993 1994 ret = kstrtoul(str, 0, &size); 1995 if (ret) 1996 goto out; 1997 1998 map_bits = ilog2(roundup_pow_of_two(size)); 1999 if (map_bits < TRACING_MAP_BITS_MIN || 2000 map_bits > TRACING_MAP_BITS_MAX) 2001 ret = -EINVAL; 2002 else 2003 ret = map_bits; 2004 out: 2005 return ret; 2006 } 2007 2008 static void 
destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs) 2009 { 2010 unsigned int i; 2011 2012 if (!attrs) 2013 return; 2014 2015 for (i = 0; i < attrs->n_assignments; i++) 2016 kfree(attrs->assignment_str[i]); 2017 2018 for (i = 0; i < attrs->n_actions; i++) 2019 kfree(attrs->action_str[i]); 2020 2021 kfree(attrs->name); 2022 kfree(attrs->sort_key_str); 2023 kfree(attrs->keys_str); 2024 kfree(attrs->vals_str); 2025 kfree(attrs->clock); 2026 kfree(attrs); 2027 } 2028 2029 static int parse_action(char *str, struct hist_trigger_attrs *attrs) 2030 { 2031 int ret = -EINVAL; 2032 2033 if (attrs->n_actions >= HIST_ACTIONS_MAX) 2034 return ret; 2035 2036 if ((str_has_prefix(str, "onmatch(")) || 2037 (str_has_prefix(str, "onmax(")) || 2038 (str_has_prefix(str, "onchange("))) { 2039 attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL); 2040 if (!attrs->action_str[attrs->n_actions]) { 2041 ret = -ENOMEM; 2042 return ret; 2043 } 2044 attrs->n_actions++; 2045 ret = 0; 2046 } 2047 return ret; 2048 } 2049 2050 static int parse_assignment(struct trace_array *tr, 2051 char *str, struct hist_trigger_attrs *attrs) 2052 { 2053 int ret = 0; 2054 2055 if ((str_has_prefix(str, "key=")) || 2056 (str_has_prefix(str, "keys="))) { 2057 attrs->keys_str = kstrdup(str, GFP_KERNEL); 2058 if (!attrs->keys_str) { 2059 ret = -ENOMEM; 2060 goto out; 2061 } 2062 } else if ((str_has_prefix(str, "val=")) || 2063 (str_has_prefix(str, "vals=")) || 2064 (str_has_prefix(str, "values="))) { 2065 attrs->vals_str = kstrdup(str, GFP_KERNEL); 2066 if (!attrs->vals_str) { 2067 ret = -ENOMEM; 2068 goto out; 2069 } 2070 } else if (str_has_prefix(str, "sort=")) { 2071 attrs->sort_key_str = kstrdup(str, GFP_KERNEL); 2072 if (!attrs->sort_key_str) { 2073 ret = -ENOMEM; 2074 goto out; 2075 } 2076 } else if (str_has_prefix(str, "name=")) { 2077 attrs->name = kstrdup(str, GFP_KERNEL); 2078 if (!attrs->name) { 2079 ret = -ENOMEM; 2080 goto out; 2081 } 2082 } else if (str_has_prefix(str, "clock=")) { 2083 strsep(&str, "="); 2084 if (!str) { 2085 ret = -EINVAL; 2086 goto out; 2087 } 2088 2089 str = strstrip(str); 2090 attrs->clock = kstrdup(str, GFP_KERNEL); 2091 if (!attrs->clock) { 2092 ret = -ENOMEM; 2093 goto out; 2094 } 2095 } else if (str_has_prefix(str, "size=")) { 2096 int map_bits = parse_map_size(str); 2097 2098 if (map_bits < 0) { 2099 ret = map_bits; 2100 goto out; 2101 } 2102 attrs->map_bits = map_bits; 2103 } else { 2104 char *assignment; 2105 2106 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) { 2107 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str)); 2108 ret = -EINVAL; 2109 goto out; 2110 } 2111 2112 assignment = kstrdup(str, GFP_KERNEL); 2113 if (!assignment) { 2114 ret = -ENOMEM; 2115 goto out; 2116 } 2117 2118 attrs->assignment_str[attrs->n_assignments++] = assignment; 2119 } 2120 out: 2121 return ret; 2122 } 2123 2124 static struct hist_trigger_attrs * 2125 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str) 2126 { 2127 struct hist_trigger_attrs *attrs; 2128 int ret = 0; 2129 2130 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 2131 if (!attrs) 2132 return ERR_PTR(-ENOMEM); 2133 2134 while (trigger_str) { 2135 char *str = strsep(&trigger_str, ":"); 2136 2137 if (strchr(str, '=')) { 2138 ret = parse_assignment(tr, str, attrs); 2139 if (ret) 2140 goto free; 2141 } else if (strcmp(str, "pause") == 0) 2142 attrs->pause = true; 2143 else if ((strcmp(str, "cont") == 0) || 2144 (strcmp(str, "continue") == 0)) 2145 attrs->cont = true; 2146 else if (strcmp(str, "clear") == 0) 2147 attrs->clear = true; 2148 
else { 2149 ret = parse_action(str, attrs); 2150 if (ret) 2151 goto free; 2152 } 2153 } 2154 2155 if (!attrs->keys_str) { 2156 ret = -EINVAL; 2157 goto free; 2158 } 2159 2160 if (!attrs->clock) { 2161 attrs->clock = kstrdup("global", GFP_KERNEL); 2162 if (!attrs->clock) { 2163 ret = -ENOMEM; 2164 goto free; 2165 } 2166 } 2167 2168 return attrs; 2169 free: 2170 destroy_hist_trigger_attrs(attrs); 2171 2172 return ERR_PTR(ret); 2173 } 2174 2175 static inline void save_comm(char *comm, struct task_struct *task) 2176 { 2177 if (!task->pid) { 2178 strcpy(comm, "<idle>"); 2179 return; 2180 } 2181 2182 if (WARN_ON_ONCE(task->pid < 0)) { 2183 strcpy(comm, "<XXX>"); 2184 return; 2185 } 2186 2187 strncpy(comm, task->comm, TASK_COMM_LEN); 2188 } 2189 2190 static void hist_elt_data_free(struct hist_elt_data *elt_data) 2191 { 2192 unsigned int i; 2193 2194 for (i = 0; i < SYNTH_FIELDS_MAX; i++) 2195 kfree(elt_data->field_var_str[i]); 2196 2197 kfree(elt_data->comm); 2198 kfree(elt_data); 2199 } 2200 2201 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt) 2202 { 2203 struct hist_elt_data *elt_data = elt->private_data; 2204 2205 hist_elt_data_free(elt_data); 2206 } 2207 2208 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt) 2209 { 2210 struct hist_trigger_data *hist_data = elt->map->private_data; 2211 unsigned int size = TASK_COMM_LEN; 2212 struct hist_elt_data *elt_data; 2213 struct hist_field *key_field; 2214 unsigned int i, n_str; 2215 2216 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); 2217 if (!elt_data) 2218 return -ENOMEM; 2219 2220 for_each_hist_key_field(i, hist_data) { 2221 key_field = hist_data->fields[i]; 2222 2223 if (key_field->flags & HIST_FIELD_FL_EXECNAME) { 2224 elt_data->comm = kzalloc(size, GFP_KERNEL); 2225 if (!elt_data->comm) { 2226 kfree(elt_data); 2227 return -ENOMEM; 2228 } 2229 break; 2230 } 2231 } 2232 2233 n_str = hist_data->n_field_var_str + hist_data->n_save_var_str; 2234 2235 size = STR_VAR_LEN_MAX; 2236 2237 for (i = 0; i < n_str; i++) { 2238 elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL); 2239 if (!elt_data->field_var_str[i]) { 2240 hist_elt_data_free(elt_data); 2241 return -ENOMEM; 2242 } 2243 } 2244 2245 elt->private_data = elt_data; 2246 2247 return 0; 2248 } 2249 2250 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt) 2251 { 2252 struct hist_elt_data *elt_data = elt->private_data; 2253 2254 if (elt_data->comm) 2255 save_comm(elt_data->comm, current); 2256 } 2257 2258 static const struct tracing_map_ops hist_trigger_elt_data_ops = { 2259 .elt_alloc = hist_trigger_elt_data_alloc, 2260 .elt_free = hist_trigger_elt_data_free, 2261 .elt_init = hist_trigger_elt_data_init, 2262 }; 2263 2264 static const char *get_hist_field_flags(struct hist_field *hist_field) 2265 { 2266 const char *flags_str = NULL; 2267 2268 if (hist_field->flags & HIST_FIELD_FL_HEX) 2269 flags_str = "hex"; 2270 else if (hist_field->flags & HIST_FIELD_FL_SYM) 2271 flags_str = "sym"; 2272 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET) 2273 flags_str = "sym-offset"; 2274 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME) 2275 flags_str = "execname"; 2276 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL) 2277 flags_str = "syscall"; 2278 else if (hist_field->flags & HIST_FIELD_FL_LOG2) 2279 flags_str = "log2"; 2280 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS) 2281 flags_str = "usecs"; 2282 2283 return flags_str; 2284 } 2285 2286 static void expr_field_str(struct hist_field *field, char *expr) 2287 { 2288 if 
(field->flags & HIST_FIELD_FL_VAR_REF) 2289 strcat(expr, "$"); 2290 2291 strcat(expr, hist_field_name(field, 0)); 2292 2293 if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) { 2294 const char *flags_str = get_hist_field_flags(field); 2295 2296 if (flags_str) { 2297 strcat(expr, "."); 2298 strcat(expr, flags_str); 2299 } 2300 } 2301 } 2302 2303 static char *expr_str(struct hist_field *field, unsigned int level) 2304 { 2305 char *expr; 2306 2307 if (level > 1) 2308 return NULL; 2309 2310 expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 2311 if (!expr) 2312 return NULL; 2313 2314 if (!field->operands[0]) { 2315 expr_field_str(field, expr); 2316 return expr; 2317 } 2318 2319 if (field->operator == FIELD_OP_UNARY_MINUS) { 2320 char *subexpr; 2321 2322 strcat(expr, "-("); 2323 subexpr = expr_str(field->operands[0], ++level); 2324 if (!subexpr) { 2325 kfree(expr); 2326 return NULL; 2327 } 2328 strcat(expr, subexpr); 2329 strcat(expr, ")"); 2330 2331 kfree(subexpr); 2332 2333 return expr; 2334 } 2335 2336 expr_field_str(field->operands[0], expr); 2337 2338 switch (field->operator) { 2339 case FIELD_OP_MINUS: 2340 strcat(expr, "-"); 2341 break; 2342 case FIELD_OP_PLUS: 2343 strcat(expr, "+"); 2344 break; 2345 default: 2346 kfree(expr); 2347 return NULL; 2348 } 2349 2350 expr_field_str(field->operands[1], expr); 2351 2352 return expr; 2353 } 2354 2355 static int contains_operator(char *str) 2356 { 2357 enum field_op_id field_op = FIELD_OP_NONE; 2358 char *op; 2359 2360 op = strpbrk(str, "+-"); 2361 if (!op) 2362 return FIELD_OP_NONE; 2363 2364 switch (*op) { 2365 case '-': 2366 if (*str == '-') 2367 field_op = FIELD_OP_UNARY_MINUS; 2368 else 2369 field_op = FIELD_OP_MINUS; 2370 break; 2371 case '+': 2372 field_op = FIELD_OP_PLUS; 2373 break; 2374 default: 2375 break; 2376 } 2377 2378 return field_op; 2379 } 2380 2381 static void __destroy_hist_field(struct hist_field *hist_field) 2382 { 2383 kfree(hist_field->var.name); 2384 kfree(hist_field->name); 2385 kfree(hist_field->type); 2386 2387 kfree(hist_field); 2388 } 2389 2390 static void destroy_hist_field(struct hist_field *hist_field, 2391 unsigned int level) 2392 { 2393 unsigned int i; 2394 2395 if (level > 3) 2396 return; 2397 2398 if (!hist_field) 2399 return; 2400 2401 if (hist_field->flags & HIST_FIELD_FL_VAR_REF) 2402 return; /* var refs will be destroyed separately */ 2403 2404 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) 2405 destroy_hist_field(hist_field->operands[i], level + 1); 2406 2407 __destroy_hist_field(hist_field); 2408 } 2409 2410 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, 2411 struct ftrace_event_field *field, 2412 unsigned long flags, 2413 char *var_name) 2414 { 2415 struct hist_field *hist_field; 2416 2417 if (field && is_function_field(field)) 2418 return NULL; 2419 2420 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL); 2421 if (!hist_field) 2422 return NULL; 2423 2424 hist_field->hist_data = hist_data; 2425 2426 if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS) 2427 goto out; /* caller will populate */ 2428 2429 if (flags & HIST_FIELD_FL_VAR_REF) { 2430 hist_field->fn = hist_field_var_ref; 2431 goto out; 2432 } 2433 2434 if (flags & HIST_FIELD_FL_HITCOUNT) { 2435 hist_field->fn = hist_field_counter; 2436 hist_field->size = sizeof(u64); 2437 hist_field->type = kstrdup("u64", GFP_KERNEL); 2438 if (!hist_field->type) 2439 goto free; 2440 goto out; 2441 } 2442 2443 if (flags & HIST_FIELD_FL_STACKTRACE) { 2444 hist_field->fn = hist_field_none; 2445 goto out; 
2446 } 2447 2448 if (flags & HIST_FIELD_FL_LOG2) { 2449 unsigned long fl = flags & ~HIST_FIELD_FL_LOG2; 2450 hist_field->fn = hist_field_log2; 2451 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL); 2452 hist_field->size = hist_field->operands[0]->size; 2453 hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL); 2454 if (!hist_field->type) 2455 goto free; 2456 goto out; 2457 } 2458 2459 if (flags & HIST_FIELD_FL_TIMESTAMP) { 2460 hist_field->fn = hist_field_timestamp; 2461 hist_field->size = sizeof(u64); 2462 hist_field->type = kstrdup("u64", GFP_KERNEL); 2463 if (!hist_field->type) 2464 goto free; 2465 goto out; 2466 } 2467 2468 if (flags & HIST_FIELD_FL_CPU) { 2469 hist_field->fn = hist_field_cpu; 2470 hist_field->size = sizeof(int); 2471 hist_field->type = kstrdup("unsigned int", GFP_KERNEL); 2472 if (!hist_field->type) 2473 goto free; 2474 goto out; 2475 } 2476 2477 if (WARN_ON_ONCE(!field)) 2478 goto out; 2479 2480 if (is_string_field(field)) { 2481 flags |= HIST_FIELD_FL_STRING; 2482 2483 hist_field->size = MAX_FILTER_STR_VAL; 2484 hist_field->type = kstrdup(field->type, GFP_KERNEL); 2485 if (!hist_field->type) 2486 goto free; 2487 2488 if (field->filter_type == FILTER_STATIC_STRING) 2489 hist_field->fn = hist_field_string; 2490 else if (field->filter_type == FILTER_DYN_STRING) 2491 hist_field->fn = hist_field_dynstring; 2492 else 2493 hist_field->fn = hist_field_pstring; 2494 } else { 2495 hist_field->size = field->size; 2496 hist_field->is_signed = field->is_signed; 2497 hist_field->type = kstrdup(field->type, GFP_KERNEL); 2498 if (!hist_field->type) 2499 goto free; 2500 2501 hist_field->fn = select_value_fn(field->size, 2502 field->is_signed); 2503 if (!hist_field->fn) { 2504 destroy_hist_field(hist_field, 0); 2505 return NULL; 2506 } 2507 } 2508 out: 2509 hist_field->field = field; 2510 hist_field->flags = flags; 2511 2512 if (var_name) { 2513 hist_field->var.name = kstrdup(var_name, GFP_KERNEL); 2514 if (!hist_field->var.name) 2515 goto free; 2516 } 2517 2518 return hist_field; 2519 free: 2520 destroy_hist_field(hist_field, 0); 2521 return NULL; 2522 } 2523 2524 static void destroy_hist_fields(struct hist_trigger_data *hist_data) 2525 { 2526 unsigned int i; 2527 2528 for (i = 0; i < HIST_FIELDS_MAX; i++) { 2529 if (hist_data->fields[i]) { 2530 destroy_hist_field(hist_data->fields[i], 0); 2531 hist_data->fields[i] = NULL; 2532 } 2533 } 2534 2535 for (i = 0; i < hist_data->n_var_refs; i++) { 2536 WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF)); 2537 __destroy_hist_field(hist_data->var_refs[i]); 2538 hist_data->var_refs[i] = NULL; 2539 } 2540 } 2541 2542 static int init_var_ref(struct hist_field *ref_field, 2543 struct hist_field *var_field, 2544 char *system, char *event_name) 2545 { 2546 int err = 0; 2547 2548 ref_field->var.idx = var_field->var.idx; 2549 ref_field->var.hist_data = var_field->hist_data; 2550 ref_field->size = var_field->size; 2551 ref_field->is_signed = var_field->is_signed; 2552 ref_field->flags |= var_field->flags & 2553 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2554 2555 if (system) { 2556 ref_field->system = kstrdup(system, GFP_KERNEL); 2557 if (!ref_field->system) 2558 return -ENOMEM; 2559 } 2560 2561 if (event_name) { 2562 ref_field->event_name = kstrdup(event_name, GFP_KERNEL); 2563 if (!ref_field->event_name) { 2564 err = -ENOMEM; 2565 goto free; 2566 } 2567 } 2568 2569 if (var_field->var.name) { 2570 ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL); 2571 if 
(!ref_field->name) { 2572 err = -ENOMEM; 2573 goto free; 2574 } 2575 } else if (var_field->name) { 2576 ref_field->name = kstrdup(var_field->name, GFP_KERNEL); 2577 if (!ref_field->name) { 2578 err = -ENOMEM; 2579 goto free; 2580 } 2581 } 2582 2583 ref_field->type = kstrdup(var_field->type, GFP_KERNEL); 2584 if (!ref_field->type) { 2585 err = -ENOMEM; 2586 goto free; 2587 } 2588 out: 2589 return err; 2590 free: 2591 kfree(ref_field->system); 2592 kfree(ref_field->event_name); 2593 kfree(ref_field->name); 2594 2595 goto out; 2596 } 2597 2598 /** 2599 * create_var_ref - Create a variable reference and attach it to trigger 2600 * @hist_data: The trigger that will be referencing the variable 2601 * @var_field: The VAR field to create a reference to 2602 * @system: The optional system string 2603 * @event_name: The optional event_name string 2604 * 2605 * Given a variable hist_field, create a VAR_REF hist_field that 2606 * represents a reference to it. 2607 * 2608 * This function also adds the reference to the trigger that 2609 * now references the variable. 2610 * 2611 * Return: The VAR_REF field if successful, NULL if not 2612 */ 2613 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data, 2614 struct hist_field *var_field, 2615 char *system, char *event_name) 2616 { 2617 unsigned long flags = HIST_FIELD_FL_VAR_REF; 2618 struct hist_field *ref_field; 2619 2620 ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL); 2621 if (ref_field) { 2622 if (init_var_ref(ref_field, var_field, system, event_name)) { 2623 destroy_hist_field(ref_field, 0); 2624 return NULL; 2625 } 2626 2627 hist_data->var_refs[hist_data->n_var_refs] = ref_field; 2628 ref_field->var_ref_idx = hist_data->n_var_refs++; 2629 } 2630 2631 return ref_field; 2632 } 2633 2634 static bool is_var_ref(char *var_name) 2635 { 2636 if (!var_name || strlen(var_name) < 2 || var_name[0] != '$') 2637 return false; 2638 2639 return true; 2640 } 2641 2642 static char *field_name_from_var(struct hist_trigger_data *hist_data, 2643 char *var_name) 2644 { 2645 char *name, *field; 2646 unsigned int i; 2647 2648 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { 2649 name = hist_data->attrs->var_defs.name[i]; 2650 2651 if (strcmp(var_name, name) == 0) { 2652 field = hist_data->attrs->var_defs.expr[i]; 2653 if (contains_operator(field) || is_var_ref(field)) 2654 continue; 2655 return field; 2656 } 2657 } 2658 2659 return NULL; 2660 } 2661 2662 static char *local_field_var_ref(struct hist_trigger_data *hist_data, 2663 char *system, char *event_name, 2664 char *var_name) 2665 { 2666 struct trace_event_call *call; 2667 2668 if (system && event_name) { 2669 call = hist_data->event_file->event_call; 2670 2671 if (strcmp(system, call->class->system) != 0) 2672 return NULL; 2673 2674 if (strcmp(event_name, trace_event_name(call)) != 0) 2675 return NULL; 2676 } 2677 2678 if (!!system != !!event_name) 2679 return NULL; 2680 2681 if (!is_var_ref(var_name)) 2682 return NULL; 2683 2684 var_name++; 2685 2686 return field_name_from_var(hist_data, var_name); 2687 } 2688 2689 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data, 2690 char *system, char *event_name, 2691 char *var_name) 2692 { 2693 struct hist_field *var_field = NULL, *ref_field = NULL; 2694 struct trace_array *tr = hist_data->event_file->tr; 2695 2696 if (!is_var_ref(var_name)) 2697 return NULL; 2698 2699 var_name++; 2700 2701 var_field = find_event_var(hist_data, system, event_name, var_name); 2702 if (var_field) 2703 ref_field = 
create_var_ref(hist_data, var_field, 2704 system, event_name); 2705 2706 if (!ref_field) 2707 hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name)); 2708 2709 return ref_field; 2710 } 2711 2712 static struct ftrace_event_field * 2713 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, 2714 char *field_str, unsigned long *flags) 2715 { 2716 struct ftrace_event_field *field = NULL; 2717 char *field_name, *modifier, *str; 2718 struct trace_array *tr = file->tr; 2719 2720 modifier = str = kstrdup(field_str, GFP_KERNEL); 2721 if (!modifier) 2722 return ERR_PTR(-ENOMEM); 2723 2724 field_name = strsep(&modifier, "."); 2725 if (modifier) { 2726 if (strcmp(modifier, "hex") == 0) 2727 *flags |= HIST_FIELD_FL_HEX; 2728 else if (strcmp(modifier, "sym") == 0) 2729 *flags |= HIST_FIELD_FL_SYM; 2730 else if (strcmp(modifier, "sym-offset") == 0) 2731 *flags |= HIST_FIELD_FL_SYM_OFFSET; 2732 else if ((strcmp(modifier, "execname") == 0) && 2733 (strcmp(field_name, "common_pid") == 0)) 2734 *flags |= HIST_FIELD_FL_EXECNAME; 2735 else if (strcmp(modifier, "syscall") == 0) 2736 *flags |= HIST_FIELD_FL_SYSCALL; 2737 else if (strcmp(modifier, "log2") == 0) 2738 *flags |= HIST_FIELD_FL_LOG2; 2739 else if (strcmp(modifier, "usecs") == 0) 2740 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; 2741 else { 2742 hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier)); 2743 field = ERR_PTR(-EINVAL); 2744 goto out; 2745 } 2746 } 2747 2748 if (strcmp(field_name, "common_timestamp") == 0) { 2749 *flags |= HIST_FIELD_FL_TIMESTAMP; 2750 hist_data->enable_timestamps = true; 2751 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS) 2752 hist_data->attrs->ts_in_usecs = true; 2753 } else if (strcmp(field_name, "cpu") == 0) 2754 *flags |= HIST_FIELD_FL_CPU; 2755 else { 2756 field = trace_find_event_field(file->event_call, field_name); 2757 if (!field || !field->size) { 2758 hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name)); 2759 field = ERR_PTR(-EINVAL); 2760 goto out; 2761 } 2762 } 2763 out: 2764 kfree(str); 2765 2766 return field; 2767 } 2768 2769 static struct hist_field *create_alias(struct hist_trigger_data *hist_data, 2770 struct hist_field *var_ref, 2771 char *var_name) 2772 { 2773 struct hist_field *alias = NULL; 2774 unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR; 2775 2776 alias = create_hist_field(hist_data, NULL, flags, var_name); 2777 if (!alias) 2778 return NULL; 2779 2780 alias->fn = var_ref->fn; 2781 alias->operands[0] = var_ref; 2782 2783 if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) { 2784 destroy_hist_field(alias, 0); 2785 return NULL; 2786 } 2787 2788 return alias; 2789 } 2790 2791 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, 2792 struct trace_event_file *file, char *str, 2793 unsigned long *flags, char *var_name) 2794 { 2795 char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str; 2796 struct ftrace_event_field *field = NULL; 2797 struct hist_field *hist_field = NULL; 2798 int ret = 0; 2799 2800 s = strchr(str, '.'); 2801 if (s) { 2802 s = strchr(++s, '.'); 2803 if (s) { 2804 ref_system = strsep(&str, "."); 2805 if (!str) { 2806 ret = -EINVAL; 2807 goto out; 2808 } 2809 ref_event = strsep(&str, "."); 2810 if (!str) { 2811 ret = -EINVAL; 2812 goto out; 2813 } 2814 ref_var = str; 2815 } 2816 } 2817 2818 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var); 2819 if (!s) { 2820 hist_field = parse_var_ref(hist_data, ref_system, 2821 ref_event, ref_var); 2822 if (hist_field) { 2823 if (var_name) { 
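				/*
				 * The atom is itself a variable reference
				 * being assigned to a new variable (e.g. a
				 * definition like lat=$wakeup_lat), so wrap
				 * the reference in an alias carrying the
				 * new name.
				 */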
2824 hist_field = create_alias(hist_data, hist_field, var_name); 2825 if (!hist_field) { 2826 ret = -ENOMEM; 2827 goto out; 2828 } 2829 } 2830 return hist_field; 2831 } 2832 } else 2833 str = s; 2834 2835 field = parse_field(hist_data, file, str, flags); 2836 if (IS_ERR(field)) { 2837 ret = PTR_ERR(field); 2838 goto out; 2839 } 2840 2841 hist_field = create_hist_field(hist_data, field, *flags, var_name); 2842 if (!hist_field) { 2843 ret = -ENOMEM; 2844 goto out; 2845 } 2846 2847 return hist_field; 2848 out: 2849 return ERR_PTR(ret); 2850 } 2851 2852 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, 2853 struct trace_event_file *file, 2854 char *str, unsigned long flags, 2855 char *var_name, unsigned int level); 2856 2857 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, 2858 struct trace_event_file *file, 2859 char *str, unsigned long flags, 2860 char *var_name, unsigned int level) 2861 { 2862 struct hist_field *operand1, *expr = NULL; 2863 unsigned long operand_flags; 2864 int ret = 0; 2865 char *s; 2866 2867 /* we support only -(xxx) i.e. explicit parens required */ 2868 2869 if (level > 3) { 2870 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2871 ret = -EINVAL; 2872 goto free; 2873 } 2874 2875 str++; /* skip leading '-' */ 2876 2877 s = strchr(str, '('); 2878 if (s) 2879 str++; 2880 else { 2881 ret = -EINVAL; 2882 goto free; 2883 } 2884 2885 s = strrchr(str, ')'); 2886 if (s) 2887 *s = '\0'; 2888 else { 2889 ret = -EINVAL; /* no closing ')' */ 2890 goto free; 2891 } 2892 2893 flags |= HIST_FIELD_FL_EXPR; 2894 expr = create_hist_field(hist_data, NULL, flags, var_name); 2895 if (!expr) { 2896 ret = -ENOMEM; 2897 goto free; 2898 } 2899 2900 operand_flags = 0; 2901 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level); 2902 if (IS_ERR(operand1)) { 2903 ret = PTR_ERR(operand1); 2904 goto free; 2905 } 2906 2907 expr->flags |= operand1->flags & 2908 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2909 expr->fn = hist_field_unary_minus; 2910 expr->operands[0] = operand1; 2911 expr->operator = FIELD_OP_UNARY_MINUS; 2912 expr->name = expr_str(expr, 0); 2913 expr->type = kstrdup(operand1->type, GFP_KERNEL); 2914 if (!expr->type) { 2915 ret = -ENOMEM; 2916 goto free; 2917 } 2918 2919 return expr; 2920 free: 2921 destroy_hist_field(expr, 0); 2922 return ERR_PTR(ret); 2923 } 2924 2925 static int check_expr_operands(struct trace_array *tr, 2926 struct hist_field *operand1, 2927 struct hist_field *operand2) 2928 { 2929 unsigned long operand1_flags = operand1->flags; 2930 unsigned long operand2_flags = operand2->flags; 2931 2932 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) || 2933 (operand1_flags & HIST_FIELD_FL_ALIAS)) { 2934 struct hist_field *var; 2935 2936 var = find_var_field(operand1->var.hist_data, operand1->name); 2937 if (!var) 2938 return -EINVAL; 2939 operand1_flags = var->flags; 2940 } 2941 2942 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) || 2943 (operand2_flags & HIST_FIELD_FL_ALIAS)) { 2944 struct hist_field *var; 2945 2946 var = find_var_field(operand2->var.hist_data, operand2->name); 2947 if (!var) 2948 return -EINVAL; 2949 operand2_flags = var->flags; 2950 } 2951 2952 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) != 2953 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) { 2954 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0); 2955 return -EINVAL; 2956 } 2957 2958 return 0; 2959 } 2960 2961 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, 2962 struct trace_event_file 
*file, 2963 char *str, unsigned long flags, 2964 char *var_name, unsigned int level) 2965 { 2966 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL; 2967 unsigned long operand_flags; 2968 int field_op, ret = -EINVAL; 2969 char *sep, *operand1_str; 2970 2971 if (level > 3) { 2972 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2973 return ERR_PTR(-EINVAL); 2974 } 2975 2976 field_op = contains_operator(str); 2977 2978 if (field_op == FIELD_OP_NONE) 2979 return parse_atom(hist_data, file, str, &flags, var_name); 2980 2981 if (field_op == FIELD_OP_UNARY_MINUS) 2982 return parse_unary(hist_data, file, str, flags, var_name, ++level); 2983 2984 switch (field_op) { 2985 case FIELD_OP_MINUS: 2986 sep = "-"; 2987 break; 2988 case FIELD_OP_PLUS: 2989 sep = "+"; 2990 break; 2991 default: 2992 goto free; 2993 } 2994 2995 operand1_str = strsep(&str, sep); 2996 if (!operand1_str || !str) 2997 goto free; 2998 2999 operand_flags = 0; 3000 operand1 = parse_atom(hist_data, file, operand1_str, 3001 &operand_flags, NULL); 3002 if (IS_ERR(operand1)) { 3003 ret = PTR_ERR(operand1); 3004 operand1 = NULL; 3005 goto free; 3006 } 3007 3008 /* rest of string could be another expression e.g. b+c in a+b+c */ 3009 operand_flags = 0; 3010 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level); 3011 if (IS_ERR(operand2)) { 3012 ret = PTR_ERR(operand2); 3013 operand2 = NULL; 3014 goto free; 3015 } 3016 3017 ret = check_expr_operands(file->tr, operand1, operand2); 3018 if (ret) 3019 goto free; 3020 3021 flags |= HIST_FIELD_FL_EXPR; 3022 3023 flags |= operand1->flags & 3024 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 3025 3026 expr = create_hist_field(hist_data, NULL, flags, var_name); 3027 if (!expr) { 3028 ret = -ENOMEM; 3029 goto free; 3030 } 3031 3032 operand1->read_once = true; 3033 operand2->read_once = true; 3034 3035 expr->operands[0] = operand1; 3036 expr->operands[1] = operand2; 3037 expr->operator = field_op; 3038 expr->name = expr_str(expr, 0); 3039 expr->type = kstrdup(operand1->type, GFP_KERNEL); 3040 if (!expr->type) { 3041 ret = -ENOMEM; 3042 goto free; 3043 } 3044 3045 switch (field_op) { 3046 case FIELD_OP_MINUS: 3047 expr->fn = hist_field_minus; 3048 break; 3049 case FIELD_OP_PLUS: 3050 expr->fn = hist_field_plus; 3051 break; 3052 default: 3053 ret = -EINVAL; 3054 goto free; 3055 } 3056 3057 return expr; 3058 free: 3059 destroy_hist_field(operand1, 0); 3060 destroy_hist_field(operand2, 0); 3061 destroy_hist_field(expr, 0); 3062 3063 return ERR_PTR(ret); 3064 } 3065 3066 static char *find_trigger_filter(struct hist_trigger_data *hist_data, 3067 struct trace_event_file *file) 3068 { 3069 struct event_trigger_data *test; 3070 3071 list_for_each_entry_rcu(test, &file->triggers, list) { 3072 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 3073 if (test->private_data == hist_data) 3074 return test->filter_str; 3075 } 3076 } 3077 3078 return NULL; 3079 } 3080 3081 static struct event_command trigger_hist_cmd; 3082 static int event_hist_trigger_func(struct event_command *cmd_ops, 3083 struct trace_event_file *file, 3084 char *glob, char *cmd, char *param); 3085 3086 static bool compatible_keys(struct hist_trigger_data *target_hist_data, 3087 struct hist_trigger_data *hist_data, 3088 unsigned int n_keys) 3089 { 3090 struct hist_field *target_hist_field, *hist_field; 3091 unsigned int n, i, j; 3092 3093 if (hist_data->n_fields - hist_data->n_vals != n_keys) 3094 return false; 3095 3096 i = hist_data->n_vals; 3097 j = target_hist_data->n_vals; 3098 
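	/*
	 * Key fields follow the value fields in each fields[] array, so
	 * i and j now index the first key of each histogram; the loop
	 * below compares the keys pairwise on type, size and signedness.
	 */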
3099 for (n = 0; n < n_keys; n++) { 3100 hist_field = hist_data->fields[i + n]; 3101 target_hist_field = target_hist_data->fields[j + n]; 3102 3103 if (strcmp(hist_field->type, target_hist_field->type) != 0) 3104 return false; 3105 if (hist_field->size != target_hist_field->size) 3106 return false; 3107 if (hist_field->is_signed != target_hist_field->is_signed) 3108 return false; 3109 } 3110 3111 return true; 3112 } 3113 3114 static struct hist_trigger_data * 3115 find_compatible_hist(struct hist_trigger_data *target_hist_data, 3116 struct trace_event_file *file) 3117 { 3118 struct hist_trigger_data *hist_data; 3119 struct event_trigger_data *test; 3120 unsigned int n_keys; 3121 3122 n_keys = target_hist_data->n_fields - target_hist_data->n_vals; 3123 3124 list_for_each_entry_rcu(test, &file->triggers, list) { 3125 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 3126 hist_data = test->private_data; 3127 3128 if (compatible_keys(target_hist_data, hist_data, n_keys)) 3129 return hist_data; 3130 } 3131 } 3132 3133 return NULL; 3134 } 3135 3136 static struct trace_event_file *event_file(struct trace_array *tr, 3137 char *system, char *event_name) 3138 { 3139 struct trace_event_file *file; 3140 3141 file = __find_event_file(tr, system, event_name); 3142 if (!file) 3143 return ERR_PTR(-EINVAL); 3144 3145 return file; 3146 } 3147 3148 static struct hist_field * 3149 find_synthetic_field_var(struct hist_trigger_data *target_hist_data, 3150 char *system, char *event_name, char *field_name) 3151 { 3152 struct hist_field *event_var; 3153 char *synthetic_name; 3154 3155 synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 3156 if (!synthetic_name) 3157 return ERR_PTR(-ENOMEM); 3158 3159 strcpy(synthetic_name, "synthetic_"); 3160 strcat(synthetic_name, field_name); 3161 3162 event_var = find_event_var(target_hist_data, system, event_name, synthetic_name); 3163 3164 kfree(synthetic_name); 3165 3166 return event_var; 3167 } 3168 3169 /** 3170 * create_field_var_hist - Automatically create a histogram and var for a field 3171 * @target_hist_data: The target hist trigger 3172 * @subsys_name: Optional subsystem name 3173 * @event_name: Optional event name 3174 * @field_name: The name of the field (and the resulting variable) 3175 * 3176 * Hist trigger actions fetch data from variables, not directly from 3177 * events. However, for convenience, users are allowed to directly 3178 * specify an event field in an action, which will be automatically 3179 * converted into a variable on their behalf. 3180 3181 * If a user specifies a field on an event that isn't the event the 3182 * histogram currently being defined (the target event histogram), the 3183 * only way that can be accomplished is if a new hist trigger is 3184 * created and the field variable defined on that. 3185 * 3186 * This function creates a new histogram compatible with the target 3187 * event (meaning a histogram with the same key as the target 3188 * histogram), and creates a variable for the specified field, but 3189 * with 'synthetic_' prepended to the variable name in order to avoid 3190 * collision with normal field variables. 3191 * 3192 * Return: The variable created for the field. 
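 *
 * As a made-up illustration (following the documented hist trigger
 * syntax, not code in this file): if the target histogram is keyed on
 * 'pid' and an action refers to the 'prio' field of another event,
 * this function registers the rough equivalent of
 *
 *   hist:keys=pid:synthetic_prio=prio [if <same filter>]
 *
 * on that other event, and the action then reads the resulting
 * 'synthetic_prio' variable instead of the raw field.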
3193 */ 3194 static struct hist_field * 3195 create_field_var_hist(struct hist_trigger_data *target_hist_data, 3196 char *subsys_name, char *event_name, char *field_name) 3197 { 3198 struct trace_array *tr = target_hist_data->event_file->tr; 3199 struct hist_field *event_var = ERR_PTR(-EINVAL); 3200 struct hist_trigger_data *hist_data; 3201 unsigned int i, n, first = true; 3202 struct field_var_hist *var_hist; 3203 struct trace_event_file *file; 3204 struct hist_field *key_field; 3205 char *saved_filter; 3206 char *cmd; 3207 int ret; 3208 3209 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) { 3210 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); 3211 return ERR_PTR(-EINVAL); 3212 } 3213 3214 file = event_file(tr, subsys_name, event_name); 3215 3216 if (IS_ERR(file)) { 3217 hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name)); 3218 ret = PTR_ERR(file); 3219 return ERR_PTR(ret); 3220 } 3221 3222 /* 3223 * Look for a histogram compatible with target. We'll use the 3224 * found histogram specification to create a new matching 3225 * histogram with our variable on it. target_hist_data is not 3226 * yet a registered histogram so we can't use that. 3227 */ 3228 hist_data = find_compatible_hist(target_hist_data, file); 3229 if (!hist_data) { 3230 hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name)); 3231 return ERR_PTR(-EINVAL); 3232 } 3233 3234 /* See if a synthetic field variable has already been created */ 3235 event_var = find_synthetic_field_var(target_hist_data, subsys_name, 3236 event_name, field_name); 3237 if (!IS_ERR_OR_NULL(event_var)) 3238 return event_var; 3239 3240 var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL); 3241 if (!var_hist) 3242 return ERR_PTR(-ENOMEM); 3243 3244 cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 3245 if (!cmd) { 3246 kfree(var_hist); 3247 return ERR_PTR(-ENOMEM); 3248 } 3249 3250 /* Use the same keys as the compatible histogram */ 3251 strcat(cmd, "keys="); 3252 3253 for_each_hist_key_field(i, hist_data) { 3254 key_field = hist_data->fields[i]; 3255 if (!first) 3256 strcat(cmd, ","); 3257 strcat(cmd, key_field->field->name); 3258 first = false; 3259 } 3260 3261 /* Create the synthetic field variable specification */ 3262 strcat(cmd, ":synthetic_"); 3263 strcat(cmd, field_name); 3264 strcat(cmd, "="); 3265 strcat(cmd, field_name); 3266 3267 /* Use the same filter as the compatible histogram */ 3268 saved_filter = find_trigger_filter(hist_data, file); 3269 if (saved_filter) { 3270 strcat(cmd, " if "); 3271 strcat(cmd, saved_filter); 3272 } 3273 3274 var_hist->cmd = kstrdup(cmd, GFP_KERNEL); 3275 if (!var_hist->cmd) { 3276 kfree(cmd); 3277 kfree(var_hist); 3278 return ERR_PTR(-ENOMEM); 3279 } 3280 3281 /* Save the compatible histogram information */ 3282 var_hist->hist_data = hist_data; 3283 3284 /* Create the new histogram with our variable */ 3285 ret = event_hist_trigger_func(&trigger_hist_cmd, file, 3286 "", "hist", cmd); 3287 if (ret) { 3288 kfree(cmd); 3289 kfree(var_hist->cmd); 3290 kfree(var_hist); 3291 hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name)); 3292 return ERR_PTR(ret); 3293 } 3294 3295 kfree(cmd); 3296 3297 /* If we can't find the variable, something went wrong */ 3298 event_var = find_synthetic_field_var(target_hist_data, subsys_name, 3299 event_name, field_name); 3300 if (IS_ERR_OR_NULL(event_var)) { 3301 kfree(var_hist->cmd); 3302 kfree(var_hist); 3303 hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name)); 3304 return ERR_PTR(-EINVAL); 3305 } 3306 3307 n = 
target_hist_data->n_field_var_hists; 3308 target_hist_data->field_var_hists[n] = var_hist; 3309 target_hist_data->n_field_var_hists++; 3310 3311 return event_var; 3312 } 3313 3314 static struct hist_field * 3315 find_target_event_var(struct hist_trigger_data *hist_data, 3316 char *subsys_name, char *event_name, char *var_name) 3317 { 3318 struct trace_event_file *file = hist_data->event_file; 3319 struct hist_field *hist_field = NULL; 3320 3321 if (subsys_name) { 3322 struct trace_event_call *call; 3323 3324 if (!event_name) 3325 return NULL; 3326 3327 call = file->event_call; 3328 3329 if (strcmp(subsys_name, call->class->system) != 0) 3330 return NULL; 3331 3332 if (strcmp(event_name, trace_event_name(call)) != 0) 3333 return NULL; 3334 } 3335 3336 hist_field = find_var_field(hist_data, var_name); 3337 3338 return hist_field; 3339 } 3340 3341 static inline void __update_field_vars(struct tracing_map_elt *elt, 3342 struct ring_buffer_event *rbe, 3343 void *rec, 3344 struct field_var **field_vars, 3345 unsigned int n_field_vars, 3346 unsigned int field_var_str_start) 3347 { 3348 struct hist_elt_data *elt_data = elt->private_data; 3349 unsigned int i, j, var_idx; 3350 u64 var_val; 3351 3352 for (i = 0, j = field_var_str_start; i < n_field_vars; i++) { 3353 struct field_var *field_var = field_vars[i]; 3354 struct hist_field *var = field_var->var; 3355 struct hist_field *val = field_var->val; 3356 3357 var_val = val->fn(val, elt, rbe, rec); 3358 var_idx = var->var.idx; 3359 3360 if (val->flags & HIST_FIELD_FL_STRING) { 3361 char *str = elt_data->field_var_str[j++]; 3362 char *val_str = (char *)(uintptr_t)var_val; 3363 3364 strscpy(str, val_str, STR_VAR_LEN_MAX); 3365 var_val = (u64)(uintptr_t)str; 3366 } 3367 tracing_map_set_var(elt, var_idx, var_val); 3368 } 3369 } 3370 3371 static void update_field_vars(struct hist_trigger_data *hist_data, 3372 struct tracing_map_elt *elt, 3373 struct ring_buffer_event *rbe, 3374 void *rec) 3375 { 3376 __update_field_vars(elt, rbe, rec, hist_data->field_vars, 3377 hist_data->n_field_vars, 0); 3378 } 3379 3380 static void save_track_data_vars(struct hist_trigger_data *hist_data, 3381 struct tracing_map_elt *elt, void *rec, 3382 struct ring_buffer_event *rbe, void *key, 3383 struct action_data *data, u64 *var_ref_vals) 3384 { 3385 __update_field_vars(elt, rbe, rec, hist_data->save_vars, 3386 hist_data->n_save_vars, hist_data->n_field_var_str); 3387 } 3388 3389 static struct hist_field *create_var(struct hist_trigger_data *hist_data, 3390 struct trace_event_file *file, 3391 char *name, int size, const char *type) 3392 { 3393 struct hist_field *var; 3394 int idx; 3395 3396 if (find_var(hist_data, file, name) && !hist_data->remove) { 3397 var = ERR_PTR(-EINVAL); 3398 goto out; 3399 } 3400 3401 var = kzalloc(sizeof(struct hist_field), GFP_KERNEL); 3402 if (!var) { 3403 var = ERR_PTR(-ENOMEM); 3404 goto out; 3405 } 3406 3407 idx = tracing_map_add_var(hist_data->map); 3408 if (idx < 0) { 3409 kfree(var); 3410 var = ERR_PTR(-EINVAL); 3411 goto out; 3412 } 3413 3414 var->flags = HIST_FIELD_FL_VAR; 3415 var->var.idx = idx; 3416 var->var.hist_data = var->hist_data = hist_data; 3417 var->size = size; 3418 var->var.name = kstrdup(name, GFP_KERNEL); 3419 var->type = kstrdup(type, GFP_KERNEL); 3420 if (!var->var.name || !var->type) { 3421 kfree(var->var.name); 3422 kfree(var->type); 3423 kfree(var); 3424 var = ERR_PTR(-ENOMEM); 3425 } 3426 out: 3427 return var; 3428 } 3429 3430 static struct field_var *create_field_var(struct hist_trigger_data *hist_data, 3431 struct 
trace_event_file *file, 3432 char *field_name) 3433 { 3434 struct hist_field *val = NULL, *var = NULL; 3435 unsigned long flags = HIST_FIELD_FL_VAR; 3436 struct trace_array *tr = file->tr; 3437 struct field_var *field_var; 3438 int ret = 0; 3439 3440 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) { 3441 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); 3442 ret = -EINVAL; 3443 goto err; 3444 } 3445 3446 val = parse_atom(hist_data, file, field_name, &flags, NULL); 3447 if (IS_ERR(val)) { 3448 hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name)); 3449 ret = PTR_ERR(val); 3450 goto err; 3451 } 3452 3453 var = create_var(hist_data, file, field_name, val->size, val->type); 3454 if (IS_ERR(var)) { 3455 hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name)); 3456 kfree(val); 3457 ret = PTR_ERR(var); 3458 goto err; 3459 } 3460 3461 field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL); 3462 if (!field_var) { 3463 kfree(val); 3464 kfree(var); 3465 ret = -ENOMEM; 3466 goto err; 3467 } 3468 3469 field_var->var = var; 3470 field_var->val = val; 3471 out: 3472 return field_var; 3473 err: 3474 field_var = ERR_PTR(ret); 3475 goto out; 3476 } 3477 3478 /** 3479 * create_target_field_var - Automatically create a variable for a field 3480 * @target_hist_data: The target hist trigger 3481 * @subsys_name: Optional subsystem name 3482 * @event_name: Optional event name 3483 * @var_name: The name of the field (and the resulting variable) 3484 * 3485 * Hist trigger actions fetch data from variables, not directly from 3486 * events. However, for convenience, users are allowed to directly 3487 * specify an event field in an action, which will be automatically 3488 * converted into a variable on their behalf. 3489 3490 * This function creates a field variable with the name var_name on 3491 * the hist trigger currently being defined on the target event. If 3492 * subsys_name and event_name are specified, this function simply 3493 * verifies that they do in fact match the target event subsystem and 3494 * event name. 3495 * 3496 * Return: The variable created for the field. 
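 *
 * For example (made-up usage following the documented syntax), in
 *
 *   hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmax($wakeup_lat).save(next_comm,prev_pid)
 *
 * the save() parameters 'next_comm' and 'prev_pid' are bare field
 * names on the target event; this function is what turns each of them
 * into the field variables the action later reads.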
3497 */ 3498 static struct field_var * 3499 create_target_field_var(struct hist_trigger_data *target_hist_data, 3500 char *subsys_name, char *event_name, char *var_name) 3501 { 3502 struct trace_event_file *file = target_hist_data->event_file; 3503 3504 if (subsys_name) { 3505 struct trace_event_call *call; 3506 3507 if (!event_name) 3508 return NULL; 3509 3510 call = file->event_call; 3511 3512 if (strcmp(subsys_name, call->class->system) != 0) 3513 return NULL; 3514 3515 if (strcmp(event_name, trace_event_name(call)) != 0) 3516 return NULL; 3517 } 3518 3519 return create_field_var(target_hist_data, file, var_name); 3520 } 3521 3522 static bool check_track_val_max(u64 track_val, u64 var_val) 3523 { 3524 if (var_val <= track_val) 3525 return false; 3526 3527 return true; 3528 } 3529 3530 static bool check_track_val_changed(u64 track_val, u64 var_val) 3531 { 3532 if (var_val == track_val) 3533 return false; 3534 3535 return true; 3536 } 3537 3538 static u64 get_track_val(struct hist_trigger_data *hist_data, 3539 struct tracing_map_elt *elt, 3540 struct action_data *data) 3541 { 3542 unsigned int track_var_idx = data->track_data.track_var->var.idx; 3543 u64 track_val; 3544 3545 track_val = tracing_map_read_var(elt, track_var_idx); 3546 3547 return track_val; 3548 } 3549 3550 static void save_track_val(struct hist_trigger_data *hist_data, 3551 struct tracing_map_elt *elt, 3552 struct action_data *data, u64 var_val) 3553 { 3554 unsigned int track_var_idx = data->track_data.track_var->var.idx; 3555 3556 tracing_map_set_var(elt, track_var_idx, var_val); 3557 } 3558 3559 static void save_track_data(struct hist_trigger_data *hist_data, 3560 struct tracing_map_elt *elt, void *rec, 3561 struct ring_buffer_event *rbe, void *key, 3562 struct action_data *data, u64 *var_ref_vals) 3563 { 3564 if (data->track_data.save_data) 3565 data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals); 3566 } 3567 3568 static bool check_track_val(struct tracing_map_elt *elt, 3569 struct action_data *data, 3570 u64 var_val) 3571 { 3572 struct hist_trigger_data *hist_data; 3573 u64 track_val; 3574 3575 hist_data = data->track_data.track_var->hist_data; 3576 track_val = get_track_val(hist_data, elt, data); 3577 3578 return data->track_data.check_val(track_val, var_val); 3579 } 3580 3581 #ifdef CONFIG_TRACER_SNAPSHOT 3582 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) 3583 { 3584 /* called with tr->max_lock held */ 3585 struct track_data *track_data = tr->cond_snapshot->cond_data; 3586 struct hist_elt_data *elt_data, *track_elt_data; 3587 struct snapshot_context *context = cond_data; 3588 struct action_data *action; 3589 u64 track_val; 3590 3591 if (!track_data) 3592 return false; 3593 3594 action = track_data->action_data; 3595 3596 track_val = get_track_val(track_data->hist_data, context->elt, 3597 track_data->action_data); 3598 3599 if (!action->track_data.check_val(track_data->track_val, track_val)) 3600 return false; 3601 3602 track_data->track_val = track_val; 3603 memcpy(track_data->key, context->key, track_data->key_len); 3604 3605 elt_data = context->elt->private_data; 3606 track_elt_data = track_data->elt.private_data; 3607 if (elt_data->comm) 3608 strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN); 3609 3610 track_data->updated = true; 3611 3612 return true; 3613 } 3614 3615 static void save_track_data_snapshot(struct hist_trigger_data *hist_data, 3616 struct tracing_map_elt *elt, void *rec, 3617 struct ring_buffer_event *rbe, void *key, 3618 struct 
action_data *data, 3619 u64 *var_ref_vals) 3620 { 3621 struct trace_event_file *file = hist_data->event_file; 3622 struct snapshot_context context; 3623 3624 context.elt = elt; 3625 context.key = key; 3626 3627 tracing_snapshot_cond(file->tr, &context); 3628 } 3629 3630 static void hist_trigger_print_key(struct seq_file *m, 3631 struct hist_trigger_data *hist_data, 3632 void *key, 3633 struct tracing_map_elt *elt); 3634 3635 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data) 3636 { 3637 unsigned int i; 3638 3639 if (!hist_data->n_actions) 3640 return NULL; 3641 3642 for (i = 0; i < hist_data->n_actions; i++) { 3643 struct action_data *data = hist_data->actions[i]; 3644 3645 if (data->action == ACTION_SNAPSHOT) 3646 return data; 3647 } 3648 3649 return NULL; 3650 } 3651 3652 static void track_data_snapshot_print(struct seq_file *m, 3653 struct hist_trigger_data *hist_data) 3654 { 3655 struct trace_event_file *file = hist_data->event_file; 3656 struct track_data *track_data; 3657 struct action_data *action; 3658 3659 track_data = tracing_cond_snapshot_data(file->tr); 3660 if (!track_data) 3661 return; 3662 3663 if (!track_data->updated) 3664 return; 3665 3666 action = snapshot_action(hist_data); 3667 if (!action) 3668 return; 3669 3670 seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n"); 3671 seq_printf(m, "\ttriggering value { %s(%s) }: %10llu", 3672 action->handler == HANDLER_ONMAX ? "onmax" : "onchange", 3673 action->track_data.var_str, track_data->track_val); 3674 3675 seq_puts(m, "\ttriggered by event with key: "); 3676 hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt); 3677 seq_putc(m, '\n'); 3678 } 3679 #else 3680 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) 3681 { 3682 return false; 3683 } 3684 static void save_track_data_snapshot(struct hist_trigger_data *hist_data, 3685 struct tracing_map_elt *elt, void *rec, 3686 struct ring_buffer_event *rbe, void *key, 3687 struct action_data *data, 3688 u64 *var_ref_vals) {} 3689 static void track_data_snapshot_print(struct seq_file *m, 3690 struct hist_trigger_data *hist_data) {} 3691 #endif /* CONFIG_TRACER_SNAPSHOT */ 3692 3693 static void track_data_print(struct seq_file *m, 3694 struct hist_trigger_data *hist_data, 3695 struct tracing_map_elt *elt, 3696 struct action_data *data) 3697 { 3698 u64 track_val = get_track_val(hist_data, elt, data); 3699 unsigned int i, save_var_idx; 3700 3701 if (data->handler == HANDLER_ONMAX) 3702 seq_printf(m, "\n\tmax: %10llu", track_val); 3703 else if (data->handler == HANDLER_ONCHANGE) 3704 seq_printf(m, "\n\tchanged: %10llu", track_val); 3705 3706 if (data->action == ACTION_SNAPSHOT) 3707 return; 3708 3709 for (i = 0; i < hist_data->n_save_vars; i++) { 3710 struct hist_field *save_val = hist_data->save_vars[i]->val; 3711 struct hist_field *save_var = hist_data->save_vars[i]->var; 3712 u64 val; 3713 3714 save_var_idx = save_var->var.idx; 3715 3716 val = tracing_map_read_var(elt, save_var_idx); 3717 3718 if (save_val->flags & HIST_FIELD_FL_STRING) { 3719 seq_printf(m, " %s: %-32s", save_var->var.name, 3720 (char *)(uintptr_t)(val)); 3721 } else 3722 seq_printf(m, " %s: %10llu", save_var->var.name, val); 3723 } 3724 } 3725 3726 static void ontrack_action(struct hist_trigger_data *hist_data, 3727 struct tracing_map_elt *elt, void *rec, 3728 struct ring_buffer_event *rbe, void *key, 3729 struct action_data *data, u64 *var_ref_vals) 3730 { 3731 u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx]; 3732 
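	/*
	 * var_val is the freshly resolved value of the tracked variable
	 * for this event.  If it passes the handler's test (a new max
	 * for onmax, a different value for onchange), record it and run
	 * whatever save routine the action installed (e.g. saving
	 * fields, taking a snapshot or generating a synthetic event).
	 */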
3733 if (check_track_val(elt, data, var_val)) { 3734 save_track_val(hist_data, elt, data, var_val); 3735 save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals); 3736 } 3737 } 3738 3739 static void action_data_destroy(struct action_data *data) 3740 { 3741 unsigned int i; 3742 3743 lockdep_assert_held(&event_mutex); 3744 3745 kfree(data->action_name); 3746 3747 for (i = 0; i < data->n_params; i++) 3748 kfree(data->params[i]); 3749 3750 if (data->synth_event) 3751 data->synth_event->ref--; 3752 3753 kfree(data->synth_event_name); 3754 3755 kfree(data); 3756 } 3757 3758 static void track_data_destroy(struct hist_trigger_data *hist_data, 3759 struct action_data *data) 3760 { 3761 struct trace_event_file *file = hist_data->event_file; 3762 3763 destroy_hist_field(data->track_data.track_var, 0); 3764 3765 if (data->action == ACTION_SNAPSHOT) { 3766 struct track_data *track_data; 3767 3768 track_data = tracing_cond_snapshot_data(file->tr); 3769 if (track_data && track_data->hist_data == hist_data) { 3770 tracing_snapshot_cond_disable(file->tr); 3771 track_data_free(track_data); 3772 } 3773 } 3774 3775 kfree(data->track_data.var_str); 3776 3777 action_data_destroy(data); 3778 } 3779 3780 static int action_create(struct hist_trigger_data *hist_data, 3781 struct action_data *data); 3782 3783 static int track_data_create(struct hist_trigger_data *hist_data, 3784 struct action_data *data) 3785 { 3786 struct hist_field *var_field, *ref_field, *track_var = NULL; 3787 struct trace_event_file *file = hist_data->event_file; 3788 struct trace_array *tr = file->tr; 3789 char *track_data_var_str; 3790 int ret = 0; 3791 3792 track_data_var_str = data->track_data.var_str; 3793 if (track_data_var_str[0] != '$') { 3794 hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str)); 3795 return -EINVAL; 3796 } 3797 track_data_var_str++; 3798 3799 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str); 3800 if (!var_field) { 3801 hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str)); 3802 return -EINVAL; 3803 } 3804 3805 ref_field = create_var_ref(hist_data, var_field, NULL, NULL); 3806 if (!ref_field) 3807 return -ENOMEM; 3808 3809 data->track_data.var_ref = ref_field; 3810 3811 if (data->handler == HANDLER_ONMAX) 3812 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64"); 3813 if (IS_ERR(track_var)) { 3814 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3815 ret = PTR_ERR(track_var); 3816 goto out; 3817 } 3818 3819 if (data->handler == HANDLER_ONCHANGE) 3820 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64"); 3821 if (IS_ERR(track_var)) { 3822 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3823 ret = PTR_ERR(track_var); 3824 goto out; 3825 } 3826 data->track_data.track_var = track_var; 3827 3828 ret = action_create(hist_data, data); 3829 out: 3830 return ret; 3831 } 3832 3833 static int parse_action_params(struct trace_array *tr, char *params, 3834 struct action_data *data) 3835 { 3836 char *param, *saved_param; 3837 bool first_param = true; 3838 int ret = 0; 3839 3840 while (params) { 3841 if (data->n_params >= SYNTH_FIELDS_MAX) { 3842 hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0); 3843 goto out; 3844 } 3845 3846 param = strsep(¶ms, ","); 3847 if (!param) { 3848 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0); 3849 ret = -EINVAL; 3850 goto out; 3851 } 3852 3853 param = strstrip(param); 3854 if (strlen(param) < 2) { 3855 hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param)); 3856 ret = -EINVAL; 3857 goto out; 3858 } 3859 3860 saved_param = 
kstrdup(param, GFP_KERNEL); 3861 if (!saved_param) { 3862 ret = -ENOMEM; 3863 goto out; 3864 } 3865 3866 if (first_param && data->use_trace_keyword) { 3867 data->synth_event_name = saved_param; 3868 first_param = false; 3869 continue; 3870 } 3871 first_param = false; 3872 3873 data->params[data->n_params++] = saved_param; 3874 } 3875 out: 3876 return ret; 3877 } 3878 3879 static int action_parse(struct trace_array *tr, char *str, struct action_data *data, 3880 enum handler_id handler) 3881 { 3882 char *action_name; 3883 int ret = 0; 3884 3885 strsep(&str, "."); 3886 if (!str) { 3887 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3888 ret = -EINVAL; 3889 goto out; 3890 } 3891 3892 action_name = strsep(&str, "("); 3893 if (!action_name || !str) { 3894 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3895 ret = -EINVAL; 3896 goto out; 3897 } 3898 3899 if (str_has_prefix(action_name, "save")) { 3900 char *params = strsep(&str, ")"); 3901 3902 if (!params) { 3903 hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0); 3904 ret = -EINVAL; 3905 goto out; 3906 } 3907 3908 ret = parse_action_params(tr, params, data); 3909 if (ret) 3910 goto out; 3911 3912 if (handler == HANDLER_ONMAX) 3913 data->track_data.check_val = check_track_val_max; 3914 else if (handler == HANDLER_ONCHANGE) 3915 data->track_data.check_val = check_track_val_changed; 3916 else { 3917 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3918 ret = -EINVAL; 3919 goto out; 3920 } 3921 3922 data->track_data.save_data = save_track_data_vars; 3923 data->fn = ontrack_action; 3924 data->action = ACTION_SAVE; 3925 } else if (str_has_prefix(action_name, "snapshot")) { 3926 char *params = strsep(&str, ")"); 3927 3928 if (!str) { 3929 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params)); 3930 ret = -EINVAL; 3931 goto out; 3932 } 3933 3934 if (handler == HANDLER_ONMAX) 3935 data->track_data.check_val = check_track_val_max; 3936 else if (handler == HANDLER_ONCHANGE) 3937 data->track_data.check_val = check_track_val_changed; 3938 else { 3939 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3940 ret = -EINVAL; 3941 goto out; 3942 } 3943 3944 data->track_data.save_data = save_track_data_snapshot; 3945 data->fn = ontrack_action; 3946 data->action = ACTION_SNAPSHOT; 3947 } else { 3948 char *params = strsep(&str, ")"); 3949 3950 if (str_has_prefix(action_name, "trace")) 3951 data->use_trace_keyword = true; 3952 3953 if (params) { 3954 ret = parse_action_params(tr, params, data); 3955 if (ret) 3956 goto out; 3957 } 3958 3959 if (handler == HANDLER_ONMAX) 3960 data->track_data.check_val = check_track_val_max; 3961 else if (handler == HANDLER_ONCHANGE) 3962 data->track_data.check_val = check_track_val_changed; 3963 3964 if (handler != HANDLER_ONMATCH) { 3965 data->track_data.save_data = action_trace; 3966 data->fn = ontrack_action; 3967 } else 3968 data->fn = action_trace; 3969 3970 data->action = ACTION_TRACE; 3971 } 3972 3973 data->action_name = kstrdup(action_name, GFP_KERNEL); 3974 if (!data->action_name) { 3975 ret = -ENOMEM; 3976 goto out; 3977 } 3978 3979 data->handler = handler; 3980 out: 3981 return ret; 3982 } 3983 3984 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data, 3985 char *str, enum handler_id handler) 3986 { 3987 struct action_data *data; 3988 int ret = -EINVAL; 3989 char *var_str; 3990 3991 data = kzalloc(sizeof(*data), GFP_KERNEL); 3992 if (!data) 3993 return ERR_PTR(-ENOMEM); 3994 3995 var_str = strsep(&str, ")"); 3996 if (!var_str || !str) { 3997 ret = -EINVAL; 3998 goto free; 3999 } 4000 4001 
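	/*
	 * var_str now holds the 'x' from onmax(x)/onchange(x), and str
	 * points at the attached action, e.g. ".save(...)",
	 * ".snapshot()" or ".trace(...)", which action_parse() picks
	 * apart below.
	 */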
data->track_data.var_str = kstrdup(var_str, GFP_KERNEL); 4002 if (!data->track_data.var_str) { 4003 ret = -ENOMEM; 4004 goto free; 4005 } 4006 4007 ret = action_parse(hist_data->event_file->tr, str, data, handler); 4008 if (ret) 4009 goto free; 4010 out: 4011 return data; 4012 free: 4013 track_data_destroy(hist_data, data); 4014 data = ERR_PTR(ret); 4015 goto out; 4016 } 4017 4018 static void onmatch_destroy(struct action_data *data) 4019 { 4020 kfree(data->match_data.event); 4021 kfree(data->match_data.event_system); 4022 4023 action_data_destroy(data); 4024 } 4025 4026 static void destroy_field_var(struct field_var *field_var) 4027 { 4028 if (!field_var) 4029 return; 4030 4031 destroy_hist_field(field_var->var, 0); 4032 destroy_hist_field(field_var->val, 0); 4033 4034 kfree(field_var); 4035 } 4036 4037 static void destroy_field_vars(struct hist_trigger_data *hist_data) 4038 { 4039 unsigned int i; 4040 4041 for (i = 0; i < hist_data->n_field_vars; i++) 4042 destroy_field_var(hist_data->field_vars[i]); 4043 } 4044 4045 static void save_field_var(struct hist_trigger_data *hist_data, 4046 struct field_var *field_var) 4047 { 4048 hist_data->field_vars[hist_data->n_field_vars++] = field_var; 4049 4050 if (field_var->val->flags & HIST_FIELD_FL_STRING) 4051 hist_data->n_field_var_str++; 4052 } 4053 4054 4055 static int check_synth_field(struct synth_event *event, 4056 struct hist_field *hist_field, 4057 unsigned int field_pos) 4058 { 4059 struct synth_field *field; 4060 4061 if (field_pos >= event->n_fields) 4062 return -EINVAL; 4063 4064 field = event->fields[field_pos]; 4065 4066 if (strcmp(field->type, hist_field->type) != 0) 4067 return -EINVAL; 4068 4069 return 0; 4070 } 4071 4072 static struct hist_field * 4073 trace_action_find_var(struct hist_trigger_data *hist_data, 4074 struct action_data *data, 4075 char *system, char *event, char *var) 4076 { 4077 struct trace_array *tr = hist_data->event_file->tr; 4078 struct hist_field *hist_field; 4079 4080 var++; /* skip '$' */ 4081 4082 hist_field = find_target_event_var(hist_data, system, event, var); 4083 if (!hist_field) { 4084 if (!system && data->handler == HANDLER_ONMATCH) { 4085 system = data->match_data.event_system; 4086 event = data->match_data.event; 4087 } 4088 4089 hist_field = find_event_var(hist_data, system, event, var); 4090 } 4091 4092 if (!hist_field) 4093 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var)); 4094 4095 return hist_field; 4096 } 4097 4098 static struct hist_field * 4099 trace_action_create_field_var(struct hist_trigger_data *hist_data, 4100 struct action_data *data, char *system, 4101 char *event, char *var) 4102 { 4103 struct hist_field *hist_field = NULL; 4104 struct field_var *field_var; 4105 4106 /* 4107 * First try to create a field var on the target event (the 4108 * currently being defined). This will create a variable for 4109 * unqualified fields on the target event, or if qualified, 4110 * target fields that have qualified names matching the target. 4111 */ 4112 field_var = create_target_field_var(hist_data, system, event, var); 4113 4114 if (field_var && !IS_ERR(field_var)) { 4115 save_field_var(hist_data, field_var); 4116 hist_field = field_var->var; 4117 } else { 4118 field_var = NULL; 4119 /* 4120 * If no explicit system.event is specfied, default to 4121 * looking for fields on the onmatch(system.event.xxx) 4122 * event. 
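	 * For instance (made-up example), with
	 * onmatch(sched.sched_wakeup).wakeup_latency($lat,pid,prio),
	 * a bare 'prio' that can't be created as a field variable on
	 * the target event is resolved against sched_wakeup here.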
4123 */ 4124 if (!system && data->handler == HANDLER_ONMATCH) { 4125 system = data->match_data.event_system; 4126 event = data->match_data.event; 4127 } 4128 4129 /* 4130 * At this point, we're looking at a field on another 4131 * event. Because we can't modify a hist trigger on 4132 * another event to add a variable for a field, we need 4133 * to create a new trigger on that event and create the 4134 * variable at the same time. 4135 */ 4136 hist_field = create_field_var_hist(hist_data, system, event, var); 4137 if (IS_ERR(hist_field)) 4138 goto free; 4139 } 4140 out: 4141 return hist_field; 4142 free: 4143 destroy_field_var(field_var); 4144 hist_field = NULL; 4145 goto out; 4146 } 4147 4148 static int trace_action_create(struct hist_trigger_data *hist_data, 4149 struct action_data *data) 4150 { 4151 struct trace_array *tr = hist_data->event_file->tr; 4152 char *event_name, *param, *system = NULL; 4153 struct hist_field *hist_field, *var_ref; 4154 unsigned int i, var_ref_idx; 4155 unsigned int field_pos = 0; 4156 struct synth_event *event; 4157 char *synth_event_name; 4158 int ret = 0; 4159 4160 lockdep_assert_held(&event_mutex); 4161 4162 if (data->use_trace_keyword) 4163 synth_event_name = data->synth_event_name; 4164 else 4165 synth_event_name = data->action_name; 4166 4167 event = find_synth_event(synth_event_name); 4168 if (!event) { 4169 hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name)); 4170 return -EINVAL; 4171 } 4172 4173 event->ref++; 4174 4175 var_ref_idx = hist_data->n_var_refs; 4176 4177 for (i = 0; i < data->n_params; i++) { 4178 char *p; 4179 4180 p = param = kstrdup(data->params[i], GFP_KERNEL); 4181 if (!param) { 4182 ret = -ENOMEM; 4183 goto err; 4184 } 4185 4186 system = strsep(¶m, "."); 4187 if (!param) { 4188 param = (char *)system; 4189 system = event_name = NULL; 4190 } else { 4191 event_name = strsep(¶m, "."); 4192 if (!param) { 4193 kfree(p); 4194 ret = -EINVAL; 4195 goto err; 4196 } 4197 } 4198 4199 if (param[0] == '$') 4200 hist_field = trace_action_find_var(hist_data, data, 4201 system, event_name, 4202 param); 4203 else 4204 hist_field = trace_action_create_field_var(hist_data, 4205 data, 4206 system, 4207 event_name, 4208 param); 4209 4210 if (!hist_field) { 4211 kfree(p); 4212 ret = -EINVAL; 4213 goto err; 4214 } 4215 4216 if (check_synth_field(event, hist_field, field_pos) == 0) { 4217 var_ref = create_var_ref(hist_data, hist_field, 4218 system, event_name); 4219 if (!var_ref) { 4220 kfree(p); 4221 ret = -ENOMEM; 4222 goto err; 4223 } 4224 4225 field_pos++; 4226 kfree(p); 4227 continue; 4228 } 4229 4230 hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param)); 4231 kfree(p); 4232 ret = -EINVAL; 4233 goto err; 4234 } 4235 4236 if (field_pos != event->n_fields) { 4237 hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name)); 4238 ret = -EINVAL; 4239 goto err; 4240 } 4241 4242 data->synth_event = event; 4243 data->var_ref_idx = var_ref_idx; 4244 out: 4245 return ret; 4246 err: 4247 event->ref--; 4248 4249 goto out; 4250 } 4251 4252 static int action_create(struct hist_trigger_data *hist_data, 4253 struct action_data *data) 4254 { 4255 struct trace_event_file *file = hist_data->event_file; 4256 struct trace_array *tr = file->tr; 4257 struct track_data *track_data; 4258 struct field_var *field_var; 4259 unsigned int i; 4260 char *param; 4261 int ret = 0; 4262 4263 if (data->action == ACTION_TRACE) 4264 return trace_action_create(hist_data, data); 4265 4266 if (data->action == ACTION_SNAPSHOT) { 4267 track_data = 
track_data_alloc(hist_data->key_size, data, hist_data); 4268 if (IS_ERR(track_data)) { 4269 ret = PTR_ERR(track_data); 4270 goto out; 4271 } 4272 4273 ret = tracing_snapshot_cond_enable(file->tr, track_data, 4274 cond_snapshot_update); 4275 if (ret) 4276 track_data_free(track_data); 4277 4278 goto out; 4279 } 4280 4281 if (data->action == ACTION_SAVE) { 4282 if (hist_data->n_save_vars) { 4283 ret = -EEXIST; 4284 hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0); 4285 goto out; 4286 } 4287 4288 for (i = 0; i < data->n_params; i++) { 4289 param = kstrdup(data->params[i], GFP_KERNEL); 4290 if (!param) { 4291 ret = -ENOMEM; 4292 goto out; 4293 } 4294 4295 field_var = create_target_field_var(hist_data, NULL, NULL, param); 4296 if (IS_ERR(field_var)) { 4297 hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL, 4298 errpos(param)); 4299 ret = PTR_ERR(field_var); 4300 kfree(param); 4301 goto out; 4302 } 4303 4304 hist_data->save_vars[hist_data->n_save_vars++] = field_var; 4305 if (field_var->val->flags & HIST_FIELD_FL_STRING) 4306 hist_data->n_save_var_str++; 4307 kfree(param); 4308 } 4309 } 4310 out: 4311 return ret; 4312 } 4313 4314 static int onmatch_create(struct hist_trigger_data *hist_data, 4315 struct action_data *data) 4316 { 4317 return action_create(hist_data, data); 4318 } 4319 4320 static struct action_data *onmatch_parse(struct trace_array *tr, char *str) 4321 { 4322 char *match_event, *match_event_system; 4323 struct action_data *data; 4324 int ret = -EINVAL; 4325 4326 data = kzalloc(sizeof(*data), GFP_KERNEL); 4327 if (!data) 4328 return ERR_PTR(-ENOMEM); 4329 4330 match_event = strsep(&str, ")"); 4331 if (!match_event || !str) { 4332 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event)); 4333 goto free; 4334 } 4335 4336 match_event_system = strsep(&match_event, "."); 4337 if (!match_event) { 4338 hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system)); 4339 goto free; 4340 } 4341 4342 if (IS_ERR(event_file(tr, match_event_system, match_event))) { 4343 hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event)); 4344 goto free; 4345 } 4346 4347 data->match_data.event = kstrdup(match_event, GFP_KERNEL); 4348 if (!data->match_data.event) { 4349 ret = -ENOMEM; 4350 goto free; 4351 } 4352 4353 data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL); 4354 if (!data->match_data.event_system) { 4355 ret = -ENOMEM; 4356 goto free; 4357 } 4358 4359 ret = action_parse(tr, str, data, HANDLER_ONMATCH); 4360 if (ret) 4361 goto free; 4362 out: 4363 return data; 4364 free: 4365 onmatch_destroy(data); 4366 data = ERR_PTR(ret); 4367 goto out; 4368 } 4369 4370 static int create_hitcount_val(struct hist_trigger_data *hist_data) 4371 { 4372 hist_data->fields[HITCOUNT_IDX] = 4373 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL); 4374 if (!hist_data->fields[HITCOUNT_IDX]) 4375 return -ENOMEM; 4376 4377 hist_data->n_vals++; 4378 hist_data->n_fields++; 4379 4380 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX)) 4381 return -EINVAL; 4382 4383 return 0; 4384 } 4385 4386 static int __create_val_field(struct hist_trigger_data *hist_data, 4387 unsigned int val_idx, 4388 struct trace_event_file *file, 4389 char *var_name, char *field_str, 4390 unsigned long flags) 4391 { 4392 struct hist_field *hist_field; 4393 int ret = 0; 4394 4395 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0); 4396 if (IS_ERR(hist_field)) { 4397 ret = PTR_ERR(hist_field); 4398 goto out; 4399 } 4400 4401 hist_data->fields[val_idx] = hist_field; 4402 4403 
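	/*
	 * Both plain values and variable definitions funnel through
	 * here, so n_vals and n_fields count them together (hence the
	 * VALS_MAX + VARS_MAX bound checked below).
	 */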
++hist_data->n_vals; 4404 ++hist_data->n_fields; 4405 4406 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 4407 ret = -EINVAL; 4408 out: 4409 return ret; 4410 } 4411 4412 static int create_val_field(struct hist_trigger_data *hist_data, 4413 unsigned int val_idx, 4414 struct trace_event_file *file, 4415 char *field_str) 4416 { 4417 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX)) 4418 return -EINVAL; 4419 4420 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0); 4421 } 4422 4423 static int create_var_field(struct hist_trigger_data *hist_data, 4424 unsigned int val_idx, 4425 struct trace_event_file *file, 4426 char *var_name, char *expr_str) 4427 { 4428 struct trace_array *tr = hist_data->event_file->tr; 4429 unsigned long flags = 0; 4430 4431 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 4432 return -EINVAL; 4433 4434 if (find_var(hist_data, file, var_name) && !hist_data->remove) { 4435 hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name)); 4436 return -EINVAL; 4437 } 4438 4439 flags |= HIST_FIELD_FL_VAR; 4440 hist_data->n_vars++; 4441 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX)) 4442 return -EINVAL; 4443 4444 return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags); 4445 } 4446 4447 static int create_val_fields(struct hist_trigger_data *hist_data, 4448 struct trace_event_file *file) 4449 { 4450 char *fields_str, *field_str; 4451 unsigned int i, j = 1; 4452 int ret; 4453 4454 ret = create_hitcount_val(hist_data); 4455 if (ret) 4456 goto out; 4457 4458 fields_str = hist_data->attrs->vals_str; 4459 if (!fields_str) 4460 goto out; 4461 4462 strsep(&fields_str, "="); 4463 if (!fields_str) 4464 goto out; 4465 4466 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX && 4467 j < TRACING_MAP_VALS_MAX; i++) { 4468 field_str = strsep(&fields_str, ","); 4469 if (!field_str) 4470 break; 4471 4472 if (strcmp(field_str, "hitcount") == 0) 4473 continue; 4474 4475 ret = create_val_field(hist_data, j++, file, field_str); 4476 if (ret) 4477 goto out; 4478 } 4479 4480 if (fields_str && (strcmp(fields_str, "hitcount") != 0)) 4481 ret = -EINVAL; 4482 out: 4483 return ret; 4484 } 4485 4486 static int create_key_field(struct hist_trigger_data *hist_data, 4487 unsigned int key_idx, 4488 unsigned int key_offset, 4489 struct trace_event_file *file, 4490 char *field_str) 4491 { 4492 struct trace_array *tr = hist_data->event_file->tr; 4493 struct hist_field *hist_field = NULL; 4494 unsigned long flags = 0; 4495 unsigned int key_size; 4496 int ret = 0; 4497 4498 if (WARN_ON(key_idx >= HIST_FIELDS_MAX)) 4499 return -EINVAL; 4500 4501 flags |= HIST_FIELD_FL_KEY; 4502 4503 if (strcmp(field_str, "stacktrace") == 0) { 4504 flags |= HIST_FIELD_FL_STACKTRACE; 4505 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH; 4506 hist_field = create_hist_field(hist_data, NULL, flags, NULL); 4507 } else { 4508 hist_field = parse_expr(hist_data, file, field_str, flags, 4509 NULL, 0); 4510 if (IS_ERR(hist_field)) { 4511 ret = PTR_ERR(hist_field); 4512 goto out; 4513 } 4514 4515 if (field_has_hist_vars(hist_field, 0)) { 4516 hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str)); 4517 destroy_hist_field(hist_field, 0); 4518 ret = -EINVAL; 4519 goto out; 4520 } 4521 4522 key_size = hist_field->size; 4523 } 4524 4525 hist_data->fields[key_idx] = hist_field; 4526 4527 key_size = ALIGN(key_size, sizeof(u64)); 4528 hist_data->fields[key_idx]->size = key_size; 4529 hist_data->fields[key_idx]->offset = key_offset; 4530 4531 hist_data->key_size 
+= key_size; 4532 4533 if (hist_data->key_size > HIST_KEY_SIZE_MAX) { 4534 ret = -EINVAL; 4535 goto out; 4536 } 4537 4538 hist_data->n_keys++; 4539 hist_data->n_fields++; 4540 4541 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX)) 4542 return -EINVAL; 4543 4544 ret = key_size; 4545 out: 4546 return ret; 4547 } 4548 4549 static int create_key_fields(struct hist_trigger_data *hist_data, 4550 struct trace_event_file *file) 4551 { 4552 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals; 4553 char *fields_str, *field_str; 4554 int ret = -EINVAL; 4555 4556 fields_str = hist_data->attrs->keys_str; 4557 if (!fields_str) 4558 goto out; 4559 4560 strsep(&fields_str, "="); 4561 if (!fields_str) 4562 goto out; 4563 4564 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) { 4565 field_str = strsep(&fields_str, ","); 4566 if (!field_str) 4567 break; 4568 ret = create_key_field(hist_data, i, key_offset, 4569 file, field_str); 4570 if (ret < 0) 4571 goto out; 4572 key_offset += ret; 4573 } 4574 if (fields_str) { 4575 ret = -EINVAL; 4576 goto out; 4577 } 4578 ret = 0; 4579 out: 4580 return ret; 4581 } 4582 4583 static int create_var_fields(struct hist_trigger_data *hist_data, 4584 struct trace_event_file *file) 4585 { 4586 unsigned int i, j = hist_data->n_vals; 4587 int ret = 0; 4588 4589 unsigned int n_vars = hist_data->attrs->var_defs.n_vars; 4590 4591 for (i = 0; i < n_vars; i++) { 4592 char *var_name = hist_data->attrs->var_defs.name[i]; 4593 char *expr = hist_data->attrs->var_defs.expr[i]; 4594 4595 ret = create_var_field(hist_data, j++, file, var_name, expr); 4596 if (ret) 4597 goto out; 4598 } 4599 out: 4600 return ret; 4601 } 4602 4603 static void free_var_defs(struct hist_trigger_data *hist_data) 4604 { 4605 unsigned int i; 4606 4607 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { 4608 kfree(hist_data->attrs->var_defs.name[i]); 4609 kfree(hist_data->attrs->var_defs.expr[i]); 4610 } 4611 4612 hist_data->attrs->var_defs.n_vars = 0; 4613 } 4614 4615 static int parse_var_defs(struct hist_trigger_data *hist_data) 4616 { 4617 struct trace_array *tr = hist_data->event_file->tr; 4618 char *s, *str, *var_name, *field_str; 4619 unsigned int i, j, n_vars = 0; 4620 int ret = 0; 4621 4622 for (i = 0; i < hist_data->attrs->n_assignments; i++) { 4623 str = hist_data->attrs->assignment_str[i]; 4624 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) { 4625 field_str = strsep(&str, ","); 4626 if (!field_str) 4627 break; 4628 4629 var_name = strsep(&field_str, "="); 4630 if (!var_name || !field_str) { 4631 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT, 4632 errpos(var_name)); 4633 ret = -EINVAL; 4634 goto free; 4635 } 4636 4637 if (n_vars == TRACING_MAP_VARS_MAX) { 4638 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name)); 4639 ret = -EINVAL; 4640 goto free; 4641 } 4642 4643 s = kstrdup(var_name, GFP_KERNEL); 4644 if (!s) { 4645 ret = -ENOMEM; 4646 goto free; 4647 } 4648 hist_data->attrs->var_defs.name[n_vars] = s; 4649 4650 s = kstrdup(field_str, GFP_KERNEL); 4651 if (!s) { 4652 kfree(hist_data->attrs->var_defs.name[n_vars]); 4653 ret = -ENOMEM; 4654 goto free; 4655 } 4656 hist_data->attrs->var_defs.expr[n_vars++] = s; 4657 4658 hist_data->attrs->var_defs.n_vars = n_vars; 4659 } 4660 } 4661 4662 return ret; 4663 free: 4664 free_var_defs(hist_data); 4665 4666 return ret; 4667 } 4668 4669 static int create_hist_fields(struct hist_trigger_data *hist_data, 4670 struct trace_event_file *file) 4671 { 4672 int ret; 4673 4674 ret = parse_var_defs(hist_data); 4675 if (ret) 4676 goto out; 4677 4678 ret = 
create_val_fields(hist_data, file); 4679 if (ret) 4680 goto out; 4681 4682 ret = create_var_fields(hist_data, file); 4683 if (ret) 4684 goto out; 4685 4686 ret = create_key_fields(hist_data, file); 4687 if (ret) 4688 goto out; 4689 out: 4690 free_var_defs(hist_data); 4691 4692 return ret; 4693 } 4694 4695 static int is_descending(const char *str) 4696 { 4697 if (!str) 4698 return 0; 4699 4700 if (strcmp(str, "descending") == 0) 4701 return 1; 4702 4703 if (strcmp(str, "ascending") == 0) 4704 return 0; 4705 4706 return -EINVAL; 4707 } 4708 4709 static int create_sort_keys(struct hist_trigger_data *hist_data) 4710 { 4711 char *fields_str = hist_data->attrs->sort_key_str; 4712 struct tracing_map_sort_key *sort_key; 4713 int descending, ret = 0; 4714 unsigned int i, j, k; 4715 4716 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */ 4717 4718 if (!fields_str) 4719 goto out; 4720 4721 strsep(&fields_str, "="); 4722 if (!fields_str) { 4723 ret = -EINVAL; 4724 goto out; 4725 } 4726 4727 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) { 4728 struct hist_field *hist_field; 4729 char *field_str, *field_name; 4730 const char *test_name; 4731 4732 sort_key = &hist_data->sort_keys[i]; 4733 4734 field_str = strsep(&fields_str, ","); 4735 if (!field_str) { 4736 if (i == 0) 4737 ret = -EINVAL; 4738 break; 4739 } 4740 4741 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) { 4742 ret = -EINVAL; 4743 break; 4744 } 4745 4746 field_name = strsep(&field_str, "."); 4747 if (!field_name) { 4748 ret = -EINVAL; 4749 break; 4750 } 4751 4752 if (strcmp(field_name, "hitcount") == 0) { 4753 descending = is_descending(field_str); 4754 if (descending < 0) { 4755 ret = descending; 4756 break; 4757 } 4758 sort_key->descending = descending; 4759 continue; 4760 } 4761 4762 for (j = 1, k = 1; j < hist_data->n_fields; j++) { 4763 unsigned int idx; 4764 4765 hist_field = hist_data->fields[j]; 4766 if (hist_field->flags & HIST_FIELD_FL_VAR) 4767 continue; 4768 4769 idx = k++; 4770 4771 test_name = hist_field_name(hist_field, 0); 4772 4773 if (strcmp(field_name, test_name) == 0) { 4774 sort_key->field_idx = idx; 4775 descending = is_descending(field_str); 4776 if (descending < 0) { 4777 ret = descending; 4778 goto out; 4779 } 4780 sort_key->descending = descending; 4781 break; 4782 } 4783 } 4784 if (j == hist_data->n_fields) { 4785 ret = -EINVAL; 4786 break; 4787 } 4788 } 4789 4790 hist_data->n_sort_keys = i; 4791 out: 4792 return ret; 4793 } 4794 4795 static void destroy_actions(struct hist_trigger_data *hist_data) 4796 { 4797 unsigned int i; 4798 4799 for (i = 0; i < hist_data->n_actions; i++) { 4800 struct action_data *data = hist_data->actions[i]; 4801 4802 if (data->handler == HANDLER_ONMATCH) 4803 onmatch_destroy(data); 4804 else if (data->handler == HANDLER_ONMAX || 4805 data->handler == HANDLER_ONCHANGE) 4806 track_data_destroy(hist_data, data); 4807 else 4808 kfree(data); 4809 } 4810 } 4811 4812 static int parse_actions(struct hist_trigger_data *hist_data) 4813 { 4814 struct trace_array *tr = hist_data->event_file->tr; 4815 struct action_data *data; 4816 unsigned int i; 4817 int ret = 0; 4818 char *str; 4819 int len; 4820 4821 for (i = 0; i < hist_data->attrs->n_actions; i++) { 4822 str = hist_data->attrs->action_str[i]; 4823 4824 if ((len = str_has_prefix(str, "onmatch("))) { 4825 char *action_str = str + len; 4826 4827 data = onmatch_parse(tr, action_str); 4828 if (IS_ERR(data)) { 4829 ret = PTR_ERR(data); 4830 break; 4831 } 4832 } else if ((len = str_has_prefix(str, "onmax("))) { 4833 char 
*action_str = str + len; 4834 4835 data = track_data_parse(hist_data, action_str, 4836 HANDLER_ONMAX); 4837 if (IS_ERR(data)) { 4838 ret = PTR_ERR(data); 4839 break; 4840 } 4841 } else if ((len = str_has_prefix(str, "onchange("))) { 4842 char *action_str = str + len; 4843 4844 data = track_data_parse(hist_data, action_str, 4845 HANDLER_ONCHANGE); 4846 if (IS_ERR(data)) { 4847 ret = PTR_ERR(data); 4848 break; 4849 } 4850 } else { 4851 ret = -EINVAL; 4852 break; 4853 } 4854 4855 hist_data->actions[hist_data->n_actions++] = data; 4856 } 4857 4858 return ret; 4859 } 4860 4861 static int create_actions(struct hist_trigger_data *hist_data) 4862 { 4863 struct action_data *data; 4864 unsigned int i; 4865 int ret = 0; 4866 4867 for (i = 0; i < hist_data->attrs->n_actions; i++) { 4868 data = hist_data->actions[i]; 4869 4870 if (data->handler == HANDLER_ONMATCH) { 4871 ret = onmatch_create(hist_data, data); 4872 if (ret) 4873 break; 4874 } else if (data->handler == HANDLER_ONMAX || 4875 data->handler == HANDLER_ONCHANGE) { 4876 ret = track_data_create(hist_data, data); 4877 if (ret) 4878 break; 4879 } else { 4880 ret = -EINVAL; 4881 break; 4882 } 4883 } 4884 4885 return ret; 4886 } 4887 4888 static void print_actions(struct seq_file *m, 4889 struct hist_trigger_data *hist_data, 4890 struct tracing_map_elt *elt) 4891 { 4892 unsigned int i; 4893 4894 for (i = 0; i < hist_data->n_actions; i++) { 4895 struct action_data *data = hist_data->actions[i]; 4896 4897 if (data->action == ACTION_SNAPSHOT) 4898 continue; 4899 4900 if (data->handler == HANDLER_ONMAX || 4901 data->handler == HANDLER_ONCHANGE) 4902 track_data_print(m, hist_data, elt, data); 4903 } 4904 } 4905 4906 static void print_action_spec(struct seq_file *m, 4907 struct hist_trigger_data *hist_data, 4908 struct action_data *data) 4909 { 4910 unsigned int i; 4911 4912 if (data->action == ACTION_SAVE) { 4913 for (i = 0; i < hist_data->n_save_vars; i++) { 4914 seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name); 4915 if (i < hist_data->n_save_vars - 1) 4916 seq_puts(m, ","); 4917 } 4918 } else if (data->action == ACTION_TRACE) { 4919 if (data->use_trace_keyword) 4920 seq_printf(m, "%s", data->synth_event_name); 4921 for (i = 0; i < data->n_params; i++) { 4922 if (i || data->use_trace_keyword) 4923 seq_puts(m, ","); 4924 seq_printf(m, "%s", data->params[i]); 4925 } 4926 } 4927 } 4928 4929 static void print_track_data_spec(struct seq_file *m, 4930 struct hist_trigger_data *hist_data, 4931 struct action_data *data) 4932 { 4933 if (data->handler == HANDLER_ONMAX) 4934 seq_puts(m, ":onmax("); 4935 else if (data->handler == HANDLER_ONCHANGE) 4936 seq_puts(m, ":onchange("); 4937 seq_printf(m, "%s", data->track_data.var_str); 4938 seq_printf(m, ").%s(", data->action_name); 4939 4940 print_action_spec(m, hist_data, data); 4941 4942 seq_puts(m, ")"); 4943 } 4944 4945 static void print_onmatch_spec(struct seq_file *m, 4946 struct hist_trigger_data *hist_data, 4947 struct action_data *data) 4948 { 4949 seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system, 4950 data->match_data.event); 4951 4952 seq_printf(m, "%s(", data->action_name); 4953 4954 print_action_spec(m, hist_data, data); 4955 4956 seq_puts(m, ")"); 4957 } 4958 4959 static bool actions_match(struct hist_trigger_data *hist_data, 4960 struct hist_trigger_data *hist_data_test) 4961 { 4962 unsigned int i, j; 4963 4964 if (hist_data->n_actions != hist_data_test->n_actions) 4965 return false; 4966 4967 for (i = 0; i < hist_data->n_actions; i++) { 4968 struct action_data *data = 
hist_data->actions[i]; 4969 struct action_data *data_test = hist_data_test->actions[i]; 4970 char *action_name, *action_name_test; 4971 4972 if (data->handler != data_test->handler) 4973 return false; 4974 if (data->action != data_test->action) 4975 return false; 4976 4977 if (data->n_params != data_test->n_params) 4978 return false; 4979 4980 for (j = 0; j < data->n_params; j++) { 4981 if (strcmp(data->params[j], data_test->params[j]) != 0) 4982 return false; 4983 } 4984 4985 if (data->use_trace_keyword) 4986 action_name = data->synth_event_name; 4987 else 4988 action_name = data->action_name; 4989 4990 if (data_test->use_trace_keyword) 4991 action_name_test = data_test->synth_event_name; 4992 else 4993 action_name_test = data_test->action_name; 4994 4995 if (strcmp(action_name, action_name_test) != 0) 4996 return false; 4997 4998 if (data->handler == HANDLER_ONMATCH) { 4999 if (strcmp(data->match_data.event_system, 5000 data_test->match_data.event_system) != 0) 5001 return false; 5002 if (strcmp(data->match_data.event, 5003 data_test->match_data.event) != 0) 5004 return false; 5005 } else if (data->handler == HANDLER_ONMAX || 5006 data->handler == HANDLER_ONCHANGE) { 5007 if (strcmp(data->track_data.var_str, 5008 data_test->track_data.var_str) != 0) 5009 return false; 5010 } 5011 } 5012 5013 return true; 5014 } 5015 5016 5017 static void print_actions_spec(struct seq_file *m, 5018 struct hist_trigger_data *hist_data) 5019 { 5020 unsigned int i; 5021 5022 for (i = 0; i < hist_data->n_actions; i++) { 5023 struct action_data *data = hist_data->actions[i]; 5024 5025 if (data->handler == HANDLER_ONMATCH) 5026 print_onmatch_spec(m, hist_data, data); 5027 else if (data->handler == HANDLER_ONMAX || 5028 data->handler == HANDLER_ONCHANGE) 5029 print_track_data_spec(m, hist_data, data); 5030 } 5031 } 5032 5033 static void destroy_field_var_hists(struct hist_trigger_data *hist_data) 5034 { 5035 unsigned int i; 5036 5037 for (i = 0; i < hist_data->n_field_var_hists; i++) { 5038 kfree(hist_data->field_var_hists[i]->cmd); 5039 kfree(hist_data->field_var_hists[i]); 5040 } 5041 } 5042 5043 static void destroy_hist_data(struct hist_trigger_data *hist_data) 5044 { 5045 if (!hist_data) 5046 return; 5047 5048 destroy_hist_trigger_attrs(hist_data->attrs); 5049 destroy_hist_fields(hist_data); 5050 tracing_map_destroy(hist_data->map); 5051 5052 destroy_actions(hist_data); 5053 destroy_field_vars(hist_data); 5054 destroy_field_var_hists(hist_data); 5055 5056 kfree(hist_data); 5057 } 5058 5059 static int create_tracing_map_fields(struct hist_trigger_data *hist_data) 5060 { 5061 struct tracing_map *map = hist_data->map; 5062 struct ftrace_event_field *field; 5063 struct hist_field *hist_field; 5064 int i, idx = 0; 5065 5066 for_each_hist_field(i, hist_data) { 5067 hist_field = hist_data->fields[i]; 5068 if (hist_field->flags & HIST_FIELD_FL_KEY) { 5069 tracing_map_cmp_fn_t cmp_fn; 5070 5071 field = hist_field->field; 5072 5073 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE) 5074 cmp_fn = tracing_map_cmp_none; 5075 else if (!field) 5076 cmp_fn = tracing_map_cmp_num(hist_field->size, 5077 hist_field->is_signed); 5078 else if (is_string_field(field)) 5079 cmp_fn = tracing_map_cmp_string; 5080 else 5081 cmp_fn = tracing_map_cmp_num(field->size, 5082 field->is_signed); 5083 idx = tracing_map_add_key_field(map, 5084 hist_field->offset, 5085 cmp_fn); 5086 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR)) 5087 idx = tracing_map_add_sum_field(map); 5088 5089 if (idx < 0) 5090 return idx; 5091 5092 if 
(hist_field->flags & HIST_FIELD_FL_VAR) { 5093 idx = tracing_map_add_var(map); 5094 if (idx < 0) 5095 return idx; 5096 hist_field->var.idx = idx; 5097 hist_field->var.hist_data = hist_data; 5098 } 5099 } 5100 5101 return 0; 5102 } 5103 5104 static struct hist_trigger_data * 5105 create_hist_data(unsigned int map_bits, 5106 struct hist_trigger_attrs *attrs, 5107 struct trace_event_file *file, 5108 bool remove) 5109 { 5110 const struct tracing_map_ops *map_ops = NULL; 5111 struct hist_trigger_data *hist_data; 5112 int ret = 0; 5113 5114 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL); 5115 if (!hist_data) 5116 return ERR_PTR(-ENOMEM); 5117 5118 hist_data->attrs = attrs; 5119 hist_data->remove = remove; 5120 hist_data->event_file = file; 5121 5122 ret = parse_actions(hist_data); 5123 if (ret) 5124 goto free; 5125 5126 ret = create_hist_fields(hist_data, file); 5127 if (ret) 5128 goto free; 5129 5130 ret = create_sort_keys(hist_data); 5131 if (ret) 5132 goto free; 5133 5134 map_ops = &hist_trigger_elt_data_ops; 5135 5136 hist_data->map = tracing_map_create(map_bits, hist_data->key_size, 5137 map_ops, hist_data); 5138 if (IS_ERR(hist_data->map)) { 5139 ret = PTR_ERR(hist_data->map); 5140 hist_data->map = NULL; 5141 goto free; 5142 } 5143 5144 ret = create_tracing_map_fields(hist_data); 5145 if (ret) 5146 goto free; 5147 out: 5148 return hist_data; 5149 free: 5150 hist_data->attrs = NULL; 5151 5152 destroy_hist_data(hist_data); 5153 5154 hist_data = ERR_PTR(ret); 5155 5156 goto out; 5157 } 5158 5159 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data, 5160 struct tracing_map_elt *elt, void *rec, 5161 struct ring_buffer_event *rbe, 5162 u64 *var_ref_vals) 5163 { 5164 struct hist_elt_data *elt_data; 5165 struct hist_field *hist_field; 5166 unsigned int i, var_idx; 5167 u64 hist_val; 5168 5169 elt_data = elt->private_data; 5170 elt_data->var_ref_vals = var_ref_vals; 5171 5172 for_each_hist_val_field(i, hist_data) { 5173 hist_field = hist_data->fields[i]; 5174 hist_val = hist_field->fn(hist_field, elt, rbe, rec); 5175 if (hist_field->flags & HIST_FIELD_FL_VAR) { 5176 var_idx = hist_field->var.idx; 5177 tracing_map_set_var(elt, var_idx, hist_val); 5178 continue; 5179 } 5180 tracing_map_update_sum(elt, i, hist_val); 5181 } 5182 5183 for_each_hist_key_field(i, hist_data) { 5184 hist_field = hist_data->fields[i]; 5185 if (hist_field->flags & HIST_FIELD_FL_VAR) { 5186 hist_val = hist_field->fn(hist_field, elt, rbe, rec); 5187 var_idx = hist_field->var.idx; 5188 tracing_map_set_var(elt, var_idx, hist_val); 5189 } 5190 } 5191 5192 update_field_vars(hist_data, elt, rbe, rec); 5193 } 5194 5195 static inline void add_to_key(char *compound_key, void *key, 5196 struct hist_field *key_field, void *rec) 5197 { 5198 size_t size = key_field->size; 5199 5200 if (key_field->flags & HIST_FIELD_FL_STRING) { 5201 struct ftrace_event_field *field; 5202 5203 field = key_field->field; 5204 if (field->filter_type == FILTER_DYN_STRING) 5205 size = *(u32 *)(rec + field->offset) >> 16; 5206 else if (field->filter_type == FILTER_PTR_STRING) 5207 size = strlen(key); 5208 else if (field->filter_type == FILTER_STATIC_STRING) 5209 size = field->size; 5210 5211 /* ensure NULL-termination */ 5212 if (size > key_field->size - 1) 5213 size = key_field->size - 1; 5214 5215 strncpy(compound_key + key_field->offset, (char *)key, size); 5216 } else 5217 memcpy(compound_key + key_field->offset, key, size); 5218 } 5219 5220 static void 5221 hist_trigger_actions(struct hist_trigger_data *hist_data, 5222 struct 
tracing_map_elt *elt, void *rec, 5223 struct ring_buffer_event *rbe, void *key, 5224 u64 *var_ref_vals) 5225 { 5226 struct action_data *data; 5227 unsigned int i; 5228 5229 for (i = 0; i < hist_data->n_actions; i++) { 5230 data = hist_data->actions[i]; 5231 data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals); 5232 } 5233 } 5234 5235 static void event_hist_trigger(struct event_trigger_data *data, void *rec, 5236 struct ring_buffer_event *rbe) 5237 { 5238 struct hist_trigger_data *hist_data = data->private_data; 5239 bool use_compound_key = (hist_data->n_keys > 1); 5240 unsigned long entries[HIST_STACKTRACE_DEPTH]; 5241 u64 var_ref_vals[TRACING_MAP_VARS_MAX]; 5242 char compound_key[HIST_KEY_SIZE_MAX]; 5243 struct tracing_map_elt *elt = NULL; 5244 struct hist_field *key_field; 5245 u64 field_contents; 5246 void *key = NULL; 5247 unsigned int i; 5248 5249 memset(compound_key, 0, hist_data->key_size); 5250 5251 for_each_hist_key_field(i, hist_data) { 5252 key_field = hist_data->fields[i]; 5253 5254 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { 5255 memset(entries, 0, HIST_STACKTRACE_SIZE); 5256 stack_trace_save(entries, HIST_STACKTRACE_DEPTH, 5257 HIST_STACKTRACE_SKIP); 5258 key = entries; 5259 } else { 5260 field_contents = key_field->fn(key_field, elt, rbe, rec); 5261 if (key_field->flags & HIST_FIELD_FL_STRING) { 5262 key = (void *)(unsigned long)field_contents; 5263 use_compound_key = true; 5264 } else 5265 key = (void *)&field_contents; 5266 } 5267 5268 if (use_compound_key) 5269 add_to_key(compound_key, key, key_field, rec); 5270 } 5271 5272 if (use_compound_key) 5273 key = compound_key; 5274 5275 if (hist_data->n_var_refs && 5276 !resolve_var_refs(hist_data, key, var_ref_vals, false)) 5277 return; 5278 5279 elt = tracing_map_insert(hist_data->map, key); 5280 if (!elt) 5281 return; 5282 5283 hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals); 5284 5285 if (resolve_var_refs(hist_data, key, var_ref_vals, true)) 5286 hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals); 5287 } 5288 5289 static void hist_trigger_stacktrace_print(struct seq_file *m, 5290 unsigned long *stacktrace_entries, 5291 unsigned int max_entries) 5292 { 5293 char str[KSYM_SYMBOL_LEN]; 5294 unsigned int spaces = 8; 5295 unsigned int i; 5296 5297 for (i = 0; i < max_entries; i++) { 5298 if (!stacktrace_entries[i]) 5299 return; 5300 5301 seq_printf(m, "%*c", 1 + spaces, ' '); 5302 sprint_symbol(str, stacktrace_entries[i]); 5303 seq_printf(m, "%s\n", str); 5304 } 5305 } 5306 5307 static void hist_trigger_print_key(struct seq_file *m, 5308 struct hist_trigger_data *hist_data, 5309 void *key, 5310 struct tracing_map_elt *elt) 5311 { 5312 struct hist_field *key_field; 5313 char str[KSYM_SYMBOL_LEN]; 5314 bool multiline = false; 5315 const char *field_name; 5316 unsigned int i; 5317 u64 uval; 5318 5319 seq_puts(m, "{ "); 5320 5321 for_each_hist_key_field(i, hist_data) { 5322 key_field = hist_data->fields[i]; 5323 5324 if (i > hist_data->n_vals) 5325 seq_puts(m, ", "); 5326 5327 field_name = hist_field_name(key_field, 0); 5328 5329 if (key_field->flags & HIST_FIELD_FL_HEX) { 5330 uval = *(u64 *)(key + key_field->offset); 5331 seq_printf(m, "%s: %llx", field_name, uval); 5332 } else if (key_field->flags & HIST_FIELD_FL_SYM) { 5333 uval = *(u64 *)(key + key_field->offset); 5334 sprint_symbol_no_offset(str, uval); 5335 seq_printf(m, "%s: [%llx] %-45s", field_name, 5336 uval, str); 5337 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) { 5338 uval = *(u64 *)(key + key_field->offset); 5339 
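			/* .sym-offset: unlike the .sym case above, sprint_symbol() keeps the +offset */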
sprint_symbol(str, uval); 5340 seq_printf(m, "%s: [%llx] %-55s", field_name, 5341 uval, str); 5342 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) { 5343 struct hist_elt_data *elt_data = elt->private_data; 5344 char *comm; 5345 5346 if (WARN_ON_ONCE(!elt_data)) 5347 return; 5348 5349 comm = elt_data->comm; 5350 5351 uval = *(u64 *)(key + key_field->offset); 5352 seq_printf(m, "%s: %-16s[%10llu]", field_name, 5353 comm, uval); 5354 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) { 5355 const char *syscall_name; 5356 5357 uval = *(u64 *)(key + key_field->offset); 5358 syscall_name = get_syscall_name(uval); 5359 if (!syscall_name) 5360 syscall_name = "unknown_syscall"; 5361 5362 seq_printf(m, "%s: %-30s[%3llu]", field_name, 5363 syscall_name, uval); 5364 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { 5365 seq_puts(m, "stacktrace:\n"); 5366 hist_trigger_stacktrace_print(m, 5367 key + key_field->offset, 5368 HIST_STACKTRACE_DEPTH); 5369 multiline = true; 5370 } else if (key_field->flags & HIST_FIELD_FL_LOG2) { 5371 seq_printf(m, "%s: ~ 2^%-2llu", field_name, 5372 *(u64 *)(key + key_field->offset)); 5373 } else if (key_field->flags & HIST_FIELD_FL_STRING) { 5374 seq_printf(m, "%s: %-50s", field_name, 5375 (char *)(key + key_field->offset)); 5376 } else { 5377 uval = *(u64 *)(key + key_field->offset); 5378 seq_printf(m, "%s: %10llu", field_name, uval); 5379 } 5380 } 5381 5382 if (!multiline) 5383 seq_puts(m, " "); 5384 5385 seq_puts(m, "}"); 5386 } 5387 5388 static void hist_trigger_entry_print(struct seq_file *m, 5389 struct hist_trigger_data *hist_data, 5390 void *key, 5391 struct tracing_map_elt *elt) 5392 { 5393 const char *field_name; 5394 unsigned int i; 5395 5396 hist_trigger_print_key(m, hist_data, key, elt); 5397 5398 seq_printf(m, " hitcount: %10llu", 5399 tracing_map_read_sum(elt, HITCOUNT_IDX)); 5400 5401 for (i = 1; i < hist_data->n_vals; i++) { 5402 field_name = hist_field_name(hist_data->fields[i], 0); 5403 5404 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR || 5405 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR) 5406 continue; 5407 5408 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) { 5409 seq_printf(m, " %s: %10llx", field_name, 5410 tracing_map_read_sum(elt, i)); 5411 } else { 5412 seq_printf(m, " %s: %10llu", field_name, 5413 tracing_map_read_sum(elt, i)); 5414 } 5415 } 5416 5417 print_actions(m, hist_data, elt); 5418 5419 seq_puts(m, "\n"); 5420 } 5421 5422 static int print_entries(struct seq_file *m, 5423 struct hist_trigger_data *hist_data) 5424 { 5425 struct tracing_map_sort_entry **sort_entries = NULL; 5426 struct tracing_map *map = hist_data->map; 5427 int i, n_entries; 5428 5429 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys, 5430 hist_data->n_sort_keys, 5431 &sort_entries); 5432 if (n_entries < 0) 5433 return n_entries; 5434 5435 for (i = 0; i < n_entries; i++) 5436 hist_trigger_entry_print(m, hist_data, 5437 sort_entries[i]->key, 5438 sort_entries[i]->elt); 5439 5440 tracing_map_destroy_sort_entries(sort_entries, n_entries); 5441 5442 return n_entries; 5443 } 5444 5445 static void hist_trigger_show(struct seq_file *m, 5446 struct event_trigger_data *data, int n) 5447 { 5448 struct hist_trigger_data *hist_data; 5449 int n_entries; 5450 5451 if (n > 0) 5452 seq_puts(m, "\n\n"); 5453 5454 seq_puts(m, "# event histogram\n#\n# trigger info: "); 5455 data->ops->print(m, data->ops, data); 5456 seq_puts(m, "#\n\n"); 5457 5458 hist_data = data->private_data; 5459 n_entries = print_entries(m, hist_data); 5460 if (n_entries < 0) 
5461 n_entries = 0; 5462 5463 track_data_snapshot_print(m, hist_data); 5464 5465 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n", 5466 (u64)atomic64_read(&hist_data->map->hits), 5467 n_entries, (u64)atomic64_read(&hist_data->map->drops)); 5468 } 5469 5470 static int hist_show(struct seq_file *m, void *v) 5471 { 5472 struct event_trigger_data *data; 5473 struct trace_event_file *event_file; 5474 int n = 0, ret = 0; 5475 5476 mutex_lock(&event_mutex); 5477 5478 event_file = event_file_data(m->private); 5479 if (unlikely(!event_file)) { 5480 ret = -ENODEV; 5481 goto out_unlock; 5482 } 5483 5484 list_for_each_entry_rcu(data, &event_file->triggers, list) { 5485 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) 5486 hist_trigger_show(m, data, n++); 5487 } 5488 5489 out_unlock: 5490 mutex_unlock(&event_mutex); 5491 5492 return ret; 5493 } 5494 5495 static int event_hist_open(struct inode *inode, struct file *file) 5496 { 5497 return single_open(file, hist_show, file); 5498 } 5499 5500 const struct file_operations event_hist_fops = { 5501 .open = event_hist_open, 5502 .read = seq_read, 5503 .llseek = seq_lseek, 5504 .release = single_release, 5505 }; 5506 5507 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) 5508 { 5509 const char *field_name = hist_field_name(hist_field, 0); 5510 5511 if (hist_field->var.name) 5512 seq_printf(m, "%s=", hist_field->var.name); 5513 5514 if (hist_field->flags & HIST_FIELD_FL_CPU) 5515 seq_puts(m, "cpu"); 5516 else if (field_name) { 5517 if (hist_field->flags & HIST_FIELD_FL_VAR_REF || 5518 hist_field->flags & HIST_FIELD_FL_ALIAS) 5519 seq_putc(m, '$'); 5520 seq_printf(m, "%s", field_name); 5521 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP) 5522 seq_puts(m, "common_timestamp"); 5523 5524 if (hist_field->flags) { 5525 if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) && 5526 !(hist_field->flags & HIST_FIELD_FL_EXPR)) { 5527 const char *flags = get_hist_field_flags(hist_field); 5528 5529 if (flags) 5530 seq_printf(m, ".%s", flags); 5531 } 5532 } 5533 } 5534 5535 static int event_hist_trigger_print(struct seq_file *m, 5536 struct event_trigger_ops *ops, 5537 struct event_trigger_data *data) 5538 { 5539 struct hist_trigger_data *hist_data = data->private_data; 5540 struct hist_field *field; 5541 bool have_var = false; 5542 unsigned int i; 5543 5544 seq_puts(m, "hist:"); 5545 5546 if (data->name) 5547 seq_printf(m, "%s:", data->name); 5548 5549 seq_puts(m, "keys="); 5550 5551 for_each_hist_key_field(i, hist_data) { 5552 field = hist_data->fields[i]; 5553 5554 if (i > hist_data->n_vals) 5555 seq_puts(m, ","); 5556 5557 if (field->flags & HIST_FIELD_FL_STACKTRACE) 5558 seq_puts(m, "stacktrace"); 5559 else 5560 hist_field_print(m, field); 5561 } 5562 5563 seq_puts(m, ":vals="); 5564 5565 for_each_hist_val_field(i, hist_data) { 5566 field = hist_data->fields[i]; 5567 if (field->flags & HIST_FIELD_FL_VAR) { 5568 have_var = true; 5569 continue; 5570 } 5571 5572 if (i == HITCOUNT_IDX) 5573 seq_puts(m, "hitcount"); 5574 else { 5575 seq_puts(m, ","); 5576 hist_field_print(m, field); 5577 } 5578 } 5579 5580 if (have_var) { 5581 unsigned int n = 0; 5582 5583 seq_puts(m, ":"); 5584 5585 for_each_hist_val_field(i, hist_data) { 5586 field = hist_data->fields[i]; 5587 5588 if (field->flags & HIST_FIELD_FL_VAR) { 5589 if (n++) 5590 seq_puts(m, ","); 5591 hist_field_print(m, field); 5592 } 5593 } 5594 } 5595 5596 seq_puts(m, ":sort="); 5597 5598 for (i = 0; i < hist_data->n_sort_keys; i++) { 5599 struct tracing_map_sort_key 
*sort_key; 5600 unsigned int idx, first_key_idx; 5601 5602 /* skip VAR vals */ 5603 first_key_idx = hist_data->n_vals - hist_data->n_vars; 5604 5605 sort_key = &hist_data->sort_keys[i]; 5606 idx = sort_key->field_idx; 5607 5608 if (WARN_ON(idx >= HIST_FIELDS_MAX)) 5609 return -EINVAL; 5610 5611 if (i > 0) 5612 seq_puts(m, ","); 5613 5614 if (idx == HITCOUNT_IDX) 5615 seq_puts(m, "hitcount"); 5616 else { 5617 if (idx >= first_key_idx) 5618 idx += hist_data->n_vars; 5619 hist_field_print(m, hist_data->fields[idx]); 5620 } 5621 5622 if (sort_key->descending) 5623 seq_puts(m, ".descending"); 5624 } 5625 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits)); 5626 if (hist_data->enable_timestamps) 5627 seq_printf(m, ":clock=%s", hist_data->attrs->clock); 5628 5629 print_actions_spec(m, hist_data); 5630 5631 if (data->filter_str) 5632 seq_printf(m, " if %s", data->filter_str); 5633 5634 if (data->paused) 5635 seq_puts(m, " [paused]"); 5636 else 5637 seq_puts(m, " [active]"); 5638 5639 seq_putc(m, '\n'); 5640 5641 return 0; 5642 } 5643 5644 static int event_hist_trigger_init(struct event_trigger_ops *ops, 5645 struct event_trigger_data *data) 5646 { 5647 struct hist_trigger_data *hist_data = data->private_data; 5648 5649 if (!data->ref && hist_data->attrs->name) 5650 save_named_trigger(hist_data->attrs->name, data); 5651 5652 data->ref++; 5653 5654 return 0; 5655 } 5656 5657 static void unregister_field_var_hists(struct hist_trigger_data *hist_data) 5658 { 5659 struct trace_event_file *file; 5660 unsigned int i; 5661 char *cmd; 5662 int ret; 5663 5664 for (i = 0; i < hist_data->n_field_var_hists; i++) { 5665 file = hist_data->field_var_hists[i]->hist_data->event_file; 5666 cmd = hist_data->field_var_hists[i]->cmd; 5667 ret = event_hist_trigger_func(&trigger_hist_cmd, file, 5668 "!hist", "hist", cmd); 5669 } 5670 } 5671 5672 static void event_hist_trigger_free(struct event_trigger_ops *ops, 5673 struct event_trigger_data *data) 5674 { 5675 struct hist_trigger_data *hist_data = data->private_data; 5676 5677 if (WARN_ON_ONCE(data->ref <= 0)) 5678 return; 5679 5680 data->ref--; 5681 if (!data->ref) { 5682 if (data->name) 5683 del_named_trigger(data); 5684 5685 trigger_data_free(data); 5686 5687 remove_hist_vars(hist_data); 5688 5689 unregister_field_var_hists(hist_data); 5690 5691 destroy_hist_data(hist_data); 5692 } 5693 } 5694 5695 static struct event_trigger_ops event_hist_trigger_ops = { 5696 .func = event_hist_trigger, 5697 .print = event_hist_trigger_print, 5698 .init = event_hist_trigger_init, 5699 .free = event_hist_trigger_free, 5700 }; 5701 5702 static int event_hist_trigger_named_init(struct event_trigger_ops *ops, 5703 struct event_trigger_data *data) 5704 { 5705 data->ref++; 5706 5707 save_named_trigger(data->named_data->name, data); 5708 5709 event_hist_trigger_init(ops, data->named_data); 5710 5711 return 0; 5712 } 5713 5714 static void event_hist_trigger_named_free(struct event_trigger_ops *ops, 5715 struct event_trigger_data *data) 5716 { 5717 if (WARN_ON_ONCE(data->ref <= 0)) 5718 return; 5719 5720 event_hist_trigger_free(ops, data->named_data); 5721 5722 data->ref--; 5723 if (!data->ref) { 5724 del_named_trigger(data); 5725 trigger_data_free(data); 5726 } 5727 } 5728 5729 static struct event_trigger_ops event_hist_trigger_named_ops = { 5730 .func = event_hist_trigger, 5731 .print = event_hist_trigger_print, 5732 .init = event_hist_trigger_named_init, 5733 .free = event_hist_trigger_named_free, 5734 }; 5735 5736 static struct event_trigger_ops *event_hist_get_trigger_ops(char 
*cmd, 5737 char *param) 5738 { 5739 return &event_hist_trigger_ops; 5740 } 5741 5742 static void hist_clear(struct event_trigger_data *data) 5743 { 5744 struct hist_trigger_data *hist_data = data->private_data; 5745 5746 if (data->name) 5747 pause_named_trigger(data); 5748 5749 tracepoint_synchronize_unregister(); 5750 5751 tracing_map_clear(hist_data->map); 5752 5753 if (data->name) 5754 unpause_named_trigger(data); 5755 } 5756 5757 static bool compatible_field(struct ftrace_event_field *field, 5758 struct ftrace_event_field *test_field) 5759 { 5760 if (field == test_field) 5761 return true; 5762 if (field == NULL || test_field == NULL) 5763 return false; 5764 if (strcmp(field->name, test_field->name) != 0) 5765 return false; 5766 if (strcmp(field->type, test_field->type) != 0) 5767 return false; 5768 if (field->size != test_field->size) 5769 return false; 5770 if (field->is_signed != test_field->is_signed) 5771 return false; 5772 5773 return true; 5774 } 5775 5776 static bool hist_trigger_match(struct event_trigger_data *data, 5777 struct event_trigger_data *data_test, 5778 struct event_trigger_data *named_data, 5779 bool ignore_filter) 5780 { 5781 struct tracing_map_sort_key *sort_key, *sort_key_test; 5782 struct hist_trigger_data *hist_data, *hist_data_test; 5783 struct hist_field *key_field, *key_field_test; 5784 unsigned int i; 5785 5786 if (named_data && (named_data != data_test) && 5787 (named_data != data_test->named_data)) 5788 return false; 5789 5790 if (!named_data && is_named_trigger(data_test)) 5791 return false; 5792 5793 hist_data = data->private_data; 5794 hist_data_test = data_test->private_data; 5795 5796 if (hist_data->n_vals != hist_data_test->n_vals || 5797 hist_data->n_fields != hist_data_test->n_fields || 5798 hist_data->n_sort_keys != hist_data_test->n_sort_keys) 5799 return false; 5800 5801 if (!ignore_filter) { 5802 if ((data->filter_str && !data_test->filter_str) || 5803 (!data->filter_str && data_test->filter_str)) 5804 return false; 5805 } 5806 5807 for_each_hist_field(i, hist_data) { 5808 key_field = hist_data->fields[i]; 5809 key_field_test = hist_data_test->fields[i]; 5810 5811 if (key_field->flags != key_field_test->flags) 5812 return false; 5813 if (!compatible_field(key_field->field, key_field_test->field)) 5814 return false; 5815 if (key_field->offset != key_field_test->offset) 5816 return false; 5817 if (key_field->size != key_field_test->size) 5818 return false; 5819 if (key_field->is_signed != key_field_test->is_signed) 5820 return false; 5821 if (!!key_field->var.name != !!key_field_test->var.name) 5822 return false; 5823 if (key_field->var.name && 5824 strcmp(key_field->var.name, key_field_test->var.name) != 0) 5825 return false; 5826 } 5827 5828 for (i = 0; i < hist_data->n_sort_keys; i++) { 5829 sort_key = &hist_data->sort_keys[i]; 5830 sort_key_test = &hist_data_test->sort_keys[i]; 5831 5832 if (sort_key->field_idx != sort_key_test->field_idx || 5833 sort_key->descending != sort_key_test->descending) 5834 return false; 5835 } 5836 5837 if (!ignore_filter && data->filter_str && 5838 (strcmp(data->filter_str, data_test->filter_str) != 0)) 5839 return false; 5840 5841 if (!actions_match(hist_data, hist_data_test)) 5842 return false; 5843 5844 return true; 5845 } 5846 5847 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops, 5848 struct event_trigger_data *data, 5849 struct trace_event_file *file) 5850 { 5851 struct hist_trigger_data *hist_data = data->private_data; 5852 struct event_trigger_data *test, *named_data = NULL; 
5853 struct trace_array *tr = file->tr; 5854 int ret = 0; 5855 5856 if (hist_data->attrs->name) { 5857 named_data = find_named_trigger(hist_data->attrs->name); 5858 if (named_data) { 5859 if (!hist_trigger_match(data, named_data, named_data, 5860 true)) { 5861 hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name)); 5862 ret = -EINVAL; 5863 goto out; 5864 } 5865 } 5866 } 5867 5868 if (hist_data->attrs->name && !named_data) 5869 goto new; 5870 5871 list_for_each_entry_rcu(test, &file->triggers, list) { 5872 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5873 if (!hist_trigger_match(data, test, named_data, false)) 5874 continue; 5875 if (hist_data->attrs->pause) 5876 test->paused = true; 5877 else if (hist_data->attrs->cont) 5878 test->paused = false; 5879 else if (hist_data->attrs->clear) 5880 hist_clear(test); 5881 else { 5882 hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0); 5883 ret = -EEXIST; 5884 } 5885 goto out; 5886 } 5887 } 5888 new: 5889 if (hist_data->attrs->cont || hist_data->attrs->clear) { 5890 hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0); 5891 ret = -ENOENT; 5892 goto out; 5893 } 5894 5895 if (hist_data->attrs->pause) 5896 data->paused = true; 5897 5898 if (named_data) { 5899 data->private_data = named_data->private_data; 5900 set_named_trigger_data(data, named_data); 5901 data->ops = &event_hist_trigger_named_ops; 5902 } 5903 5904 if (data->ops->init) { 5905 ret = data->ops->init(data->ops, data); 5906 if (ret < 0) 5907 goto out; 5908 } 5909 5910 if (hist_data->enable_timestamps) { 5911 char *clock = hist_data->attrs->clock; 5912 5913 ret = tracing_set_clock(file->tr, hist_data->attrs->clock); 5914 if (ret) { 5915 hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock)); 5916 goto out; 5917 } 5918 5919 tracing_set_time_stamp_abs(file->tr, true); 5920 } 5921 5922 if (named_data) 5923 destroy_hist_data(hist_data); 5924 5925 ret++; 5926 out: 5927 return ret; 5928 } 5929 5930 static int hist_trigger_enable(struct event_trigger_data *data, 5931 struct trace_event_file *file) 5932 { 5933 int ret = 0; 5934 5935 list_add_tail_rcu(&data->list, &file->triggers); 5936 5937 update_cond_flag(file); 5938 5939 if (trace_event_trigger_enable_disable(file, 1) < 0) { 5940 list_del_rcu(&data->list); 5941 update_cond_flag(file); 5942 ret--; 5943 } 5944 5945 return ret; 5946 } 5947 5948 static bool have_hist_trigger_match(struct event_trigger_data *data, 5949 struct trace_event_file *file) 5950 { 5951 struct hist_trigger_data *hist_data = data->private_data; 5952 struct event_trigger_data *test, *named_data = NULL; 5953 bool match = false; 5954 5955 if (hist_data->attrs->name) 5956 named_data = find_named_trigger(hist_data->attrs->name); 5957 5958 list_for_each_entry_rcu(test, &file->triggers, list) { 5959 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5960 if (hist_trigger_match(data, test, named_data, false)) { 5961 match = true; 5962 break; 5963 } 5964 } 5965 } 5966 5967 return match; 5968 } 5969 5970 static bool hist_trigger_check_refs(struct event_trigger_data *data, 5971 struct trace_event_file *file) 5972 { 5973 struct hist_trigger_data *hist_data = data->private_data; 5974 struct event_trigger_data *test, *named_data = NULL; 5975 5976 if (hist_data->attrs->name) 5977 named_data = find_named_trigger(hist_data->attrs->name); 5978 5979 list_for_each_entry_rcu(test, &file->triggers, list) { 5980 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5981 if (!hist_trigger_match(data, test, named_data, false)) 5982 continue; 5983 hist_data = test->private_data; 5984 if 
(check_var_refs(hist_data)) 5985 return true; 5986 break; 5987 } 5988 } 5989 5990 return false; 5991 } 5992 5993 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops, 5994 struct event_trigger_data *data, 5995 struct trace_event_file *file) 5996 { 5997 struct hist_trigger_data *hist_data = data->private_data; 5998 struct event_trigger_data *test, *named_data = NULL; 5999 bool unregistered = false; 6000 6001 if (hist_data->attrs->name) 6002 named_data = find_named_trigger(hist_data->attrs->name); 6003 6004 list_for_each_entry_rcu(test, &file->triggers, list) { 6005 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6006 if (!hist_trigger_match(data, test, named_data, false)) 6007 continue; 6008 unregistered = true; 6009 list_del_rcu(&test->list); 6010 trace_event_trigger_enable_disable(file, 0); 6011 update_cond_flag(file); 6012 break; 6013 } 6014 } 6015 6016 if (unregistered && test->ops->free) 6017 test->ops->free(test->ops, test); 6018 6019 if (hist_data->enable_timestamps) { 6020 if (!hist_data->remove || unregistered) 6021 tracing_set_time_stamp_abs(file->tr, false); 6022 } 6023 } 6024 6025 static bool hist_file_check_refs(struct trace_event_file *file) 6026 { 6027 struct hist_trigger_data *hist_data; 6028 struct event_trigger_data *test; 6029 6030 list_for_each_entry_rcu(test, &file->triggers, list) { 6031 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6032 hist_data = test->private_data; 6033 if (check_var_refs(hist_data)) 6034 return true; 6035 } 6036 } 6037 6038 return false; 6039 } 6040 6041 static void hist_unreg_all(struct trace_event_file *file) 6042 { 6043 struct event_trigger_data *test, *n; 6044 struct hist_trigger_data *hist_data; 6045 struct synth_event *se; 6046 const char *se_name; 6047 6048 lockdep_assert_held(&event_mutex); 6049 6050 if (hist_file_check_refs(file)) 6051 return; 6052 6053 list_for_each_entry_safe(test, n, &file->triggers, list) { 6054 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6055 hist_data = test->private_data; 6056 list_del_rcu(&test->list); 6057 trace_event_trigger_enable_disable(file, 0); 6058 6059 se_name = trace_event_name(file->event_call); 6060 se = find_synth_event(se_name); 6061 if (se) 6062 se->ref--; 6063 6064 update_cond_flag(file); 6065 if (hist_data->enable_timestamps) 6066 tracing_set_time_stamp_abs(file->tr, false); 6067 if (test->ops->free) 6068 test->ops->free(test->ops, test); 6069 } 6070 } 6071 } 6072 6073 static int event_hist_trigger_func(struct event_command *cmd_ops, 6074 struct trace_event_file *file, 6075 char *glob, char *cmd, char *param) 6076 { 6077 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT; 6078 struct event_trigger_data *trigger_data; 6079 struct hist_trigger_attrs *attrs; 6080 struct event_trigger_ops *trigger_ops; 6081 struct hist_trigger_data *hist_data; 6082 struct synth_event *se; 6083 const char *se_name; 6084 bool remove = false; 6085 char *trigger, *p; 6086 int ret = 0; 6087 6088 lockdep_assert_held(&event_mutex); 6089 6090 if (glob && strlen(glob)) { 6091 hist_err_clear(); 6092 last_cmd_set(file, param); 6093 } 6094 6095 if (!param) 6096 return -EINVAL; 6097 6098 if (glob[0] == '!') 6099 remove = true; 6100 6101 /* 6102 * separate the trigger from the filter (k:v [if filter]) 6103 * allowing for whitespace in the trigger 6104 */ 6105 p = trigger = param; 6106 do { 6107 p = strstr(p, "if"); 6108 if (!p) 6109 break; 6110 if (p == param) 6111 return -EINVAL; 6112 if (*(p - 1) != ' ' && *(p - 1) != '\t') { 6113 p++; 6114 continue; 6115 } 6116 if (p >= param 
+ strlen(param) - (sizeof("if") - 1) - 1) 6117 return -EINVAL; 6118 if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') { 6119 p++; 6120 continue; 6121 } 6122 break; 6123 } while (p); 6124 6125 if (!p) 6126 param = NULL; 6127 else { 6128 *(p - 1) = '\0'; 6129 param = strstrip(p); 6130 trigger = strstrip(trigger); 6131 } 6132 6133 attrs = parse_hist_trigger_attrs(file->tr, trigger); 6134 if (IS_ERR(attrs)) 6135 return PTR_ERR(attrs); 6136 6137 if (attrs->map_bits) 6138 hist_trigger_bits = attrs->map_bits; 6139 6140 hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove); 6141 if (IS_ERR(hist_data)) { 6142 destroy_hist_trigger_attrs(attrs); 6143 return PTR_ERR(hist_data); 6144 } 6145 6146 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); 6147 6148 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL); 6149 if (!trigger_data) { 6150 ret = -ENOMEM; 6151 goto out_free; 6152 } 6153 6154 trigger_data->count = -1; 6155 trigger_data->ops = trigger_ops; 6156 trigger_data->cmd_ops = cmd_ops; 6157 6158 INIT_LIST_HEAD(&trigger_data->list); 6159 RCU_INIT_POINTER(trigger_data->filter, NULL); 6160 6161 trigger_data->private_data = hist_data; 6162 6163 /* if param is non-empty, it's supposed to be a filter */ 6164 if (param && cmd_ops->set_filter) { 6165 ret = cmd_ops->set_filter(param, trigger_data, file); 6166 if (ret < 0) 6167 goto out_free; 6168 } 6169 6170 if (remove) { 6171 if (!have_hist_trigger_match(trigger_data, file)) 6172 goto out_free; 6173 6174 if (hist_trigger_check_refs(trigger_data, file)) { 6175 ret = -EBUSY; 6176 goto out_free; 6177 } 6178 6179 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file); 6180 se_name = trace_event_name(file->event_call); 6181 se = find_synth_event(se_name); 6182 if (se) 6183 se->ref--; 6184 ret = 0; 6185 goto out_free; 6186 } 6187 6188 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); 6189 /* 6190 * The above returns on success the # of triggers registered, 6191 * but if it didn't register any it returns zero. Consider no 6192 * triggers registered a failure too. 
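	 * A zero return is expected, though, when the command only
	 * paused, continued, or cleared an existing trigger, which is
	 * why attrs->pause/cont/clear are checked below before turning
	 * zero into -ENOENT.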
6193 */ 6194 if (!ret) { 6195 if (!(attrs->pause || attrs->cont || attrs->clear)) 6196 ret = -ENOENT; 6197 goto out_free; 6198 } else if (ret < 0) 6199 goto out_free; 6200 6201 if (get_named_trigger_data(trigger_data)) 6202 goto enable; 6203 6204 if (has_hist_vars(hist_data)) 6205 save_hist_vars(hist_data); 6206 6207 ret = create_actions(hist_data); 6208 if (ret) 6209 goto out_unreg; 6210 6211 ret = tracing_map_init(hist_data->map); 6212 if (ret) 6213 goto out_unreg; 6214 enable: 6215 ret = hist_trigger_enable(trigger_data, file); 6216 if (ret) 6217 goto out_unreg; 6218 6219 se_name = trace_event_name(file->event_call); 6220 se = find_synth_event(se_name); 6221 if (se) 6222 se->ref++; 6223 /* Just return zero, not the number of registered triggers */ 6224 ret = 0; 6225 out: 6226 if (ret == 0) 6227 hist_err_clear(); 6228 6229 return ret; 6230 out_unreg: 6231 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file); 6232 out_free: 6233 if (cmd_ops->set_filter) 6234 cmd_ops->set_filter(NULL, trigger_data, NULL); 6235 6236 remove_hist_vars(hist_data); 6237 6238 kfree(trigger_data); 6239 6240 destroy_hist_data(hist_data); 6241 goto out; 6242 } 6243 6244 static struct event_command trigger_hist_cmd = { 6245 .name = "hist", 6246 .trigger_type = ETT_EVENT_HIST, 6247 .flags = EVENT_CMD_FL_NEEDS_REC, 6248 .func = event_hist_trigger_func, 6249 .reg = hist_register_trigger, 6250 .unreg = hist_unregister_trigger, 6251 .unreg_all = hist_unreg_all, 6252 .get_trigger_ops = event_hist_get_trigger_ops, 6253 .set_filter = set_trigger_filter, 6254 }; 6255 6256 __init int register_trigger_hist_cmd(void) 6257 { 6258 int ret; 6259 6260 ret = register_event_command(&trigger_hist_cmd); 6261 WARN_ON(ret < 0); 6262 6263 return ret; 6264 } 6265 6266 static void 6267 hist_enable_trigger(struct event_trigger_data *data, void *rec, 6268 struct ring_buffer_event *event) 6269 { 6270 struct enable_trigger_data *enable_data = data->private_data; 6271 struct event_trigger_data *test; 6272 6273 list_for_each_entry_rcu(test, &enable_data->file->triggers, list) { 6274 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6275 if (enable_data->enable) 6276 test->paused = false; 6277 else 6278 test->paused = true; 6279 } 6280 } 6281 } 6282 6283 static void 6284 hist_enable_count_trigger(struct event_trigger_data *data, void *rec, 6285 struct ring_buffer_event *event) 6286 { 6287 if (!data->count) 6288 return; 6289 6290 if (data->count != -1) 6291 (data->count)--; 6292 6293 hist_enable_trigger(data, rec, event); 6294 } 6295 6296 static struct event_trigger_ops hist_enable_trigger_ops = { 6297 .func = hist_enable_trigger, 6298 .print = event_enable_trigger_print, 6299 .init = event_trigger_init, 6300 .free = event_enable_trigger_free, 6301 }; 6302 6303 static struct event_trigger_ops hist_enable_count_trigger_ops = { 6304 .func = hist_enable_count_trigger, 6305 .print = event_enable_trigger_print, 6306 .init = event_trigger_init, 6307 .free = event_enable_trigger_free, 6308 }; 6309 6310 static struct event_trigger_ops hist_disable_trigger_ops = { 6311 .func = hist_enable_trigger, 6312 .print = event_enable_trigger_print, 6313 .init = event_trigger_init, 6314 .free = event_enable_trigger_free, 6315 }; 6316 6317 static struct event_trigger_ops hist_disable_count_trigger_ops = { 6318 .func = hist_enable_count_trigger, 6319 .print = event_enable_trigger_print, 6320 .init = event_trigger_init, 6321 .free = event_enable_trigger_free, 6322 }; 6323 6324 static struct event_trigger_ops * 6325 hist_enable_get_trigger_ops(char *cmd, char 
*param) 6326 { 6327 struct event_trigger_ops *ops; 6328 bool enable; 6329 6330 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0); 6331 6332 if (enable) 6333 ops = param ? &hist_enable_count_trigger_ops : 6334 &hist_enable_trigger_ops; 6335 else 6336 ops = param ? &hist_disable_count_trigger_ops : 6337 &hist_disable_trigger_ops; 6338 6339 return ops; 6340 } 6341 6342 static void hist_enable_unreg_all(struct trace_event_file *file) 6343 { 6344 struct event_trigger_data *test, *n; 6345 6346 list_for_each_entry_safe(test, n, &file->triggers, list) { 6347 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) { 6348 list_del_rcu(&test->list); 6349 update_cond_flag(file); 6350 trace_event_trigger_enable_disable(file, 0); 6351 if (test->ops->free) 6352 test->ops->free(test->ops, test); 6353 } 6354 } 6355 } 6356 6357 static struct event_command trigger_hist_enable_cmd = { 6358 .name = ENABLE_HIST_STR, 6359 .trigger_type = ETT_HIST_ENABLE, 6360 .func = event_enable_trigger_func, 6361 .reg = event_enable_register_trigger, 6362 .unreg = event_enable_unregister_trigger, 6363 .unreg_all = hist_enable_unreg_all, 6364 .get_trigger_ops = hist_enable_get_trigger_ops, 6365 .set_filter = set_trigger_filter, 6366 }; 6367 6368 static struct event_command trigger_hist_disable_cmd = { 6369 .name = DISABLE_HIST_STR, 6370 .trigger_type = ETT_HIST_ENABLE, 6371 .func = event_enable_trigger_func, 6372 .reg = event_enable_register_trigger, 6373 .unreg = event_enable_unregister_trigger, 6374 .unreg_all = hist_enable_unreg_all, 6375 .get_trigger_ops = hist_enable_get_trigger_ops, 6376 .set_filter = set_trigger_filter, 6377 }; 6378 6379 static __init void unregister_trigger_hist_enable_disable_cmds(void) 6380 { 6381 unregister_event_command(&trigger_hist_enable_cmd); 6382 unregister_event_command(&trigger_hist_disable_cmd); 6383 } 6384 6385 __init int register_trigger_hist_enable_disable_cmds(void) 6386 { 6387 int ret; 6388 6389 ret = register_event_command(&trigger_hist_enable_cmd); 6390 if (WARN_ON(ret < 0)) 6391 return ret; 6392 ret = register_event_command(&trigger_hist_disable_cmd); 6393 if (WARN_ON(ret < 0)) 6394 unregister_trigger_hist_enable_disable_cmds(); 6395 6396 return ret; 6397 } 6398 6399 static __init int trace_events_hist_init(void) 6400 { 6401 struct dentry *entry = NULL; 6402 struct dentry *d_tracer; 6403 int err = 0; 6404 6405 err = dyn_event_register(&synth_event_ops); 6406 if (err) { 6407 pr_warn("Could not register synth_event_ops\n"); 6408 return err; 6409 } 6410 6411 d_tracer = tracing_init_dentry(); 6412 if (IS_ERR(d_tracer)) { 6413 err = PTR_ERR(d_tracer); 6414 goto err; 6415 } 6416 6417 entry = tracefs_create_file("synthetic_events", 0644, d_tracer, 6418 NULL, &synth_events_fops); 6419 if (!entry) { 6420 err = -ENODEV; 6421 goto err; 6422 } 6423 6424 return err; 6425 err: 6426 pr_warn("Could not create tracefs 'synthetic_events' entry\n"); 6427 6428 return err; 6429 } 6430 6431 fs_initcall(trace_events_hist_init); 6432
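/*
 * Illustrative usage, adapted from Documentation/trace/histogram.rst.
 * The event names, fields, and relative paths below are examples only
 * and depend on the kernel configuration and tracefs mount point (run
 * from the tracing directory):
 *
 *   # echo 'hist:keys=pid:vals=hitcount:sort=hitcount.descending' > events/sched/sched_wakeup/trigger
 *
 *   # echo 'wakeup_latency u64 lat; pid_t pid' > synthetic_events
 *   # echo 'hist:keys=pid:ts0=common_timestamp.usecs' > events/sched/sched_waking/trigger
 *   # echo 'hist:keys=next_pid:lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).trace(wakeup_latency,$lat,next_pid)' > events/sched/sched_switch/trigger
 */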