1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * trace_events_hist - trace event hist triggers 4 * 5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com> 6 */ 7 8 #include <linux/module.h> 9 #include <linux/kallsyms.h> 10 #include <linux/security.h> 11 #include <linux/mutex.h> 12 #include <linux/slab.h> 13 #include <linux/stacktrace.h> 14 #include <linux/rculist.h> 15 #include <linux/tracefs.h> 16 17 /* for gfp flag names */ 18 #include <linux/trace_events.h> 19 #include <trace/events/mmflags.h> 20 21 #include "tracing_map.h" 22 #include "trace_synth.h" 23 24 #define ERRORS \ 25 C(NONE, "No error"), \ 26 C(DUPLICATE_VAR, "Variable already defined"), \ 27 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \ 28 C(TOO_MANY_VARS, "Too many variables defined"), \ 29 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \ 30 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \ 31 C(TRIGGER_EEXIST, "Hist trigger already exists"), \ 32 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \ 33 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \ 34 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \ 35 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \ 36 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \ 37 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \ 38 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \ 39 C(HIST_NOT_FOUND, "Matching event histogram not found"), \ 40 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \ 41 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \ 42 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \ 43 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \ 44 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \ 45 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \ 46 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \ 47 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \ 48 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \ 49 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \ 50 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \ 51 C(TOO_MANY_PARAMS, "Too many action params"), \ 52 C(PARAM_NOT_FOUND, "Couldn't find param"), \ 53 C(INVALID_PARAM, "Invalid action param"), \ 54 C(ACTION_NOT_FOUND, "No action found"), \ 55 C(NO_SAVE_PARAMS, "No params found for save()"), \ 56 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \ 57 C(ACTION_MISMATCH, "Handler doesn't support action"), \ 58 C(NO_CLOSING_PAREN, "No closing paren found"), \ 59 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \ 60 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \ 61 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \ 62 C(VAR_NOT_FOUND, "Couldn't find variable"), \ 63 C(FIELD_NOT_FOUND, "Couldn't find field"), \ 64 C(EMPTY_ASSIGNMENT, "Empty assignment"), \ 65 C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \ 66 C(EMPTY_SORT_FIELD, "Empty sort field"), \ 67 C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \ 68 C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), 69 70 #undef C 71 #define C(a, b) HIST_ERR_##a 72 73 enum { ERRORS }; 74 75 #undef C 76 #define C(a, b) b 77 78 static const char *err_text[] = { ERRORS }; 79 80 struct hist_field; 81 82 typedef u64 (*hist_field_fn_t) 
(struct hist_field *field, 83 struct tracing_map_elt *elt, 84 struct ring_buffer_event *rbe, 85 void *event); 86 87 #define HIST_FIELD_OPERANDS_MAX 2 88 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX) 89 #define HIST_ACTIONS_MAX 8 90 91 enum field_op_id { 92 FIELD_OP_NONE, 93 FIELD_OP_PLUS, 94 FIELD_OP_MINUS, 95 FIELD_OP_UNARY_MINUS, 96 }; 97 98 /* 99 * A hist_var (histogram variable) contains variable information for 100 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF 101 * flag set. A hist_var has a variable name e.g. ts0, and is 102 * associated with a given histogram trigger, as specified by 103 * hist_data. The hist_var idx is the unique index assigned to the 104 * variable by the hist trigger's tracing_map. The idx is what is 105 * used to set a variable's value and, by a variable reference, to 106 * retrieve it. 107 */ 108 struct hist_var { 109 char *name; 110 struct hist_trigger_data *hist_data; 111 unsigned int idx; 112 }; 113 114 struct hist_field { 115 struct ftrace_event_field *field; 116 unsigned long flags; 117 hist_field_fn_t fn; 118 unsigned int ref; 119 unsigned int size; 120 unsigned int offset; 121 unsigned int is_signed; 122 const char *type; 123 struct hist_field *operands[HIST_FIELD_OPERANDS_MAX]; 124 struct hist_trigger_data *hist_data; 125 126 /* 127 * Variable fields contain variable-specific info in var. 128 */ 129 struct hist_var var; 130 enum field_op_id operator; 131 char *system; 132 char *event_name; 133 134 /* 135 * The name field is used for EXPR and VAR_REF fields. VAR 136 * fields contain the variable name in var.name. 137 */ 138 char *name; 139 140 /* 141 * When a histogram trigger is hit, if it has any references 142 * to variables, the values of those variables are collected 143 * into a var_ref_vals array by resolve_var_refs(). The 144 * current value of each variable is read from the tracing_map 145 * using the hist field's hist_var.idx and entered into the 146 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx]. 
147 */ 148 unsigned int var_ref_idx; 149 bool read_once; 150 151 unsigned int var_str_idx; 152 }; 153 154 static u64 hist_field_none(struct hist_field *field, 155 struct tracing_map_elt *elt, 156 struct ring_buffer_event *rbe, 157 void *event) 158 { 159 return 0; 160 } 161 162 static u64 hist_field_counter(struct hist_field *field, 163 struct tracing_map_elt *elt, 164 struct ring_buffer_event *rbe, 165 void *event) 166 { 167 return 1; 168 } 169 170 static u64 hist_field_string(struct hist_field *hist_field, 171 struct tracing_map_elt *elt, 172 struct ring_buffer_event *rbe, 173 void *event) 174 { 175 char *addr = (char *)(event + hist_field->field->offset); 176 177 return (u64)(unsigned long)addr; 178 } 179 180 static u64 hist_field_dynstring(struct hist_field *hist_field, 181 struct tracing_map_elt *elt, 182 struct ring_buffer_event *rbe, 183 void *event) 184 { 185 u32 str_item = *(u32 *)(event + hist_field->field->offset); 186 int str_loc = str_item & 0xffff; 187 char *addr = (char *)(event + str_loc); 188 189 return (u64)(unsigned long)addr; 190 } 191 192 static u64 hist_field_pstring(struct hist_field *hist_field, 193 struct tracing_map_elt *elt, 194 struct ring_buffer_event *rbe, 195 void *event) 196 { 197 char **addr = (char **)(event + hist_field->field->offset); 198 199 return (u64)(unsigned long)*addr; 200 } 201 202 static u64 hist_field_log2(struct hist_field *hist_field, 203 struct tracing_map_elt *elt, 204 struct ring_buffer_event *rbe, 205 void *event) 206 { 207 struct hist_field *operand = hist_field->operands[0]; 208 209 u64 val = operand->fn(operand, elt, rbe, event); 210 211 return (u64) ilog2(roundup_pow_of_two(val)); 212 } 213 214 static u64 hist_field_plus(struct hist_field *hist_field, 215 struct tracing_map_elt *elt, 216 struct ring_buffer_event *rbe, 217 void *event) 218 { 219 struct hist_field *operand1 = hist_field->operands[0]; 220 struct hist_field *operand2 = hist_field->operands[1]; 221 222 u64 val1 = operand1->fn(operand1, elt, rbe, event); 223 u64 val2 = operand2->fn(operand2, elt, rbe, event); 224 225 return val1 + val2; 226 } 227 228 static u64 hist_field_minus(struct hist_field *hist_field, 229 struct tracing_map_elt *elt, 230 struct ring_buffer_event *rbe, 231 void *event) 232 { 233 struct hist_field *operand1 = hist_field->operands[0]; 234 struct hist_field *operand2 = hist_field->operands[1]; 235 236 u64 val1 = operand1->fn(operand1, elt, rbe, event); 237 u64 val2 = operand2->fn(operand2, elt, rbe, event); 238 239 return val1 - val2; 240 } 241 242 static u64 hist_field_unary_minus(struct hist_field *hist_field, 243 struct tracing_map_elt *elt, 244 struct ring_buffer_event *rbe, 245 void *event) 246 { 247 struct hist_field *operand = hist_field->operands[0]; 248 249 s64 sval = (s64)operand->fn(operand, elt, rbe, event); 250 u64 val = (u64)-sval; 251 252 return val; 253 } 254 255 #define DEFINE_HIST_FIELD_FN(type) \ 256 static u64 hist_field_##type(struct hist_field *hist_field, \ 257 struct tracing_map_elt *elt, \ 258 struct ring_buffer_event *rbe, \ 259 void *event) \ 260 { \ 261 type *addr = (type *)(event + hist_field->field->offset); \ 262 \ 263 return (u64)(unsigned long)*addr; \ 264 } 265 266 DEFINE_HIST_FIELD_FN(s64); 267 DEFINE_HIST_FIELD_FN(u64); 268 DEFINE_HIST_FIELD_FN(s32); 269 DEFINE_HIST_FIELD_FN(u32); 270 DEFINE_HIST_FIELD_FN(s16); 271 DEFINE_HIST_FIELD_FN(u16); 272 DEFINE_HIST_FIELD_FN(s8); 273 DEFINE_HIST_FIELD_FN(u8); 274 275 #define for_each_hist_field(i, hist_data) \ 276 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++) 277 278 
#define for_each_hist_val_field(i, hist_data) \ 279 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++) 280 281 #define for_each_hist_key_field(i, hist_data) \ 282 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++) 283 284 #define HIST_STACKTRACE_DEPTH 16 285 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long)) 286 #define HIST_STACKTRACE_SKIP 5 287 288 #define HITCOUNT_IDX 0 289 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE) 290 291 enum hist_field_flags { 292 HIST_FIELD_FL_HITCOUNT = 1 << 0, 293 HIST_FIELD_FL_KEY = 1 << 1, 294 HIST_FIELD_FL_STRING = 1 << 2, 295 HIST_FIELD_FL_HEX = 1 << 3, 296 HIST_FIELD_FL_SYM = 1 << 4, 297 HIST_FIELD_FL_SYM_OFFSET = 1 << 5, 298 HIST_FIELD_FL_EXECNAME = 1 << 6, 299 HIST_FIELD_FL_SYSCALL = 1 << 7, 300 HIST_FIELD_FL_STACKTRACE = 1 << 8, 301 HIST_FIELD_FL_LOG2 = 1 << 9, 302 HIST_FIELD_FL_TIMESTAMP = 1 << 10, 303 HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11, 304 HIST_FIELD_FL_VAR = 1 << 12, 305 HIST_FIELD_FL_EXPR = 1 << 13, 306 HIST_FIELD_FL_VAR_REF = 1 << 14, 307 HIST_FIELD_FL_CPU = 1 << 15, 308 HIST_FIELD_FL_ALIAS = 1 << 16, 309 }; 310 311 struct var_defs { 312 unsigned int n_vars; 313 char *name[TRACING_MAP_VARS_MAX]; 314 char *expr[TRACING_MAP_VARS_MAX]; 315 }; 316 317 struct hist_trigger_attrs { 318 char *keys_str; 319 char *vals_str; 320 char *sort_key_str; 321 char *name; 322 char *clock; 323 bool pause; 324 bool cont; 325 bool clear; 326 bool ts_in_usecs; 327 unsigned int map_bits; 328 329 char *assignment_str[TRACING_MAP_VARS_MAX]; 330 unsigned int n_assignments; 331 332 char *action_str[HIST_ACTIONS_MAX]; 333 unsigned int n_actions; 334 335 struct var_defs var_defs; 336 }; 337 338 struct field_var { 339 struct hist_field *var; 340 struct hist_field *val; 341 }; 342 343 struct field_var_hist { 344 struct hist_trigger_data *hist_data; 345 char *cmd; 346 }; 347 348 struct hist_trigger_data { 349 struct hist_field *fields[HIST_FIELDS_MAX]; 350 unsigned int n_vals; 351 unsigned int n_keys; 352 unsigned int n_fields; 353 unsigned int n_vars; 354 unsigned int n_var_str; 355 unsigned int key_size; 356 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX]; 357 unsigned int n_sort_keys; 358 struct trace_event_file *event_file; 359 struct hist_trigger_attrs *attrs; 360 struct tracing_map *map; 361 bool enable_timestamps; 362 bool remove; 363 struct hist_field *var_refs[TRACING_MAP_VARS_MAX]; 364 unsigned int n_var_refs; 365 366 struct action_data *actions[HIST_ACTIONS_MAX]; 367 unsigned int n_actions; 368 369 struct field_var *field_vars[SYNTH_FIELDS_MAX]; 370 unsigned int n_field_vars; 371 unsigned int n_field_var_str; 372 struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX]; 373 unsigned int n_field_var_hists; 374 375 struct field_var *save_vars[SYNTH_FIELDS_MAX]; 376 unsigned int n_save_vars; 377 unsigned int n_save_var_str; 378 }; 379 380 struct action_data; 381 382 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data, 383 struct tracing_map_elt *elt, void *rec, 384 struct ring_buffer_event *rbe, void *key, 385 struct action_data *data, u64 *var_ref_vals); 386 387 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val); 388 389 enum handler_id { 390 HANDLER_ONMATCH = 1, 391 HANDLER_ONMAX, 392 HANDLER_ONCHANGE, 393 }; 394 395 enum action_id { 396 ACTION_SAVE = 1, 397 ACTION_TRACE, 398 ACTION_SNAPSHOT, 399 }; 400 401 struct action_data { 402 enum handler_id handler; 403 enum action_id action; 404 char *action_name; 405 action_fn_t fn; 406 407 unsigned int n_params; 
408 char *params[SYNTH_FIELDS_MAX]; 409 410 /* 411 * When a histogram trigger is hit, the values of any 412 * references to variables, including variables being passed 413 * as parameters to synthetic events, are collected into a 414 * var_ref_vals array. This var_ref_idx array is an array of 415 * indices into the var_ref_vals array, one for each synthetic 416 * event param, and is passed to the synthetic event 417 * invocation. 418 */ 419 unsigned int var_ref_idx[TRACING_MAP_VARS_MAX]; 420 struct synth_event *synth_event; 421 bool use_trace_keyword; 422 char *synth_event_name; 423 424 union { 425 struct { 426 char *event; 427 char *event_system; 428 } match_data; 429 430 struct { 431 /* 432 * var_str contains the $-unstripped variable 433 * name referenced by var_ref, and used when 434 * printing the action. Because var_ref 435 * creation is deferred to create_actions(), 436 * we need a per-action way to save it until 437 * then, thus var_str. 438 */ 439 char *var_str; 440 441 /* 442 * var_ref refers to the variable being 443 * tracked e.g onmax($var). 444 */ 445 struct hist_field *var_ref; 446 447 /* 448 * track_var contains the 'invisible' tracking 449 * variable created to keep the current 450 * e.g. max value. 451 */ 452 struct hist_field *track_var; 453 454 check_track_val_fn_t check_val; 455 action_fn_t save_data; 456 } track_data; 457 }; 458 }; 459 460 struct track_data { 461 u64 track_val; 462 bool updated; 463 464 unsigned int key_len; 465 void *key; 466 struct tracing_map_elt elt; 467 468 struct action_data *action_data; 469 struct hist_trigger_data *hist_data; 470 }; 471 472 struct hist_elt_data { 473 char *comm; 474 u64 *var_ref_vals; 475 char *field_var_str[SYNTH_FIELDS_MAX]; 476 }; 477 478 struct snapshot_context { 479 struct tracing_map_elt *elt; 480 void *key; 481 }; 482 483 static void track_data_free(struct track_data *track_data) 484 { 485 struct hist_elt_data *elt_data; 486 487 if (!track_data) 488 return; 489 490 kfree(track_data->key); 491 492 elt_data = track_data->elt.private_data; 493 if (elt_data) { 494 kfree(elt_data->comm); 495 kfree(elt_data); 496 } 497 498 kfree(track_data); 499 } 500 501 static struct track_data *track_data_alloc(unsigned int key_len, 502 struct action_data *action_data, 503 struct hist_trigger_data *hist_data) 504 { 505 struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL); 506 struct hist_elt_data *elt_data; 507 508 if (!data) 509 return ERR_PTR(-ENOMEM); 510 511 data->key = kzalloc(key_len, GFP_KERNEL); 512 if (!data->key) { 513 track_data_free(data); 514 return ERR_PTR(-ENOMEM); 515 } 516 517 data->key_len = key_len; 518 data->action_data = action_data; 519 data->hist_data = hist_data; 520 521 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); 522 if (!elt_data) { 523 track_data_free(data); 524 return ERR_PTR(-ENOMEM); 525 } 526 527 data->elt.private_data = elt_data; 528 529 elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL); 530 if (!elt_data->comm) { 531 track_data_free(data); 532 return ERR_PTR(-ENOMEM); 533 } 534 535 return data; 536 } 537 538 static char last_cmd[MAX_FILTER_STR_VAL]; 539 static char last_cmd_loc[MAX_FILTER_STR_VAL]; 540 541 static int errpos(char *str) 542 { 543 return err_pos(last_cmd, str); 544 } 545 546 static void last_cmd_set(struct trace_event_file *file, char *str) 547 { 548 const char *system = NULL, *name = NULL; 549 struct trace_event_call *call; 550 551 if (!str) 552 return; 553 554 strcpy(last_cmd, "hist:"); 555 strncat(last_cmd, str, MAX_FILTER_STR_VAL - 1 - sizeof("hist:")); 556 557 if (file) 
{ 558 call = file->event_call; 559 system = call->class->system; 560 if (system) { 561 name = trace_event_name(call); 562 if (!name) 563 system = NULL; 564 } 565 } 566 567 if (system) 568 snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name); 569 } 570 571 static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos) 572 { 573 tracing_log_err(tr, last_cmd_loc, last_cmd, err_text, 574 err_type, err_pos); 575 } 576 577 static void hist_err_clear(void) 578 { 579 last_cmd[0] = '\0'; 580 last_cmd_loc[0] = '\0'; 581 } 582 583 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals, 584 unsigned int *var_ref_idx); 585 586 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals, 587 unsigned int *var_ref_idx) 588 { 589 struct tracepoint *tp = event->tp; 590 591 if (unlikely(atomic_read(&tp->key.enabled) > 0)) { 592 struct tracepoint_func *probe_func_ptr; 593 synth_probe_func_t probe_func; 594 void *__data; 595 596 if (!(cpu_online(raw_smp_processor_id()))) 597 return; 598 599 probe_func_ptr = rcu_dereference_sched((tp)->funcs); 600 if (probe_func_ptr) { 601 do { 602 probe_func = probe_func_ptr->func; 603 __data = probe_func_ptr->data; 604 probe_func(__data, var_ref_vals, var_ref_idx); 605 } while ((++probe_func_ptr)->func); 606 } 607 } 608 } 609 610 static void action_trace(struct hist_trigger_data *hist_data, 611 struct tracing_map_elt *elt, void *rec, 612 struct ring_buffer_event *rbe, void *key, 613 struct action_data *data, u64 *var_ref_vals) 614 { 615 struct synth_event *event = data->synth_event; 616 617 trace_synth(event, var_ref_vals, data->var_ref_idx); 618 } 619 620 struct hist_var_data { 621 struct list_head list; 622 struct hist_trigger_data *hist_data; 623 }; 624 625 static u64 hist_field_timestamp(struct hist_field *hist_field, 626 struct tracing_map_elt *elt, 627 struct ring_buffer_event *rbe, 628 void *event) 629 { 630 struct hist_trigger_data *hist_data = hist_field->hist_data; 631 struct trace_array *tr = hist_data->event_file->tr; 632 633 u64 ts = ring_buffer_event_time_stamp(rbe); 634 635 if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr)) 636 ts = ns2usecs(ts); 637 638 return ts; 639 } 640 641 static u64 hist_field_cpu(struct hist_field *hist_field, 642 struct tracing_map_elt *elt, 643 struct ring_buffer_event *rbe, 644 void *event) 645 { 646 int cpu = smp_processor_id(); 647 648 return cpu; 649 } 650 651 /** 652 * check_field_for_var_ref - Check if a VAR_REF field references a variable 653 * @hist_field: The VAR_REF field to check 654 * @var_data: The hist trigger that owns the variable 655 * @var_idx: The trigger variable identifier 656 * 657 * Check the given VAR_REF field to see whether or not it references 658 * the given variable associated with the given trigger. 
659 * 660 * Return: The VAR_REF field if it does reference the variable, NULL if not 661 */ 662 static struct hist_field * 663 check_field_for_var_ref(struct hist_field *hist_field, 664 struct hist_trigger_data *var_data, 665 unsigned int var_idx) 666 { 667 WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF)); 668 669 if (hist_field && hist_field->var.idx == var_idx && 670 hist_field->var.hist_data == var_data) 671 return hist_field; 672 673 return NULL; 674 } 675 676 /** 677 * find_var_ref - Check if a trigger has a reference to a trigger variable 678 * @hist_data: The hist trigger that might have a reference to the variable 679 * @var_data: The hist trigger that owns the variable 680 * @var_idx: The trigger variable identifier 681 * 682 * Check the list of var_refs[] on the first hist trigger to see 683 * whether any of them are references to the variable on the second 684 * trigger. 685 * 686 * Return: The VAR_REF field referencing the variable if so, NULL if not 687 */ 688 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data, 689 struct hist_trigger_data *var_data, 690 unsigned int var_idx) 691 { 692 struct hist_field *hist_field; 693 unsigned int i; 694 695 for (i = 0; i < hist_data->n_var_refs; i++) { 696 hist_field = hist_data->var_refs[i]; 697 if (check_field_for_var_ref(hist_field, var_data, var_idx)) 698 return hist_field; 699 } 700 701 return NULL; 702 } 703 704 /** 705 * find_any_var_ref - Check if there is a reference to a given trigger variable 706 * @hist_data: The hist trigger 707 * @var_idx: The trigger variable identifier 708 * 709 * Check to see whether the given variable is currently referenced by 710 * any other trigger. 711 * 712 * The trigger the variable is defined on is explicitly excluded - the 713 * assumption being that a self-reference doesn't prevent a trigger 714 * from being removed. 715 * 716 * Return: The VAR_REF field referencing the variable if so, NULL if not 717 */ 718 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data, 719 unsigned int var_idx) 720 { 721 struct trace_array *tr = hist_data->event_file->tr; 722 struct hist_field *found = NULL; 723 struct hist_var_data *var_data; 724 725 list_for_each_entry(var_data, &tr->hist_vars, list) { 726 if (var_data->hist_data == hist_data) 727 continue; 728 found = find_var_ref(var_data->hist_data, hist_data, var_idx); 729 if (found) 730 break; 731 } 732 733 return found; 734 } 735 736 /** 737 * check_var_refs - Check if there is a reference to any of trigger's variables 738 * @hist_data: The hist trigger 739 * 740 * A trigger can define one or more variables. If any one of them is 741 * currently referenced by any other trigger, this function will 742 * determine that. 743 744 * Typically used to determine whether or not a trigger can be removed 745 * - if there are any references to a trigger's variables, it cannot. 
746 * 747 * Return: True if there is a reference to any of trigger's variables 748 */ 749 static bool check_var_refs(struct hist_trigger_data *hist_data) 750 { 751 struct hist_field *field; 752 bool found = false; 753 int i; 754 755 for_each_hist_field(i, hist_data) { 756 field = hist_data->fields[i]; 757 if (field && field->flags & HIST_FIELD_FL_VAR) { 758 if (find_any_var_ref(hist_data, field->var.idx)) { 759 found = true; 760 break; 761 } 762 } 763 } 764 765 return found; 766 } 767 768 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data) 769 { 770 struct trace_array *tr = hist_data->event_file->tr; 771 struct hist_var_data *var_data, *found = NULL; 772 773 list_for_each_entry(var_data, &tr->hist_vars, list) { 774 if (var_data->hist_data == hist_data) { 775 found = var_data; 776 break; 777 } 778 } 779 780 return found; 781 } 782 783 static bool field_has_hist_vars(struct hist_field *hist_field, 784 unsigned int level) 785 { 786 int i; 787 788 if (level > 3) 789 return false; 790 791 if (!hist_field) 792 return false; 793 794 if (hist_field->flags & HIST_FIELD_FL_VAR || 795 hist_field->flags & HIST_FIELD_FL_VAR_REF) 796 return true; 797 798 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) { 799 struct hist_field *operand; 800 801 operand = hist_field->operands[i]; 802 if (field_has_hist_vars(operand, level + 1)) 803 return true; 804 } 805 806 return false; 807 } 808 809 static bool has_hist_vars(struct hist_trigger_data *hist_data) 810 { 811 struct hist_field *hist_field; 812 int i; 813 814 for_each_hist_field(i, hist_data) { 815 hist_field = hist_data->fields[i]; 816 if (field_has_hist_vars(hist_field, 0)) 817 return true; 818 } 819 820 return false; 821 } 822 823 static int save_hist_vars(struct hist_trigger_data *hist_data) 824 { 825 struct trace_array *tr = hist_data->event_file->tr; 826 struct hist_var_data *var_data; 827 828 var_data = find_hist_vars(hist_data); 829 if (var_data) 830 return 0; 831 832 if (tracing_check_open_get_tr(tr)) 833 return -ENODEV; 834 835 var_data = kzalloc(sizeof(*var_data), GFP_KERNEL); 836 if (!var_data) { 837 trace_array_put(tr); 838 return -ENOMEM; 839 } 840 841 var_data->hist_data = hist_data; 842 list_add(&var_data->list, &tr->hist_vars); 843 844 return 0; 845 } 846 847 static void remove_hist_vars(struct hist_trigger_data *hist_data) 848 { 849 struct trace_array *tr = hist_data->event_file->tr; 850 struct hist_var_data *var_data; 851 852 var_data = find_hist_vars(hist_data); 853 if (!var_data) 854 return; 855 856 if (WARN_ON(check_var_refs(hist_data))) 857 return; 858 859 list_del(&var_data->list); 860 861 kfree(var_data); 862 863 trace_array_put(tr); 864 } 865 866 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data, 867 const char *var_name) 868 { 869 struct hist_field *hist_field, *found = NULL; 870 int i; 871 872 for_each_hist_field(i, hist_data) { 873 hist_field = hist_data->fields[i]; 874 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR && 875 strcmp(hist_field->var.name, var_name) == 0) { 876 found = hist_field; 877 break; 878 } 879 } 880 881 return found; 882 } 883 884 static struct hist_field *find_var(struct hist_trigger_data *hist_data, 885 struct trace_event_file *file, 886 const char *var_name) 887 { 888 struct hist_trigger_data *test_data; 889 struct event_trigger_data *test; 890 struct hist_field *hist_field; 891 892 lockdep_assert_held(&event_mutex); 893 894 hist_field = find_var_field(hist_data, var_name); 895 if (hist_field) 896 return hist_field; 897 898 
list_for_each_entry(test, &file->triggers, list) { 899 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 900 test_data = test->private_data; 901 hist_field = find_var_field(test_data, var_name); 902 if (hist_field) 903 return hist_field; 904 } 905 } 906 907 return NULL; 908 } 909 910 static struct trace_event_file *find_var_file(struct trace_array *tr, 911 char *system, 912 char *event_name, 913 char *var_name) 914 { 915 struct hist_trigger_data *var_hist_data; 916 struct hist_var_data *var_data; 917 struct trace_event_file *file, *found = NULL; 918 919 if (system) 920 return find_event_file(tr, system, event_name); 921 922 list_for_each_entry(var_data, &tr->hist_vars, list) { 923 var_hist_data = var_data->hist_data; 924 file = var_hist_data->event_file; 925 if (file == found) 926 continue; 927 928 if (find_var_field(var_hist_data, var_name)) { 929 if (found) { 930 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name)); 931 return NULL; 932 } 933 934 found = file; 935 } 936 } 937 938 return found; 939 } 940 941 static struct hist_field *find_file_var(struct trace_event_file *file, 942 const char *var_name) 943 { 944 struct hist_trigger_data *test_data; 945 struct event_trigger_data *test; 946 struct hist_field *hist_field; 947 948 lockdep_assert_held(&event_mutex); 949 950 list_for_each_entry(test, &file->triggers, list) { 951 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 952 test_data = test->private_data; 953 hist_field = find_var_field(test_data, var_name); 954 if (hist_field) 955 return hist_field; 956 } 957 } 958 959 return NULL; 960 } 961 962 static struct hist_field * 963 find_match_var(struct hist_trigger_data *hist_data, char *var_name) 964 { 965 struct trace_array *tr = hist_data->event_file->tr; 966 struct hist_field *hist_field, *found = NULL; 967 struct trace_event_file *file; 968 unsigned int i; 969 970 for (i = 0; i < hist_data->n_actions; i++) { 971 struct action_data *data = hist_data->actions[i]; 972 973 if (data->handler == HANDLER_ONMATCH) { 974 char *system = data->match_data.event_system; 975 char *event_name = data->match_data.event; 976 977 file = find_var_file(tr, system, event_name, var_name); 978 if (!file) 979 continue; 980 hist_field = find_file_var(file, var_name); 981 if (hist_field) { 982 if (found) { 983 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, 984 errpos(var_name)); 985 return ERR_PTR(-EINVAL); 986 } 987 988 found = hist_field; 989 } 990 } 991 } 992 return found; 993 } 994 995 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data, 996 char *system, 997 char *event_name, 998 char *var_name) 999 { 1000 struct trace_array *tr = hist_data->event_file->tr; 1001 struct hist_field *hist_field = NULL; 1002 struct trace_event_file *file; 1003 1004 if (!system || !event_name) { 1005 hist_field = find_match_var(hist_data, var_name); 1006 if (IS_ERR(hist_field)) 1007 return NULL; 1008 if (hist_field) 1009 return hist_field; 1010 } 1011 1012 file = find_var_file(tr, system, event_name, var_name); 1013 if (!file) 1014 return NULL; 1015 1016 hist_field = find_file_var(file, var_name); 1017 1018 return hist_field; 1019 } 1020 1021 static u64 hist_field_var_ref(struct hist_field *hist_field, 1022 struct tracing_map_elt *elt, 1023 struct ring_buffer_event *rbe, 1024 void *event) 1025 { 1026 struct hist_elt_data *elt_data; 1027 u64 var_val = 0; 1028 1029 if (WARN_ON_ONCE(!elt)) 1030 return var_val; 1031 1032 elt_data = elt->private_data; 1033 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx]; 1034 1035 return var_val; 1036 } 1037 1038 
static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key, 1039 u64 *var_ref_vals, bool self) 1040 { 1041 struct hist_trigger_data *var_data; 1042 struct tracing_map_elt *var_elt; 1043 struct hist_field *hist_field; 1044 unsigned int i, var_idx; 1045 bool resolved = true; 1046 u64 var_val = 0; 1047 1048 for (i = 0; i < hist_data->n_var_refs; i++) { 1049 hist_field = hist_data->var_refs[i]; 1050 var_idx = hist_field->var.idx; 1051 var_data = hist_field->var.hist_data; 1052 1053 if (var_data == NULL) { 1054 resolved = false; 1055 break; 1056 } 1057 1058 if ((self && var_data != hist_data) || 1059 (!self && var_data == hist_data)) 1060 continue; 1061 1062 var_elt = tracing_map_lookup(var_data->map, key); 1063 if (!var_elt) { 1064 resolved = false; 1065 break; 1066 } 1067 1068 if (!tracing_map_var_set(var_elt, var_idx)) { 1069 resolved = false; 1070 break; 1071 } 1072 1073 if (self || !hist_field->read_once) 1074 var_val = tracing_map_read_var(var_elt, var_idx); 1075 else 1076 var_val = tracing_map_read_var_once(var_elt, var_idx); 1077 1078 var_ref_vals[i] = var_val; 1079 } 1080 1081 return resolved; 1082 } 1083 1084 static const char *hist_field_name(struct hist_field *field, 1085 unsigned int level) 1086 { 1087 const char *field_name = ""; 1088 1089 if (level > 1) 1090 return field_name; 1091 1092 if (field->field) 1093 field_name = field->field->name; 1094 else if (field->flags & HIST_FIELD_FL_LOG2 || 1095 field->flags & HIST_FIELD_FL_ALIAS) 1096 field_name = hist_field_name(field->operands[0], ++level); 1097 else if (field->flags & HIST_FIELD_FL_CPU) 1098 field_name = "cpu"; 1099 else if (field->flags & HIST_FIELD_FL_EXPR || 1100 field->flags & HIST_FIELD_FL_VAR_REF) { 1101 if (field->system) { 1102 static char full_name[MAX_FILTER_STR_VAL]; 1103 1104 strcat(full_name, field->system); 1105 strcat(full_name, "."); 1106 strcat(full_name, field->event_name); 1107 strcat(full_name, "."); 1108 strcat(full_name, field->name); 1109 field_name = full_name; 1110 } else 1111 field_name = field->name; 1112 } else if (field->flags & HIST_FIELD_FL_TIMESTAMP) 1113 field_name = "common_timestamp"; 1114 1115 if (field_name == NULL) 1116 field_name = ""; 1117 1118 return field_name; 1119 } 1120 1121 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed) 1122 { 1123 hist_field_fn_t fn = NULL; 1124 1125 switch (field_size) { 1126 case 8: 1127 if (field_is_signed) 1128 fn = hist_field_s64; 1129 else 1130 fn = hist_field_u64; 1131 break; 1132 case 4: 1133 if (field_is_signed) 1134 fn = hist_field_s32; 1135 else 1136 fn = hist_field_u32; 1137 break; 1138 case 2: 1139 if (field_is_signed) 1140 fn = hist_field_s16; 1141 else 1142 fn = hist_field_u16; 1143 break; 1144 case 1: 1145 if (field_is_signed) 1146 fn = hist_field_s8; 1147 else 1148 fn = hist_field_u8; 1149 break; 1150 } 1151 1152 return fn; 1153 } 1154 1155 static int parse_map_size(char *str) 1156 { 1157 unsigned long size, map_bits; 1158 int ret; 1159 1160 ret = kstrtoul(str, 0, &size); 1161 if (ret) 1162 goto out; 1163 1164 map_bits = ilog2(roundup_pow_of_two(size)); 1165 if (map_bits < TRACING_MAP_BITS_MIN || 1166 map_bits > TRACING_MAP_BITS_MAX) 1167 ret = -EINVAL; 1168 else 1169 ret = map_bits; 1170 out: 1171 return ret; 1172 } 1173 1174 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs) 1175 { 1176 unsigned int i; 1177 1178 if (!attrs) 1179 return; 1180 1181 for (i = 0; i < attrs->n_assignments; i++) 1182 kfree(attrs->assignment_str[i]); 1183 1184 for (i = 0; i < attrs->n_actions; i++) 
1185 kfree(attrs->action_str[i]); 1186 1187 kfree(attrs->name); 1188 kfree(attrs->sort_key_str); 1189 kfree(attrs->keys_str); 1190 kfree(attrs->vals_str); 1191 kfree(attrs->clock); 1192 kfree(attrs); 1193 } 1194 1195 static int parse_action(char *str, struct hist_trigger_attrs *attrs) 1196 { 1197 int ret = -EINVAL; 1198 1199 if (attrs->n_actions >= HIST_ACTIONS_MAX) 1200 return ret; 1201 1202 if ((str_has_prefix(str, "onmatch(")) || 1203 (str_has_prefix(str, "onmax(")) || 1204 (str_has_prefix(str, "onchange("))) { 1205 attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL); 1206 if (!attrs->action_str[attrs->n_actions]) { 1207 ret = -ENOMEM; 1208 return ret; 1209 } 1210 attrs->n_actions++; 1211 ret = 0; 1212 } 1213 return ret; 1214 } 1215 1216 static int parse_assignment(struct trace_array *tr, 1217 char *str, struct hist_trigger_attrs *attrs) 1218 { 1219 int len, ret = 0; 1220 1221 if ((len = str_has_prefix(str, "key=")) || 1222 (len = str_has_prefix(str, "keys="))) { 1223 attrs->keys_str = kstrdup(str + len, GFP_KERNEL); 1224 if (!attrs->keys_str) { 1225 ret = -ENOMEM; 1226 goto out; 1227 } 1228 } else if ((len = str_has_prefix(str, "val=")) || 1229 (len = str_has_prefix(str, "vals=")) || 1230 (len = str_has_prefix(str, "values="))) { 1231 attrs->vals_str = kstrdup(str + len, GFP_KERNEL); 1232 if (!attrs->vals_str) { 1233 ret = -ENOMEM; 1234 goto out; 1235 } 1236 } else if ((len = str_has_prefix(str, "sort="))) { 1237 attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL); 1238 if (!attrs->sort_key_str) { 1239 ret = -ENOMEM; 1240 goto out; 1241 } 1242 } else if (str_has_prefix(str, "name=")) { 1243 attrs->name = kstrdup(str, GFP_KERNEL); 1244 if (!attrs->name) { 1245 ret = -ENOMEM; 1246 goto out; 1247 } 1248 } else if ((len = str_has_prefix(str, "clock="))) { 1249 str += len; 1250 1251 str = strstrip(str); 1252 attrs->clock = kstrdup(str, GFP_KERNEL); 1253 if (!attrs->clock) { 1254 ret = -ENOMEM; 1255 goto out; 1256 } 1257 } else if ((len = str_has_prefix(str, "size="))) { 1258 int map_bits = parse_map_size(str + len); 1259 1260 if (map_bits < 0) { 1261 ret = map_bits; 1262 goto out; 1263 } 1264 attrs->map_bits = map_bits; 1265 } else { 1266 char *assignment; 1267 1268 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) { 1269 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str)); 1270 ret = -EINVAL; 1271 goto out; 1272 } 1273 1274 assignment = kstrdup(str, GFP_KERNEL); 1275 if (!assignment) { 1276 ret = -ENOMEM; 1277 goto out; 1278 } 1279 1280 attrs->assignment_str[attrs->n_assignments++] = assignment; 1281 } 1282 out: 1283 return ret; 1284 } 1285 1286 static struct hist_trigger_attrs * 1287 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str) 1288 { 1289 struct hist_trigger_attrs *attrs; 1290 int ret = 0; 1291 1292 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 1293 if (!attrs) 1294 return ERR_PTR(-ENOMEM); 1295 1296 while (trigger_str) { 1297 char *str = strsep(&trigger_str, ":"); 1298 char *rhs; 1299 1300 rhs = strchr(str, '='); 1301 if (rhs) { 1302 if (!strlen(++rhs)) { 1303 ret = -EINVAL; 1304 hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str)); 1305 goto free; 1306 } 1307 ret = parse_assignment(tr, str, attrs); 1308 if (ret) 1309 goto free; 1310 } else if (strcmp(str, "pause") == 0) 1311 attrs->pause = true; 1312 else if ((strcmp(str, "cont") == 0) || 1313 (strcmp(str, "continue") == 0)) 1314 attrs->cont = true; 1315 else if (strcmp(str, "clear") == 0) 1316 attrs->clear = true; 1317 else { 1318 ret = parse_action(str, attrs); 1319 if (ret) 1320 goto free; 1321 } 1322 
} 1323 1324 if (!attrs->keys_str) { 1325 ret = -EINVAL; 1326 goto free; 1327 } 1328 1329 if (!attrs->clock) { 1330 attrs->clock = kstrdup("global", GFP_KERNEL); 1331 if (!attrs->clock) { 1332 ret = -ENOMEM; 1333 goto free; 1334 } 1335 } 1336 1337 return attrs; 1338 free: 1339 destroy_hist_trigger_attrs(attrs); 1340 1341 return ERR_PTR(ret); 1342 } 1343 1344 static inline void save_comm(char *comm, struct task_struct *task) 1345 { 1346 if (!task->pid) { 1347 strcpy(comm, "<idle>"); 1348 return; 1349 } 1350 1351 if (WARN_ON_ONCE(task->pid < 0)) { 1352 strcpy(comm, "<XXX>"); 1353 return; 1354 } 1355 1356 strncpy(comm, task->comm, TASK_COMM_LEN); 1357 } 1358 1359 static void hist_elt_data_free(struct hist_elt_data *elt_data) 1360 { 1361 unsigned int i; 1362 1363 for (i = 0; i < SYNTH_FIELDS_MAX; i++) 1364 kfree(elt_data->field_var_str[i]); 1365 1366 kfree(elt_data->comm); 1367 kfree(elt_data); 1368 } 1369 1370 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt) 1371 { 1372 struct hist_elt_data *elt_data = elt->private_data; 1373 1374 hist_elt_data_free(elt_data); 1375 } 1376 1377 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt) 1378 { 1379 struct hist_trigger_data *hist_data = elt->map->private_data; 1380 unsigned int size = TASK_COMM_LEN; 1381 struct hist_elt_data *elt_data; 1382 struct hist_field *key_field; 1383 unsigned int i, n_str; 1384 1385 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); 1386 if (!elt_data) 1387 return -ENOMEM; 1388 1389 for_each_hist_key_field(i, hist_data) { 1390 key_field = hist_data->fields[i]; 1391 1392 if (key_field->flags & HIST_FIELD_FL_EXECNAME) { 1393 elt_data->comm = kzalloc(size, GFP_KERNEL); 1394 if (!elt_data->comm) { 1395 kfree(elt_data); 1396 return -ENOMEM; 1397 } 1398 break; 1399 } 1400 } 1401 1402 n_str = hist_data->n_field_var_str + hist_data->n_save_var_str + 1403 hist_data->n_var_str; 1404 if (n_str > SYNTH_FIELDS_MAX) { 1405 hist_elt_data_free(elt_data); 1406 return -EINVAL; 1407 } 1408 1409 BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1)); 1410 1411 size = STR_VAR_LEN_MAX; 1412 1413 for (i = 0; i < n_str; i++) { 1414 elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL); 1415 if (!elt_data->field_var_str[i]) { 1416 hist_elt_data_free(elt_data); 1417 return -ENOMEM; 1418 } 1419 } 1420 1421 elt->private_data = elt_data; 1422 1423 return 0; 1424 } 1425 1426 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt) 1427 { 1428 struct hist_elt_data *elt_data = elt->private_data; 1429 1430 if (elt_data->comm) 1431 save_comm(elt_data->comm, current); 1432 } 1433 1434 static const struct tracing_map_ops hist_trigger_elt_data_ops = { 1435 .elt_alloc = hist_trigger_elt_data_alloc, 1436 .elt_free = hist_trigger_elt_data_free, 1437 .elt_init = hist_trigger_elt_data_init, 1438 }; 1439 1440 static const char *get_hist_field_flags(struct hist_field *hist_field) 1441 { 1442 const char *flags_str = NULL; 1443 1444 if (hist_field->flags & HIST_FIELD_FL_HEX) 1445 flags_str = "hex"; 1446 else if (hist_field->flags & HIST_FIELD_FL_SYM) 1447 flags_str = "sym"; 1448 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET) 1449 flags_str = "sym-offset"; 1450 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME) 1451 flags_str = "execname"; 1452 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL) 1453 flags_str = "syscall"; 1454 else if (hist_field->flags & HIST_FIELD_FL_LOG2) 1455 flags_str = "log2"; 1456 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS) 1457 flags_str = "usecs"; 1458 1459 return flags_str; 1460 } 
1461 1462 static void expr_field_str(struct hist_field *field, char *expr) 1463 { 1464 if (field->flags & HIST_FIELD_FL_VAR_REF) 1465 strcat(expr, "$"); 1466 1467 strcat(expr, hist_field_name(field, 0)); 1468 1469 if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) { 1470 const char *flags_str = get_hist_field_flags(field); 1471 1472 if (flags_str) { 1473 strcat(expr, "."); 1474 strcat(expr, flags_str); 1475 } 1476 } 1477 } 1478 1479 static char *expr_str(struct hist_field *field, unsigned int level) 1480 { 1481 char *expr; 1482 1483 if (level > 1) 1484 return NULL; 1485 1486 expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 1487 if (!expr) 1488 return NULL; 1489 1490 if (!field->operands[0]) { 1491 expr_field_str(field, expr); 1492 return expr; 1493 } 1494 1495 if (field->operator == FIELD_OP_UNARY_MINUS) { 1496 char *subexpr; 1497 1498 strcat(expr, "-("); 1499 subexpr = expr_str(field->operands[0], ++level); 1500 if (!subexpr) { 1501 kfree(expr); 1502 return NULL; 1503 } 1504 strcat(expr, subexpr); 1505 strcat(expr, ")"); 1506 1507 kfree(subexpr); 1508 1509 return expr; 1510 } 1511 1512 expr_field_str(field->operands[0], expr); 1513 1514 switch (field->operator) { 1515 case FIELD_OP_MINUS: 1516 strcat(expr, "-"); 1517 break; 1518 case FIELD_OP_PLUS: 1519 strcat(expr, "+"); 1520 break; 1521 default: 1522 kfree(expr); 1523 return NULL; 1524 } 1525 1526 expr_field_str(field->operands[1], expr); 1527 1528 return expr; 1529 } 1530 1531 static int contains_operator(char *str) 1532 { 1533 enum field_op_id field_op = FIELD_OP_NONE; 1534 char *op; 1535 1536 op = strpbrk(str, "+-"); 1537 if (!op) 1538 return FIELD_OP_NONE; 1539 1540 switch (*op) { 1541 case '-': 1542 if (*str == '-') 1543 field_op = FIELD_OP_UNARY_MINUS; 1544 else 1545 field_op = FIELD_OP_MINUS; 1546 break; 1547 case '+': 1548 field_op = FIELD_OP_PLUS; 1549 break; 1550 default: 1551 break; 1552 } 1553 1554 return field_op; 1555 } 1556 1557 static void get_hist_field(struct hist_field *hist_field) 1558 { 1559 hist_field->ref++; 1560 } 1561 1562 static void __destroy_hist_field(struct hist_field *hist_field) 1563 { 1564 if (--hist_field->ref > 1) 1565 return; 1566 1567 kfree(hist_field->var.name); 1568 kfree(hist_field->name); 1569 kfree(hist_field->type); 1570 1571 kfree(hist_field->system); 1572 kfree(hist_field->event_name); 1573 1574 kfree(hist_field); 1575 } 1576 1577 static void destroy_hist_field(struct hist_field *hist_field, 1578 unsigned int level) 1579 { 1580 unsigned int i; 1581 1582 if (level > 3) 1583 return; 1584 1585 if (!hist_field) 1586 return; 1587 1588 if (hist_field->flags & HIST_FIELD_FL_VAR_REF) 1589 return; /* var refs will be destroyed separately */ 1590 1591 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) 1592 destroy_hist_field(hist_field->operands[i], level + 1); 1593 1594 __destroy_hist_field(hist_field); 1595 } 1596 1597 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, 1598 struct ftrace_event_field *field, 1599 unsigned long flags, 1600 char *var_name) 1601 { 1602 struct hist_field *hist_field; 1603 1604 if (field && is_function_field(field)) 1605 return NULL; 1606 1607 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL); 1608 if (!hist_field) 1609 return NULL; 1610 1611 hist_field->ref = 1; 1612 1613 hist_field->hist_data = hist_data; 1614 1615 if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS) 1616 goto out; /* caller will populate */ 1617 1618 if (flags & HIST_FIELD_FL_VAR_REF) { 1619 hist_field->fn = hist_field_var_ref; 1620 goto out; 1621 } 
1622 1623 if (flags & HIST_FIELD_FL_HITCOUNT) { 1624 hist_field->fn = hist_field_counter; 1625 hist_field->size = sizeof(u64); 1626 hist_field->type = kstrdup("u64", GFP_KERNEL); 1627 if (!hist_field->type) 1628 goto free; 1629 goto out; 1630 } 1631 1632 if (flags & HIST_FIELD_FL_STACKTRACE) { 1633 hist_field->fn = hist_field_none; 1634 goto out; 1635 } 1636 1637 if (flags & HIST_FIELD_FL_LOG2) { 1638 unsigned long fl = flags & ~HIST_FIELD_FL_LOG2; 1639 hist_field->fn = hist_field_log2; 1640 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL); 1641 hist_field->size = hist_field->operands[0]->size; 1642 hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL); 1643 if (!hist_field->type) 1644 goto free; 1645 goto out; 1646 } 1647 1648 if (flags & HIST_FIELD_FL_TIMESTAMP) { 1649 hist_field->fn = hist_field_timestamp; 1650 hist_field->size = sizeof(u64); 1651 hist_field->type = kstrdup("u64", GFP_KERNEL); 1652 if (!hist_field->type) 1653 goto free; 1654 goto out; 1655 } 1656 1657 if (flags & HIST_FIELD_FL_CPU) { 1658 hist_field->fn = hist_field_cpu; 1659 hist_field->size = sizeof(int); 1660 hist_field->type = kstrdup("unsigned int", GFP_KERNEL); 1661 if (!hist_field->type) 1662 goto free; 1663 goto out; 1664 } 1665 1666 if (WARN_ON_ONCE(!field)) 1667 goto out; 1668 1669 if (is_string_field(field)) { 1670 flags |= HIST_FIELD_FL_STRING; 1671 1672 hist_field->size = MAX_FILTER_STR_VAL; 1673 hist_field->type = kstrdup(field->type, GFP_KERNEL); 1674 if (!hist_field->type) 1675 goto free; 1676 1677 if (field->filter_type == FILTER_STATIC_STRING) 1678 hist_field->fn = hist_field_string; 1679 else if (field->filter_type == FILTER_DYN_STRING) 1680 hist_field->fn = hist_field_dynstring; 1681 else 1682 hist_field->fn = hist_field_pstring; 1683 } else { 1684 hist_field->size = field->size; 1685 hist_field->is_signed = field->is_signed; 1686 hist_field->type = kstrdup(field->type, GFP_KERNEL); 1687 if (!hist_field->type) 1688 goto free; 1689 1690 hist_field->fn = select_value_fn(field->size, 1691 field->is_signed); 1692 if (!hist_field->fn) { 1693 destroy_hist_field(hist_field, 0); 1694 return NULL; 1695 } 1696 } 1697 out: 1698 hist_field->field = field; 1699 hist_field->flags = flags; 1700 1701 if (var_name) { 1702 hist_field->var.name = kstrdup(var_name, GFP_KERNEL); 1703 if (!hist_field->var.name) 1704 goto free; 1705 } 1706 1707 return hist_field; 1708 free: 1709 destroy_hist_field(hist_field, 0); 1710 return NULL; 1711 } 1712 1713 static void destroy_hist_fields(struct hist_trigger_data *hist_data) 1714 { 1715 unsigned int i; 1716 1717 for (i = 0; i < HIST_FIELDS_MAX; i++) { 1718 if (hist_data->fields[i]) { 1719 destroy_hist_field(hist_data->fields[i], 0); 1720 hist_data->fields[i] = NULL; 1721 } 1722 } 1723 1724 for (i = 0; i < hist_data->n_var_refs; i++) { 1725 WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF)); 1726 __destroy_hist_field(hist_data->var_refs[i]); 1727 hist_data->var_refs[i] = NULL; 1728 } 1729 } 1730 1731 static int init_var_ref(struct hist_field *ref_field, 1732 struct hist_field *var_field, 1733 char *system, char *event_name) 1734 { 1735 int err = 0; 1736 1737 ref_field->var.idx = var_field->var.idx; 1738 ref_field->var.hist_data = var_field->hist_data; 1739 ref_field->size = var_field->size; 1740 ref_field->is_signed = var_field->is_signed; 1741 ref_field->flags |= var_field->flags & 1742 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 1743 1744 if (system) { 1745 ref_field->system = kstrdup(system, GFP_KERNEL); 1746 if 
(!ref_field->system) 1747 return -ENOMEM; 1748 } 1749 1750 if (event_name) { 1751 ref_field->event_name = kstrdup(event_name, GFP_KERNEL); 1752 if (!ref_field->event_name) { 1753 err = -ENOMEM; 1754 goto free; 1755 } 1756 } 1757 1758 if (var_field->var.name) { 1759 ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL); 1760 if (!ref_field->name) { 1761 err = -ENOMEM; 1762 goto free; 1763 } 1764 } else if (var_field->name) { 1765 ref_field->name = kstrdup(var_field->name, GFP_KERNEL); 1766 if (!ref_field->name) { 1767 err = -ENOMEM; 1768 goto free; 1769 } 1770 } 1771 1772 ref_field->type = kstrdup(var_field->type, GFP_KERNEL); 1773 if (!ref_field->type) { 1774 err = -ENOMEM; 1775 goto free; 1776 } 1777 out: 1778 return err; 1779 free: 1780 kfree(ref_field->system); 1781 kfree(ref_field->event_name); 1782 kfree(ref_field->name); 1783 1784 goto out; 1785 } 1786 1787 static int find_var_ref_idx(struct hist_trigger_data *hist_data, 1788 struct hist_field *var_field) 1789 { 1790 struct hist_field *ref_field; 1791 int i; 1792 1793 for (i = 0; i < hist_data->n_var_refs; i++) { 1794 ref_field = hist_data->var_refs[i]; 1795 if (ref_field->var.idx == var_field->var.idx && 1796 ref_field->var.hist_data == var_field->hist_data) 1797 return i; 1798 } 1799 1800 return -ENOENT; 1801 } 1802 1803 /** 1804 * create_var_ref - Create a variable reference and attach it to trigger 1805 * @hist_data: The trigger that will be referencing the variable 1806 * @var_field: The VAR field to create a reference to 1807 * @system: The optional system string 1808 * @event_name: The optional event_name string 1809 * 1810 * Given a variable hist_field, create a VAR_REF hist_field that 1811 * represents a reference to it. 1812 * 1813 * This function also adds the reference to the trigger that 1814 * now references the variable. 
1815 * 1816 * Return: The VAR_REF field if successful, NULL if not 1817 */ 1818 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data, 1819 struct hist_field *var_field, 1820 char *system, char *event_name) 1821 { 1822 unsigned long flags = HIST_FIELD_FL_VAR_REF; 1823 struct hist_field *ref_field; 1824 int i; 1825 1826 /* Check if the variable already exists */ 1827 for (i = 0; i < hist_data->n_var_refs; i++) { 1828 ref_field = hist_data->var_refs[i]; 1829 if (ref_field->var.idx == var_field->var.idx && 1830 ref_field->var.hist_data == var_field->hist_data) { 1831 get_hist_field(ref_field); 1832 return ref_field; 1833 } 1834 } 1835 1836 ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL); 1837 if (ref_field) { 1838 if (init_var_ref(ref_field, var_field, system, event_name)) { 1839 destroy_hist_field(ref_field, 0); 1840 return NULL; 1841 } 1842 1843 hist_data->var_refs[hist_data->n_var_refs] = ref_field; 1844 ref_field->var_ref_idx = hist_data->n_var_refs++; 1845 } 1846 1847 return ref_field; 1848 } 1849 1850 static bool is_var_ref(char *var_name) 1851 { 1852 if (!var_name || strlen(var_name) < 2 || var_name[0] != '$') 1853 return false; 1854 1855 return true; 1856 } 1857 1858 static char *field_name_from_var(struct hist_trigger_data *hist_data, 1859 char *var_name) 1860 { 1861 char *name, *field; 1862 unsigned int i; 1863 1864 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { 1865 name = hist_data->attrs->var_defs.name[i]; 1866 1867 if (strcmp(var_name, name) == 0) { 1868 field = hist_data->attrs->var_defs.expr[i]; 1869 if (contains_operator(field) || is_var_ref(field)) 1870 continue; 1871 return field; 1872 } 1873 } 1874 1875 return NULL; 1876 } 1877 1878 static char *local_field_var_ref(struct hist_trigger_data *hist_data, 1879 char *system, char *event_name, 1880 char *var_name) 1881 { 1882 struct trace_event_call *call; 1883 1884 if (system && event_name) { 1885 call = hist_data->event_file->event_call; 1886 1887 if (strcmp(system, call->class->system) != 0) 1888 return NULL; 1889 1890 if (strcmp(event_name, trace_event_name(call)) != 0) 1891 return NULL; 1892 } 1893 1894 if (!!system != !!event_name) 1895 return NULL; 1896 1897 if (!is_var_ref(var_name)) 1898 return NULL; 1899 1900 var_name++; 1901 1902 return field_name_from_var(hist_data, var_name); 1903 } 1904 1905 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data, 1906 char *system, char *event_name, 1907 char *var_name) 1908 { 1909 struct hist_field *var_field = NULL, *ref_field = NULL; 1910 struct trace_array *tr = hist_data->event_file->tr; 1911 1912 if (!is_var_ref(var_name)) 1913 return NULL; 1914 1915 var_name++; 1916 1917 var_field = find_event_var(hist_data, system, event_name, var_name); 1918 if (var_field) 1919 ref_field = create_var_ref(hist_data, var_field, 1920 system, event_name); 1921 1922 if (!ref_field) 1923 hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name)); 1924 1925 return ref_field; 1926 } 1927 1928 static struct ftrace_event_field * 1929 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, 1930 char *field_str, unsigned long *flags) 1931 { 1932 struct ftrace_event_field *field = NULL; 1933 char *field_name, *modifier, *str; 1934 struct trace_array *tr = file->tr; 1935 1936 modifier = str = kstrdup(field_str, GFP_KERNEL); 1937 if (!modifier) 1938 return ERR_PTR(-ENOMEM); 1939 1940 field_name = strsep(&modifier, "."); 1941 if (modifier) { 1942 if (strcmp(modifier, "hex") == 0) 1943 *flags |= 
HIST_FIELD_FL_HEX; 1944 else if (strcmp(modifier, "sym") == 0) 1945 *flags |= HIST_FIELD_FL_SYM; 1946 else if (strcmp(modifier, "sym-offset") == 0) 1947 *flags |= HIST_FIELD_FL_SYM_OFFSET; 1948 else if ((strcmp(modifier, "execname") == 0) && 1949 (strcmp(field_name, "common_pid") == 0)) 1950 *flags |= HIST_FIELD_FL_EXECNAME; 1951 else if (strcmp(modifier, "syscall") == 0) 1952 *flags |= HIST_FIELD_FL_SYSCALL; 1953 else if (strcmp(modifier, "log2") == 0) 1954 *flags |= HIST_FIELD_FL_LOG2; 1955 else if (strcmp(modifier, "usecs") == 0) 1956 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; 1957 else { 1958 hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier)); 1959 field = ERR_PTR(-EINVAL); 1960 goto out; 1961 } 1962 } 1963 1964 if (strcmp(field_name, "common_timestamp") == 0) { 1965 *flags |= HIST_FIELD_FL_TIMESTAMP; 1966 hist_data->enable_timestamps = true; 1967 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS) 1968 hist_data->attrs->ts_in_usecs = true; 1969 } else if (strcmp(field_name, "cpu") == 0) 1970 *flags |= HIST_FIELD_FL_CPU; 1971 else { 1972 field = trace_find_event_field(file->event_call, field_name); 1973 if (!field || !field->size) { 1974 hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name)); 1975 field = ERR_PTR(-EINVAL); 1976 goto out; 1977 } 1978 } 1979 out: 1980 kfree(str); 1981 1982 return field; 1983 } 1984 1985 static struct hist_field *create_alias(struct hist_trigger_data *hist_data, 1986 struct hist_field *var_ref, 1987 char *var_name) 1988 { 1989 struct hist_field *alias = NULL; 1990 unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR; 1991 1992 alias = create_hist_field(hist_data, NULL, flags, var_name); 1993 if (!alias) 1994 return NULL; 1995 1996 alias->fn = var_ref->fn; 1997 alias->operands[0] = var_ref; 1998 1999 if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) { 2000 destroy_hist_field(alias, 0); 2001 return NULL; 2002 } 2003 2004 alias->var_ref_idx = var_ref->var_ref_idx; 2005 2006 return alias; 2007 } 2008 2009 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, 2010 struct trace_event_file *file, char *str, 2011 unsigned long *flags, char *var_name) 2012 { 2013 char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str; 2014 struct ftrace_event_field *field = NULL; 2015 struct hist_field *hist_field = NULL; 2016 int ret = 0; 2017 2018 s = strchr(str, '.'); 2019 if (s) { 2020 s = strchr(++s, '.'); 2021 if (s) { 2022 ref_system = strsep(&str, "."); 2023 if (!str) { 2024 ret = -EINVAL; 2025 goto out; 2026 } 2027 ref_event = strsep(&str, "."); 2028 if (!str) { 2029 ret = -EINVAL; 2030 goto out; 2031 } 2032 ref_var = str; 2033 } 2034 } 2035 2036 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var); 2037 if (!s) { 2038 hist_field = parse_var_ref(hist_data, ref_system, 2039 ref_event, ref_var); 2040 if (hist_field) { 2041 if (var_name) { 2042 hist_field = create_alias(hist_data, hist_field, var_name); 2043 if (!hist_field) { 2044 ret = -ENOMEM; 2045 goto out; 2046 } 2047 } 2048 return hist_field; 2049 } 2050 } else 2051 str = s; 2052 2053 field = parse_field(hist_data, file, str, flags); 2054 if (IS_ERR(field)) { 2055 ret = PTR_ERR(field); 2056 goto out; 2057 } 2058 2059 hist_field = create_hist_field(hist_data, field, *flags, var_name); 2060 if (!hist_field) { 2061 ret = -ENOMEM; 2062 goto out; 2063 } 2064 2065 return hist_field; 2066 out: 2067 return ERR_PTR(ret); 2068 } 2069 2070 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, 2071 struct trace_event_file *file, 2072 
char *str, unsigned long flags, 2073 char *var_name, unsigned int level); 2074 2075 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, 2076 struct trace_event_file *file, 2077 char *str, unsigned long flags, 2078 char *var_name, unsigned int level) 2079 { 2080 struct hist_field *operand1, *expr = NULL; 2081 unsigned long operand_flags; 2082 int ret = 0; 2083 char *s; 2084 2085 /* we support only -(xxx) i.e. explicit parens required */ 2086 2087 if (level > 3) { 2088 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2089 ret = -EINVAL; 2090 goto free; 2091 } 2092 2093 str++; /* skip leading '-' */ 2094 2095 s = strchr(str, '('); 2096 if (s) 2097 str++; 2098 else { 2099 ret = -EINVAL; 2100 goto free; 2101 } 2102 2103 s = strrchr(str, ')'); 2104 if (s) 2105 *s = '\0'; 2106 else { 2107 ret = -EINVAL; /* no closing ')' */ 2108 goto free; 2109 } 2110 2111 flags |= HIST_FIELD_FL_EXPR; 2112 expr = create_hist_field(hist_data, NULL, flags, var_name); 2113 if (!expr) { 2114 ret = -ENOMEM; 2115 goto free; 2116 } 2117 2118 operand_flags = 0; 2119 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level); 2120 if (IS_ERR(operand1)) { 2121 ret = PTR_ERR(operand1); 2122 goto free; 2123 } 2124 2125 expr->flags |= operand1->flags & 2126 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2127 expr->fn = hist_field_unary_minus; 2128 expr->operands[0] = operand1; 2129 expr->operator = FIELD_OP_UNARY_MINUS; 2130 expr->name = expr_str(expr, 0); 2131 expr->type = kstrdup(operand1->type, GFP_KERNEL); 2132 if (!expr->type) { 2133 ret = -ENOMEM; 2134 goto free; 2135 } 2136 2137 return expr; 2138 free: 2139 destroy_hist_field(expr, 0); 2140 return ERR_PTR(ret); 2141 } 2142 2143 static int check_expr_operands(struct trace_array *tr, 2144 struct hist_field *operand1, 2145 struct hist_field *operand2) 2146 { 2147 unsigned long operand1_flags = operand1->flags; 2148 unsigned long operand2_flags = operand2->flags; 2149 2150 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) || 2151 (operand1_flags & HIST_FIELD_FL_ALIAS)) { 2152 struct hist_field *var; 2153 2154 var = find_var_field(operand1->var.hist_data, operand1->name); 2155 if (!var) 2156 return -EINVAL; 2157 operand1_flags = var->flags; 2158 } 2159 2160 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) || 2161 (operand2_flags & HIST_FIELD_FL_ALIAS)) { 2162 struct hist_field *var; 2163 2164 var = find_var_field(operand2->var.hist_data, operand2->name); 2165 if (!var) 2166 return -EINVAL; 2167 operand2_flags = var->flags; 2168 } 2169 2170 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) != 2171 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) { 2172 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0); 2173 return -EINVAL; 2174 } 2175 2176 return 0; 2177 } 2178 2179 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, 2180 struct trace_event_file *file, 2181 char *str, unsigned long flags, 2182 char *var_name, unsigned int level) 2183 { 2184 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL; 2185 unsigned long operand_flags; 2186 int field_op, ret = -EINVAL; 2187 char *sep, *operand1_str; 2188 2189 if (level > 3) { 2190 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2191 return ERR_PTR(-EINVAL); 2192 } 2193 2194 field_op = contains_operator(str); 2195 2196 if (field_op == FIELD_OP_NONE) 2197 return parse_atom(hist_data, file, str, &flags, var_name); 2198 2199 if (field_op == FIELD_OP_UNARY_MINUS) 2200 return parse_unary(hist_data, file, str, flags, var_name, ++level); 2201 
2202 switch (field_op) { 2203 case FIELD_OP_MINUS: 2204 sep = "-"; 2205 break; 2206 case FIELD_OP_PLUS: 2207 sep = "+"; 2208 break; 2209 default: 2210 goto free; 2211 } 2212 2213 operand1_str = strsep(&str, sep); 2214 if (!operand1_str || !str) 2215 goto free; 2216 2217 operand_flags = 0; 2218 operand1 = parse_atom(hist_data, file, operand1_str, 2219 &operand_flags, NULL); 2220 if (IS_ERR(operand1)) { 2221 ret = PTR_ERR(operand1); 2222 operand1 = NULL; 2223 goto free; 2224 } 2225 2226 /* rest of string could be another expression e.g. b+c in a+b+c */ 2227 operand_flags = 0; 2228 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level); 2229 if (IS_ERR(operand2)) { 2230 ret = PTR_ERR(operand2); 2231 operand2 = NULL; 2232 goto free; 2233 } 2234 2235 ret = check_expr_operands(file->tr, operand1, operand2); 2236 if (ret) 2237 goto free; 2238 2239 flags |= HIST_FIELD_FL_EXPR; 2240 2241 flags |= operand1->flags & 2242 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2243 2244 expr = create_hist_field(hist_data, NULL, flags, var_name); 2245 if (!expr) { 2246 ret = -ENOMEM; 2247 goto free; 2248 } 2249 2250 operand1->read_once = true; 2251 operand2->read_once = true; 2252 2253 expr->operands[0] = operand1; 2254 expr->operands[1] = operand2; 2255 expr->operator = field_op; 2256 expr->name = expr_str(expr, 0); 2257 expr->type = kstrdup(operand1->type, GFP_KERNEL); 2258 if (!expr->type) { 2259 ret = -ENOMEM; 2260 goto free; 2261 } 2262 2263 switch (field_op) { 2264 case FIELD_OP_MINUS: 2265 expr->fn = hist_field_minus; 2266 break; 2267 case FIELD_OP_PLUS: 2268 expr->fn = hist_field_plus; 2269 break; 2270 default: 2271 ret = -EINVAL; 2272 goto free; 2273 } 2274 2275 return expr; 2276 free: 2277 destroy_hist_field(operand1, 0); 2278 destroy_hist_field(operand2, 0); 2279 destroy_hist_field(expr, 0); 2280 2281 return ERR_PTR(ret); 2282 } 2283 2284 static char *find_trigger_filter(struct hist_trigger_data *hist_data, 2285 struct trace_event_file *file) 2286 { 2287 struct event_trigger_data *test; 2288 2289 lockdep_assert_held(&event_mutex); 2290 2291 list_for_each_entry(test, &file->triggers, list) { 2292 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 2293 if (test->private_data == hist_data) 2294 return test->filter_str; 2295 } 2296 } 2297 2298 return NULL; 2299 } 2300 2301 static struct event_command trigger_hist_cmd; 2302 static int event_hist_trigger_func(struct event_command *cmd_ops, 2303 struct trace_event_file *file, 2304 char *glob, char *cmd, char *param); 2305 2306 static bool compatible_keys(struct hist_trigger_data *target_hist_data, 2307 struct hist_trigger_data *hist_data, 2308 unsigned int n_keys) 2309 { 2310 struct hist_field *target_hist_field, *hist_field; 2311 unsigned int n, i, j; 2312 2313 if (hist_data->n_fields - hist_data->n_vals != n_keys) 2314 return false; 2315 2316 i = hist_data->n_vals; 2317 j = target_hist_data->n_vals; 2318 2319 for (n = 0; n < n_keys; n++) { 2320 hist_field = hist_data->fields[i + n]; 2321 target_hist_field = target_hist_data->fields[j + n]; 2322 2323 if (strcmp(hist_field->type, target_hist_field->type) != 0) 2324 return false; 2325 if (hist_field->size != target_hist_field->size) 2326 return false; 2327 if (hist_field->is_signed != target_hist_field->is_signed) 2328 return false; 2329 } 2330 2331 return true; 2332 } 2333 2334 static struct hist_trigger_data * 2335 find_compatible_hist(struct hist_trigger_data *target_hist_data, 2336 struct trace_event_file *file) 2337 { 2338 struct hist_trigger_data *hist_data; 2339 struct 
event_trigger_data *test;
	unsigned int n_keys;

	lockdep_assert_held(&event_mutex);

	n_keys = target_hist_data->n_fields - target_hist_data->n_vals;

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;

			if (compatible_keys(target_hist_data, hist_data, n_keys))
				return hist_data;
		}
	}

	return NULL;
}

static struct trace_event_file *event_file(struct trace_array *tr,
					   char *system, char *event_name)
{
	struct trace_event_file *file;

	file = __find_event_file(tr, system, event_name);
	if (!file)
		return ERR_PTR(-EINVAL);

	return file;
}

static struct hist_field *
find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
			 char *system, char *event_name, char *field_name)
{
	struct hist_field *event_var;
	char *synthetic_name;

	synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!synthetic_name)
		return ERR_PTR(-ENOMEM);

	strcpy(synthetic_name, "synthetic_");
	strcat(synthetic_name, field_name);

	event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);

	kfree(synthetic_name);

	return event_var;
}

/**
 * create_field_var_hist - Automatically create a histogram and var for a field
 * @target_hist_data: The target hist trigger
 * @subsys_name: Optional subsystem name
 * @event_name: Optional event name
 * @field_name: The name of the field (and the resulting variable)
 *
 * Hist trigger actions fetch data from variables, not directly from
 * events. However, for convenience, users are allowed to directly
 * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 *
 * If a user specifies a field on an event that isn't the event the
 * histogram is currently being defined on (the target event histogram),
 * the only way that can be accomplished is if a new hist trigger is
 * created and the field variable defined on that.
 *
 * This function creates a new histogram compatible with the target
 * event (meaning a histogram with the same key as the target
 * histogram), and creates a variable for the specified field, but
 * with 'synthetic_' prepended to the variable name in order to avoid
 * collision with normal field variables.
 *
 * Return: The variable created for the field.
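 *
 * As an illustration (the key and field names here are hypothetical),
 * if the compatible histogram found below is keyed on 'pid' and the
 * action refers to field 'prio' of that other event, the command
 * string assembled below takes the form:
 *
 *   keys=pid:synthetic_prio=prio [if <filter of the compatible hist>]
 *
 * and is registered as a new hist trigger on that event via
 * event_hist_trigger_func().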
2415 */ 2416 static struct hist_field * 2417 create_field_var_hist(struct hist_trigger_data *target_hist_data, 2418 char *subsys_name, char *event_name, char *field_name) 2419 { 2420 struct trace_array *tr = target_hist_data->event_file->tr; 2421 struct hist_field *event_var = ERR_PTR(-EINVAL); 2422 struct hist_trigger_data *hist_data; 2423 unsigned int i, n, first = true; 2424 struct field_var_hist *var_hist; 2425 struct trace_event_file *file; 2426 struct hist_field *key_field; 2427 char *saved_filter; 2428 char *cmd; 2429 int ret; 2430 2431 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) { 2432 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); 2433 return ERR_PTR(-EINVAL); 2434 } 2435 2436 file = event_file(tr, subsys_name, event_name); 2437 2438 if (IS_ERR(file)) { 2439 hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name)); 2440 ret = PTR_ERR(file); 2441 return ERR_PTR(ret); 2442 } 2443 2444 /* 2445 * Look for a histogram compatible with target. We'll use the 2446 * found histogram specification to create a new matching 2447 * histogram with our variable on it. target_hist_data is not 2448 * yet a registered histogram so we can't use that. 2449 */ 2450 hist_data = find_compatible_hist(target_hist_data, file); 2451 if (!hist_data) { 2452 hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name)); 2453 return ERR_PTR(-EINVAL); 2454 } 2455 2456 /* See if a synthetic field variable has already been created */ 2457 event_var = find_synthetic_field_var(target_hist_data, subsys_name, 2458 event_name, field_name); 2459 if (!IS_ERR_OR_NULL(event_var)) 2460 return event_var; 2461 2462 var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL); 2463 if (!var_hist) 2464 return ERR_PTR(-ENOMEM); 2465 2466 cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 2467 if (!cmd) { 2468 kfree(var_hist); 2469 return ERR_PTR(-ENOMEM); 2470 } 2471 2472 /* Use the same keys as the compatible histogram */ 2473 strcat(cmd, "keys="); 2474 2475 for_each_hist_key_field(i, hist_data) { 2476 key_field = hist_data->fields[i]; 2477 if (!first) 2478 strcat(cmd, ","); 2479 strcat(cmd, key_field->field->name); 2480 first = false; 2481 } 2482 2483 /* Create the synthetic field variable specification */ 2484 strcat(cmd, ":synthetic_"); 2485 strcat(cmd, field_name); 2486 strcat(cmd, "="); 2487 strcat(cmd, field_name); 2488 2489 /* Use the same filter as the compatible histogram */ 2490 saved_filter = find_trigger_filter(hist_data, file); 2491 if (saved_filter) { 2492 strcat(cmd, " if "); 2493 strcat(cmd, saved_filter); 2494 } 2495 2496 var_hist->cmd = kstrdup(cmd, GFP_KERNEL); 2497 if (!var_hist->cmd) { 2498 kfree(cmd); 2499 kfree(var_hist); 2500 return ERR_PTR(-ENOMEM); 2501 } 2502 2503 /* Save the compatible histogram information */ 2504 var_hist->hist_data = hist_data; 2505 2506 /* Create the new histogram with our variable */ 2507 ret = event_hist_trigger_func(&trigger_hist_cmd, file, 2508 "", "hist", cmd); 2509 if (ret) { 2510 kfree(cmd); 2511 kfree(var_hist->cmd); 2512 kfree(var_hist); 2513 hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name)); 2514 return ERR_PTR(ret); 2515 } 2516 2517 kfree(cmd); 2518 2519 /* If we can't find the variable, something went wrong */ 2520 event_var = find_synthetic_field_var(target_hist_data, subsys_name, 2521 event_name, field_name); 2522 if (IS_ERR_OR_NULL(event_var)) { 2523 kfree(var_hist->cmd); 2524 kfree(var_hist); 2525 hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name)); 2526 return ERR_PTR(-EINVAL); 2527 } 2528 2529 n = 
target_hist_data->n_field_var_hists; 2530 target_hist_data->field_var_hists[n] = var_hist; 2531 target_hist_data->n_field_var_hists++; 2532 2533 return event_var; 2534 } 2535 2536 static struct hist_field * 2537 find_target_event_var(struct hist_trigger_data *hist_data, 2538 char *subsys_name, char *event_name, char *var_name) 2539 { 2540 struct trace_event_file *file = hist_data->event_file; 2541 struct hist_field *hist_field = NULL; 2542 2543 if (subsys_name) { 2544 struct trace_event_call *call; 2545 2546 if (!event_name) 2547 return NULL; 2548 2549 call = file->event_call; 2550 2551 if (strcmp(subsys_name, call->class->system) != 0) 2552 return NULL; 2553 2554 if (strcmp(event_name, trace_event_name(call)) != 0) 2555 return NULL; 2556 } 2557 2558 hist_field = find_var_field(hist_data, var_name); 2559 2560 return hist_field; 2561 } 2562 2563 static inline void __update_field_vars(struct tracing_map_elt *elt, 2564 struct ring_buffer_event *rbe, 2565 void *rec, 2566 struct field_var **field_vars, 2567 unsigned int n_field_vars, 2568 unsigned int field_var_str_start) 2569 { 2570 struct hist_elt_data *elt_data = elt->private_data; 2571 unsigned int i, j, var_idx; 2572 u64 var_val; 2573 2574 for (i = 0, j = field_var_str_start; i < n_field_vars; i++) { 2575 struct field_var *field_var = field_vars[i]; 2576 struct hist_field *var = field_var->var; 2577 struct hist_field *val = field_var->val; 2578 2579 var_val = val->fn(val, elt, rbe, rec); 2580 var_idx = var->var.idx; 2581 2582 if (val->flags & HIST_FIELD_FL_STRING) { 2583 char *str = elt_data->field_var_str[j++]; 2584 char *val_str = (char *)(uintptr_t)var_val; 2585 2586 strscpy(str, val_str, STR_VAR_LEN_MAX); 2587 var_val = (u64)(uintptr_t)str; 2588 } 2589 tracing_map_set_var(elt, var_idx, var_val); 2590 } 2591 } 2592 2593 static void update_field_vars(struct hist_trigger_data *hist_data, 2594 struct tracing_map_elt *elt, 2595 struct ring_buffer_event *rbe, 2596 void *rec) 2597 { 2598 __update_field_vars(elt, rbe, rec, hist_data->field_vars, 2599 hist_data->n_field_vars, 0); 2600 } 2601 2602 static void save_track_data_vars(struct hist_trigger_data *hist_data, 2603 struct tracing_map_elt *elt, void *rec, 2604 struct ring_buffer_event *rbe, void *key, 2605 struct action_data *data, u64 *var_ref_vals) 2606 { 2607 __update_field_vars(elt, rbe, rec, hist_data->save_vars, 2608 hist_data->n_save_vars, hist_data->n_field_var_str); 2609 } 2610 2611 static struct hist_field *create_var(struct hist_trigger_data *hist_data, 2612 struct trace_event_file *file, 2613 char *name, int size, const char *type) 2614 { 2615 struct hist_field *var; 2616 int idx; 2617 2618 if (find_var(hist_data, file, name) && !hist_data->remove) { 2619 var = ERR_PTR(-EINVAL); 2620 goto out; 2621 } 2622 2623 var = kzalloc(sizeof(struct hist_field), GFP_KERNEL); 2624 if (!var) { 2625 var = ERR_PTR(-ENOMEM); 2626 goto out; 2627 } 2628 2629 idx = tracing_map_add_var(hist_data->map); 2630 if (idx < 0) { 2631 kfree(var); 2632 var = ERR_PTR(-EINVAL); 2633 goto out; 2634 } 2635 2636 var->ref = 1; 2637 var->flags = HIST_FIELD_FL_VAR; 2638 var->var.idx = idx; 2639 var->var.hist_data = var->hist_data = hist_data; 2640 var->size = size; 2641 var->var.name = kstrdup(name, GFP_KERNEL); 2642 var->type = kstrdup(type, GFP_KERNEL); 2643 if (!var->var.name || !var->type) { 2644 kfree(var->var.name); 2645 kfree(var->type); 2646 kfree(var); 2647 var = ERR_PTR(-ENOMEM); 2648 } 2649 out: 2650 return var; 2651 } 2652 2653 static struct field_var *create_field_var(struct hist_trigger_data 
*hist_data,
					  struct trace_event_file *file,
					  char *field_name)
{
	struct hist_field *val = NULL, *var = NULL;
	unsigned long flags = HIST_FIELD_FL_VAR;
	struct trace_array *tr = file->tr;
	struct field_var *field_var;
	int ret = 0;

	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
		ret = -EINVAL;
		goto err;
	}

	val = parse_atom(hist_data, file, field_name, &flags, NULL);
	if (IS_ERR(val)) {
		hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
		ret = PTR_ERR(val);
		goto err;
	}

	var = create_var(hist_data, file, field_name, val->size, val->type);
	if (IS_ERR(var)) {
		hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
		kfree(val);
		ret = PTR_ERR(var);
		goto err;
	}

	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
	if (!field_var) {
		kfree(val);
		kfree(var);
		ret = -ENOMEM;
		goto err;
	}

	field_var->var = var;
	field_var->val = val;
 out:
	return field_var;
 err:
	field_var = ERR_PTR(ret);
	goto out;
}

/**
 * create_target_field_var - Automatically create a variable for a field
 * @target_hist_data: The target hist trigger
 * @subsys_name: Optional subsystem name
 * @event_name: Optional event name
 * @var_name: The name of the field (and the resulting variable)
 *
 * Hist trigger actions fetch data from variables, not directly from
 * events. However, for convenience, users are allowed to directly
 * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 *
 * This function creates a field variable with the name var_name on
 * the hist trigger currently being defined on the target event. If
 * subsys_name and event_name are specified, this function simply
 * verifies that they do in fact match the target event subsystem and
 * event name.
 *
 * Return: The variable created for the field.
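 *
 * For example (the action and field names are hypothetical), an
 * action such as onmax($wakeup_lat).save(pid,prio) reaches this
 * function once per save() parameter, creating field variables 'pid'
 * and 'prio' on the histogram being defined so the action can later
 * read their values.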
2720 */ 2721 static struct field_var * 2722 create_target_field_var(struct hist_trigger_data *target_hist_data, 2723 char *subsys_name, char *event_name, char *var_name) 2724 { 2725 struct trace_event_file *file = target_hist_data->event_file; 2726 2727 if (subsys_name) { 2728 struct trace_event_call *call; 2729 2730 if (!event_name) 2731 return NULL; 2732 2733 call = file->event_call; 2734 2735 if (strcmp(subsys_name, call->class->system) != 0) 2736 return NULL; 2737 2738 if (strcmp(event_name, trace_event_name(call)) != 0) 2739 return NULL; 2740 } 2741 2742 return create_field_var(target_hist_data, file, var_name); 2743 } 2744 2745 static bool check_track_val_max(u64 track_val, u64 var_val) 2746 { 2747 if (var_val <= track_val) 2748 return false; 2749 2750 return true; 2751 } 2752 2753 static bool check_track_val_changed(u64 track_val, u64 var_val) 2754 { 2755 if (var_val == track_val) 2756 return false; 2757 2758 return true; 2759 } 2760 2761 static u64 get_track_val(struct hist_trigger_data *hist_data, 2762 struct tracing_map_elt *elt, 2763 struct action_data *data) 2764 { 2765 unsigned int track_var_idx = data->track_data.track_var->var.idx; 2766 u64 track_val; 2767 2768 track_val = tracing_map_read_var(elt, track_var_idx); 2769 2770 return track_val; 2771 } 2772 2773 static void save_track_val(struct hist_trigger_data *hist_data, 2774 struct tracing_map_elt *elt, 2775 struct action_data *data, u64 var_val) 2776 { 2777 unsigned int track_var_idx = data->track_data.track_var->var.idx; 2778 2779 tracing_map_set_var(elt, track_var_idx, var_val); 2780 } 2781 2782 static void save_track_data(struct hist_trigger_data *hist_data, 2783 struct tracing_map_elt *elt, void *rec, 2784 struct ring_buffer_event *rbe, void *key, 2785 struct action_data *data, u64 *var_ref_vals) 2786 { 2787 if (data->track_data.save_data) 2788 data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals); 2789 } 2790 2791 static bool check_track_val(struct tracing_map_elt *elt, 2792 struct action_data *data, 2793 u64 var_val) 2794 { 2795 struct hist_trigger_data *hist_data; 2796 u64 track_val; 2797 2798 hist_data = data->track_data.track_var->hist_data; 2799 track_val = get_track_val(hist_data, elt, data); 2800 2801 return data->track_data.check_val(track_val, var_val); 2802 } 2803 2804 #ifdef CONFIG_TRACER_SNAPSHOT 2805 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) 2806 { 2807 /* called with tr->max_lock held */ 2808 struct track_data *track_data = tr->cond_snapshot->cond_data; 2809 struct hist_elt_data *elt_data, *track_elt_data; 2810 struct snapshot_context *context = cond_data; 2811 struct action_data *action; 2812 u64 track_val; 2813 2814 if (!track_data) 2815 return false; 2816 2817 action = track_data->action_data; 2818 2819 track_val = get_track_val(track_data->hist_data, context->elt, 2820 track_data->action_data); 2821 2822 if (!action->track_data.check_val(track_data->track_val, track_val)) 2823 return false; 2824 2825 track_data->track_val = track_val; 2826 memcpy(track_data->key, context->key, track_data->key_len); 2827 2828 elt_data = context->elt->private_data; 2829 track_elt_data = track_data->elt.private_data; 2830 if (elt_data->comm) 2831 strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN); 2832 2833 track_data->updated = true; 2834 2835 return true; 2836 } 2837 2838 static void save_track_data_snapshot(struct hist_trigger_data *hist_data, 2839 struct tracing_map_elt *elt, void *rec, 2840 struct ring_buffer_event *rbe, void *key, 2841 struct 
action_data *data, 2842 u64 *var_ref_vals) 2843 { 2844 struct trace_event_file *file = hist_data->event_file; 2845 struct snapshot_context context; 2846 2847 context.elt = elt; 2848 context.key = key; 2849 2850 tracing_snapshot_cond(file->tr, &context); 2851 } 2852 2853 static void hist_trigger_print_key(struct seq_file *m, 2854 struct hist_trigger_data *hist_data, 2855 void *key, 2856 struct tracing_map_elt *elt); 2857 2858 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data) 2859 { 2860 unsigned int i; 2861 2862 if (!hist_data->n_actions) 2863 return NULL; 2864 2865 for (i = 0; i < hist_data->n_actions; i++) { 2866 struct action_data *data = hist_data->actions[i]; 2867 2868 if (data->action == ACTION_SNAPSHOT) 2869 return data; 2870 } 2871 2872 return NULL; 2873 } 2874 2875 static void track_data_snapshot_print(struct seq_file *m, 2876 struct hist_trigger_data *hist_data) 2877 { 2878 struct trace_event_file *file = hist_data->event_file; 2879 struct track_data *track_data; 2880 struct action_data *action; 2881 2882 track_data = tracing_cond_snapshot_data(file->tr); 2883 if (!track_data) 2884 return; 2885 2886 if (!track_data->updated) 2887 return; 2888 2889 action = snapshot_action(hist_data); 2890 if (!action) 2891 return; 2892 2893 seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n"); 2894 seq_printf(m, "\ttriggering value { %s(%s) }: %10llu", 2895 action->handler == HANDLER_ONMAX ? "onmax" : "onchange", 2896 action->track_data.var_str, track_data->track_val); 2897 2898 seq_puts(m, "\ttriggered by event with key: "); 2899 hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt); 2900 seq_putc(m, '\n'); 2901 } 2902 #else 2903 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) 2904 { 2905 return false; 2906 } 2907 static void save_track_data_snapshot(struct hist_trigger_data *hist_data, 2908 struct tracing_map_elt *elt, void *rec, 2909 struct ring_buffer_event *rbe, void *key, 2910 struct action_data *data, 2911 u64 *var_ref_vals) {} 2912 static void track_data_snapshot_print(struct seq_file *m, 2913 struct hist_trigger_data *hist_data) {} 2914 #endif /* CONFIG_TRACER_SNAPSHOT */ 2915 2916 static void track_data_print(struct seq_file *m, 2917 struct hist_trigger_data *hist_data, 2918 struct tracing_map_elt *elt, 2919 struct action_data *data) 2920 { 2921 u64 track_val = get_track_val(hist_data, elt, data); 2922 unsigned int i, save_var_idx; 2923 2924 if (data->handler == HANDLER_ONMAX) 2925 seq_printf(m, "\n\tmax: %10llu", track_val); 2926 else if (data->handler == HANDLER_ONCHANGE) 2927 seq_printf(m, "\n\tchanged: %10llu", track_val); 2928 2929 if (data->action == ACTION_SNAPSHOT) 2930 return; 2931 2932 for (i = 0; i < hist_data->n_save_vars; i++) { 2933 struct hist_field *save_val = hist_data->save_vars[i]->val; 2934 struct hist_field *save_var = hist_data->save_vars[i]->var; 2935 u64 val; 2936 2937 save_var_idx = save_var->var.idx; 2938 2939 val = tracing_map_read_var(elt, save_var_idx); 2940 2941 if (save_val->flags & HIST_FIELD_FL_STRING) { 2942 seq_printf(m, " %s: %-32s", save_var->var.name, 2943 (char *)(uintptr_t)(val)); 2944 } else 2945 seq_printf(m, " %s: %10llu", save_var->var.name, val); 2946 } 2947 } 2948 2949 static void ontrack_action(struct hist_trigger_data *hist_data, 2950 struct tracing_map_elt *elt, void *rec, 2951 struct ring_buffer_event *rbe, void *key, 2952 struct action_data *data, u64 *var_ref_vals) 2953 { 2954 u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx]; 2955 
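	/*
	 * Sketch of the flow (the variable name is hypothetical): for
	 * onmax($lat), var_val is the freshly resolved value of $lat for
	 * this event.  check_track_val() compares it against the value
	 * stored in the map's "__max" (or "__change") tracking variable,
	 * and only a new maximum (or a changed value) updates the tracking
	 * variable and runs the handler's save/snapshot/trace callback.
	 */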
2956 if (check_track_val(elt, data, var_val)) { 2957 save_track_val(hist_data, elt, data, var_val); 2958 save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals); 2959 } 2960 } 2961 2962 static void action_data_destroy(struct action_data *data) 2963 { 2964 unsigned int i; 2965 2966 lockdep_assert_held(&event_mutex); 2967 2968 kfree(data->action_name); 2969 2970 for (i = 0; i < data->n_params; i++) 2971 kfree(data->params[i]); 2972 2973 if (data->synth_event) 2974 data->synth_event->ref--; 2975 2976 kfree(data->synth_event_name); 2977 2978 kfree(data); 2979 } 2980 2981 static void track_data_destroy(struct hist_trigger_data *hist_data, 2982 struct action_data *data) 2983 { 2984 struct trace_event_file *file = hist_data->event_file; 2985 2986 destroy_hist_field(data->track_data.track_var, 0); 2987 2988 if (data->action == ACTION_SNAPSHOT) { 2989 struct track_data *track_data; 2990 2991 track_data = tracing_cond_snapshot_data(file->tr); 2992 if (track_data && track_data->hist_data == hist_data) { 2993 tracing_snapshot_cond_disable(file->tr); 2994 track_data_free(track_data); 2995 } 2996 } 2997 2998 kfree(data->track_data.var_str); 2999 3000 action_data_destroy(data); 3001 } 3002 3003 static int action_create(struct hist_trigger_data *hist_data, 3004 struct action_data *data); 3005 3006 static int track_data_create(struct hist_trigger_data *hist_data, 3007 struct action_data *data) 3008 { 3009 struct hist_field *var_field, *ref_field, *track_var = NULL; 3010 struct trace_event_file *file = hist_data->event_file; 3011 struct trace_array *tr = file->tr; 3012 char *track_data_var_str; 3013 int ret = 0; 3014 3015 track_data_var_str = data->track_data.var_str; 3016 if (track_data_var_str[0] != '$') { 3017 hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str)); 3018 return -EINVAL; 3019 } 3020 track_data_var_str++; 3021 3022 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str); 3023 if (!var_field) { 3024 hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str)); 3025 return -EINVAL; 3026 } 3027 3028 ref_field = create_var_ref(hist_data, var_field, NULL, NULL); 3029 if (!ref_field) 3030 return -ENOMEM; 3031 3032 data->track_data.var_ref = ref_field; 3033 3034 if (data->handler == HANDLER_ONMAX) 3035 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64"); 3036 if (IS_ERR(track_var)) { 3037 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3038 ret = PTR_ERR(track_var); 3039 goto out; 3040 } 3041 3042 if (data->handler == HANDLER_ONCHANGE) 3043 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64"); 3044 if (IS_ERR(track_var)) { 3045 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3046 ret = PTR_ERR(track_var); 3047 goto out; 3048 } 3049 data->track_data.track_var = track_var; 3050 3051 ret = action_create(hist_data, data); 3052 out: 3053 return ret; 3054 } 3055 3056 static int parse_action_params(struct trace_array *tr, char *params, 3057 struct action_data *data) 3058 { 3059 char *param, *saved_param; 3060 bool first_param = true; 3061 int ret = 0; 3062 3063 while (params) { 3064 if (data->n_params >= SYNTH_FIELDS_MAX) { 3065 hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0); 3066 goto out; 3067 } 3068 3069 param = strsep(¶ms, ","); 3070 if (!param) { 3071 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0); 3072 ret = -EINVAL; 3073 goto out; 3074 } 3075 3076 param = strstrip(param); 3077 if (strlen(param) < 2) { 3078 hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param)); 3079 ret = -EINVAL; 3080 goto out; 3081 } 3082 3083 saved_param = 
kstrdup(param, GFP_KERNEL); 3084 if (!saved_param) { 3085 ret = -ENOMEM; 3086 goto out; 3087 } 3088 3089 if (first_param && data->use_trace_keyword) { 3090 data->synth_event_name = saved_param; 3091 first_param = false; 3092 continue; 3093 } 3094 first_param = false; 3095 3096 data->params[data->n_params++] = saved_param; 3097 } 3098 out: 3099 return ret; 3100 } 3101 3102 static int action_parse(struct trace_array *tr, char *str, struct action_data *data, 3103 enum handler_id handler) 3104 { 3105 char *action_name; 3106 int ret = 0; 3107 3108 strsep(&str, "."); 3109 if (!str) { 3110 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3111 ret = -EINVAL; 3112 goto out; 3113 } 3114 3115 action_name = strsep(&str, "("); 3116 if (!action_name || !str) { 3117 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3118 ret = -EINVAL; 3119 goto out; 3120 } 3121 3122 if (str_has_prefix(action_name, "save")) { 3123 char *params = strsep(&str, ")"); 3124 3125 if (!params) { 3126 hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0); 3127 ret = -EINVAL; 3128 goto out; 3129 } 3130 3131 ret = parse_action_params(tr, params, data); 3132 if (ret) 3133 goto out; 3134 3135 if (handler == HANDLER_ONMAX) 3136 data->track_data.check_val = check_track_val_max; 3137 else if (handler == HANDLER_ONCHANGE) 3138 data->track_data.check_val = check_track_val_changed; 3139 else { 3140 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3141 ret = -EINVAL; 3142 goto out; 3143 } 3144 3145 data->track_data.save_data = save_track_data_vars; 3146 data->fn = ontrack_action; 3147 data->action = ACTION_SAVE; 3148 } else if (str_has_prefix(action_name, "snapshot")) { 3149 char *params = strsep(&str, ")"); 3150 3151 if (!str) { 3152 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params)); 3153 ret = -EINVAL; 3154 goto out; 3155 } 3156 3157 if (handler == HANDLER_ONMAX) 3158 data->track_data.check_val = check_track_val_max; 3159 else if (handler == HANDLER_ONCHANGE) 3160 data->track_data.check_val = check_track_val_changed; 3161 else { 3162 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3163 ret = -EINVAL; 3164 goto out; 3165 } 3166 3167 data->track_data.save_data = save_track_data_snapshot; 3168 data->fn = ontrack_action; 3169 data->action = ACTION_SNAPSHOT; 3170 } else { 3171 char *params = strsep(&str, ")"); 3172 3173 if (str_has_prefix(action_name, "trace")) 3174 data->use_trace_keyword = true; 3175 3176 if (params) { 3177 ret = parse_action_params(tr, params, data); 3178 if (ret) 3179 goto out; 3180 } 3181 3182 if (handler == HANDLER_ONMAX) 3183 data->track_data.check_val = check_track_val_max; 3184 else if (handler == HANDLER_ONCHANGE) 3185 data->track_data.check_val = check_track_val_changed; 3186 3187 if (handler != HANDLER_ONMATCH) { 3188 data->track_data.save_data = action_trace; 3189 data->fn = ontrack_action; 3190 } else 3191 data->fn = action_trace; 3192 3193 data->action = ACTION_TRACE; 3194 } 3195 3196 data->action_name = kstrdup(action_name, GFP_KERNEL); 3197 if (!data->action_name) { 3198 ret = -ENOMEM; 3199 goto out; 3200 } 3201 3202 data->handler = handler; 3203 out: 3204 return ret; 3205 } 3206 3207 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data, 3208 char *str, enum handler_id handler) 3209 { 3210 struct action_data *data; 3211 int ret = -EINVAL; 3212 char *var_str; 3213 3214 data = kzalloc(sizeof(*data), GFP_KERNEL); 3215 if (!data) 3216 return ERR_PTR(-ENOMEM); 3217 3218 var_str = strsep(&str, ")"); 3219 if (!var_str || !str) { 3220 ret = -EINVAL; 3221 goto free; 3222 } 3223 3224 
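	/*
	 * var_str is everything up to the closing paren and str now points
	 * past it, e.g. (names hypothetical) for "onmax($wakeup_lat).save(comm)"
	 * the caller passes in "$wakeup_lat).save(comm)", so var_str is
	 * "$wakeup_lat" and ".save(comm)" is handed to action_parse() below.
	 */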
data->track_data.var_str = kstrdup(var_str, GFP_KERNEL); 3225 if (!data->track_data.var_str) { 3226 ret = -ENOMEM; 3227 goto free; 3228 } 3229 3230 ret = action_parse(hist_data->event_file->tr, str, data, handler); 3231 if (ret) 3232 goto free; 3233 out: 3234 return data; 3235 free: 3236 track_data_destroy(hist_data, data); 3237 data = ERR_PTR(ret); 3238 goto out; 3239 } 3240 3241 static void onmatch_destroy(struct action_data *data) 3242 { 3243 kfree(data->match_data.event); 3244 kfree(data->match_data.event_system); 3245 3246 action_data_destroy(data); 3247 } 3248 3249 static void destroy_field_var(struct field_var *field_var) 3250 { 3251 if (!field_var) 3252 return; 3253 3254 destroy_hist_field(field_var->var, 0); 3255 destroy_hist_field(field_var->val, 0); 3256 3257 kfree(field_var); 3258 } 3259 3260 static void destroy_field_vars(struct hist_trigger_data *hist_data) 3261 { 3262 unsigned int i; 3263 3264 for (i = 0; i < hist_data->n_field_vars; i++) 3265 destroy_field_var(hist_data->field_vars[i]); 3266 3267 for (i = 0; i < hist_data->n_save_vars; i++) 3268 destroy_field_var(hist_data->save_vars[i]); 3269 } 3270 3271 static void save_field_var(struct hist_trigger_data *hist_data, 3272 struct field_var *field_var) 3273 { 3274 hist_data->field_vars[hist_data->n_field_vars++] = field_var; 3275 3276 if (field_var->val->flags & HIST_FIELD_FL_STRING) 3277 hist_data->n_field_var_str++; 3278 } 3279 3280 3281 static int check_synth_field(struct synth_event *event, 3282 struct hist_field *hist_field, 3283 unsigned int field_pos) 3284 { 3285 struct synth_field *field; 3286 3287 if (field_pos >= event->n_fields) 3288 return -EINVAL; 3289 3290 field = event->fields[field_pos]; 3291 3292 /* 3293 * A dynamic string synth field can accept static or 3294 * dynamic. A static string synth field can only accept a 3295 * same-sized static string, which is checked for later. 3296 */ 3297 if (strstr(hist_field->type, "char[") && field->is_string 3298 && field->is_dynamic) 3299 return 0; 3300 3301 if (strcmp(field->type, hist_field->type) != 0) { 3302 if (field->size != hist_field->size || 3303 field->is_signed != hist_field->is_signed) 3304 return -EINVAL; 3305 } 3306 3307 return 0; 3308 } 3309 3310 static struct hist_field * 3311 trace_action_find_var(struct hist_trigger_data *hist_data, 3312 struct action_data *data, 3313 char *system, char *event, char *var) 3314 { 3315 struct trace_array *tr = hist_data->event_file->tr; 3316 struct hist_field *hist_field; 3317 3318 var++; /* skip '$' */ 3319 3320 hist_field = find_target_event_var(hist_data, system, event, var); 3321 if (!hist_field) { 3322 if (!system && data->handler == HANDLER_ONMATCH) { 3323 system = data->match_data.event_system; 3324 event = data->match_data.event; 3325 } 3326 3327 hist_field = find_event_var(hist_data, system, event, var); 3328 } 3329 3330 if (!hist_field) 3331 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var)); 3332 3333 return hist_field; 3334 } 3335 3336 static struct hist_field * 3337 trace_action_create_field_var(struct hist_trigger_data *hist_data, 3338 struct action_data *data, char *system, 3339 char *event, char *var) 3340 { 3341 struct hist_field *hist_field = NULL; 3342 struct field_var *field_var; 3343 3344 /* 3345 * First try to create a field var on the target event (the 3346 * currently being defined). This will create a variable for 3347 * unqualified fields on the target event, or if qualified, 3348 * target fields that have qualified names matching the target. 
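	 *
	 * E.g. (the synthetic event and field names are hypothetical): for
	 * a trace(wakeup_latency,$lat,prio) action, the bare parameter
	 * 'prio' is created as a field variable on the target event itself,
	 * whereas a parameter qualified with some other system.event falls
	 * through to create_field_var_hist() below.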
3349 */ 3350 field_var = create_target_field_var(hist_data, system, event, var); 3351 3352 if (field_var && !IS_ERR(field_var)) { 3353 save_field_var(hist_data, field_var); 3354 hist_field = field_var->var; 3355 } else { 3356 field_var = NULL; 3357 /* 3358 * If no explicit system.event is specified, default to 3359 * looking for fields on the onmatch(system.event.xxx) 3360 * event. 3361 */ 3362 if (!system && data->handler == HANDLER_ONMATCH) { 3363 system = data->match_data.event_system; 3364 event = data->match_data.event; 3365 } 3366 3367 /* 3368 * At this point, we're looking at a field on another 3369 * event. Because we can't modify a hist trigger on 3370 * another event to add a variable for a field, we need 3371 * to create a new trigger on that event and create the 3372 * variable at the same time. 3373 */ 3374 hist_field = create_field_var_hist(hist_data, system, event, var); 3375 if (IS_ERR(hist_field)) 3376 goto free; 3377 } 3378 out: 3379 return hist_field; 3380 free: 3381 destroy_field_var(field_var); 3382 hist_field = NULL; 3383 goto out; 3384 } 3385 3386 static int trace_action_create(struct hist_trigger_data *hist_data, 3387 struct action_data *data) 3388 { 3389 struct trace_array *tr = hist_data->event_file->tr; 3390 char *event_name, *param, *system = NULL; 3391 struct hist_field *hist_field, *var_ref; 3392 unsigned int i; 3393 unsigned int field_pos = 0; 3394 struct synth_event *event; 3395 char *synth_event_name; 3396 int var_ref_idx, ret = 0; 3397 3398 lockdep_assert_held(&event_mutex); 3399 3400 if (data->use_trace_keyword) 3401 synth_event_name = data->synth_event_name; 3402 else 3403 synth_event_name = data->action_name; 3404 3405 event = find_synth_event(synth_event_name); 3406 if (!event) { 3407 hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name)); 3408 return -EINVAL; 3409 } 3410 3411 event->ref++; 3412 3413 for (i = 0; i < data->n_params; i++) { 3414 char *p; 3415 3416 p = param = kstrdup(data->params[i], GFP_KERNEL); 3417 if (!param) { 3418 ret = -ENOMEM; 3419 goto err; 3420 } 3421 3422 system = strsep(¶m, "."); 3423 if (!param) { 3424 param = (char *)system; 3425 system = event_name = NULL; 3426 } else { 3427 event_name = strsep(¶m, "."); 3428 if (!param) { 3429 kfree(p); 3430 ret = -EINVAL; 3431 goto err; 3432 } 3433 } 3434 3435 if (param[0] == '$') 3436 hist_field = trace_action_find_var(hist_data, data, 3437 system, event_name, 3438 param); 3439 else 3440 hist_field = trace_action_create_field_var(hist_data, 3441 data, 3442 system, 3443 event_name, 3444 param); 3445 3446 if (!hist_field) { 3447 kfree(p); 3448 ret = -EINVAL; 3449 goto err; 3450 } 3451 3452 if (check_synth_field(event, hist_field, field_pos) == 0) { 3453 var_ref = create_var_ref(hist_data, hist_field, 3454 system, event_name); 3455 if (!var_ref) { 3456 kfree(p); 3457 ret = -ENOMEM; 3458 goto err; 3459 } 3460 3461 var_ref_idx = find_var_ref_idx(hist_data, var_ref); 3462 if (WARN_ON(var_ref_idx < 0)) { 3463 ret = var_ref_idx; 3464 goto err; 3465 } 3466 3467 data->var_ref_idx[i] = var_ref_idx; 3468 3469 field_pos++; 3470 kfree(p); 3471 continue; 3472 } 3473 3474 hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param)); 3475 kfree(p); 3476 ret = -EINVAL; 3477 goto err; 3478 } 3479 3480 if (field_pos != event->n_fields) { 3481 hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name)); 3482 ret = -EINVAL; 3483 goto err; 3484 } 3485 3486 data->synth_event = event; 3487 out: 3488 return ret; 3489 err: 3490 event->ref--; 3491 3492 goto out; 3493 } 3494 3495 static int 
action_create(struct hist_trigger_data *hist_data, 3496 struct action_data *data) 3497 { 3498 struct trace_event_file *file = hist_data->event_file; 3499 struct trace_array *tr = file->tr; 3500 struct track_data *track_data; 3501 struct field_var *field_var; 3502 unsigned int i; 3503 char *param; 3504 int ret = 0; 3505 3506 if (data->action == ACTION_TRACE) 3507 return trace_action_create(hist_data, data); 3508 3509 if (data->action == ACTION_SNAPSHOT) { 3510 track_data = track_data_alloc(hist_data->key_size, data, hist_data); 3511 if (IS_ERR(track_data)) { 3512 ret = PTR_ERR(track_data); 3513 goto out; 3514 } 3515 3516 ret = tracing_snapshot_cond_enable(file->tr, track_data, 3517 cond_snapshot_update); 3518 if (ret) 3519 track_data_free(track_data); 3520 3521 goto out; 3522 } 3523 3524 if (data->action == ACTION_SAVE) { 3525 if (hist_data->n_save_vars) { 3526 ret = -EEXIST; 3527 hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0); 3528 goto out; 3529 } 3530 3531 for (i = 0; i < data->n_params; i++) { 3532 param = kstrdup(data->params[i], GFP_KERNEL); 3533 if (!param) { 3534 ret = -ENOMEM; 3535 goto out; 3536 } 3537 3538 field_var = create_target_field_var(hist_data, NULL, NULL, param); 3539 if (IS_ERR(field_var)) { 3540 hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL, 3541 errpos(param)); 3542 ret = PTR_ERR(field_var); 3543 kfree(param); 3544 goto out; 3545 } 3546 3547 hist_data->save_vars[hist_data->n_save_vars++] = field_var; 3548 if (field_var->val->flags & HIST_FIELD_FL_STRING) 3549 hist_data->n_save_var_str++; 3550 kfree(param); 3551 } 3552 } 3553 out: 3554 return ret; 3555 } 3556 3557 static int onmatch_create(struct hist_trigger_data *hist_data, 3558 struct action_data *data) 3559 { 3560 return action_create(hist_data, data); 3561 } 3562 3563 static struct action_data *onmatch_parse(struct trace_array *tr, char *str) 3564 { 3565 char *match_event, *match_event_system; 3566 struct action_data *data; 3567 int ret = -EINVAL; 3568 3569 data = kzalloc(sizeof(*data), GFP_KERNEL); 3570 if (!data) 3571 return ERR_PTR(-ENOMEM); 3572 3573 match_event = strsep(&str, ")"); 3574 if (!match_event || !str) { 3575 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event)); 3576 goto free; 3577 } 3578 3579 match_event_system = strsep(&match_event, "."); 3580 if (!match_event) { 3581 hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system)); 3582 goto free; 3583 } 3584 3585 if (IS_ERR(event_file(tr, match_event_system, match_event))) { 3586 hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event)); 3587 goto free; 3588 } 3589 3590 data->match_data.event = kstrdup(match_event, GFP_KERNEL); 3591 if (!data->match_data.event) { 3592 ret = -ENOMEM; 3593 goto free; 3594 } 3595 3596 data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL); 3597 if (!data->match_data.event_system) { 3598 ret = -ENOMEM; 3599 goto free; 3600 } 3601 3602 ret = action_parse(tr, str, data, HANDLER_ONMATCH); 3603 if (ret) 3604 goto free; 3605 out: 3606 return data; 3607 free: 3608 onmatch_destroy(data); 3609 data = ERR_PTR(ret); 3610 goto out; 3611 } 3612 3613 static int create_hitcount_val(struct hist_trigger_data *hist_data) 3614 { 3615 hist_data->fields[HITCOUNT_IDX] = 3616 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL); 3617 if (!hist_data->fields[HITCOUNT_IDX]) 3618 return -ENOMEM; 3619 3620 hist_data->n_vals++; 3621 hist_data->n_fields++; 3622 3623 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX)) 3624 return -EINVAL; 3625 3626 return 0; 3627 } 3628 3629 static int 
__create_val_field(struct hist_trigger_data *hist_data, 3630 unsigned int val_idx, 3631 struct trace_event_file *file, 3632 char *var_name, char *field_str, 3633 unsigned long flags) 3634 { 3635 struct hist_field *hist_field; 3636 int ret = 0; 3637 3638 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0); 3639 if (IS_ERR(hist_field)) { 3640 ret = PTR_ERR(hist_field); 3641 goto out; 3642 } 3643 3644 hist_data->fields[val_idx] = hist_field; 3645 3646 ++hist_data->n_vals; 3647 ++hist_data->n_fields; 3648 3649 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 3650 ret = -EINVAL; 3651 out: 3652 return ret; 3653 } 3654 3655 static int create_val_field(struct hist_trigger_data *hist_data, 3656 unsigned int val_idx, 3657 struct trace_event_file *file, 3658 char *field_str) 3659 { 3660 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX)) 3661 return -EINVAL; 3662 3663 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0); 3664 } 3665 3666 static int create_var_field(struct hist_trigger_data *hist_data, 3667 unsigned int val_idx, 3668 struct trace_event_file *file, 3669 char *var_name, char *expr_str) 3670 { 3671 struct trace_array *tr = hist_data->event_file->tr; 3672 unsigned long flags = 0; 3673 int ret; 3674 3675 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 3676 return -EINVAL; 3677 3678 if (find_var(hist_data, file, var_name) && !hist_data->remove) { 3679 hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name)); 3680 return -EINVAL; 3681 } 3682 3683 flags |= HIST_FIELD_FL_VAR; 3684 hist_data->n_vars++; 3685 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX)) 3686 return -EINVAL; 3687 3688 ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags); 3689 3690 if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_STRING) 3691 hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++; 3692 3693 return ret; 3694 } 3695 3696 static int create_val_fields(struct hist_trigger_data *hist_data, 3697 struct trace_event_file *file) 3698 { 3699 char *fields_str, *field_str; 3700 unsigned int i, j = 1; 3701 int ret; 3702 3703 ret = create_hitcount_val(hist_data); 3704 if (ret) 3705 goto out; 3706 3707 fields_str = hist_data->attrs->vals_str; 3708 if (!fields_str) 3709 goto out; 3710 3711 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX && 3712 j < TRACING_MAP_VALS_MAX; i++) { 3713 field_str = strsep(&fields_str, ","); 3714 if (!field_str) 3715 break; 3716 3717 if (strcmp(field_str, "hitcount") == 0) 3718 continue; 3719 3720 ret = create_val_field(hist_data, j++, file, field_str); 3721 if (ret) 3722 goto out; 3723 } 3724 3725 if (fields_str && (strcmp(fields_str, "hitcount") != 0)) 3726 ret = -EINVAL; 3727 out: 3728 return ret; 3729 } 3730 3731 static int create_key_field(struct hist_trigger_data *hist_data, 3732 unsigned int key_idx, 3733 unsigned int key_offset, 3734 struct trace_event_file *file, 3735 char *field_str) 3736 { 3737 struct trace_array *tr = hist_data->event_file->tr; 3738 struct hist_field *hist_field = NULL; 3739 unsigned long flags = 0; 3740 unsigned int key_size; 3741 int ret = 0; 3742 3743 if (WARN_ON(key_idx >= HIST_FIELDS_MAX)) 3744 return -EINVAL; 3745 3746 flags |= HIST_FIELD_FL_KEY; 3747 3748 if (strcmp(field_str, "stacktrace") == 0) { 3749 flags |= HIST_FIELD_FL_STACKTRACE; 3750 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH; 3751 hist_field = create_hist_field(hist_data, NULL, flags, NULL); 3752 } else { 3753 hist_field = parse_expr(hist_data, file, field_str, 
flags, 3754 NULL, 0); 3755 if (IS_ERR(hist_field)) { 3756 ret = PTR_ERR(hist_field); 3757 goto out; 3758 } 3759 3760 if (field_has_hist_vars(hist_field, 0)) { 3761 hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str)); 3762 destroy_hist_field(hist_field, 0); 3763 ret = -EINVAL; 3764 goto out; 3765 } 3766 3767 key_size = hist_field->size; 3768 } 3769 3770 hist_data->fields[key_idx] = hist_field; 3771 3772 key_size = ALIGN(key_size, sizeof(u64)); 3773 hist_data->fields[key_idx]->size = key_size; 3774 hist_data->fields[key_idx]->offset = key_offset; 3775 3776 hist_data->key_size += key_size; 3777 3778 if (hist_data->key_size > HIST_KEY_SIZE_MAX) { 3779 ret = -EINVAL; 3780 goto out; 3781 } 3782 3783 hist_data->n_keys++; 3784 hist_data->n_fields++; 3785 3786 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX)) 3787 return -EINVAL; 3788 3789 ret = key_size; 3790 out: 3791 return ret; 3792 } 3793 3794 static int create_key_fields(struct hist_trigger_data *hist_data, 3795 struct trace_event_file *file) 3796 { 3797 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals; 3798 char *fields_str, *field_str; 3799 int ret = -EINVAL; 3800 3801 fields_str = hist_data->attrs->keys_str; 3802 if (!fields_str) 3803 goto out; 3804 3805 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) { 3806 field_str = strsep(&fields_str, ","); 3807 if (!field_str) 3808 break; 3809 ret = create_key_field(hist_data, i, key_offset, 3810 file, field_str); 3811 if (ret < 0) 3812 goto out; 3813 key_offset += ret; 3814 } 3815 if (fields_str) { 3816 ret = -EINVAL; 3817 goto out; 3818 } 3819 ret = 0; 3820 out: 3821 return ret; 3822 } 3823 3824 static int create_var_fields(struct hist_trigger_data *hist_data, 3825 struct trace_event_file *file) 3826 { 3827 unsigned int i, j = hist_data->n_vals; 3828 int ret = 0; 3829 3830 unsigned int n_vars = hist_data->attrs->var_defs.n_vars; 3831 3832 for (i = 0; i < n_vars; i++) { 3833 char *var_name = hist_data->attrs->var_defs.name[i]; 3834 char *expr = hist_data->attrs->var_defs.expr[i]; 3835 3836 ret = create_var_field(hist_data, j++, file, var_name, expr); 3837 if (ret) 3838 goto out; 3839 } 3840 out: 3841 return ret; 3842 } 3843 3844 static void free_var_defs(struct hist_trigger_data *hist_data) 3845 { 3846 unsigned int i; 3847 3848 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { 3849 kfree(hist_data->attrs->var_defs.name[i]); 3850 kfree(hist_data->attrs->var_defs.expr[i]); 3851 } 3852 3853 hist_data->attrs->var_defs.n_vars = 0; 3854 } 3855 3856 static int parse_var_defs(struct hist_trigger_data *hist_data) 3857 { 3858 struct trace_array *tr = hist_data->event_file->tr; 3859 char *s, *str, *var_name, *field_str; 3860 unsigned int i, j, n_vars = 0; 3861 int ret = 0; 3862 3863 for (i = 0; i < hist_data->attrs->n_assignments; i++) { 3864 str = hist_data->attrs->assignment_str[i]; 3865 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) { 3866 field_str = strsep(&str, ","); 3867 if (!field_str) 3868 break; 3869 3870 var_name = strsep(&field_str, "="); 3871 if (!var_name || !field_str) { 3872 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT, 3873 errpos(var_name)); 3874 ret = -EINVAL; 3875 goto free; 3876 } 3877 3878 if (n_vars == TRACING_MAP_VARS_MAX) { 3879 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name)); 3880 ret = -EINVAL; 3881 goto free; 3882 } 3883 3884 s = kstrdup(var_name, GFP_KERNEL); 3885 if (!s) { 3886 ret = -ENOMEM; 3887 goto free; 3888 } 3889 hist_data->attrs->var_defs.name[n_vars] = s; 3890 3891 s = kstrdup(field_str, GFP_KERNEL); 3892 if (!s) { 3893 ret = 
-ENOMEM; 3894 goto free; 3895 } 3896 hist_data->attrs->var_defs.expr[n_vars++] = s; 3897 3898 hist_data->attrs->var_defs.n_vars = n_vars; 3899 } 3900 } 3901 3902 return ret; 3903 free: 3904 free_var_defs(hist_data); 3905 3906 return ret; 3907 } 3908 3909 static int create_hist_fields(struct hist_trigger_data *hist_data, 3910 struct trace_event_file *file) 3911 { 3912 int ret; 3913 3914 ret = parse_var_defs(hist_data); 3915 if (ret) 3916 goto out; 3917 3918 ret = create_val_fields(hist_data, file); 3919 if (ret) 3920 goto out; 3921 3922 ret = create_var_fields(hist_data, file); 3923 if (ret) 3924 goto out; 3925 3926 ret = create_key_fields(hist_data, file); 3927 if (ret) 3928 goto out; 3929 out: 3930 free_var_defs(hist_data); 3931 3932 return ret; 3933 } 3934 3935 static int is_descending(struct trace_array *tr, const char *str) 3936 { 3937 if (!str) 3938 return 0; 3939 3940 if (strcmp(str, "descending") == 0) 3941 return 1; 3942 3943 if (strcmp(str, "ascending") == 0) 3944 return 0; 3945 3946 hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str)); 3947 3948 return -EINVAL; 3949 } 3950 3951 static int create_sort_keys(struct hist_trigger_data *hist_data) 3952 { 3953 struct trace_array *tr = hist_data->event_file->tr; 3954 char *fields_str = hist_data->attrs->sort_key_str; 3955 struct tracing_map_sort_key *sort_key; 3956 int descending, ret = 0; 3957 unsigned int i, j, k; 3958 3959 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */ 3960 3961 if (!fields_str) 3962 goto out; 3963 3964 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) { 3965 struct hist_field *hist_field; 3966 char *field_str, *field_name; 3967 const char *test_name; 3968 3969 sort_key = &hist_data->sort_keys[i]; 3970 3971 field_str = strsep(&fields_str, ","); 3972 if (!field_str) 3973 break; 3974 3975 if (!*field_str) { 3976 ret = -EINVAL; 3977 hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort=")); 3978 break; 3979 } 3980 3981 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) { 3982 hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort=")); 3983 ret = -EINVAL; 3984 break; 3985 } 3986 3987 field_name = strsep(&field_str, "."); 3988 if (!field_name || !*field_name) { 3989 ret = -EINVAL; 3990 hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort=")); 3991 break; 3992 } 3993 3994 if (strcmp(field_name, "hitcount") == 0) { 3995 descending = is_descending(tr, field_str); 3996 if (descending < 0) { 3997 ret = descending; 3998 break; 3999 } 4000 sort_key->descending = descending; 4001 continue; 4002 } 4003 4004 for (j = 1, k = 1; j < hist_data->n_fields; j++) { 4005 unsigned int idx; 4006 4007 hist_field = hist_data->fields[j]; 4008 if (hist_field->flags & HIST_FIELD_FL_VAR) 4009 continue; 4010 4011 idx = k++; 4012 4013 test_name = hist_field_name(hist_field, 0); 4014 4015 if (strcmp(field_name, test_name) == 0) { 4016 sort_key->field_idx = idx; 4017 descending = is_descending(tr, field_str); 4018 if (descending < 0) { 4019 ret = descending; 4020 goto out; 4021 } 4022 sort_key->descending = descending; 4023 break; 4024 } 4025 } 4026 if (j == hist_data->n_fields) { 4027 ret = -EINVAL; 4028 hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name)); 4029 break; 4030 } 4031 } 4032 4033 hist_data->n_sort_keys = i; 4034 out: 4035 return ret; 4036 } 4037 4038 static void destroy_actions(struct hist_trigger_data *hist_data) 4039 { 4040 unsigned int i; 4041 4042 for (i = 0; i < hist_data->n_actions; i++) { 4043 struct action_data *data = hist_data->actions[i]; 4044 4045 if (data->handler == 
HANDLER_ONMATCH) 4046 onmatch_destroy(data); 4047 else if (data->handler == HANDLER_ONMAX || 4048 data->handler == HANDLER_ONCHANGE) 4049 track_data_destroy(hist_data, data); 4050 else 4051 kfree(data); 4052 } 4053 } 4054 4055 static int parse_actions(struct hist_trigger_data *hist_data) 4056 { 4057 struct trace_array *tr = hist_data->event_file->tr; 4058 struct action_data *data; 4059 unsigned int i; 4060 int ret = 0; 4061 char *str; 4062 int len; 4063 4064 for (i = 0; i < hist_data->attrs->n_actions; i++) { 4065 str = hist_data->attrs->action_str[i]; 4066 4067 if ((len = str_has_prefix(str, "onmatch("))) { 4068 char *action_str = str + len; 4069 4070 data = onmatch_parse(tr, action_str); 4071 if (IS_ERR(data)) { 4072 ret = PTR_ERR(data); 4073 break; 4074 } 4075 } else if ((len = str_has_prefix(str, "onmax("))) { 4076 char *action_str = str + len; 4077 4078 data = track_data_parse(hist_data, action_str, 4079 HANDLER_ONMAX); 4080 if (IS_ERR(data)) { 4081 ret = PTR_ERR(data); 4082 break; 4083 } 4084 } else if ((len = str_has_prefix(str, "onchange("))) { 4085 char *action_str = str + len; 4086 4087 data = track_data_parse(hist_data, action_str, 4088 HANDLER_ONCHANGE); 4089 if (IS_ERR(data)) { 4090 ret = PTR_ERR(data); 4091 break; 4092 } 4093 } else { 4094 ret = -EINVAL; 4095 break; 4096 } 4097 4098 hist_data->actions[hist_data->n_actions++] = data; 4099 } 4100 4101 return ret; 4102 } 4103 4104 static int create_actions(struct hist_trigger_data *hist_data) 4105 { 4106 struct action_data *data; 4107 unsigned int i; 4108 int ret = 0; 4109 4110 for (i = 0; i < hist_data->attrs->n_actions; i++) { 4111 data = hist_data->actions[i]; 4112 4113 if (data->handler == HANDLER_ONMATCH) { 4114 ret = onmatch_create(hist_data, data); 4115 if (ret) 4116 break; 4117 } else if (data->handler == HANDLER_ONMAX || 4118 data->handler == HANDLER_ONCHANGE) { 4119 ret = track_data_create(hist_data, data); 4120 if (ret) 4121 break; 4122 } else { 4123 ret = -EINVAL; 4124 break; 4125 } 4126 } 4127 4128 return ret; 4129 } 4130 4131 static void print_actions(struct seq_file *m, 4132 struct hist_trigger_data *hist_data, 4133 struct tracing_map_elt *elt) 4134 { 4135 unsigned int i; 4136 4137 for (i = 0; i < hist_data->n_actions; i++) { 4138 struct action_data *data = hist_data->actions[i]; 4139 4140 if (data->action == ACTION_SNAPSHOT) 4141 continue; 4142 4143 if (data->handler == HANDLER_ONMAX || 4144 data->handler == HANDLER_ONCHANGE) 4145 track_data_print(m, hist_data, elt, data); 4146 } 4147 } 4148 4149 static void print_action_spec(struct seq_file *m, 4150 struct hist_trigger_data *hist_data, 4151 struct action_data *data) 4152 { 4153 unsigned int i; 4154 4155 if (data->action == ACTION_SAVE) { 4156 for (i = 0; i < hist_data->n_save_vars; i++) { 4157 seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name); 4158 if (i < hist_data->n_save_vars - 1) 4159 seq_puts(m, ","); 4160 } 4161 } else if (data->action == ACTION_TRACE) { 4162 if (data->use_trace_keyword) 4163 seq_printf(m, "%s", data->synth_event_name); 4164 for (i = 0; i < data->n_params; i++) { 4165 if (i || data->use_trace_keyword) 4166 seq_puts(m, ","); 4167 seq_printf(m, "%s", data->params[i]); 4168 } 4169 } 4170 } 4171 4172 static void print_track_data_spec(struct seq_file *m, 4173 struct hist_trigger_data *hist_data, 4174 struct action_data *data) 4175 { 4176 if (data->handler == HANDLER_ONMAX) 4177 seq_puts(m, ":onmax("); 4178 else if (data->handler == HANDLER_ONCHANGE) 4179 seq_puts(m, ":onchange("); 4180 seq_printf(m, "%s", data->track_data.var_str); 
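	/*
	 * Together with print_action_spec() below, this reproduces the
	 * original action spec, e.g. (names hypothetical)
	 * ":onmax($wakeup_lat).save(pid,prio)".
	 */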
4181 seq_printf(m, ").%s(", data->action_name); 4182 4183 print_action_spec(m, hist_data, data); 4184 4185 seq_puts(m, ")"); 4186 } 4187 4188 static void print_onmatch_spec(struct seq_file *m, 4189 struct hist_trigger_data *hist_data, 4190 struct action_data *data) 4191 { 4192 seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system, 4193 data->match_data.event); 4194 4195 seq_printf(m, "%s(", data->action_name); 4196 4197 print_action_spec(m, hist_data, data); 4198 4199 seq_puts(m, ")"); 4200 } 4201 4202 static bool actions_match(struct hist_trigger_data *hist_data, 4203 struct hist_trigger_data *hist_data_test) 4204 { 4205 unsigned int i, j; 4206 4207 if (hist_data->n_actions != hist_data_test->n_actions) 4208 return false; 4209 4210 for (i = 0; i < hist_data->n_actions; i++) { 4211 struct action_data *data = hist_data->actions[i]; 4212 struct action_data *data_test = hist_data_test->actions[i]; 4213 char *action_name, *action_name_test; 4214 4215 if (data->handler != data_test->handler) 4216 return false; 4217 if (data->action != data_test->action) 4218 return false; 4219 4220 if (data->n_params != data_test->n_params) 4221 return false; 4222 4223 for (j = 0; j < data->n_params; j++) { 4224 if (strcmp(data->params[j], data_test->params[j]) != 0) 4225 return false; 4226 } 4227 4228 if (data->use_trace_keyword) 4229 action_name = data->synth_event_name; 4230 else 4231 action_name = data->action_name; 4232 4233 if (data_test->use_trace_keyword) 4234 action_name_test = data_test->synth_event_name; 4235 else 4236 action_name_test = data_test->action_name; 4237 4238 if (strcmp(action_name, action_name_test) != 0) 4239 return false; 4240 4241 if (data->handler == HANDLER_ONMATCH) { 4242 if (strcmp(data->match_data.event_system, 4243 data_test->match_data.event_system) != 0) 4244 return false; 4245 if (strcmp(data->match_data.event, 4246 data_test->match_data.event) != 0) 4247 return false; 4248 } else if (data->handler == HANDLER_ONMAX || 4249 data->handler == HANDLER_ONCHANGE) { 4250 if (strcmp(data->track_data.var_str, 4251 data_test->track_data.var_str) != 0) 4252 return false; 4253 } 4254 } 4255 4256 return true; 4257 } 4258 4259 4260 static void print_actions_spec(struct seq_file *m, 4261 struct hist_trigger_data *hist_data) 4262 { 4263 unsigned int i; 4264 4265 for (i = 0; i < hist_data->n_actions; i++) { 4266 struct action_data *data = hist_data->actions[i]; 4267 4268 if (data->handler == HANDLER_ONMATCH) 4269 print_onmatch_spec(m, hist_data, data); 4270 else if (data->handler == HANDLER_ONMAX || 4271 data->handler == HANDLER_ONCHANGE) 4272 print_track_data_spec(m, hist_data, data); 4273 } 4274 } 4275 4276 static void destroy_field_var_hists(struct hist_trigger_data *hist_data) 4277 { 4278 unsigned int i; 4279 4280 for (i = 0; i < hist_data->n_field_var_hists; i++) { 4281 kfree(hist_data->field_var_hists[i]->cmd); 4282 kfree(hist_data->field_var_hists[i]); 4283 } 4284 } 4285 4286 static void destroy_hist_data(struct hist_trigger_data *hist_data) 4287 { 4288 if (!hist_data) 4289 return; 4290 4291 destroy_hist_trigger_attrs(hist_data->attrs); 4292 destroy_hist_fields(hist_data); 4293 tracing_map_destroy(hist_data->map); 4294 4295 destroy_actions(hist_data); 4296 destroy_field_vars(hist_data); 4297 destroy_field_var_hists(hist_data); 4298 4299 kfree(hist_data); 4300 } 4301 4302 static int create_tracing_map_fields(struct hist_trigger_data *hist_data) 4303 { 4304 struct tracing_map *map = hist_data->map; 4305 struct ftrace_event_field *field; 4306 struct hist_field *hist_field; 4307 
int i, idx = 0; 4308 4309 for_each_hist_field(i, hist_data) { 4310 hist_field = hist_data->fields[i]; 4311 if (hist_field->flags & HIST_FIELD_FL_KEY) { 4312 tracing_map_cmp_fn_t cmp_fn; 4313 4314 field = hist_field->field; 4315 4316 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE) 4317 cmp_fn = tracing_map_cmp_none; 4318 else if (!field) 4319 cmp_fn = tracing_map_cmp_num(hist_field->size, 4320 hist_field->is_signed); 4321 else if (is_string_field(field)) 4322 cmp_fn = tracing_map_cmp_string; 4323 else 4324 cmp_fn = tracing_map_cmp_num(field->size, 4325 field->is_signed); 4326 idx = tracing_map_add_key_field(map, 4327 hist_field->offset, 4328 cmp_fn); 4329 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR)) 4330 idx = tracing_map_add_sum_field(map); 4331 4332 if (idx < 0) 4333 return idx; 4334 4335 if (hist_field->flags & HIST_FIELD_FL_VAR) { 4336 idx = tracing_map_add_var(map); 4337 if (idx < 0) 4338 return idx; 4339 hist_field->var.idx = idx; 4340 hist_field->var.hist_data = hist_data; 4341 } 4342 } 4343 4344 return 0; 4345 } 4346 4347 static struct hist_trigger_data * 4348 create_hist_data(unsigned int map_bits, 4349 struct hist_trigger_attrs *attrs, 4350 struct trace_event_file *file, 4351 bool remove) 4352 { 4353 const struct tracing_map_ops *map_ops = NULL; 4354 struct hist_trigger_data *hist_data; 4355 int ret = 0; 4356 4357 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL); 4358 if (!hist_data) 4359 return ERR_PTR(-ENOMEM); 4360 4361 hist_data->attrs = attrs; 4362 hist_data->remove = remove; 4363 hist_data->event_file = file; 4364 4365 ret = parse_actions(hist_data); 4366 if (ret) 4367 goto free; 4368 4369 ret = create_hist_fields(hist_data, file); 4370 if (ret) 4371 goto free; 4372 4373 ret = create_sort_keys(hist_data); 4374 if (ret) 4375 goto free; 4376 4377 map_ops = &hist_trigger_elt_data_ops; 4378 4379 hist_data->map = tracing_map_create(map_bits, hist_data->key_size, 4380 map_ops, hist_data); 4381 if (IS_ERR(hist_data->map)) { 4382 ret = PTR_ERR(hist_data->map); 4383 hist_data->map = NULL; 4384 goto free; 4385 } 4386 4387 ret = create_tracing_map_fields(hist_data); 4388 if (ret) 4389 goto free; 4390 out: 4391 return hist_data; 4392 free: 4393 hist_data->attrs = NULL; 4394 4395 destroy_hist_data(hist_data); 4396 4397 hist_data = ERR_PTR(ret); 4398 4399 goto out; 4400 } 4401 4402 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data, 4403 struct tracing_map_elt *elt, void *rec, 4404 struct ring_buffer_event *rbe, 4405 u64 *var_ref_vals) 4406 { 4407 struct hist_elt_data *elt_data; 4408 struct hist_field *hist_field; 4409 unsigned int i, var_idx; 4410 u64 hist_val; 4411 4412 elt_data = elt->private_data; 4413 elt_data->var_ref_vals = var_ref_vals; 4414 4415 for_each_hist_val_field(i, hist_data) { 4416 hist_field = hist_data->fields[i]; 4417 hist_val = hist_field->fn(hist_field, elt, rbe, rec); 4418 if (hist_field->flags & HIST_FIELD_FL_VAR) { 4419 var_idx = hist_field->var.idx; 4420 4421 if (hist_field->flags & HIST_FIELD_FL_STRING) { 4422 unsigned int str_start, var_str_idx, idx; 4423 char *str, *val_str; 4424 4425 str_start = hist_data->n_field_var_str + 4426 hist_data->n_save_var_str; 4427 var_str_idx = hist_field->var_str_idx; 4428 idx = str_start + var_str_idx; 4429 4430 str = elt_data->field_var_str[idx]; 4431 val_str = (char *)(uintptr_t)hist_val; 4432 strscpy(str, val_str, STR_VAR_LEN_MAX); 4433 4434 hist_val = (u64)(uintptr_t)str; 4435 } 4436 tracing_map_set_var(elt, var_idx, hist_val); 4437 continue; 4438 } 4439 tracing_map_update_sum(elt, i, 
hist_val); 4440 } 4441 4442 for_each_hist_key_field(i, hist_data) { 4443 hist_field = hist_data->fields[i]; 4444 if (hist_field->flags & HIST_FIELD_FL_VAR) { 4445 hist_val = hist_field->fn(hist_field, elt, rbe, rec); 4446 var_idx = hist_field->var.idx; 4447 tracing_map_set_var(elt, var_idx, hist_val); 4448 } 4449 } 4450 4451 update_field_vars(hist_data, elt, rbe, rec); 4452 } 4453 4454 static inline void add_to_key(char *compound_key, void *key, 4455 struct hist_field *key_field, void *rec) 4456 { 4457 size_t size = key_field->size; 4458 4459 if (key_field->flags & HIST_FIELD_FL_STRING) { 4460 struct ftrace_event_field *field; 4461 4462 field = key_field->field; 4463 if (field->filter_type == FILTER_DYN_STRING) 4464 size = *(u32 *)(rec + field->offset) >> 16; 4465 else if (field->filter_type == FILTER_PTR_STRING) 4466 size = strlen(key); 4467 else if (field->filter_type == FILTER_STATIC_STRING) 4468 size = field->size; 4469 4470 /* ensure NULL-termination */ 4471 if (size > key_field->size - 1) 4472 size = key_field->size - 1; 4473 4474 strncpy(compound_key + key_field->offset, (char *)key, size); 4475 } else 4476 memcpy(compound_key + key_field->offset, key, size); 4477 } 4478 4479 static void 4480 hist_trigger_actions(struct hist_trigger_data *hist_data, 4481 struct tracing_map_elt *elt, void *rec, 4482 struct ring_buffer_event *rbe, void *key, 4483 u64 *var_ref_vals) 4484 { 4485 struct action_data *data; 4486 unsigned int i; 4487 4488 for (i = 0; i < hist_data->n_actions; i++) { 4489 data = hist_data->actions[i]; 4490 data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals); 4491 } 4492 } 4493 4494 static void event_hist_trigger(struct event_trigger_data *data, void *rec, 4495 struct ring_buffer_event *rbe) 4496 { 4497 struct hist_trigger_data *hist_data = data->private_data; 4498 bool use_compound_key = (hist_data->n_keys > 1); 4499 unsigned long entries[HIST_STACKTRACE_DEPTH]; 4500 u64 var_ref_vals[TRACING_MAP_VARS_MAX]; 4501 char compound_key[HIST_KEY_SIZE_MAX]; 4502 struct tracing_map_elt *elt = NULL; 4503 struct hist_field *key_field; 4504 u64 field_contents; 4505 void *key = NULL; 4506 unsigned int i; 4507 4508 memset(compound_key, 0, hist_data->key_size); 4509 4510 for_each_hist_key_field(i, hist_data) { 4511 key_field = hist_data->fields[i]; 4512 4513 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { 4514 memset(entries, 0, HIST_STACKTRACE_SIZE); 4515 stack_trace_save(entries, HIST_STACKTRACE_DEPTH, 4516 HIST_STACKTRACE_SKIP); 4517 key = entries; 4518 } else { 4519 field_contents = key_field->fn(key_field, elt, rbe, rec); 4520 if (key_field->flags & HIST_FIELD_FL_STRING) { 4521 key = (void *)(unsigned long)field_contents; 4522 use_compound_key = true; 4523 } else 4524 key = (void *)&field_contents; 4525 } 4526 4527 if (use_compound_key) 4528 add_to_key(compound_key, key, key_field, rec); 4529 } 4530 4531 if (use_compound_key) 4532 key = compound_key; 4533 4534 if (hist_data->n_var_refs && 4535 !resolve_var_refs(hist_data, key, var_ref_vals, false)) 4536 return; 4537 4538 elt = tracing_map_insert(hist_data->map, key); 4539 if (!elt) 4540 return; 4541 4542 hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals); 4543 4544 if (resolve_var_refs(hist_data, key, var_ref_vals, true)) 4545 hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals); 4546 } 4547 4548 static void hist_trigger_stacktrace_print(struct seq_file *m, 4549 unsigned long *stacktrace_entries, 4550 unsigned int max_entries) 4551 { 4552 char str[KSYM_SYMBOL_LEN]; 4553 unsigned int spaces = 8; 4554 
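	/*
	 * Stacktrace keys are printed one symbolized frame per line,
	 * stopping at the first zero entry (the saved trace was
	 * zero-filled out to HIST_STACKTRACE_DEPTH before capture),
	 * e.g. (symbols illustrative):
	 *
	 *         __schedule+0x2c8/0x880
	 *         schedule+0x3c/0xa0
	 */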
unsigned int i; 4555 4556 for (i = 0; i < max_entries; i++) { 4557 if (!stacktrace_entries[i]) 4558 return; 4559 4560 seq_printf(m, "%*c", 1 + spaces, ' '); 4561 sprint_symbol(str, stacktrace_entries[i]); 4562 seq_printf(m, "%s\n", str); 4563 } 4564 } 4565 4566 static void hist_trigger_print_key(struct seq_file *m, 4567 struct hist_trigger_data *hist_data, 4568 void *key, 4569 struct tracing_map_elt *elt) 4570 { 4571 struct hist_field *key_field; 4572 char str[KSYM_SYMBOL_LEN]; 4573 bool multiline = false; 4574 const char *field_name; 4575 unsigned int i; 4576 u64 uval; 4577 4578 seq_puts(m, "{ "); 4579 4580 for_each_hist_key_field(i, hist_data) { 4581 key_field = hist_data->fields[i]; 4582 4583 if (i > hist_data->n_vals) 4584 seq_puts(m, ", "); 4585 4586 field_name = hist_field_name(key_field, 0); 4587 4588 if (key_field->flags & HIST_FIELD_FL_HEX) { 4589 uval = *(u64 *)(key + key_field->offset); 4590 seq_printf(m, "%s: %llx", field_name, uval); 4591 } else if (key_field->flags & HIST_FIELD_FL_SYM) { 4592 uval = *(u64 *)(key + key_field->offset); 4593 sprint_symbol_no_offset(str, uval); 4594 seq_printf(m, "%s: [%llx] %-45s", field_name, 4595 uval, str); 4596 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) { 4597 uval = *(u64 *)(key + key_field->offset); 4598 sprint_symbol(str, uval); 4599 seq_printf(m, "%s: [%llx] %-55s", field_name, 4600 uval, str); 4601 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) { 4602 struct hist_elt_data *elt_data = elt->private_data; 4603 char *comm; 4604 4605 if (WARN_ON_ONCE(!elt_data)) 4606 return; 4607 4608 comm = elt_data->comm; 4609 4610 uval = *(u64 *)(key + key_field->offset); 4611 seq_printf(m, "%s: %-16s[%10llu]", field_name, 4612 comm, uval); 4613 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) { 4614 const char *syscall_name; 4615 4616 uval = *(u64 *)(key + key_field->offset); 4617 syscall_name = get_syscall_name(uval); 4618 if (!syscall_name) 4619 syscall_name = "unknown_syscall"; 4620 4621 seq_printf(m, "%s: %-30s[%3llu]", field_name, 4622 syscall_name, uval); 4623 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { 4624 seq_puts(m, "stacktrace:\n"); 4625 hist_trigger_stacktrace_print(m, 4626 key + key_field->offset, 4627 HIST_STACKTRACE_DEPTH); 4628 multiline = true; 4629 } else if (key_field->flags & HIST_FIELD_FL_LOG2) { 4630 seq_printf(m, "%s: ~ 2^%-2llu", field_name, 4631 *(u64 *)(key + key_field->offset)); 4632 } else if (key_field->flags & HIST_FIELD_FL_STRING) { 4633 seq_printf(m, "%s: %-50s", field_name, 4634 (char *)(key + key_field->offset)); 4635 } else { 4636 uval = *(u64 *)(key + key_field->offset); 4637 seq_printf(m, "%s: %10llu", field_name, uval); 4638 } 4639 } 4640 4641 if (!multiline) 4642 seq_puts(m, " "); 4643 4644 seq_puts(m, "}"); 4645 } 4646 4647 static void hist_trigger_entry_print(struct seq_file *m, 4648 struct hist_trigger_data *hist_data, 4649 void *key, 4650 struct tracing_map_elt *elt) 4651 { 4652 const char *field_name; 4653 unsigned int i; 4654 4655 hist_trigger_print_key(m, hist_data, key, elt); 4656 4657 seq_printf(m, " hitcount: %10llu", 4658 tracing_map_read_sum(elt, HITCOUNT_IDX)); 4659 4660 for (i = 1; i < hist_data->n_vals; i++) { 4661 field_name = hist_field_name(hist_data->fields[i], 0); 4662 4663 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR || 4664 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR) 4665 continue; 4666 4667 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) { 4668 seq_printf(m, " %s: %10llx", field_name, 4669 tracing_map_read_sum(elt, i)); 4670 } else { 4671 
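			/* default: print the accumulated sum as unsigned decimal */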
seq_printf(m, " %s: %10llu", field_name, 4672 tracing_map_read_sum(elt, i)); 4673 } 4674 } 4675 4676 print_actions(m, hist_data, elt); 4677 4678 seq_puts(m, "\n"); 4679 } 4680 4681 static int print_entries(struct seq_file *m, 4682 struct hist_trigger_data *hist_data) 4683 { 4684 struct tracing_map_sort_entry **sort_entries = NULL; 4685 struct tracing_map *map = hist_data->map; 4686 int i, n_entries; 4687 4688 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys, 4689 hist_data->n_sort_keys, 4690 &sort_entries); 4691 if (n_entries < 0) 4692 return n_entries; 4693 4694 for (i = 0; i < n_entries; i++) 4695 hist_trigger_entry_print(m, hist_data, 4696 sort_entries[i]->key, 4697 sort_entries[i]->elt); 4698 4699 tracing_map_destroy_sort_entries(sort_entries, n_entries); 4700 4701 return n_entries; 4702 } 4703 4704 static void hist_trigger_show(struct seq_file *m, 4705 struct event_trigger_data *data, int n) 4706 { 4707 struct hist_trigger_data *hist_data; 4708 int n_entries; 4709 4710 if (n > 0) 4711 seq_puts(m, "\n\n"); 4712 4713 seq_puts(m, "# event histogram\n#\n# trigger info: "); 4714 data->ops->print(m, data->ops, data); 4715 seq_puts(m, "#\n\n"); 4716 4717 hist_data = data->private_data; 4718 n_entries = print_entries(m, hist_data); 4719 if (n_entries < 0) 4720 n_entries = 0; 4721 4722 track_data_snapshot_print(m, hist_data); 4723 4724 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n", 4725 (u64)atomic64_read(&hist_data->map->hits), 4726 n_entries, (u64)atomic64_read(&hist_data->map->drops)); 4727 } 4728 4729 static int hist_show(struct seq_file *m, void *v) 4730 { 4731 struct event_trigger_data *data; 4732 struct trace_event_file *event_file; 4733 int n = 0, ret = 0; 4734 4735 mutex_lock(&event_mutex); 4736 4737 event_file = event_file_data(m->private); 4738 if (unlikely(!event_file)) { 4739 ret = -ENODEV; 4740 goto out_unlock; 4741 } 4742 4743 list_for_each_entry(data, &event_file->triggers, list) { 4744 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) 4745 hist_trigger_show(m, data, n++); 4746 } 4747 4748 out_unlock: 4749 mutex_unlock(&event_mutex); 4750 4751 return ret; 4752 } 4753 4754 static int event_hist_open(struct inode *inode, struct file *file) 4755 { 4756 int ret; 4757 4758 ret = security_locked_down(LOCKDOWN_TRACEFS); 4759 if (ret) 4760 return ret; 4761 4762 return single_open(file, hist_show, file); 4763 } 4764 4765 const struct file_operations event_hist_fops = { 4766 .open = event_hist_open, 4767 .read = seq_read, 4768 .llseek = seq_lseek, 4769 .release = single_release, 4770 }; 4771 4772 #ifdef CONFIG_HIST_TRIGGERS_DEBUG 4773 static void hist_field_debug_show_flags(struct seq_file *m, 4774 unsigned long flags) 4775 { 4776 seq_puts(m, " flags:\n"); 4777 4778 if (flags & HIST_FIELD_FL_KEY) 4779 seq_puts(m, " HIST_FIELD_FL_KEY\n"); 4780 else if (flags & HIST_FIELD_FL_HITCOUNT) 4781 seq_puts(m, " VAL: HIST_FIELD_FL_HITCOUNT\n"); 4782 else if (flags & HIST_FIELD_FL_VAR) 4783 seq_puts(m, " HIST_FIELD_FL_VAR\n"); 4784 else if (flags & HIST_FIELD_FL_VAR_REF) 4785 seq_puts(m, " HIST_FIELD_FL_VAR_REF\n"); 4786 else 4787 seq_puts(m, " VAL: normal u64 value\n"); 4788 4789 if (flags & HIST_FIELD_FL_ALIAS) 4790 seq_puts(m, " HIST_FIELD_FL_ALIAS\n"); 4791 } 4792 4793 static int hist_field_debug_show(struct seq_file *m, 4794 struct hist_field *field, unsigned long flags) 4795 { 4796 if ((field->flags & flags) != flags) { 4797 seq_printf(m, "ERROR: bad flags - %lx\n", flags); 4798 return -EINVAL; 4799 } 4800 4801 hist_field_debug_show_flags(m, 
field->flags); 4802 if (field->field) 4803 seq_printf(m, " ftrace_event_field name: %s\n", 4804 field->field->name); 4805 4806 if (field->flags & HIST_FIELD_FL_VAR) { 4807 seq_printf(m, " var.name: %s\n", field->var.name); 4808 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", 4809 field->var.idx); 4810 } 4811 4812 if (field->flags & HIST_FIELD_FL_ALIAS) 4813 seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n", 4814 field->var_ref_idx); 4815 4816 if (field->flags & HIST_FIELD_FL_VAR_REF) { 4817 seq_printf(m, " name: %s\n", field->name); 4818 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", 4819 field->var.idx); 4820 seq_printf(m, " var.hist_data: %p\n", field->var.hist_data); 4821 seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n", 4822 field->var_ref_idx); 4823 if (field->system) 4824 seq_printf(m, " system: %s\n", field->system); 4825 if (field->event_name) 4826 seq_printf(m, " event_name: %s\n", field->event_name); 4827 } 4828 4829 seq_printf(m, " type: %s\n", field->type); 4830 seq_printf(m, " size: %u\n", field->size); 4831 seq_printf(m, " is_signed: %u\n", field->is_signed); 4832 4833 return 0; 4834 } 4835 4836 static int field_var_debug_show(struct seq_file *m, 4837 struct field_var *field_var, unsigned int i, 4838 bool save_vars) 4839 { 4840 const char *vars_name = save_vars ? "save_vars" : "field_vars"; 4841 struct hist_field *field; 4842 int ret = 0; 4843 4844 seq_printf(m, "\n hist_data->%s[%d]:\n", vars_name, i); 4845 4846 field = field_var->var; 4847 4848 seq_printf(m, "\n %s[%d].var:\n", vars_name, i); 4849 4850 hist_field_debug_show_flags(m, field->flags); 4851 seq_printf(m, " var.name: %s\n", field->var.name); 4852 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", 4853 field->var.idx); 4854 4855 field = field_var->val; 4856 4857 seq_printf(m, "\n %s[%d].val:\n", vars_name, i); 4858 if (field->field) 4859 seq_printf(m, " ftrace_event_field name: %s\n", 4860 field->field->name); 4861 else { 4862 ret = -EINVAL; 4863 goto out; 4864 } 4865 4866 seq_printf(m, " type: %s\n", field->type); 4867 seq_printf(m, " size: %u\n", field->size); 4868 seq_printf(m, " is_signed: %u\n", field->is_signed); 4869 out: 4870 return ret; 4871 } 4872 4873 static int hist_action_debug_show(struct seq_file *m, 4874 struct action_data *data, int i) 4875 { 4876 int ret = 0; 4877 4878 if (data->handler == HANDLER_ONMAX || 4879 data->handler == HANDLER_ONCHANGE) { 4880 seq_printf(m, "\n hist_data->actions[%d].track_data.var_ref:\n", i); 4881 ret = hist_field_debug_show(m, data->track_data.var_ref, 4882 HIST_FIELD_FL_VAR_REF); 4883 if (ret) 4884 goto out; 4885 4886 seq_printf(m, "\n hist_data->actions[%d].track_data.track_var:\n", i); 4887 ret = hist_field_debug_show(m, data->track_data.track_var, 4888 HIST_FIELD_FL_VAR); 4889 if (ret) 4890 goto out; 4891 } 4892 4893 if (data->handler == HANDLER_ONMATCH) { 4894 seq_printf(m, "\n hist_data->actions[%d].match_data.event_system: %s\n", 4895 i, data->match_data.event_system); 4896 seq_printf(m, " hist_data->actions[%d].match_data.event: %s\n", 4897 i, data->match_data.event); 4898 } 4899 out: 4900 return ret; 4901 } 4902 4903 static int hist_actions_debug_show(struct seq_file *m, 4904 struct hist_trigger_data *hist_data) 4905 { 4906 int i, ret = 0; 4907 4908 if (hist_data->n_actions) 4909 seq_puts(m, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n"); 4910 4911 for (i = 0; i < hist_data->n_actions; i++) { 4912 struct action_data *action = hist_data->actions[i]; 4913 4914 ret = 
hist_action_debug_show(m, action, i); 4915 if (ret) 4916 goto out; 4917 } 4918 4919 if (hist_data->n_save_vars) 4920 seq_puts(m, "\n save action variables (save() params):\n"); 4921 4922 for (i = 0; i < hist_data->n_save_vars; i++) { 4923 ret = field_var_debug_show(m, hist_data->save_vars[i], i, true); 4924 if (ret) 4925 goto out; 4926 } 4927 out: 4928 return ret; 4929 } 4930 4931 static void hist_trigger_debug_show(struct seq_file *m, 4932 struct event_trigger_data *data, int n) 4933 { 4934 struct hist_trigger_data *hist_data; 4935 int i, ret; 4936 4937 if (n > 0) 4938 seq_puts(m, "\n\n"); 4939 4940 seq_puts(m, "# event histogram\n#\n# trigger info: "); 4941 data->ops->print(m, data->ops, data); 4942 seq_puts(m, "#\n\n"); 4943 4944 hist_data = data->private_data; 4945 4946 seq_printf(m, "hist_data: %p\n\n", hist_data); 4947 seq_printf(m, " n_vals: %u\n", hist_data->n_vals); 4948 seq_printf(m, " n_keys: %u\n", hist_data->n_keys); 4949 seq_printf(m, " n_fields: %u\n", hist_data->n_fields); 4950 4951 seq_puts(m, "\n val fields:\n\n"); 4952 4953 seq_puts(m, " hist_data->fields[0]:\n"); 4954 ret = hist_field_debug_show(m, hist_data->fields[0], 4955 HIST_FIELD_FL_HITCOUNT); 4956 if (ret) 4957 return; 4958 4959 for (i = 1; i < hist_data->n_vals; i++) { 4960 seq_printf(m, "\n hist_data->fields[%d]:\n", i); 4961 ret = hist_field_debug_show(m, hist_data->fields[i], 0); 4962 if (ret) 4963 return; 4964 } 4965 4966 seq_puts(m, "\n key fields:\n"); 4967 4968 for (i = hist_data->n_vals; i < hist_data->n_fields; i++) { 4969 seq_printf(m, "\n hist_data->fields[%d]:\n", i); 4970 ret = hist_field_debug_show(m, hist_data->fields[i], 4971 HIST_FIELD_FL_KEY); 4972 if (ret) 4973 return; 4974 } 4975 4976 if (hist_data->n_var_refs) 4977 seq_puts(m, "\n variable reference fields:\n"); 4978 4979 for (i = 0; i < hist_data->n_var_refs; i++) { 4980 seq_printf(m, "\n hist_data->var_refs[%d]:\n", i); 4981 ret = hist_field_debug_show(m, hist_data->var_refs[i], 4982 HIST_FIELD_FL_VAR_REF); 4983 if (ret) 4984 return; 4985 } 4986 4987 if (hist_data->n_field_vars) 4988 seq_puts(m, "\n field variables:\n"); 4989 4990 for (i = 0; i < hist_data->n_field_vars; i++) { 4991 ret = field_var_debug_show(m, hist_data->field_vars[i], i, false); 4992 if (ret) 4993 return; 4994 } 4995 4996 ret = hist_actions_debug_show(m, hist_data); 4997 if (ret) 4998 return; 4999 } 5000 5001 static int hist_debug_show(struct seq_file *m, void *v) 5002 { 5003 struct event_trigger_data *data; 5004 struct trace_event_file *event_file; 5005 int n = 0, ret = 0; 5006 5007 mutex_lock(&event_mutex); 5008 5009 event_file = event_file_data(m->private); 5010 if (unlikely(!event_file)) { 5011 ret = -ENODEV; 5012 goto out_unlock; 5013 } 5014 5015 list_for_each_entry(data, &event_file->triggers, list) { 5016 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) 5017 hist_trigger_debug_show(m, data, n++); 5018 } 5019 5020 out_unlock: 5021 mutex_unlock(&event_mutex); 5022 5023 return ret; 5024 } 5025 5026 static int event_hist_debug_open(struct inode *inode, struct file *file) 5027 { 5028 int ret; 5029 5030 ret = security_locked_down(LOCKDOWN_TRACEFS); 5031 if (ret) 5032 return ret; 5033 5034 return single_open(file, hist_debug_show, file); 5035 } 5036 5037 const struct file_operations event_hist_debug_fops = { 5038 .open = event_hist_debug_open, 5039 .read = seq_read, 5040 .llseek = seq_lseek, 5041 .release = single_release, 5042 }; 5043 #endif 5044 5045 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) 5046 { 5047 const char *field_name = 
hist_field_name(hist_field, 0); 5048 5049 if (hist_field->var.name) 5050 seq_printf(m, "%s=", hist_field->var.name); 5051 5052 if (hist_field->flags & HIST_FIELD_FL_CPU) 5053 seq_puts(m, "cpu"); 5054 else if (field_name) { 5055 if (hist_field->flags & HIST_FIELD_FL_VAR_REF || 5056 hist_field->flags & HIST_FIELD_FL_ALIAS) 5057 seq_putc(m, '$'); 5058 seq_printf(m, "%s", field_name); 5059 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP) 5060 seq_puts(m, "common_timestamp"); 5061 5062 if (hist_field->flags) { 5063 if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) && 5064 !(hist_field->flags & HIST_FIELD_FL_EXPR)) { 5065 const char *flags = get_hist_field_flags(hist_field); 5066 5067 if (flags) 5068 seq_printf(m, ".%s", flags); 5069 } 5070 } 5071 } 5072 5073 static int event_hist_trigger_print(struct seq_file *m, 5074 struct event_trigger_ops *ops, 5075 struct event_trigger_data *data) 5076 { 5077 struct hist_trigger_data *hist_data = data->private_data; 5078 struct hist_field *field; 5079 bool have_var = false; 5080 unsigned int i; 5081 5082 seq_puts(m, "hist:"); 5083 5084 if (data->name) 5085 seq_printf(m, "%s:", data->name); 5086 5087 seq_puts(m, "keys="); 5088 5089 for_each_hist_key_field(i, hist_data) { 5090 field = hist_data->fields[i]; 5091 5092 if (i > hist_data->n_vals) 5093 seq_puts(m, ","); 5094 5095 if (field->flags & HIST_FIELD_FL_STACKTRACE) 5096 seq_puts(m, "stacktrace"); 5097 else 5098 hist_field_print(m, field); 5099 } 5100 5101 seq_puts(m, ":vals="); 5102 5103 for_each_hist_val_field(i, hist_data) { 5104 field = hist_data->fields[i]; 5105 if (field->flags & HIST_FIELD_FL_VAR) { 5106 have_var = true; 5107 continue; 5108 } 5109 5110 if (i == HITCOUNT_IDX) 5111 seq_puts(m, "hitcount"); 5112 else { 5113 seq_puts(m, ","); 5114 hist_field_print(m, field); 5115 } 5116 } 5117 5118 if (have_var) { 5119 unsigned int n = 0; 5120 5121 seq_puts(m, ":"); 5122 5123 for_each_hist_val_field(i, hist_data) { 5124 field = hist_data->fields[i]; 5125 5126 if (field->flags & HIST_FIELD_FL_VAR) { 5127 if (n++) 5128 seq_puts(m, ","); 5129 hist_field_print(m, field); 5130 } 5131 } 5132 } 5133 5134 seq_puts(m, ":sort="); 5135 5136 for (i = 0; i < hist_data->n_sort_keys; i++) { 5137 struct tracing_map_sort_key *sort_key; 5138 unsigned int idx, first_key_idx; 5139 5140 /* skip VAR vals */ 5141 first_key_idx = hist_data->n_vals - hist_data->n_vars; 5142 5143 sort_key = &hist_data->sort_keys[i]; 5144 idx = sort_key->field_idx; 5145 5146 if (WARN_ON(idx >= HIST_FIELDS_MAX)) 5147 return -EINVAL; 5148 5149 if (i > 0) 5150 seq_puts(m, ","); 5151 5152 if (idx == HITCOUNT_IDX) 5153 seq_puts(m, "hitcount"); 5154 else { 5155 if (idx >= first_key_idx) 5156 idx += hist_data->n_vars; 5157 hist_field_print(m, hist_data->fields[idx]); 5158 } 5159 5160 if (sort_key->descending) 5161 seq_puts(m, ".descending"); 5162 } 5163 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits)); 5164 if (hist_data->enable_timestamps) 5165 seq_printf(m, ":clock=%s", hist_data->attrs->clock); 5166 5167 print_actions_spec(m, hist_data); 5168 5169 if (data->filter_str) 5170 seq_printf(m, " if %s", data->filter_str); 5171 5172 if (data->paused) 5173 seq_puts(m, " [paused]"); 5174 else 5175 seq_puts(m, " [active]"); 5176 5177 seq_putc(m, '\n'); 5178 5179 return 0; 5180 } 5181 5182 static int event_hist_trigger_init(struct event_trigger_ops *ops, 5183 struct event_trigger_data *data) 5184 { 5185 struct hist_trigger_data *hist_data = data->private_data; 5186 5187 if (!data->ref && hist_data->attrs->name) 5188 
save_named_trigger(hist_data->attrs->name, data); 5189 5190 data->ref++; 5191 5192 return 0; 5193 } 5194 5195 static void unregister_field_var_hists(struct hist_trigger_data *hist_data) 5196 { 5197 struct trace_event_file *file; 5198 unsigned int i; 5199 char *cmd; 5200 int ret; 5201 5202 for (i = 0; i < hist_data->n_field_var_hists; i++) { 5203 file = hist_data->field_var_hists[i]->hist_data->event_file; 5204 cmd = hist_data->field_var_hists[i]->cmd; 5205 ret = event_hist_trigger_func(&trigger_hist_cmd, file, 5206 "!hist", "hist", cmd); 5207 } 5208 } 5209 5210 static void event_hist_trigger_free(struct event_trigger_ops *ops, 5211 struct event_trigger_data *data) 5212 { 5213 struct hist_trigger_data *hist_data = data->private_data; 5214 5215 if (WARN_ON_ONCE(data->ref <= 0)) 5216 return; 5217 5218 data->ref--; 5219 if (!data->ref) { 5220 if (data->name) 5221 del_named_trigger(data); 5222 5223 trigger_data_free(data); 5224 5225 remove_hist_vars(hist_data); 5226 5227 unregister_field_var_hists(hist_data); 5228 5229 destroy_hist_data(hist_data); 5230 } 5231 } 5232 5233 static struct event_trigger_ops event_hist_trigger_ops = { 5234 .func = event_hist_trigger, 5235 .print = event_hist_trigger_print, 5236 .init = event_hist_trigger_init, 5237 .free = event_hist_trigger_free, 5238 }; 5239 5240 static int event_hist_trigger_named_init(struct event_trigger_ops *ops, 5241 struct event_trigger_data *data) 5242 { 5243 data->ref++; 5244 5245 save_named_trigger(data->named_data->name, data); 5246 5247 event_hist_trigger_init(ops, data->named_data); 5248 5249 return 0; 5250 } 5251 5252 static void event_hist_trigger_named_free(struct event_trigger_ops *ops, 5253 struct event_trigger_data *data) 5254 { 5255 if (WARN_ON_ONCE(data->ref <= 0)) 5256 return; 5257 5258 event_hist_trigger_free(ops, data->named_data); 5259 5260 data->ref--; 5261 if (!data->ref) { 5262 del_named_trigger(data); 5263 trigger_data_free(data); 5264 } 5265 } 5266 5267 static struct event_trigger_ops event_hist_trigger_named_ops = { 5268 .func = event_hist_trigger, 5269 .print = event_hist_trigger_print, 5270 .init = event_hist_trigger_named_init, 5271 .free = event_hist_trigger_named_free, 5272 }; 5273 5274 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd, 5275 char *param) 5276 { 5277 return &event_hist_trigger_ops; 5278 } 5279 5280 static void hist_clear(struct event_trigger_data *data) 5281 { 5282 struct hist_trigger_data *hist_data = data->private_data; 5283 5284 if (data->name) 5285 pause_named_trigger(data); 5286 5287 tracepoint_synchronize_unregister(); 5288 5289 tracing_map_clear(hist_data->map); 5290 5291 if (data->name) 5292 unpause_named_trigger(data); 5293 } 5294 5295 static bool compatible_field(struct ftrace_event_field *field, 5296 struct ftrace_event_field *test_field) 5297 { 5298 if (field == test_field) 5299 return true; 5300 if (field == NULL || test_field == NULL) 5301 return false; 5302 if (strcmp(field->name, test_field->name) != 0) 5303 return false; 5304 if (strcmp(field->type, test_field->type) != 0) 5305 return false; 5306 if (field->size != test_field->size) 5307 return false; 5308 if (field->is_signed != test_field->is_signed) 5309 return false; 5310 5311 return true; 5312 } 5313 5314 static bool hist_trigger_match(struct event_trigger_data *data, 5315 struct event_trigger_data *data_test, 5316 struct event_trigger_data *named_data, 5317 bool ignore_filter) 5318 { 5319 struct tracing_map_sort_key *sort_key, *sort_key_test; 5320 struct hist_trigger_data *hist_data, *hist_data_test; 5321 
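	/*
	 * Two hist triggers "match" only if they agree on everything
	 * visible in the trigger string: the same fields (keys, vals,
	 * variables), the same sort keys, the same actions and, unless
	 * ignore_filter is set, the same 'if' filter.  So e.g.
	 * 'hist:keys=pid' and 'hist:keys=comm' never match, while a
	 * re-issued identical command matches its existing trigger.
	 */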
struct hist_field *key_field, *key_field_test; 5322 unsigned int i; 5323 5324 if (named_data && (named_data != data_test) && 5325 (named_data != data_test->named_data)) 5326 return false; 5327 5328 if (!named_data && is_named_trigger(data_test)) 5329 return false; 5330 5331 hist_data = data->private_data; 5332 hist_data_test = data_test->private_data; 5333 5334 if (hist_data->n_vals != hist_data_test->n_vals || 5335 hist_data->n_fields != hist_data_test->n_fields || 5336 hist_data->n_sort_keys != hist_data_test->n_sort_keys) 5337 return false; 5338 5339 if (!ignore_filter) { 5340 if ((data->filter_str && !data_test->filter_str) || 5341 (!data->filter_str && data_test->filter_str)) 5342 return false; 5343 } 5344 5345 for_each_hist_field(i, hist_data) { 5346 key_field = hist_data->fields[i]; 5347 key_field_test = hist_data_test->fields[i]; 5348 5349 if (key_field->flags != key_field_test->flags) 5350 return false; 5351 if (!compatible_field(key_field->field, key_field_test->field)) 5352 return false; 5353 if (key_field->offset != key_field_test->offset) 5354 return false; 5355 if (key_field->size != key_field_test->size) 5356 return false; 5357 if (key_field->is_signed != key_field_test->is_signed) 5358 return false; 5359 if (!!key_field->var.name != !!key_field_test->var.name) 5360 return false; 5361 if (key_field->var.name && 5362 strcmp(key_field->var.name, key_field_test->var.name) != 0) 5363 return false; 5364 } 5365 5366 for (i = 0; i < hist_data->n_sort_keys; i++) { 5367 sort_key = &hist_data->sort_keys[i]; 5368 sort_key_test = &hist_data_test->sort_keys[i]; 5369 5370 if (sort_key->field_idx != sort_key_test->field_idx || 5371 sort_key->descending != sort_key_test->descending) 5372 return false; 5373 } 5374 5375 if (!ignore_filter && data->filter_str && 5376 (strcmp(data->filter_str, data_test->filter_str) != 0)) 5377 return false; 5378 5379 if (!actions_match(hist_data, hist_data_test)) 5380 return false; 5381 5382 return true; 5383 } 5384 5385 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops, 5386 struct event_trigger_data *data, 5387 struct trace_event_file *file) 5388 { 5389 struct hist_trigger_data *hist_data = data->private_data; 5390 struct event_trigger_data *test, *named_data = NULL; 5391 struct trace_array *tr = file->tr; 5392 int ret = 0; 5393 5394 if (hist_data->attrs->name) { 5395 named_data = find_named_trigger(hist_data->attrs->name); 5396 if (named_data) { 5397 if (!hist_trigger_match(data, named_data, named_data, 5398 true)) { 5399 hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name)); 5400 ret = -EINVAL; 5401 goto out; 5402 } 5403 } 5404 } 5405 5406 if (hist_data->attrs->name && !named_data) 5407 goto new; 5408 5409 lockdep_assert_held(&event_mutex); 5410 5411 list_for_each_entry(test, &file->triggers, list) { 5412 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5413 if (!hist_trigger_match(data, test, named_data, false)) 5414 continue; 5415 if (hist_data->attrs->pause) 5416 test->paused = true; 5417 else if (hist_data->attrs->cont) 5418 test->paused = false; 5419 else if (hist_data->attrs->clear) 5420 hist_clear(test); 5421 else { 5422 hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0); 5423 ret = -EEXIST; 5424 } 5425 goto out; 5426 } 5427 } 5428 new: 5429 if (hist_data->attrs->cont || hist_data->attrs->clear) { 5430 hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0); 5431 ret = -ENOENT; 5432 goto out; 5433 } 5434 5435 if (hist_data->attrs->pause) 5436 data->paused = true; 5437 5438 if (named_data) { 5439 data->private_data = 
named_data->private_data; 5440 set_named_trigger_data(data, named_data); 5441 data->ops = &event_hist_trigger_named_ops; 5442 } 5443 5444 if (data->ops->init) { 5445 ret = data->ops->init(data->ops, data); 5446 if (ret < 0) 5447 goto out; 5448 } 5449 5450 if (hist_data->enable_timestamps) { 5451 char *clock = hist_data->attrs->clock; 5452 5453 ret = tracing_set_clock(file->tr, hist_data->attrs->clock); 5454 if (ret) { 5455 hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock)); 5456 goto out; 5457 } 5458 5459 tracing_set_time_stamp_abs(file->tr, true); 5460 } 5461 5462 if (named_data) 5463 destroy_hist_data(hist_data); 5464 5465 ret++; 5466 out: 5467 return ret; 5468 } 5469 5470 static int hist_trigger_enable(struct event_trigger_data *data, 5471 struct trace_event_file *file) 5472 { 5473 int ret = 0; 5474 5475 list_add_tail_rcu(&data->list, &file->triggers); 5476 5477 update_cond_flag(file); 5478 5479 if (trace_event_trigger_enable_disable(file, 1) < 0) { 5480 list_del_rcu(&data->list); 5481 update_cond_flag(file); 5482 ret--; 5483 } 5484 5485 return ret; 5486 } 5487 5488 static bool have_hist_trigger_match(struct event_trigger_data *data, 5489 struct trace_event_file *file) 5490 { 5491 struct hist_trigger_data *hist_data = data->private_data; 5492 struct event_trigger_data *test, *named_data = NULL; 5493 bool match = false; 5494 5495 lockdep_assert_held(&event_mutex); 5496 5497 if (hist_data->attrs->name) 5498 named_data = find_named_trigger(hist_data->attrs->name); 5499 5500 list_for_each_entry(test, &file->triggers, list) { 5501 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5502 if (hist_trigger_match(data, test, named_data, false)) { 5503 match = true; 5504 break; 5505 } 5506 } 5507 } 5508 5509 return match; 5510 } 5511 5512 static bool hist_trigger_check_refs(struct event_trigger_data *data, 5513 struct trace_event_file *file) 5514 { 5515 struct hist_trigger_data *hist_data = data->private_data; 5516 struct event_trigger_data *test, *named_data = NULL; 5517 5518 lockdep_assert_held(&event_mutex); 5519 5520 if (hist_data->attrs->name) 5521 named_data = find_named_trigger(hist_data->attrs->name); 5522 5523 list_for_each_entry(test, &file->triggers, list) { 5524 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5525 if (!hist_trigger_match(data, test, named_data, false)) 5526 continue; 5527 hist_data = test->private_data; 5528 if (check_var_refs(hist_data)) 5529 return true; 5530 break; 5531 } 5532 } 5533 5534 return false; 5535 } 5536 5537 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops, 5538 struct event_trigger_data *data, 5539 struct trace_event_file *file) 5540 { 5541 struct hist_trigger_data *hist_data = data->private_data; 5542 struct event_trigger_data *test, *named_data = NULL; 5543 bool unregistered = false; 5544 5545 lockdep_assert_held(&event_mutex); 5546 5547 if (hist_data->attrs->name) 5548 named_data = find_named_trigger(hist_data->attrs->name); 5549 5550 list_for_each_entry(test, &file->triggers, list) { 5551 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5552 if (!hist_trigger_match(data, test, named_data, false)) 5553 continue; 5554 unregistered = true; 5555 list_del_rcu(&test->list); 5556 trace_event_trigger_enable_disable(file, 0); 5557 update_cond_flag(file); 5558 break; 5559 } 5560 } 5561 5562 if (unregistered && test->ops->free) 5563 test->ops->free(test->ops, test); 5564 5565 if (hist_data->enable_timestamps) { 5566 if (!hist_data->remove || unregistered) 5567 tracing_set_time_stamp_abs(file->tr, false); 5568 } 5569 } 
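/*
 * The remaining pieces below are the parsing and registration glue for
 * the 'hist' command as written to a tracefs 'trigger' file.
 * Illustrative shell usage, relative to the tracefs mount point (event
 * and field names are examples only):
 *
 *   # create:  echo 'hist:keys=pid:vals=hitcount:sort=hitcount' > \
 *                   events/sched/sched_switch/trigger
 *   # remove:  echo '!hist:keys=pid:vals=hitcount:sort=hitcount' > \
 *                   events/sched/sched_switch/trigger
 *
 * A leading '!' selects the removal path in event_hist_trigger_func(),
 * which refuses to remove a trigger whose variables are still
 * referenced by another histogram (-EBUSY).
 */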
5570 5571 static bool hist_file_check_refs(struct trace_event_file *file) 5572 { 5573 struct hist_trigger_data *hist_data; 5574 struct event_trigger_data *test; 5575 5576 lockdep_assert_held(&event_mutex); 5577 5578 list_for_each_entry(test, &file->triggers, list) { 5579 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5580 hist_data = test->private_data; 5581 if (check_var_refs(hist_data)) 5582 return true; 5583 } 5584 } 5585 5586 return false; 5587 } 5588 5589 static void hist_unreg_all(struct trace_event_file *file) 5590 { 5591 struct event_trigger_data *test, *n; 5592 struct hist_trigger_data *hist_data; 5593 struct synth_event *se; 5594 const char *se_name; 5595 5596 lockdep_assert_held(&event_mutex); 5597 5598 if (hist_file_check_refs(file)) 5599 return; 5600 5601 list_for_each_entry_safe(test, n, &file->triggers, list) { 5602 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5603 hist_data = test->private_data; 5604 list_del_rcu(&test->list); 5605 trace_event_trigger_enable_disable(file, 0); 5606 5607 se_name = trace_event_name(file->event_call); 5608 se = find_synth_event(se_name); 5609 if (se) 5610 se->ref--; 5611 5612 update_cond_flag(file); 5613 if (hist_data->enable_timestamps) 5614 tracing_set_time_stamp_abs(file->tr, false); 5615 if (test->ops->free) 5616 test->ops->free(test->ops, test); 5617 } 5618 } 5619 } 5620 5621 static int event_hist_trigger_func(struct event_command *cmd_ops, 5622 struct trace_event_file *file, 5623 char *glob, char *cmd, char *param) 5624 { 5625 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT; 5626 struct event_trigger_data *trigger_data; 5627 struct hist_trigger_attrs *attrs; 5628 struct event_trigger_ops *trigger_ops; 5629 struct hist_trigger_data *hist_data; 5630 struct synth_event *se; 5631 const char *se_name; 5632 bool remove = false; 5633 char *trigger, *p; 5634 int ret = 0; 5635 5636 lockdep_assert_held(&event_mutex); 5637 5638 if (glob && strlen(glob)) { 5639 hist_err_clear(); 5640 last_cmd_set(file, param); 5641 } 5642 5643 if (!param) 5644 return -EINVAL; 5645 5646 if (glob[0] == '!') 5647 remove = true; 5648 5649 /* 5650 * separate the trigger from the filter (k:v [if filter]) 5651 * allowing for whitespace in the trigger 5652 */ 5653 p = trigger = param; 5654 do { 5655 p = strstr(p, "if"); 5656 if (!p) 5657 break; 5658 if (p == param) 5659 return -EINVAL; 5660 if (*(p - 1) != ' ' && *(p - 1) != '\t') { 5661 p++; 5662 continue; 5663 } 5664 if (p >= param + strlen(param) - (sizeof("if") - 1) - 1) 5665 return -EINVAL; 5666 if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') { 5667 p++; 5668 continue; 5669 } 5670 break; 5671 } while (p); 5672 5673 if (!p) 5674 param = NULL; 5675 else { 5676 *(p - 1) = '\0'; 5677 param = strstrip(p); 5678 trigger = strstrip(trigger); 5679 } 5680 5681 attrs = parse_hist_trigger_attrs(file->tr, trigger); 5682 if (IS_ERR(attrs)) 5683 return PTR_ERR(attrs); 5684 5685 if (attrs->map_bits) 5686 hist_trigger_bits = attrs->map_bits; 5687 5688 hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove); 5689 if (IS_ERR(hist_data)) { 5690 destroy_hist_trigger_attrs(attrs); 5691 return PTR_ERR(hist_data); 5692 } 5693 5694 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); 5695 5696 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL); 5697 if (!trigger_data) { 5698 ret = -ENOMEM; 5699 goto out_free; 5700 } 5701 5702 trigger_data->count = -1; 5703 trigger_data->ops = trigger_ops; 5704 trigger_data->cmd_ops = cmd_ops; 5705 5706 INIT_LIST_HEAD(&trigger_data->list); 5707 
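	/*
	 * Start with no filter; if the command carried an 'if' clause,
	 * the set_filter() call below attaches it to trigger_data.
	 */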
RCU_INIT_POINTER(trigger_data->filter, NULL); 5708 5709 trigger_data->private_data = hist_data; 5710 5711 /* if param is non-empty, it's supposed to be a filter */ 5712 if (param && cmd_ops->set_filter) { 5713 ret = cmd_ops->set_filter(param, trigger_data, file); 5714 if (ret < 0) 5715 goto out_free; 5716 } 5717 5718 if (remove) { 5719 if (!have_hist_trigger_match(trigger_data, file)) 5720 goto out_free; 5721 5722 if (hist_trigger_check_refs(trigger_data, file)) { 5723 ret = -EBUSY; 5724 goto out_free; 5725 } 5726 5727 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file); 5728 se_name = trace_event_name(file->event_call); 5729 se = find_synth_event(se_name); 5730 if (se) 5731 se->ref--; 5732 ret = 0; 5733 goto out_free; 5734 } 5735 5736 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); 5737 /* 5738 * The above returns on success the # of triggers registered, 5739 * but if it didn't register any it returns zero. Consider no 5740 * triggers registered a failure too. 5741 */ 5742 if (!ret) { 5743 if (!(attrs->pause || attrs->cont || attrs->clear)) 5744 ret = -ENOENT; 5745 goto out_free; 5746 } else if (ret < 0) 5747 goto out_free; 5748 5749 if (get_named_trigger_data(trigger_data)) 5750 goto enable; 5751 5752 if (has_hist_vars(hist_data)) 5753 save_hist_vars(hist_data); 5754 5755 ret = create_actions(hist_data); 5756 if (ret) 5757 goto out_unreg; 5758 5759 ret = tracing_map_init(hist_data->map); 5760 if (ret) 5761 goto out_unreg; 5762 enable: 5763 ret = hist_trigger_enable(trigger_data, file); 5764 if (ret) 5765 goto out_unreg; 5766 5767 se_name = trace_event_name(file->event_call); 5768 se = find_synth_event(se_name); 5769 if (se) 5770 se->ref++; 5771 /* Just return zero, not the number of registered triggers */ 5772 ret = 0; 5773 out: 5774 if (ret == 0) 5775 hist_err_clear(); 5776 5777 return ret; 5778 out_unreg: 5779 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file); 5780 out_free: 5781 if (cmd_ops->set_filter) 5782 cmd_ops->set_filter(NULL, trigger_data, NULL); 5783 5784 remove_hist_vars(hist_data); 5785 5786 kfree(trigger_data); 5787 5788 destroy_hist_data(hist_data); 5789 goto out; 5790 } 5791 5792 static struct event_command trigger_hist_cmd = { 5793 .name = "hist", 5794 .trigger_type = ETT_EVENT_HIST, 5795 .flags = EVENT_CMD_FL_NEEDS_REC, 5796 .func = event_hist_trigger_func, 5797 .reg = hist_register_trigger, 5798 .unreg = hist_unregister_trigger, 5799 .unreg_all = hist_unreg_all, 5800 .get_trigger_ops = event_hist_get_trigger_ops, 5801 .set_filter = set_trigger_filter, 5802 }; 5803 5804 __init int register_trigger_hist_cmd(void) 5805 { 5806 int ret; 5807 5808 ret = register_event_command(&trigger_hist_cmd); 5809 WARN_ON(ret < 0); 5810 5811 return ret; 5812 } 5813 5814 static void 5815 hist_enable_trigger(struct event_trigger_data *data, void *rec, 5816 struct ring_buffer_event *event) 5817 { 5818 struct enable_trigger_data *enable_data = data->private_data; 5819 struct event_trigger_data *test; 5820 5821 list_for_each_entry_rcu(test, &enable_data->file->triggers, list, 5822 lockdep_is_held(&event_mutex)) { 5823 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 5824 if (enable_data->enable) 5825 test->paused = false; 5826 else 5827 test->paused = true; 5828 } 5829 } 5830 } 5831 5832 static void 5833 hist_enable_count_trigger(struct event_trigger_data *data, void *rec, 5834 struct ring_buffer_event *event) 5835 { 5836 if (!data->count) 5837 return; 5838 5839 if (data->count != -1) 5840 (data->count)--; 5841 5842 hist_enable_trigger(data, rec, event); 5843 } 5844 
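/*
 * hist_enable_trigger()/hist_enable_count_trigger() above implement the
 * enable_hist and disable_hist commands registered below.  They don't
 * create histograms themselves; they pause or unpause existing hist
 * triggers on a target event whenever the event they are attached to
 * fires.  Illustrative usage (event names are examples only), assuming
 * a hist trigger already exists on sched_switch:
 *
 *   echo 'enable_hist:sched:sched_switch' > events/sched/sched_wakeup/trigger
 *
 * With a :count suffix, the counted variant decrements data->count on
 * each hit and stops toggling once it reaches zero.
 */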
5845 static struct event_trigger_ops hist_enable_trigger_ops = { 5846 .func = hist_enable_trigger, 5847 .print = event_enable_trigger_print, 5848 .init = event_trigger_init, 5849 .free = event_enable_trigger_free, 5850 }; 5851 5852 static struct event_trigger_ops hist_enable_count_trigger_ops = { 5853 .func = hist_enable_count_trigger, 5854 .print = event_enable_trigger_print, 5855 .init = event_trigger_init, 5856 .free = event_enable_trigger_free, 5857 }; 5858 5859 static struct event_trigger_ops hist_disable_trigger_ops = { 5860 .func = hist_enable_trigger, 5861 .print = event_enable_trigger_print, 5862 .init = event_trigger_init, 5863 .free = event_enable_trigger_free, 5864 }; 5865 5866 static struct event_trigger_ops hist_disable_count_trigger_ops = { 5867 .func = hist_enable_count_trigger, 5868 .print = event_enable_trigger_print, 5869 .init = event_trigger_init, 5870 .free = event_enable_trigger_free, 5871 }; 5872 5873 static struct event_trigger_ops * 5874 hist_enable_get_trigger_ops(char *cmd, char *param) 5875 { 5876 struct event_trigger_ops *ops; 5877 bool enable; 5878 5879 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0); 5880 5881 if (enable) 5882 ops = param ? &hist_enable_count_trigger_ops : 5883 &hist_enable_trigger_ops; 5884 else 5885 ops = param ? &hist_disable_count_trigger_ops : 5886 &hist_disable_trigger_ops; 5887 5888 return ops; 5889 } 5890 5891 static void hist_enable_unreg_all(struct trace_event_file *file) 5892 { 5893 struct event_trigger_data *test, *n; 5894 5895 list_for_each_entry_safe(test, n, &file->triggers, list) { 5896 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) { 5897 list_del_rcu(&test->list); 5898 update_cond_flag(file); 5899 trace_event_trigger_enable_disable(file, 0); 5900 if (test->ops->free) 5901 test->ops->free(test->ops, test); 5902 } 5903 } 5904 } 5905 5906 static struct event_command trigger_hist_enable_cmd = { 5907 .name = ENABLE_HIST_STR, 5908 .trigger_type = ETT_HIST_ENABLE, 5909 .func = event_enable_trigger_func, 5910 .reg = event_enable_register_trigger, 5911 .unreg = event_enable_unregister_trigger, 5912 .unreg_all = hist_enable_unreg_all, 5913 .get_trigger_ops = hist_enable_get_trigger_ops, 5914 .set_filter = set_trigger_filter, 5915 }; 5916 5917 static struct event_command trigger_hist_disable_cmd = { 5918 .name = DISABLE_HIST_STR, 5919 .trigger_type = ETT_HIST_ENABLE, 5920 .func = event_enable_trigger_func, 5921 .reg = event_enable_register_trigger, 5922 .unreg = event_enable_unregister_trigger, 5923 .unreg_all = hist_enable_unreg_all, 5924 .get_trigger_ops = hist_enable_get_trigger_ops, 5925 .set_filter = set_trigger_filter, 5926 }; 5927 5928 static __init void unregister_trigger_hist_enable_disable_cmds(void) 5929 { 5930 unregister_event_command(&trigger_hist_enable_cmd); 5931 unregister_event_command(&trigger_hist_disable_cmd); 5932 } 5933 5934 __init int register_trigger_hist_enable_disable_cmds(void) 5935 { 5936 int ret; 5937 5938 ret = register_event_command(&trigger_hist_enable_cmd); 5939 if (WARN_ON(ret < 0)) 5940 return ret; 5941 ret = register_event_command(&trigger_hist_disable_cmd); 5942 if (WARN_ON(ret < 0)) 5943 unregister_trigger_hist_enable_disable_cmds(); 5944 5945 return ret; 5946 } 5947