1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * trace_events_hist - trace event hist triggers 4 * 5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com> 6 */ 7 8 #include <linux/module.h> 9 #include <linux/kallsyms.h> 10 #include <linux/security.h> 11 #include <linux/mutex.h> 12 #include <linux/slab.h> 13 #include <linux/stacktrace.h> 14 #include <linux/rculist.h> 15 #include <linux/tracefs.h> 16 17 /* for gfp flag names */ 18 #include <linux/trace_events.h> 19 #include <trace/events/mmflags.h> 20 21 #include "tracing_map.h" 22 #include "trace_synth.h" 23 24 #define ERRORS \ 25 C(NONE, "No error"), \ 26 C(DUPLICATE_VAR, "Variable already defined"), \ 27 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \ 28 C(TOO_MANY_VARS, "Too many variables defined"), \ 29 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \ 30 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \ 31 C(TRIGGER_EEXIST, "Hist trigger already exists"), \ 32 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \ 33 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \ 34 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \ 35 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \ 36 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \ 37 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \ 38 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \ 39 C(HIST_NOT_FOUND, "Matching event histogram not found"), \ 40 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \ 41 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \ 42 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \ 43 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \ 44 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \ 45 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \ 46 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \ 47 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \ 48 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \ 49 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \ 50 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \ 51 C(TOO_MANY_PARAMS, "Too many action params"), \ 52 C(PARAM_NOT_FOUND, "Couldn't find param"), \ 53 C(INVALID_PARAM, "Invalid action param"), \ 54 C(ACTION_NOT_FOUND, "No action found"), \ 55 C(NO_SAVE_PARAMS, "No params found for save()"), \ 56 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \ 57 C(ACTION_MISMATCH, "Handler doesn't support action"), \ 58 C(NO_CLOSING_PAREN, "No closing paren found"), \ 59 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \ 60 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \ 61 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \ 62 C(VAR_NOT_FOUND, "Couldn't find variable"), \ 63 C(FIELD_NOT_FOUND, "Couldn't find field"), \ 64 C(EMPTY_ASSIGNMENT, "Empty assignment"), \ 65 C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \ 66 C(EMPTY_SORT_FIELD, "Empty sort field"), \ 67 C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \ 68 C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \ 69 C(INVALID_STR_OPERAND, "String type can not be an operand in expression"), \ 70 C(EXPECT_NUMBER, "Expecting numeric literal"), \ 71 C(UNARY_MINUS_SUBEXPR, "Unary minus not supported in sub-expressions"), \ 72 C(DIVISION_BY_ZERO, "Division by zero"), 73 74 #undef C 75 #define C(a, b) HIST_ERR_##a 76 77 enum { ERRORS }; 78 79 #undef C 80 #define C(a, b) b 81 82 static const char *err_text[] = { ERRORS }; 83 84 struct hist_field; 85 86 typedef u64 (*hist_field_fn_t) (struct hist_field *field, 87 struct tracing_map_elt *elt, 88 struct trace_buffer *buffer, 89 struct ring_buffer_event *rbe, 90 void *event); 91 92 #define HIST_FIELD_OPERANDS_MAX 2 93 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX) 94 #define HIST_ACTIONS_MAX 8 95 #define HIST_CONST_DIGITS_MAX 21 96 #define HIST_DIV_SHIFT 20 /* For optimizing division by constants */ 97 98 enum field_op_id { 99 FIELD_OP_NONE, 100 FIELD_OP_PLUS, 101 FIELD_OP_MINUS, 102 FIELD_OP_UNARY_MINUS, 103 FIELD_OP_DIV, 104 FIELD_OP_MULT, 105 }; 106 107 enum hist_field_fn { 108 HIST_FIELD_FN_NOP, 109 HIST_FIELD_FN_VAR_REF, 110 HIST_FIELD_FN_COUNTER, 111 HIST_FIELD_FN_CONST, 112 HIST_FIELD_FN_LOG2, 113 HIST_FIELD_FN_BUCKET, 114 HIST_FIELD_FN_TIMESTAMP, 115 HIST_FIELD_FN_CPU, 116 HIST_FIELD_FN_STRING, 117 HIST_FIELD_FN_DYNSTRING, 118 HIST_FIELD_FN_RELDYNSTRING, 119 HIST_FIELD_FN_PSTRING, 120 HIST_FIELD_FN_S64, 121 HIST_FIELD_FN_U64, 122 HIST_FIELD_FN_S32, 123 HIST_FIELD_FN_U32, 124 HIST_FIELD_FN_S16, 125 HIST_FIELD_FN_U16, 126 HIST_FIELD_FN_S8, 127 HIST_FIELD_FN_U8, 128 HIST_FIELD_FN_UMINUS, 129 HIST_FIELD_FN_MINUS, 130 HIST_FIELD_FN_PLUS, 131 HIST_FIELD_FN_DIV, 132 HIST_FIELD_FN_MULT, 133 HIST_FIELD_FN_DIV_POWER2, 134 HIST_FIELD_FN_DIV_NOT_POWER2, 135 HIST_FIELD_FN_DIV_MULT_SHIFT, 136 HIST_FIELD_FN_EXECNAME, 137 }; 138 139 /* 140 * A hist_var (histogram variable) contains variable information for 141 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF 142 * flag set. A hist_var has a variable name e.g. ts0, and is 143 * associated with a given histogram trigger, as specified by 144 * hist_data. The hist_var idx is the unique index assigned to the 145 * variable by the hist trigger's tracing_map. The idx is what is 146 * used to set a variable's value and, by a variable reference, to 147 * retrieve it. 148 */ 149 struct hist_var { 150 char *name; 151 struct hist_trigger_data *hist_data; 152 unsigned int idx; 153 }; 154 155 struct hist_field { 156 struct ftrace_event_field *field; 157 unsigned long flags; 158 unsigned long buckets; 159 const char *type; 160 struct hist_field *operands[HIST_FIELD_OPERANDS_MAX]; 161 struct hist_trigger_data *hist_data; 162 enum hist_field_fn fn_num; 163 unsigned int ref; 164 unsigned int size; 165 unsigned int offset; 166 unsigned int is_signed; 167 168 /* 169 * Variable fields contain variable-specific info in var. 170 */ 171 struct hist_var var; 172 enum field_op_id operator; 173 char *system; 174 char *event_name; 175 176 /* 177 * The name field is used for EXPR and VAR_REF fields. VAR 178 * fields contain the variable name in var.name. 179 */ 180 char *name; 181 182 /* 183 * When a histogram trigger is hit, if it has any references 184 * to variables, the values of those variables are collected 185 * into a var_ref_vals array by resolve_var_refs(). The 186 * current value of each variable is read from the tracing_map 187 * using the hist field's hist_var.idx and entered into the 188 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx]. 189 */ 190 unsigned int var_ref_idx; 191 bool read_once; 192 193 unsigned int var_str_idx; 194 195 /* Numeric literals are represented as u64 */ 196 u64 constant; 197 /* Used to optimize division by constants */ 198 u64 div_multiplier; 199 }; 200 201 static u64 hist_fn_call(struct hist_field *hist_field, 202 struct tracing_map_elt *elt, 203 struct trace_buffer *buffer, 204 struct ring_buffer_event *rbe, 205 void *event); 206 207 static u64 hist_field_const(struct hist_field *field, 208 struct tracing_map_elt *elt, 209 struct trace_buffer *buffer, 210 struct ring_buffer_event *rbe, 211 void *event) 212 { 213 return field->constant; 214 } 215 216 static u64 hist_field_counter(struct hist_field *field, 217 struct tracing_map_elt *elt, 218 struct trace_buffer *buffer, 219 struct ring_buffer_event *rbe, 220 void *event) 221 { 222 return 1; 223 } 224 225 static u64 hist_field_string(struct hist_field *hist_field, 226 struct tracing_map_elt *elt, 227 struct trace_buffer *buffer, 228 struct ring_buffer_event *rbe, 229 void *event) 230 { 231 char *addr = (char *)(event + hist_field->field->offset); 232 233 return (u64)(unsigned long)addr; 234 } 235 236 static u64 hist_field_dynstring(struct hist_field *hist_field, 237 struct tracing_map_elt *elt, 238 struct trace_buffer *buffer, 239 struct ring_buffer_event *rbe, 240 void *event) 241 { 242 u32 str_item = *(u32 *)(event + hist_field->field->offset); 243 int str_loc = str_item & 0xffff; 244 char *addr = (char *)(event + str_loc); 245 246 return (u64)(unsigned long)addr; 247 } 248 249 static u64 hist_field_reldynstring(struct hist_field *hist_field, 250 struct tracing_map_elt *elt, 251 struct trace_buffer *buffer, 252 struct ring_buffer_event *rbe, 253 void *event) 254 { 255 u32 *item = event + hist_field->field->offset; 256 u32 str_item = *item; 257 int str_loc = str_item & 0xffff; 258 char *addr = (char *)&item[1] + str_loc; 259 260 return (u64)(unsigned long)addr; 261 } 262 263 static u64 hist_field_pstring(struct hist_field *hist_field, 264 struct tracing_map_elt *elt, 265 struct trace_buffer *buffer, 266 struct ring_buffer_event *rbe, 267 void *event) 268 { 269 char **addr = (char **)(event + hist_field->field->offset); 270 271 return (u64)(unsigned long)*addr; 272 } 273 274 static u64 hist_field_log2(struct hist_field *hist_field, 275 struct tracing_map_elt *elt, 276 struct trace_buffer *buffer, 277 struct ring_buffer_event *rbe, 278 void *event) 279 { 280 struct hist_field *operand = hist_field->operands[0]; 281 282 u64 val = hist_fn_call(operand, elt, buffer, rbe, event); 283 284 return (u64) ilog2(roundup_pow_of_two(val)); 285 } 286 287 static u64 hist_field_bucket(struct hist_field *hist_field, 288 struct tracing_map_elt *elt, 289 struct trace_buffer *buffer, 290 struct ring_buffer_event *rbe, 291 void *event) 292 { 293 struct hist_field *operand = hist_field->operands[0]; 294 unsigned long buckets = hist_field->buckets; 295 296 u64 val = hist_fn_call(operand, elt, buffer, rbe, event); 297 298 if (WARN_ON_ONCE(!buckets)) 299 return val; 300 301 if (val >= LONG_MAX) 302 val = div64_ul(val, buckets); 303 else 304 val = (u64)((unsigned long)val / buckets); 305 return val * buckets; 306 } 307 308 static u64 hist_field_plus(struct hist_field *hist_field, 309 struct tracing_map_elt *elt, 310 struct trace_buffer *buffer, 311 struct ring_buffer_event *rbe, 312 void *event) 313 { 314 struct hist_field *operand1 = hist_field->operands[0]; 315 struct hist_field *operand2 = hist_field->operands[1]; 316 317 u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event); 318 u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event); 319 320 return val1 + val2; 321 } 322 323 static u64 hist_field_minus(struct hist_field *hist_field, 324 struct tracing_map_elt *elt, 325 struct trace_buffer *buffer, 326 struct ring_buffer_event *rbe, 327 void *event) 328 { 329 struct hist_field *operand1 = hist_field->operands[0]; 330 struct hist_field *operand2 = hist_field->operands[1]; 331 332 u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event); 333 u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event); 334 335 return val1 - val2; 336 } 337 338 static u64 hist_field_div(struct hist_field *hist_field, 339 struct tracing_map_elt *elt, 340 struct trace_buffer *buffer, 341 struct ring_buffer_event *rbe, 342 void *event) 343 { 344 struct hist_field *operand1 = hist_field->operands[0]; 345 struct hist_field *operand2 = hist_field->operands[1]; 346 347 u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event); 348 u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event); 349 350 /* Return -1 for the undefined case */ 351 if (!val2) 352 return -1; 353 354 /* Use shift if the divisor is a power of 2 */ 355 if (!(val2 & (val2 - 1))) 356 return val1 >> __ffs64(val2); 357 358 return div64_u64(val1, val2); 359 } 360 361 static u64 div_by_power_of_two(struct hist_field *hist_field, 362 struct tracing_map_elt *elt, 363 struct trace_buffer *buffer, 364 struct ring_buffer_event *rbe, 365 void *event) 366 { 367 struct hist_field *operand1 = hist_field->operands[0]; 368 struct hist_field *operand2 = hist_field->operands[1]; 369 370 u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event); 371 372 return val1 >> __ffs64(operand2->constant); 373 } 374 375 static u64 div_by_not_power_of_two(struct hist_field *hist_field, 376 struct tracing_map_elt *elt, 377 struct trace_buffer *buffer, 378 struct ring_buffer_event *rbe, 379 void *event) 380 { 381 struct hist_field *operand1 = hist_field->operands[0]; 382 struct hist_field *operand2 = hist_field->operands[1]; 383 384 u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event); 385 386 return div64_u64(val1, operand2->constant); 387 } 388 389 static u64 div_by_mult_and_shift(struct hist_field *hist_field, 390 struct tracing_map_elt *elt, 391 struct trace_buffer *buffer, 392 struct ring_buffer_event *rbe, 393 void *event) 394 { 395 struct hist_field *operand1 = hist_field->operands[0]; 396 struct hist_field *operand2 = hist_field->operands[1]; 397 398 u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event); 399 400 /* 401 * If the divisor is a constant, do a multiplication and shift instead. 402 * 403 * Choose Z = some power of 2. If Y <= Z, then: 404 * X / Y = (X * (Z / Y)) / Z 405 * 406 * (Z / Y) is a constant (mult) which is calculated at parse time, so: 407 * X / Y = (X * mult) / Z 408 * 409 * The division by Z can be replaced by a shift since Z is a power of 2: 410 * X / Y = (X * mult) >> HIST_DIV_SHIFT 411 * 412 * As long, as X < Z the results will not be off by more than 1. 413 */ 414 if (val1 < (1 << HIST_DIV_SHIFT)) { 415 u64 mult = operand2->div_multiplier; 416 417 return (val1 * mult + ((1 << HIST_DIV_SHIFT) - 1)) >> HIST_DIV_SHIFT; 418 } 419 420 return div64_u64(val1, operand2->constant); 421 } 422 423 static u64 hist_field_mult(struct hist_field *hist_field, 424 struct tracing_map_elt *elt, 425 struct trace_buffer *buffer, 426 struct ring_buffer_event *rbe, 427 void *event) 428 { 429 struct hist_field *operand1 = hist_field->operands[0]; 430 struct hist_field *operand2 = hist_field->operands[1]; 431 432 u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event); 433 u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event); 434 435 return val1 * val2; 436 } 437 438 static u64 hist_field_unary_minus(struct hist_field *hist_field, 439 struct tracing_map_elt *elt, 440 struct trace_buffer *buffer, 441 struct ring_buffer_event *rbe, 442 void *event) 443 { 444 struct hist_field *operand = hist_field->operands[0]; 445 446 s64 sval = (s64)hist_fn_call(operand, elt, buffer, rbe, event); 447 u64 val = (u64)-sval; 448 449 return val; 450 } 451 452 #define DEFINE_HIST_FIELD_FN(type) \ 453 static u64 hist_field_##type(struct hist_field *hist_field, \ 454 struct tracing_map_elt *elt, \ 455 struct trace_buffer *buffer, \ 456 struct ring_buffer_event *rbe, \ 457 void *event) \ 458 { \ 459 type *addr = (type *)(event + hist_field->field->offset); \ 460 \ 461 return (u64)(unsigned long)*addr; \ 462 } 463 464 DEFINE_HIST_FIELD_FN(s64); 465 DEFINE_HIST_FIELD_FN(u64); 466 DEFINE_HIST_FIELD_FN(s32); 467 DEFINE_HIST_FIELD_FN(u32); 468 DEFINE_HIST_FIELD_FN(s16); 469 DEFINE_HIST_FIELD_FN(u16); 470 DEFINE_HIST_FIELD_FN(s8); 471 DEFINE_HIST_FIELD_FN(u8); 472 473 #define for_each_hist_field(i, hist_data) \ 474 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++) 475 476 #define for_each_hist_val_field(i, hist_data) \ 477 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++) 478 479 #define for_each_hist_key_field(i, hist_data) \ 480 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++) 481 482 #define HIST_STACKTRACE_DEPTH 16 483 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long)) 484 #define HIST_STACKTRACE_SKIP 5 485 486 #define HITCOUNT_IDX 0 487 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE) 488 489 enum hist_field_flags { 490 HIST_FIELD_FL_HITCOUNT = 1 << 0, 491 HIST_FIELD_FL_KEY = 1 << 1, 492 HIST_FIELD_FL_STRING = 1 << 2, 493 HIST_FIELD_FL_HEX = 1 << 3, 494 HIST_FIELD_FL_SYM = 1 << 4, 495 HIST_FIELD_FL_SYM_OFFSET = 1 << 5, 496 HIST_FIELD_FL_EXECNAME = 1 << 6, 497 HIST_FIELD_FL_SYSCALL = 1 << 7, 498 HIST_FIELD_FL_STACKTRACE = 1 << 8, 499 HIST_FIELD_FL_LOG2 = 1 << 9, 500 HIST_FIELD_FL_TIMESTAMP = 1 << 10, 501 HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11, 502 HIST_FIELD_FL_VAR = 1 << 12, 503 HIST_FIELD_FL_EXPR = 1 << 13, 504 HIST_FIELD_FL_VAR_REF = 1 << 14, 505 HIST_FIELD_FL_CPU = 1 << 15, 506 HIST_FIELD_FL_ALIAS = 1 << 16, 507 HIST_FIELD_FL_BUCKET = 1 << 17, 508 HIST_FIELD_FL_CONST = 1 << 18, 509 }; 510 511 struct var_defs { 512 unsigned int n_vars; 513 char *name[TRACING_MAP_VARS_MAX]; 514 char *expr[TRACING_MAP_VARS_MAX]; 515 }; 516 517 struct hist_trigger_attrs { 518 char *keys_str; 519 char *vals_str; 520 char *sort_key_str; 521 char *name; 522 char *clock; 523 bool pause; 524 bool cont; 525 bool clear; 526 bool ts_in_usecs; 527 unsigned int map_bits; 528 529 char *assignment_str[TRACING_MAP_VARS_MAX]; 530 unsigned int n_assignments; 531 532 char *action_str[HIST_ACTIONS_MAX]; 533 unsigned int n_actions; 534 535 struct var_defs var_defs; 536 }; 537 538 struct field_var { 539 struct hist_field *var; 540 struct hist_field *val; 541 }; 542 543 struct field_var_hist { 544 struct hist_trigger_data *hist_data; 545 char *cmd; 546 }; 547 548 struct hist_trigger_data { 549 struct hist_field *fields[HIST_FIELDS_MAX]; 550 unsigned int n_vals; 551 unsigned int n_keys; 552 unsigned int n_fields; 553 unsigned int n_vars; 554 unsigned int n_var_str; 555 unsigned int key_size; 556 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX]; 557 unsigned int n_sort_keys; 558 struct trace_event_file *event_file; 559 struct hist_trigger_attrs *attrs; 560 struct tracing_map *map; 561 bool enable_timestamps; 562 bool remove; 563 struct hist_field *var_refs[TRACING_MAP_VARS_MAX]; 564 unsigned int n_var_refs; 565 566 struct action_data *actions[HIST_ACTIONS_MAX]; 567 unsigned int n_actions; 568 569 struct field_var *field_vars[SYNTH_FIELDS_MAX]; 570 unsigned int n_field_vars; 571 unsigned int n_field_var_str; 572 struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX]; 573 unsigned int n_field_var_hists; 574 575 struct field_var *save_vars[SYNTH_FIELDS_MAX]; 576 unsigned int n_save_vars; 577 unsigned int n_save_var_str; 578 }; 579 580 struct action_data; 581 582 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data, 583 struct tracing_map_elt *elt, 584 struct trace_buffer *buffer, void *rec, 585 struct ring_buffer_event *rbe, void *key, 586 struct action_data *data, u64 *var_ref_vals); 587 588 typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val); 589 590 enum handler_id { 591 HANDLER_ONMATCH = 1, 592 HANDLER_ONMAX, 593 HANDLER_ONCHANGE, 594 }; 595 596 enum action_id { 597 ACTION_SAVE = 1, 598 ACTION_TRACE, 599 ACTION_SNAPSHOT, 600 }; 601 602 struct action_data { 603 enum handler_id handler; 604 enum action_id action; 605 char *action_name; 606 action_fn_t fn; 607 608 unsigned int n_params; 609 char *params[SYNTH_FIELDS_MAX]; 610 611 /* 612 * When a histogram trigger is hit, the values of any 613 * references to variables, including variables being passed 614 * as parameters to synthetic events, are collected into a 615 * var_ref_vals array. This var_ref_idx array is an array of 616 * indices into the var_ref_vals array, one for each synthetic 617 * event param, and is passed to the synthetic event 618 * invocation. 619 */ 620 unsigned int var_ref_idx[TRACING_MAP_VARS_MAX]; 621 struct synth_event *synth_event; 622 bool use_trace_keyword; 623 char *synth_event_name; 624 625 union { 626 struct { 627 char *event; 628 char *event_system; 629 } match_data; 630 631 struct { 632 /* 633 * var_str contains the $-unstripped variable 634 * name referenced by var_ref, and used when 635 * printing the action. Because var_ref 636 * creation is deferred to create_actions(), 637 * we need a per-action way to save it until 638 * then, thus var_str. 639 */ 640 char *var_str; 641 642 /* 643 * var_ref refers to the variable being 644 * tracked e.g onmax($var). 645 */ 646 struct hist_field *var_ref; 647 648 /* 649 * track_var contains the 'invisible' tracking 650 * variable created to keep the current 651 * e.g. max value. 652 */ 653 struct hist_field *track_var; 654 655 check_track_val_fn_t check_val; 656 action_fn_t save_data; 657 } track_data; 658 }; 659 }; 660 661 struct track_data { 662 u64 track_val; 663 bool updated; 664 665 unsigned int key_len; 666 void *key; 667 struct tracing_map_elt elt; 668 669 struct action_data *action_data; 670 struct hist_trigger_data *hist_data; 671 }; 672 673 struct hist_elt_data { 674 char *comm; 675 u64 *var_ref_vals; 676 char **field_var_str; 677 int n_field_var_str; 678 }; 679 680 struct snapshot_context { 681 struct tracing_map_elt *elt; 682 void *key; 683 }; 684 685 /* 686 * Returns the specific division function to use if the divisor 687 * is constant. This avoids extra branches when the trigger is hit. 688 */ 689 static enum hist_field_fn hist_field_get_div_fn(struct hist_field *divisor) 690 { 691 u64 div = divisor->constant; 692 693 if (!(div & (div - 1))) 694 return HIST_FIELD_FN_DIV_POWER2; 695 696 /* If the divisor is too large, do a regular division */ 697 if (div > (1 << HIST_DIV_SHIFT)) 698 return HIST_FIELD_FN_DIV_NOT_POWER2; 699 700 divisor->div_multiplier = div64_u64((u64)(1 << HIST_DIV_SHIFT), div); 701 return HIST_FIELD_FN_DIV_MULT_SHIFT; 702 } 703 704 static void track_data_free(struct track_data *track_data) 705 { 706 struct hist_elt_data *elt_data; 707 708 if (!track_data) 709 return; 710 711 kfree(track_data->key); 712 713 elt_data = track_data->elt.private_data; 714 if (elt_data) { 715 kfree(elt_data->comm); 716 kfree(elt_data); 717 } 718 719 kfree(track_data); 720 } 721 722 static struct track_data *track_data_alloc(unsigned int key_len, 723 struct action_data *action_data, 724 struct hist_trigger_data *hist_data) 725 { 726 struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL); 727 struct hist_elt_data *elt_data; 728 729 if (!data) 730 return ERR_PTR(-ENOMEM); 731 732 data->key = kzalloc(key_len, GFP_KERNEL); 733 if (!data->key) { 734 track_data_free(data); 735 return ERR_PTR(-ENOMEM); 736 } 737 738 data->key_len = key_len; 739 data->action_data = action_data; 740 data->hist_data = hist_data; 741 742 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); 743 if (!elt_data) { 744 track_data_free(data); 745 return ERR_PTR(-ENOMEM); 746 } 747 748 data->elt.private_data = elt_data; 749 750 elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL); 751 if (!elt_data->comm) { 752 track_data_free(data); 753 return ERR_PTR(-ENOMEM); 754 } 755 756 return data; 757 } 758 759 #define HIST_PREFIX "hist:" 760 761 static char *last_cmd; 762 static char last_cmd_loc[MAX_FILTER_STR_VAL]; 763 764 static int errpos(char *str) 765 { 766 if (!str || !last_cmd) 767 return 0; 768 769 return err_pos(last_cmd, str); 770 } 771 772 static void last_cmd_set(struct trace_event_file *file, char *str) 773 { 774 const char *system = NULL, *name = NULL; 775 struct trace_event_call *call; 776 int len; 777 778 if (!str) 779 return; 780 781 /* sizeof() contains the nul byte */ 782 len = sizeof(HIST_PREFIX) + strlen(str); 783 kfree(last_cmd); 784 last_cmd = kzalloc(len, GFP_KERNEL); 785 if (!last_cmd) 786 return; 787 788 strcpy(last_cmd, HIST_PREFIX); 789 /* Again, sizeof() contains the nul byte */ 790 len -= sizeof(HIST_PREFIX); 791 strncat(last_cmd, str, len); 792 793 if (file) { 794 call = file->event_call; 795 system = call->class->system; 796 if (system) { 797 name = trace_event_name(call); 798 if (!name) 799 system = NULL; 800 } 801 } 802 803 if (system) 804 snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, HIST_PREFIX "%s:%s", system, name); 805 } 806 807 static void hist_err(struct trace_array *tr, u8 err_type, u16 err_pos) 808 { 809 if (!last_cmd) 810 return; 811 812 tracing_log_err(tr, last_cmd_loc, last_cmd, err_text, 813 err_type, err_pos); 814 } 815 816 static void hist_err_clear(void) 817 { 818 if (last_cmd) 819 last_cmd[0] = '\0'; 820 last_cmd_loc[0] = '\0'; 821 } 822 823 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals, 824 unsigned int *var_ref_idx); 825 826 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals, 827 unsigned int *var_ref_idx) 828 { 829 struct tracepoint *tp = event->tp; 830 831 if (unlikely(atomic_read(&tp->key.enabled) > 0)) { 832 struct tracepoint_func *probe_func_ptr; 833 synth_probe_func_t probe_func; 834 void *__data; 835 836 if (!(cpu_online(raw_smp_processor_id()))) 837 return; 838 839 probe_func_ptr = rcu_dereference_sched((tp)->funcs); 840 if (probe_func_ptr) { 841 do { 842 probe_func = probe_func_ptr->func; 843 __data = probe_func_ptr->data; 844 probe_func(__data, var_ref_vals, var_ref_idx); 845 } while ((++probe_func_ptr)->func); 846 } 847 } 848 } 849 850 static void action_trace(struct hist_trigger_data *hist_data, 851 struct tracing_map_elt *elt, 852 struct trace_buffer *buffer, void *rec, 853 struct ring_buffer_event *rbe, void *key, 854 struct action_data *data, u64 *var_ref_vals) 855 { 856 struct synth_event *event = data->synth_event; 857 858 trace_synth(event, var_ref_vals, data->var_ref_idx); 859 } 860 861 struct hist_var_data { 862 struct list_head list; 863 struct hist_trigger_data *hist_data; 864 }; 865 866 static u64 hist_field_timestamp(struct hist_field *hist_field, 867 struct tracing_map_elt *elt, 868 struct trace_buffer *buffer, 869 struct ring_buffer_event *rbe, 870 void *event) 871 { 872 struct hist_trigger_data *hist_data = hist_field->hist_data; 873 struct trace_array *tr = hist_data->event_file->tr; 874 875 u64 ts = ring_buffer_event_time_stamp(buffer, rbe); 876 877 if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr)) 878 ts = ns2usecs(ts); 879 880 return ts; 881 } 882 883 static u64 hist_field_cpu(struct hist_field *hist_field, 884 struct tracing_map_elt *elt, 885 struct trace_buffer *buffer, 886 struct ring_buffer_event *rbe, 887 void *event) 888 { 889 int cpu = smp_processor_id(); 890 891 return cpu; 892 } 893 894 /** 895 * check_field_for_var_ref - Check if a VAR_REF field references a variable 896 * @hist_field: The VAR_REF field to check 897 * @var_data: The hist trigger that owns the variable 898 * @var_idx: The trigger variable identifier 899 * 900 * Check the given VAR_REF field to see whether or not it references 901 * the given variable associated with the given trigger. 902 * 903 * Return: The VAR_REF field if it does reference the variable, NULL if not 904 */ 905 static struct hist_field * 906 check_field_for_var_ref(struct hist_field *hist_field, 907 struct hist_trigger_data *var_data, 908 unsigned int var_idx) 909 { 910 WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF)); 911 912 if (hist_field && hist_field->var.idx == var_idx && 913 hist_field->var.hist_data == var_data) 914 return hist_field; 915 916 return NULL; 917 } 918 919 /** 920 * find_var_ref - Check if a trigger has a reference to a trigger variable 921 * @hist_data: The hist trigger that might have a reference to the variable 922 * @var_data: The hist trigger that owns the variable 923 * @var_idx: The trigger variable identifier 924 * 925 * Check the list of var_refs[] on the first hist trigger to see 926 * whether any of them are references to the variable on the second 927 * trigger. 928 * 929 * Return: The VAR_REF field referencing the variable if so, NULL if not 930 */ 931 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data, 932 struct hist_trigger_data *var_data, 933 unsigned int var_idx) 934 { 935 struct hist_field *hist_field; 936 unsigned int i; 937 938 for (i = 0; i < hist_data->n_var_refs; i++) { 939 hist_field = hist_data->var_refs[i]; 940 if (check_field_for_var_ref(hist_field, var_data, var_idx)) 941 return hist_field; 942 } 943 944 return NULL; 945 } 946 947 /** 948 * find_any_var_ref - Check if there is a reference to a given trigger variable 949 * @hist_data: The hist trigger 950 * @var_idx: The trigger variable identifier 951 * 952 * Check to see whether the given variable is currently referenced by 953 * any other trigger. 954 * 955 * The trigger the variable is defined on is explicitly excluded - the 956 * assumption being that a self-reference doesn't prevent a trigger 957 * from being removed. 958 * 959 * Return: The VAR_REF field referencing the variable if so, NULL if not 960 */ 961 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data, 962 unsigned int var_idx) 963 { 964 struct trace_array *tr = hist_data->event_file->tr; 965 struct hist_field *found = NULL; 966 struct hist_var_data *var_data; 967 968 list_for_each_entry(var_data, &tr->hist_vars, list) { 969 if (var_data->hist_data == hist_data) 970 continue; 971 found = find_var_ref(var_data->hist_data, hist_data, var_idx); 972 if (found) 973 break; 974 } 975 976 return found; 977 } 978 979 /** 980 * check_var_refs - Check if there is a reference to any of trigger's variables 981 * @hist_data: The hist trigger 982 * 983 * A trigger can define one or more variables. If any one of them is 984 * currently referenced by any other trigger, this function will 985 * determine that. 986 987 * Typically used to determine whether or not a trigger can be removed 988 * - if there are any references to a trigger's variables, it cannot. 989 * 990 * Return: True if there is a reference to any of trigger's variables 991 */ 992 static bool check_var_refs(struct hist_trigger_data *hist_data) 993 { 994 struct hist_field *field; 995 bool found = false; 996 int i; 997 998 for_each_hist_field(i, hist_data) { 999 field = hist_data->fields[i]; 1000 if (field && field->flags & HIST_FIELD_FL_VAR) { 1001 if (find_any_var_ref(hist_data, field->var.idx)) { 1002 found = true; 1003 break; 1004 } 1005 } 1006 } 1007 1008 return found; 1009 } 1010 1011 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data) 1012 { 1013 struct trace_array *tr = hist_data->event_file->tr; 1014 struct hist_var_data *var_data, *found = NULL; 1015 1016 list_for_each_entry(var_data, &tr->hist_vars, list) { 1017 if (var_data->hist_data == hist_data) { 1018 found = var_data; 1019 break; 1020 } 1021 } 1022 1023 return found; 1024 } 1025 1026 static bool field_has_hist_vars(struct hist_field *hist_field, 1027 unsigned int level) 1028 { 1029 int i; 1030 1031 if (level > 3) 1032 return false; 1033 1034 if (!hist_field) 1035 return false; 1036 1037 if (hist_field->flags & HIST_FIELD_FL_VAR || 1038 hist_field->flags & HIST_FIELD_FL_VAR_REF) 1039 return true; 1040 1041 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) { 1042 struct hist_field *operand; 1043 1044 operand = hist_field->operands[i]; 1045 if (field_has_hist_vars(operand, level + 1)) 1046 return true; 1047 } 1048 1049 return false; 1050 } 1051 1052 static bool has_hist_vars(struct hist_trigger_data *hist_data) 1053 { 1054 struct hist_field *hist_field; 1055 int i; 1056 1057 for_each_hist_field(i, hist_data) { 1058 hist_field = hist_data->fields[i]; 1059 if (field_has_hist_vars(hist_field, 0)) 1060 return true; 1061 } 1062 1063 return false; 1064 } 1065 1066 static int save_hist_vars(struct hist_trigger_data *hist_data) 1067 { 1068 struct trace_array *tr = hist_data->event_file->tr; 1069 struct hist_var_data *var_data; 1070 1071 var_data = find_hist_vars(hist_data); 1072 if (var_data) 1073 return 0; 1074 1075 if (tracing_check_open_get_tr(tr)) 1076 return -ENODEV; 1077 1078 var_data = kzalloc(sizeof(*var_data), GFP_KERNEL); 1079 if (!var_data) { 1080 trace_array_put(tr); 1081 return -ENOMEM; 1082 } 1083 1084 var_data->hist_data = hist_data; 1085 list_add(&var_data->list, &tr->hist_vars); 1086 1087 return 0; 1088 } 1089 1090 static void remove_hist_vars(struct hist_trigger_data *hist_data) 1091 { 1092 struct trace_array *tr = hist_data->event_file->tr; 1093 struct hist_var_data *var_data; 1094 1095 var_data = find_hist_vars(hist_data); 1096 if (!var_data) 1097 return; 1098 1099 if (WARN_ON(check_var_refs(hist_data))) 1100 return; 1101 1102 list_del(&var_data->list); 1103 1104 kfree(var_data); 1105 1106 trace_array_put(tr); 1107 } 1108 1109 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data, 1110 const char *var_name) 1111 { 1112 struct hist_field *hist_field, *found = NULL; 1113 int i; 1114 1115 for_each_hist_field(i, hist_data) { 1116 hist_field = hist_data->fields[i]; 1117 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR && 1118 strcmp(hist_field->var.name, var_name) == 0) { 1119 found = hist_field; 1120 break; 1121 } 1122 } 1123 1124 return found; 1125 } 1126 1127 static struct hist_field *find_var(struct hist_trigger_data *hist_data, 1128 struct trace_event_file *file, 1129 const char *var_name) 1130 { 1131 struct hist_trigger_data *test_data; 1132 struct event_trigger_data *test; 1133 struct hist_field *hist_field; 1134 1135 lockdep_assert_held(&event_mutex); 1136 1137 hist_field = find_var_field(hist_data, var_name); 1138 if (hist_field) 1139 return hist_field; 1140 1141 list_for_each_entry(test, &file->triggers, list) { 1142 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 1143 test_data = test->private_data; 1144 hist_field = find_var_field(test_data, var_name); 1145 if (hist_field) 1146 return hist_field; 1147 } 1148 } 1149 1150 return NULL; 1151 } 1152 1153 static struct trace_event_file *find_var_file(struct trace_array *tr, 1154 char *system, 1155 char *event_name, 1156 char *var_name) 1157 { 1158 struct hist_trigger_data *var_hist_data; 1159 struct hist_var_data *var_data; 1160 struct trace_event_file *file, *found = NULL; 1161 1162 if (system) 1163 return find_event_file(tr, system, event_name); 1164 1165 list_for_each_entry(var_data, &tr->hist_vars, list) { 1166 var_hist_data = var_data->hist_data; 1167 file = var_hist_data->event_file; 1168 if (file == found) 1169 continue; 1170 1171 if (find_var_field(var_hist_data, var_name)) { 1172 if (found) { 1173 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name)); 1174 return NULL; 1175 } 1176 1177 found = file; 1178 } 1179 } 1180 1181 return found; 1182 } 1183 1184 static struct hist_field *find_file_var(struct trace_event_file *file, 1185 const char *var_name) 1186 { 1187 struct hist_trigger_data *test_data; 1188 struct event_trigger_data *test; 1189 struct hist_field *hist_field; 1190 1191 lockdep_assert_held(&event_mutex); 1192 1193 list_for_each_entry(test, &file->triggers, list) { 1194 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 1195 test_data = test->private_data; 1196 hist_field = find_var_field(test_data, var_name); 1197 if (hist_field) 1198 return hist_field; 1199 } 1200 } 1201 1202 return NULL; 1203 } 1204 1205 static struct hist_field * 1206 find_match_var(struct hist_trigger_data *hist_data, char *var_name) 1207 { 1208 struct trace_array *tr = hist_data->event_file->tr; 1209 struct hist_field *hist_field, *found = NULL; 1210 struct trace_event_file *file; 1211 unsigned int i; 1212 1213 for (i = 0; i < hist_data->n_actions; i++) { 1214 struct action_data *data = hist_data->actions[i]; 1215 1216 if (data->handler == HANDLER_ONMATCH) { 1217 char *system = data->match_data.event_system; 1218 char *event_name = data->match_data.event; 1219 1220 file = find_var_file(tr, system, event_name, var_name); 1221 if (!file) 1222 continue; 1223 hist_field = find_file_var(file, var_name); 1224 if (hist_field) { 1225 if (found) { 1226 hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, 1227 errpos(var_name)); 1228 return ERR_PTR(-EINVAL); 1229 } 1230 1231 found = hist_field; 1232 } 1233 } 1234 } 1235 return found; 1236 } 1237 1238 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data, 1239 char *system, 1240 char *event_name, 1241 char *var_name) 1242 { 1243 struct trace_array *tr = hist_data->event_file->tr; 1244 struct hist_field *hist_field = NULL; 1245 struct trace_event_file *file; 1246 1247 if (!system || !event_name) { 1248 hist_field = find_match_var(hist_data, var_name); 1249 if (IS_ERR(hist_field)) 1250 return NULL; 1251 if (hist_field) 1252 return hist_field; 1253 } 1254 1255 file = find_var_file(tr, system, event_name, var_name); 1256 if (!file) 1257 return NULL; 1258 1259 hist_field = find_file_var(file, var_name); 1260 1261 return hist_field; 1262 } 1263 1264 static u64 hist_field_var_ref(struct hist_field *hist_field, 1265 struct tracing_map_elt *elt, 1266 struct trace_buffer *buffer, 1267 struct ring_buffer_event *rbe, 1268 void *event) 1269 { 1270 struct hist_elt_data *elt_data; 1271 u64 var_val = 0; 1272 1273 if (WARN_ON_ONCE(!elt)) 1274 return var_val; 1275 1276 elt_data = elt->private_data; 1277 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx]; 1278 1279 return var_val; 1280 } 1281 1282 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key, 1283 u64 *var_ref_vals, bool self) 1284 { 1285 struct hist_trigger_data *var_data; 1286 struct tracing_map_elt *var_elt; 1287 struct hist_field *hist_field; 1288 unsigned int i, var_idx; 1289 bool resolved = true; 1290 u64 var_val = 0; 1291 1292 for (i = 0; i < hist_data->n_var_refs; i++) { 1293 hist_field = hist_data->var_refs[i]; 1294 var_idx = hist_field->var.idx; 1295 var_data = hist_field->var.hist_data; 1296 1297 if (var_data == NULL) { 1298 resolved = false; 1299 break; 1300 } 1301 1302 if ((self && var_data != hist_data) || 1303 (!self && var_data == hist_data)) 1304 continue; 1305 1306 var_elt = tracing_map_lookup(var_data->map, key); 1307 if (!var_elt) { 1308 resolved = false; 1309 break; 1310 } 1311 1312 if (!tracing_map_var_set(var_elt, var_idx)) { 1313 resolved = false; 1314 break; 1315 } 1316 1317 if (self || !hist_field->read_once) 1318 var_val = tracing_map_read_var(var_elt, var_idx); 1319 else 1320 var_val = tracing_map_read_var_once(var_elt, var_idx); 1321 1322 var_ref_vals[i] = var_val; 1323 } 1324 1325 return resolved; 1326 } 1327 1328 static const char *hist_field_name(struct hist_field *field, 1329 unsigned int level) 1330 { 1331 const char *field_name = ""; 1332 1333 if (level > 1) 1334 return field_name; 1335 1336 if (field->field) 1337 field_name = field->field->name; 1338 else if (field->flags & HIST_FIELD_FL_LOG2 || 1339 field->flags & HIST_FIELD_FL_ALIAS || 1340 field->flags & HIST_FIELD_FL_BUCKET) 1341 field_name = hist_field_name(field->operands[0], ++level); 1342 else if (field->flags & HIST_FIELD_FL_CPU) 1343 field_name = "common_cpu"; 1344 else if (field->flags & HIST_FIELD_FL_EXPR || 1345 field->flags & HIST_FIELD_FL_VAR_REF) { 1346 if (field->system) { 1347 static char full_name[MAX_FILTER_STR_VAL]; 1348 1349 strcat(full_name, field->system); 1350 strcat(full_name, "."); 1351 strcat(full_name, field->event_name); 1352 strcat(full_name, "."); 1353 strcat(full_name, field->name); 1354 field_name = full_name; 1355 } else 1356 field_name = field->name; 1357 } else if (field->flags & HIST_FIELD_FL_TIMESTAMP) 1358 field_name = "common_timestamp"; 1359 1360 if (field_name == NULL) 1361 field_name = ""; 1362 1363 return field_name; 1364 } 1365 1366 static enum hist_field_fn select_value_fn(int field_size, int field_is_signed) 1367 { 1368 switch (field_size) { 1369 case 8: 1370 if (field_is_signed) 1371 return HIST_FIELD_FN_S64; 1372 else 1373 return HIST_FIELD_FN_U64; 1374 case 4: 1375 if (field_is_signed) 1376 return HIST_FIELD_FN_S32; 1377 else 1378 return HIST_FIELD_FN_U32; 1379 case 2: 1380 if (field_is_signed) 1381 return HIST_FIELD_FN_S16; 1382 else 1383 return HIST_FIELD_FN_U16; 1384 case 1: 1385 if (field_is_signed) 1386 return HIST_FIELD_FN_S8; 1387 else 1388 return HIST_FIELD_FN_U8; 1389 } 1390 1391 return HIST_FIELD_FN_NOP; 1392 } 1393 1394 static int parse_map_size(char *str) 1395 { 1396 unsigned long size, map_bits; 1397 int ret; 1398 1399 ret = kstrtoul(str, 0, &size); 1400 if (ret) 1401 goto out; 1402 1403 map_bits = ilog2(roundup_pow_of_two(size)); 1404 if (map_bits < TRACING_MAP_BITS_MIN || 1405 map_bits > TRACING_MAP_BITS_MAX) 1406 ret = -EINVAL; 1407 else 1408 ret = map_bits; 1409 out: 1410 return ret; 1411 } 1412 1413 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs) 1414 { 1415 unsigned int i; 1416 1417 if (!attrs) 1418 return; 1419 1420 for (i = 0; i < attrs->n_assignments; i++) 1421 kfree(attrs->assignment_str[i]); 1422 1423 for (i = 0; i < attrs->n_actions; i++) 1424 kfree(attrs->action_str[i]); 1425 1426 kfree(attrs->name); 1427 kfree(attrs->sort_key_str); 1428 kfree(attrs->keys_str); 1429 kfree(attrs->vals_str); 1430 kfree(attrs->clock); 1431 kfree(attrs); 1432 } 1433 1434 static int parse_action(char *str, struct hist_trigger_attrs *attrs) 1435 { 1436 int ret = -EINVAL; 1437 1438 if (attrs->n_actions >= HIST_ACTIONS_MAX) 1439 return ret; 1440 1441 if ((str_has_prefix(str, "onmatch(")) || 1442 (str_has_prefix(str, "onmax(")) || 1443 (str_has_prefix(str, "onchange("))) { 1444 attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL); 1445 if (!attrs->action_str[attrs->n_actions]) { 1446 ret = -ENOMEM; 1447 return ret; 1448 } 1449 attrs->n_actions++; 1450 ret = 0; 1451 } 1452 return ret; 1453 } 1454 1455 static int parse_assignment(struct trace_array *tr, 1456 char *str, struct hist_trigger_attrs *attrs) 1457 { 1458 int len, ret = 0; 1459 1460 if ((len = str_has_prefix(str, "key=")) || 1461 (len = str_has_prefix(str, "keys="))) { 1462 attrs->keys_str = kstrdup(str + len, GFP_KERNEL); 1463 if (!attrs->keys_str) { 1464 ret = -ENOMEM; 1465 goto out; 1466 } 1467 } else if ((len = str_has_prefix(str, "val=")) || 1468 (len = str_has_prefix(str, "vals=")) || 1469 (len = str_has_prefix(str, "values="))) { 1470 attrs->vals_str = kstrdup(str + len, GFP_KERNEL); 1471 if (!attrs->vals_str) { 1472 ret = -ENOMEM; 1473 goto out; 1474 } 1475 } else if ((len = str_has_prefix(str, "sort="))) { 1476 attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL); 1477 if (!attrs->sort_key_str) { 1478 ret = -ENOMEM; 1479 goto out; 1480 } 1481 } else if (str_has_prefix(str, "name=")) { 1482 attrs->name = kstrdup(str, GFP_KERNEL); 1483 if (!attrs->name) { 1484 ret = -ENOMEM; 1485 goto out; 1486 } 1487 } else if ((len = str_has_prefix(str, "clock="))) { 1488 str += len; 1489 1490 str = strstrip(str); 1491 attrs->clock = kstrdup(str, GFP_KERNEL); 1492 if (!attrs->clock) { 1493 ret = -ENOMEM; 1494 goto out; 1495 } 1496 } else if ((len = str_has_prefix(str, "size="))) { 1497 int map_bits = parse_map_size(str + len); 1498 1499 if (map_bits < 0) { 1500 ret = map_bits; 1501 goto out; 1502 } 1503 attrs->map_bits = map_bits; 1504 } else { 1505 char *assignment; 1506 1507 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) { 1508 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str)); 1509 ret = -EINVAL; 1510 goto out; 1511 } 1512 1513 assignment = kstrdup(str, GFP_KERNEL); 1514 if (!assignment) { 1515 ret = -ENOMEM; 1516 goto out; 1517 } 1518 1519 attrs->assignment_str[attrs->n_assignments++] = assignment; 1520 } 1521 out: 1522 return ret; 1523 } 1524 1525 static struct hist_trigger_attrs * 1526 parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str) 1527 { 1528 struct hist_trigger_attrs *attrs; 1529 int ret = 0; 1530 1531 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); 1532 if (!attrs) 1533 return ERR_PTR(-ENOMEM); 1534 1535 while (trigger_str) { 1536 char *str = strsep(&trigger_str, ":"); 1537 char *rhs; 1538 1539 rhs = strchr(str, '='); 1540 if (rhs) { 1541 if (!strlen(++rhs)) { 1542 ret = -EINVAL; 1543 hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str)); 1544 goto free; 1545 } 1546 ret = parse_assignment(tr, str, attrs); 1547 if (ret) 1548 goto free; 1549 } else if (strcmp(str, "pause") == 0) 1550 attrs->pause = true; 1551 else if ((strcmp(str, "cont") == 0) || 1552 (strcmp(str, "continue") == 0)) 1553 attrs->cont = true; 1554 else if (strcmp(str, "clear") == 0) 1555 attrs->clear = true; 1556 else { 1557 ret = parse_action(str, attrs); 1558 if (ret) 1559 goto free; 1560 } 1561 } 1562 1563 if (!attrs->keys_str) { 1564 ret = -EINVAL; 1565 goto free; 1566 } 1567 1568 if (!attrs->clock) { 1569 attrs->clock = kstrdup("global", GFP_KERNEL); 1570 if (!attrs->clock) { 1571 ret = -ENOMEM; 1572 goto free; 1573 } 1574 } 1575 1576 return attrs; 1577 free: 1578 destroy_hist_trigger_attrs(attrs); 1579 1580 return ERR_PTR(ret); 1581 } 1582 1583 static inline void save_comm(char *comm, struct task_struct *task) 1584 { 1585 if (!task->pid) { 1586 strcpy(comm, "<idle>"); 1587 return; 1588 } 1589 1590 if (WARN_ON_ONCE(task->pid < 0)) { 1591 strcpy(comm, "<XXX>"); 1592 return; 1593 } 1594 1595 strncpy(comm, task->comm, TASK_COMM_LEN); 1596 } 1597 1598 static void hist_elt_data_free(struct hist_elt_data *elt_data) 1599 { 1600 unsigned int i; 1601 1602 for (i = 0; i < elt_data->n_field_var_str; i++) 1603 kfree(elt_data->field_var_str[i]); 1604 1605 kfree(elt_data->field_var_str); 1606 1607 kfree(elt_data->comm); 1608 kfree(elt_data); 1609 } 1610 1611 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt) 1612 { 1613 struct hist_elt_data *elt_data = elt->private_data; 1614 1615 hist_elt_data_free(elt_data); 1616 } 1617 1618 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt) 1619 { 1620 struct hist_trigger_data *hist_data = elt->map->private_data; 1621 unsigned int size = TASK_COMM_LEN; 1622 struct hist_elt_data *elt_data; 1623 struct hist_field *hist_field; 1624 unsigned int i, n_str; 1625 1626 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL); 1627 if (!elt_data) 1628 return -ENOMEM; 1629 1630 for_each_hist_field(i, hist_data) { 1631 hist_field = hist_data->fields[i]; 1632 1633 if (hist_field->flags & HIST_FIELD_FL_EXECNAME) { 1634 elt_data->comm = kzalloc(size, GFP_KERNEL); 1635 if (!elt_data->comm) { 1636 kfree(elt_data); 1637 return -ENOMEM; 1638 } 1639 break; 1640 } 1641 } 1642 1643 n_str = hist_data->n_field_var_str + hist_data->n_save_var_str + 1644 hist_data->n_var_str; 1645 if (n_str > SYNTH_FIELDS_MAX) { 1646 hist_elt_data_free(elt_data); 1647 return -EINVAL; 1648 } 1649 1650 BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1)); 1651 1652 size = STR_VAR_LEN_MAX; 1653 1654 elt_data->field_var_str = kcalloc(n_str, sizeof(char *), GFP_KERNEL); 1655 if (!elt_data->field_var_str) { 1656 hist_elt_data_free(elt_data); 1657 return -EINVAL; 1658 } 1659 elt_data->n_field_var_str = n_str; 1660 1661 for (i = 0; i < n_str; i++) { 1662 elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL); 1663 if (!elt_data->field_var_str[i]) { 1664 hist_elt_data_free(elt_data); 1665 return -ENOMEM; 1666 } 1667 } 1668 1669 elt->private_data = elt_data; 1670 1671 return 0; 1672 } 1673 1674 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt) 1675 { 1676 struct hist_elt_data *elt_data = elt->private_data; 1677 1678 if (elt_data->comm) 1679 save_comm(elt_data->comm, current); 1680 } 1681 1682 static const struct tracing_map_ops hist_trigger_elt_data_ops = { 1683 .elt_alloc = hist_trigger_elt_data_alloc, 1684 .elt_free = hist_trigger_elt_data_free, 1685 .elt_init = hist_trigger_elt_data_init, 1686 }; 1687 1688 static const char *get_hist_field_flags(struct hist_field *hist_field) 1689 { 1690 const char *flags_str = NULL; 1691 1692 if (hist_field->flags & HIST_FIELD_FL_HEX) 1693 flags_str = "hex"; 1694 else if (hist_field->flags & HIST_FIELD_FL_SYM) 1695 flags_str = "sym"; 1696 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET) 1697 flags_str = "sym-offset"; 1698 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME) 1699 flags_str = "execname"; 1700 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL) 1701 flags_str = "syscall"; 1702 else if (hist_field->flags & HIST_FIELD_FL_LOG2) 1703 flags_str = "log2"; 1704 else if (hist_field->flags & HIST_FIELD_FL_BUCKET) 1705 flags_str = "buckets"; 1706 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS) 1707 flags_str = "usecs"; 1708 1709 return flags_str; 1710 } 1711 1712 static void expr_field_str(struct hist_field *field, char *expr) 1713 { 1714 if (field->flags & HIST_FIELD_FL_VAR_REF) 1715 strcat(expr, "$"); 1716 else if (field->flags & HIST_FIELD_FL_CONST) { 1717 char str[HIST_CONST_DIGITS_MAX]; 1718 1719 snprintf(str, HIST_CONST_DIGITS_MAX, "%llu", field->constant); 1720 strcat(expr, str); 1721 } 1722 1723 strcat(expr, hist_field_name(field, 0)); 1724 1725 if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) { 1726 const char *flags_str = get_hist_field_flags(field); 1727 1728 if (flags_str) { 1729 strcat(expr, "."); 1730 strcat(expr, flags_str); 1731 } 1732 } 1733 } 1734 1735 static char *expr_str(struct hist_field *field, unsigned int level) 1736 { 1737 char *expr; 1738 1739 if (level > 1) 1740 return NULL; 1741 1742 expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 1743 if (!expr) 1744 return NULL; 1745 1746 if (!field->operands[0]) { 1747 expr_field_str(field, expr); 1748 return expr; 1749 } 1750 1751 if (field->operator == FIELD_OP_UNARY_MINUS) { 1752 char *subexpr; 1753 1754 strcat(expr, "-("); 1755 subexpr = expr_str(field->operands[0], ++level); 1756 if (!subexpr) { 1757 kfree(expr); 1758 return NULL; 1759 } 1760 strcat(expr, subexpr); 1761 strcat(expr, ")"); 1762 1763 kfree(subexpr); 1764 1765 return expr; 1766 } 1767 1768 expr_field_str(field->operands[0], expr); 1769 1770 switch (field->operator) { 1771 case FIELD_OP_MINUS: 1772 strcat(expr, "-"); 1773 break; 1774 case FIELD_OP_PLUS: 1775 strcat(expr, "+"); 1776 break; 1777 case FIELD_OP_DIV: 1778 strcat(expr, "/"); 1779 break; 1780 case FIELD_OP_MULT: 1781 strcat(expr, "*"); 1782 break; 1783 default: 1784 kfree(expr); 1785 return NULL; 1786 } 1787 1788 expr_field_str(field->operands[1], expr); 1789 1790 return expr; 1791 } 1792 1793 /* 1794 * If field_op != FIELD_OP_NONE, *sep points to the root operator 1795 * of the expression tree to be evaluated. 1796 */ 1797 static int contains_operator(char *str, char **sep) 1798 { 1799 enum field_op_id field_op = FIELD_OP_NONE; 1800 char *minus_op, *plus_op, *div_op, *mult_op; 1801 1802 1803 /* 1804 * Report the last occurrence of the operators first, so that the 1805 * expression is evaluated left to right. This is important since 1806 * subtraction and division are not associative. 1807 * 1808 * e.g 1809 * 64/8/4/2 is 1, i.e 64/8/4/2 = ((64/8)/4)/2 1810 * 14-7-5-2 is 0, i.e 14-7-5-2 = ((14-7)-5)-2 1811 */ 1812 1813 /* 1814 * First, find lower precedence addition and subtraction 1815 * since the expression will be evaluated recursively. 1816 */ 1817 minus_op = strrchr(str, '-'); 1818 if (minus_op) { 1819 /* 1820 * Unary minus is not supported in sub-expressions. If 1821 * present, it is always the next root operator. 1822 */ 1823 if (minus_op == str) { 1824 field_op = FIELD_OP_UNARY_MINUS; 1825 goto out; 1826 } 1827 1828 field_op = FIELD_OP_MINUS; 1829 } 1830 1831 plus_op = strrchr(str, '+'); 1832 if (plus_op || minus_op) { 1833 /* 1834 * For operators of the same precedence use to rightmost as the 1835 * root, so that the expression is evaluated left to right. 1836 */ 1837 if (plus_op > minus_op) 1838 field_op = FIELD_OP_PLUS; 1839 goto out; 1840 } 1841 1842 /* 1843 * Multiplication and division have higher precedence than addition and 1844 * subtraction. 1845 */ 1846 div_op = strrchr(str, '/'); 1847 if (div_op) 1848 field_op = FIELD_OP_DIV; 1849 1850 mult_op = strrchr(str, '*'); 1851 /* 1852 * For operators of the same precedence use to rightmost as the 1853 * root, so that the expression is evaluated left to right. 1854 */ 1855 if (mult_op > div_op) 1856 field_op = FIELD_OP_MULT; 1857 1858 out: 1859 if (sep) { 1860 switch (field_op) { 1861 case FIELD_OP_UNARY_MINUS: 1862 case FIELD_OP_MINUS: 1863 *sep = minus_op; 1864 break; 1865 case FIELD_OP_PLUS: 1866 *sep = plus_op; 1867 break; 1868 case FIELD_OP_DIV: 1869 *sep = div_op; 1870 break; 1871 case FIELD_OP_MULT: 1872 *sep = mult_op; 1873 break; 1874 case FIELD_OP_NONE: 1875 default: 1876 *sep = NULL; 1877 break; 1878 } 1879 } 1880 1881 return field_op; 1882 } 1883 1884 static void get_hist_field(struct hist_field *hist_field) 1885 { 1886 hist_field->ref++; 1887 } 1888 1889 static void __destroy_hist_field(struct hist_field *hist_field) 1890 { 1891 if (--hist_field->ref > 1) 1892 return; 1893 1894 kfree(hist_field->var.name); 1895 kfree(hist_field->name); 1896 1897 /* Can likely be a const */ 1898 kfree_const(hist_field->type); 1899 1900 kfree(hist_field->system); 1901 kfree(hist_field->event_name); 1902 1903 kfree(hist_field); 1904 } 1905 1906 static void destroy_hist_field(struct hist_field *hist_field, 1907 unsigned int level) 1908 { 1909 unsigned int i; 1910 1911 if (level > 3) 1912 return; 1913 1914 if (!hist_field) 1915 return; 1916 1917 if (hist_field->flags & HIST_FIELD_FL_VAR_REF) 1918 return; /* var refs will be destroyed separately */ 1919 1920 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) 1921 destroy_hist_field(hist_field->operands[i], level + 1); 1922 1923 __destroy_hist_field(hist_field); 1924 } 1925 1926 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data, 1927 struct ftrace_event_field *field, 1928 unsigned long flags, 1929 char *var_name) 1930 { 1931 struct hist_field *hist_field; 1932 1933 if (field && is_function_field(field)) 1934 return NULL; 1935 1936 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL); 1937 if (!hist_field) 1938 return NULL; 1939 1940 hist_field->ref = 1; 1941 1942 hist_field->hist_data = hist_data; 1943 1944 if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS) 1945 goto out; /* caller will populate */ 1946 1947 if (flags & HIST_FIELD_FL_VAR_REF) { 1948 hist_field->fn_num = HIST_FIELD_FN_VAR_REF; 1949 goto out; 1950 } 1951 1952 if (flags & HIST_FIELD_FL_HITCOUNT) { 1953 hist_field->fn_num = HIST_FIELD_FN_COUNTER; 1954 hist_field->size = sizeof(u64); 1955 hist_field->type = "u64"; 1956 goto out; 1957 } 1958 1959 if (flags & HIST_FIELD_FL_CONST) { 1960 hist_field->fn_num = HIST_FIELD_FN_CONST; 1961 hist_field->size = sizeof(u64); 1962 hist_field->type = kstrdup("u64", GFP_KERNEL); 1963 if (!hist_field->type) 1964 goto free; 1965 goto out; 1966 } 1967 1968 if (flags & HIST_FIELD_FL_STACKTRACE) { 1969 hist_field->fn_num = HIST_FIELD_FN_NOP; 1970 goto out; 1971 } 1972 1973 if (flags & (HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET)) { 1974 unsigned long fl = flags & ~(HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET); 1975 hist_field->fn_num = flags & HIST_FIELD_FL_LOG2 ? HIST_FIELD_FN_LOG2 : 1976 HIST_FIELD_FN_BUCKET; 1977 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL); 1978 hist_field->size = hist_field->operands[0]->size; 1979 hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL); 1980 if (!hist_field->type) 1981 goto free; 1982 goto out; 1983 } 1984 1985 if (flags & HIST_FIELD_FL_TIMESTAMP) { 1986 hist_field->fn_num = HIST_FIELD_FN_TIMESTAMP; 1987 hist_field->size = sizeof(u64); 1988 hist_field->type = "u64"; 1989 goto out; 1990 } 1991 1992 if (flags & HIST_FIELD_FL_CPU) { 1993 hist_field->fn_num = HIST_FIELD_FN_CPU; 1994 hist_field->size = sizeof(int); 1995 hist_field->type = "unsigned int"; 1996 goto out; 1997 } 1998 1999 if (WARN_ON_ONCE(!field)) 2000 goto out; 2001 2002 /* Pointers to strings are just pointers and dangerous to dereference */ 2003 if (is_string_field(field) && 2004 (field->filter_type != FILTER_PTR_STRING)) { 2005 flags |= HIST_FIELD_FL_STRING; 2006 2007 hist_field->size = MAX_FILTER_STR_VAL; 2008 hist_field->type = kstrdup_const(field->type, GFP_KERNEL); 2009 if (!hist_field->type) 2010 goto free; 2011 2012 if (field->filter_type == FILTER_STATIC_STRING) { 2013 hist_field->fn_num = HIST_FIELD_FN_STRING; 2014 hist_field->size = field->size; 2015 } else if (field->filter_type == FILTER_DYN_STRING) { 2016 hist_field->fn_num = HIST_FIELD_FN_DYNSTRING; 2017 } else if (field->filter_type == FILTER_RDYN_STRING) 2018 hist_field->fn_num = HIST_FIELD_FN_RELDYNSTRING; 2019 else 2020 hist_field->fn_num = HIST_FIELD_FN_PSTRING; 2021 } else { 2022 hist_field->size = field->size; 2023 hist_field->is_signed = field->is_signed; 2024 hist_field->type = kstrdup_const(field->type, GFP_KERNEL); 2025 if (!hist_field->type) 2026 goto free; 2027 2028 hist_field->fn_num = select_value_fn(field->size, 2029 field->is_signed); 2030 if (hist_field->fn_num == HIST_FIELD_FN_NOP) { 2031 destroy_hist_field(hist_field, 0); 2032 return NULL; 2033 } 2034 } 2035 out: 2036 hist_field->field = field; 2037 hist_field->flags = flags; 2038 2039 if (var_name) { 2040 hist_field->var.name = kstrdup(var_name, GFP_KERNEL); 2041 if (!hist_field->var.name) 2042 goto free; 2043 } 2044 2045 return hist_field; 2046 free: 2047 destroy_hist_field(hist_field, 0); 2048 return NULL; 2049 } 2050 2051 static void destroy_hist_fields(struct hist_trigger_data *hist_data) 2052 { 2053 unsigned int i; 2054 2055 for (i = 0; i < HIST_FIELDS_MAX; i++) { 2056 if (hist_data->fields[i]) { 2057 destroy_hist_field(hist_data->fields[i], 0); 2058 hist_data->fields[i] = NULL; 2059 } 2060 } 2061 2062 for (i = 0; i < hist_data->n_var_refs; i++) { 2063 WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF)); 2064 __destroy_hist_field(hist_data->var_refs[i]); 2065 hist_data->var_refs[i] = NULL; 2066 } 2067 } 2068 2069 static int init_var_ref(struct hist_field *ref_field, 2070 struct hist_field *var_field, 2071 char *system, char *event_name) 2072 { 2073 int err = 0; 2074 2075 ref_field->var.idx = var_field->var.idx; 2076 ref_field->var.hist_data = var_field->hist_data; 2077 ref_field->size = var_field->size; 2078 ref_field->is_signed = var_field->is_signed; 2079 ref_field->flags |= var_field->flags & 2080 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2081 2082 if (system) { 2083 ref_field->system = kstrdup(system, GFP_KERNEL); 2084 if (!ref_field->system) 2085 return -ENOMEM; 2086 } 2087 2088 if (event_name) { 2089 ref_field->event_name = kstrdup(event_name, GFP_KERNEL); 2090 if (!ref_field->event_name) { 2091 err = -ENOMEM; 2092 goto free; 2093 } 2094 } 2095 2096 if (var_field->var.name) { 2097 ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL); 2098 if (!ref_field->name) { 2099 err = -ENOMEM; 2100 goto free; 2101 } 2102 } else if (var_field->name) { 2103 ref_field->name = kstrdup(var_field->name, GFP_KERNEL); 2104 if (!ref_field->name) { 2105 err = -ENOMEM; 2106 goto free; 2107 } 2108 } 2109 2110 ref_field->type = kstrdup_const(var_field->type, GFP_KERNEL); 2111 if (!ref_field->type) { 2112 err = -ENOMEM; 2113 goto free; 2114 } 2115 out: 2116 return err; 2117 free: 2118 kfree(ref_field->system); 2119 ref_field->system = NULL; 2120 kfree(ref_field->event_name); 2121 ref_field->event_name = NULL; 2122 kfree(ref_field->name); 2123 ref_field->name = NULL; 2124 2125 goto out; 2126 } 2127 2128 static int find_var_ref_idx(struct hist_trigger_data *hist_data, 2129 struct hist_field *var_field) 2130 { 2131 struct hist_field *ref_field; 2132 int i; 2133 2134 for (i = 0; i < hist_data->n_var_refs; i++) { 2135 ref_field = hist_data->var_refs[i]; 2136 if (ref_field->var.idx == var_field->var.idx && 2137 ref_field->var.hist_data == var_field->hist_data) 2138 return i; 2139 } 2140 2141 return -ENOENT; 2142 } 2143 2144 /** 2145 * create_var_ref - Create a variable reference and attach it to trigger 2146 * @hist_data: The trigger that will be referencing the variable 2147 * @var_field: The VAR field to create a reference to 2148 * @system: The optional system string 2149 * @event_name: The optional event_name string 2150 * 2151 * Given a variable hist_field, create a VAR_REF hist_field that 2152 * represents a reference to it. 2153 * 2154 * This function also adds the reference to the trigger that 2155 * now references the variable. 2156 * 2157 * Return: The VAR_REF field if successful, NULL if not 2158 */ 2159 static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data, 2160 struct hist_field *var_field, 2161 char *system, char *event_name) 2162 { 2163 unsigned long flags = HIST_FIELD_FL_VAR_REF; 2164 struct hist_field *ref_field; 2165 int i; 2166 2167 /* Check if the variable already exists */ 2168 for (i = 0; i < hist_data->n_var_refs; i++) { 2169 ref_field = hist_data->var_refs[i]; 2170 if (ref_field->var.idx == var_field->var.idx && 2171 ref_field->var.hist_data == var_field->hist_data) { 2172 get_hist_field(ref_field); 2173 return ref_field; 2174 } 2175 } 2176 2177 ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL); 2178 if (ref_field) { 2179 if (init_var_ref(ref_field, var_field, system, event_name)) { 2180 destroy_hist_field(ref_field, 0); 2181 return NULL; 2182 } 2183 2184 hist_data->var_refs[hist_data->n_var_refs] = ref_field; 2185 ref_field->var_ref_idx = hist_data->n_var_refs++; 2186 } 2187 2188 return ref_field; 2189 } 2190 2191 static bool is_var_ref(char *var_name) 2192 { 2193 if (!var_name || strlen(var_name) < 2 || var_name[0] != '$') 2194 return false; 2195 2196 return true; 2197 } 2198 2199 static char *field_name_from_var(struct hist_trigger_data *hist_data, 2200 char *var_name) 2201 { 2202 char *name, *field; 2203 unsigned int i; 2204 2205 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { 2206 name = hist_data->attrs->var_defs.name[i]; 2207 2208 if (strcmp(var_name, name) == 0) { 2209 field = hist_data->attrs->var_defs.expr[i]; 2210 if (contains_operator(field, NULL) || is_var_ref(field)) 2211 continue; 2212 return field; 2213 } 2214 } 2215 2216 return NULL; 2217 } 2218 2219 static char *local_field_var_ref(struct hist_trigger_data *hist_data, 2220 char *system, char *event_name, 2221 char *var_name) 2222 { 2223 struct trace_event_call *call; 2224 2225 if (system && event_name) { 2226 call = hist_data->event_file->event_call; 2227 2228 if (strcmp(system, call->class->system) != 0) 2229 return NULL; 2230 2231 if (strcmp(event_name, trace_event_name(call)) != 0) 2232 return NULL; 2233 } 2234 2235 if (!!system != !!event_name) 2236 return NULL; 2237 2238 if (!is_var_ref(var_name)) 2239 return NULL; 2240 2241 var_name++; 2242 2243 return field_name_from_var(hist_data, var_name); 2244 } 2245 2246 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data, 2247 char *system, char *event_name, 2248 char *var_name) 2249 { 2250 struct hist_field *var_field = NULL, *ref_field = NULL; 2251 struct trace_array *tr = hist_data->event_file->tr; 2252 2253 if (!is_var_ref(var_name)) 2254 return NULL; 2255 2256 var_name++; 2257 2258 var_field = find_event_var(hist_data, system, event_name, var_name); 2259 if (var_field) 2260 ref_field = create_var_ref(hist_data, var_field, 2261 system, event_name); 2262 2263 if (!ref_field) 2264 hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name)); 2265 2266 return ref_field; 2267 } 2268 2269 static struct ftrace_event_field * 2270 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, 2271 char *field_str, unsigned long *flags, unsigned long *buckets) 2272 { 2273 struct ftrace_event_field *field = NULL; 2274 char *field_name, *modifier, *str; 2275 struct trace_array *tr = file->tr; 2276 2277 modifier = str = kstrdup(field_str, GFP_KERNEL); 2278 if (!modifier) 2279 return ERR_PTR(-ENOMEM); 2280 2281 field_name = strsep(&modifier, "."); 2282 if (modifier) { 2283 if (strcmp(modifier, "hex") == 0) 2284 *flags |= HIST_FIELD_FL_HEX; 2285 else if (strcmp(modifier, "sym") == 0) 2286 *flags |= HIST_FIELD_FL_SYM; 2287 /* 2288 * 'sym-offset' occurrences in the trigger string are modified 2289 * to 'symXoffset' to simplify arithmetic expression parsing. 2290 */ 2291 else if (strcmp(modifier, "symXoffset") == 0) 2292 *flags |= HIST_FIELD_FL_SYM_OFFSET; 2293 else if ((strcmp(modifier, "execname") == 0) && 2294 (strcmp(field_name, "common_pid") == 0)) 2295 *flags |= HIST_FIELD_FL_EXECNAME; 2296 else if (strcmp(modifier, "syscall") == 0) 2297 *flags |= HIST_FIELD_FL_SYSCALL; 2298 else if (strcmp(modifier, "log2") == 0) 2299 *flags |= HIST_FIELD_FL_LOG2; 2300 else if (strcmp(modifier, "usecs") == 0) 2301 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; 2302 else if (strncmp(modifier, "bucket", 6) == 0) { 2303 int ret; 2304 2305 modifier += 6; 2306 2307 if (*modifier == 's') 2308 modifier++; 2309 if (*modifier != '=') 2310 goto error; 2311 modifier++; 2312 ret = kstrtoul(modifier, 0, buckets); 2313 if (ret || !(*buckets)) 2314 goto error; 2315 *flags |= HIST_FIELD_FL_BUCKET; 2316 } else { 2317 error: 2318 hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier)); 2319 field = ERR_PTR(-EINVAL); 2320 goto out; 2321 } 2322 } 2323 2324 if (strcmp(field_name, "common_timestamp") == 0) { 2325 *flags |= HIST_FIELD_FL_TIMESTAMP; 2326 hist_data->enable_timestamps = true; 2327 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS) 2328 hist_data->attrs->ts_in_usecs = true; 2329 } else if (strcmp(field_name, "common_cpu") == 0) 2330 *flags |= HIST_FIELD_FL_CPU; 2331 else { 2332 field = trace_find_event_field(file->event_call, field_name); 2333 if (!field || !field->size) { 2334 /* 2335 * For backward compatibility, if field_name 2336 * was "cpu", then we treat this the same as 2337 * common_cpu. This also works for "CPU". 2338 */ 2339 if (field && field->filter_type == FILTER_CPU) { 2340 *flags |= HIST_FIELD_FL_CPU; 2341 } else { 2342 hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, 2343 errpos(field_name)); 2344 field = ERR_PTR(-EINVAL); 2345 goto out; 2346 } 2347 } 2348 } 2349 out: 2350 kfree(str); 2351 2352 return field; 2353 } 2354 2355 static struct hist_field *create_alias(struct hist_trigger_data *hist_data, 2356 struct hist_field *var_ref, 2357 char *var_name) 2358 { 2359 struct hist_field *alias = NULL; 2360 unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR; 2361 2362 alias = create_hist_field(hist_data, NULL, flags, var_name); 2363 if (!alias) 2364 return NULL; 2365 2366 alias->fn_num = var_ref->fn_num; 2367 alias->operands[0] = var_ref; 2368 2369 if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) { 2370 destroy_hist_field(alias, 0); 2371 return NULL; 2372 } 2373 2374 alias->var_ref_idx = var_ref->var_ref_idx; 2375 2376 return alias; 2377 } 2378 2379 static struct hist_field *parse_const(struct hist_trigger_data *hist_data, 2380 char *str, char *var_name, 2381 unsigned long *flags) 2382 { 2383 struct trace_array *tr = hist_data->event_file->tr; 2384 struct hist_field *field = NULL; 2385 u64 constant; 2386 2387 if (kstrtoull(str, 0, &constant)) { 2388 hist_err(tr, HIST_ERR_EXPECT_NUMBER, errpos(str)); 2389 return NULL; 2390 } 2391 2392 *flags |= HIST_FIELD_FL_CONST; 2393 field = create_hist_field(hist_data, NULL, *flags, var_name); 2394 if (!field) 2395 return NULL; 2396 2397 field->constant = constant; 2398 2399 return field; 2400 } 2401 2402 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data, 2403 struct trace_event_file *file, char *str, 2404 unsigned long *flags, char *var_name) 2405 { 2406 char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str; 2407 struct ftrace_event_field *field = NULL; 2408 struct hist_field *hist_field = NULL; 2409 unsigned long buckets = 0; 2410 int ret = 0; 2411 2412 if (isdigit(str[0])) { 2413 hist_field = parse_const(hist_data, str, var_name, flags); 2414 if (!hist_field) { 2415 ret = -EINVAL; 2416 goto out; 2417 } 2418 return hist_field; 2419 } 2420 2421 s = strchr(str, '.'); 2422 if (s) { 2423 s = strchr(++s, '.'); 2424 if (s) { 2425 ref_system = strsep(&str, "."); 2426 if (!str) { 2427 ret = -EINVAL; 2428 goto out; 2429 } 2430 ref_event = strsep(&str, "."); 2431 if (!str) { 2432 ret = -EINVAL; 2433 goto out; 2434 } 2435 ref_var = str; 2436 } 2437 } 2438 2439 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var); 2440 if (!s) { 2441 hist_field = parse_var_ref(hist_data, ref_system, 2442 ref_event, ref_var); 2443 if (hist_field) { 2444 if (var_name) { 2445 hist_field = create_alias(hist_data, hist_field, var_name); 2446 if (!hist_field) { 2447 ret = -ENOMEM; 2448 goto out; 2449 } 2450 } 2451 return hist_field; 2452 } 2453 } else 2454 str = s; 2455 2456 field = parse_field(hist_data, file, str, flags, &buckets); 2457 if (IS_ERR(field)) { 2458 ret = PTR_ERR(field); 2459 goto out; 2460 } 2461 2462 hist_field = create_hist_field(hist_data, field, *flags, var_name); 2463 if (!hist_field) { 2464 ret = -ENOMEM; 2465 goto out; 2466 } 2467 hist_field->buckets = buckets; 2468 2469 return hist_field; 2470 out: 2471 return ERR_PTR(ret); 2472 } 2473 2474 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, 2475 struct trace_event_file *file, 2476 char *str, unsigned long flags, 2477 char *var_name, unsigned int *n_subexprs); 2478 2479 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, 2480 struct trace_event_file *file, 2481 char *str, unsigned long flags, 2482 char *var_name, unsigned int *n_subexprs) 2483 { 2484 struct hist_field *operand1, *expr = NULL; 2485 unsigned long operand_flags; 2486 int ret = 0; 2487 char *s; 2488 2489 /* Unary minus operator, increment n_subexprs */ 2490 ++*n_subexprs; 2491 2492 /* we support only -(xxx) i.e. explicit parens required */ 2493 2494 if (*n_subexprs > 3) { 2495 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2496 ret = -EINVAL; 2497 goto free; 2498 } 2499 2500 str++; /* skip leading '-' */ 2501 2502 s = strchr(str, '('); 2503 if (s) 2504 str++; 2505 else { 2506 ret = -EINVAL; 2507 goto free; 2508 } 2509 2510 s = strrchr(str, ')'); 2511 if (s) { 2512 /* unary minus not supported in sub-expressions */ 2513 if (*(s+1) != '\0') { 2514 hist_err(file->tr, HIST_ERR_UNARY_MINUS_SUBEXPR, 2515 errpos(str)); 2516 ret = -EINVAL; 2517 goto free; 2518 } 2519 *s = '\0'; 2520 } 2521 else { 2522 ret = -EINVAL; /* no closing ')' */ 2523 goto free; 2524 } 2525 2526 flags |= HIST_FIELD_FL_EXPR; 2527 expr = create_hist_field(hist_data, NULL, flags, var_name); 2528 if (!expr) { 2529 ret = -ENOMEM; 2530 goto free; 2531 } 2532 2533 operand_flags = 0; 2534 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs); 2535 if (IS_ERR(operand1)) { 2536 ret = PTR_ERR(operand1); 2537 goto free; 2538 } 2539 if (operand1->flags & HIST_FIELD_FL_STRING) { 2540 /* String type can not be the operand of unary operator. */ 2541 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); 2542 destroy_hist_field(operand1, 0); 2543 ret = -EINVAL; 2544 goto free; 2545 } 2546 2547 expr->flags |= operand1->flags & 2548 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2549 expr->fn_num = HIST_FIELD_FN_UMINUS; 2550 expr->operands[0] = operand1; 2551 expr->size = operand1->size; 2552 expr->is_signed = operand1->is_signed; 2553 expr->operator = FIELD_OP_UNARY_MINUS; 2554 expr->name = expr_str(expr, 0); 2555 expr->type = kstrdup_const(operand1->type, GFP_KERNEL); 2556 if (!expr->type) { 2557 ret = -ENOMEM; 2558 goto free; 2559 } 2560 2561 return expr; 2562 free: 2563 destroy_hist_field(expr, 0); 2564 return ERR_PTR(ret); 2565 } 2566 2567 /* 2568 * If the operands are var refs, return pointers the 2569 * variable(s) referenced in var1 and var2, else NULL. 2570 */ 2571 static int check_expr_operands(struct trace_array *tr, 2572 struct hist_field *operand1, 2573 struct hist_field *operand2, 2574 struct hist_field **var1, 2575 struct hist_field **var2) 2576 { 2577 unsigned long operand1_flags = operand1->flags; 2578 unsigned long operand2_flags = operand2->flags; 2579 2580 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) || 2581 (operand1_flags & HIST_FIELD_FL_ALIAS)) { 2582 struct hist_field *var; 2583 2584 var = find_var_field(operand1->var.hist_data, operand1->name); 2585 if (!var) 2586 return -EINVAL; 2587 operand1_flags = var->flags; 2588 *var1 = var; 2589 } 2590 2591 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) || 2592 (operand2_flags & HIST_FIELD_FL_ALIAS)) { 2593 struct hist_field *var; 2594 2595 var = find_var_field(operand2->var.hist_data, operand2->name); 2596 if (!var) 2597 return -EINVAL; 2598 operand2_flags = var->flags; 2599 *var2 = var; 2600 } 2601 2602 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) != 2603 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) { 2604 hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0); 2605 return -EINVAL; 2606 } 2607 2608 return 0; 2609 } 2610 2611 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, 2612 struct trace_event_file *file, 2613 char *str, unsigned long flags, 2614 char *var_name, unsigned int *n_subexprs) 2615 { 2616 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL; 2617 struct hist_field *var1 = NULL, *var2 = NULL; 2618 unsigned long operand_flags, operand2_flags; 2619 int field_op, ret = -EINVAL; 2620 char *sep, *operand1_str; 2621 enum hist_field_fn op_fn; 2622 bool combine_consts; 2623 2624 if (*n_subexprs > 3) { 2625 hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2626 return ERR_PTR(-EINVAL); 2627 } 2628 2629 field_op = contains_operator(str, &sep); 2630 2631 if (field_op == FIELD_OP_NONE) 2632 return parse_atom(hist_data, file, str, &flags, var_name); 2633 2634 if (field_op == FIELD_OP_UNARY_MINUS) 2635 return parse_unary(hist_data, file, str, flags, var_name, n_subexprs); 2636 2637 /* Binary operator found, increment n_subexprs */ 2638 ++*n_subexprs; 2639 2640 /* Split the expression string at the root operator */ 2641 if (!sep) 2642 return ERR_PTR(-EINVAL); 2643 2644 *sep = '\0'; 2645 operand1_str = str; 2646 str = sep+1; 2647 2648 /* Binary operator requires both operands */ 2649 if (*operand1_str == '\0' || *str == '\0') 2650 return ERR_PTR(-EINVAL); 2651 2652 operand_flags = 0; 2653 2654 /* LHS of string is an expression e.g. a+b in a+b+c */ 2655 operand1 = parse_expr(hist_data, file, operand1_str, operand_flags, NULL, n_subexprs); 2656 if (IS_ERR(operand1)) 2657 return ERR_CAST(operand1); 2658 2659 if (operand1->flags & HIST_FIELD_FL_STRING) { 2660 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str)); 2661 ret = -EINVAL; 2662 goto free_op1; 2663 } 2664 2665 /* RHS of string is another expression e.g. c in a+b+c */ 2666 operand_flags = 0; 2667 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs); 2668 if (IS_ERR(operand2)) { 2669 ret = PTR_ERR(operand2); 2670 goto free_op1; 2671 } 2672 if (operand2->flags & HIST_FIELD_FL_STRING) { 2673 hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str)); 2674 ret = -EINVAL; 2675 goto free_operands; 2676 } 2677 2678 switch (field_op) { 2679 case FIELD_OP_MINUS: 2680 op_fn = HIST_FIELD_FN_MINUS; 2681 break; 2682 case FIELD_OP_PLUS: 2683 op_fn = HIST_FIELD_FN_PLUS; 2684 break; 2685 case FIELD_OP_DIV: 2686 op_fn = HIST_FIELD_FN_DIV; 2687 break; 2688 case FIELD_OP_MULT: 2689 op_fn = HIST_FIELD_FN_MULT; 2690 break; 2691 default: 2692 ret = -EINVAL; 2693 goto free_operands; 2694 } 2695 2696 ret = check_expr_operands(file->tr, operand1, operand2, &var1, &var2); 2697 if (ret) 2698 goto free_operands; 2699 2700 operand_flags = var1 ? var1->flags : operand1->flags; 2701 operand2_flags = var2 ? var2->flags : operand2->flags; 2702 2703 /* 2704 * If both operands are constant, the expression can be 2705 * collapsed to a single constant. 2706 */ 2707 combine_consts = operand_flags & operand2_flags & HIST_FIELD_FL_CONST; 2708 2709 flags |= combine_consts ? HIST_FIELD_FL_CONST : HIST_FIELD_FL_EXPR; 2710 2711 flags |= operand1->flags & 2712 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); 2713 2714 expr = create_hist_field(hist_data, NULL, flags, var_name); 2715 if (!expr) { 2716 ret = -ENOMEM; 2717 goto free_operands; 2718 } 2719 2720 operand1->read_once = true; 2721 operand2->read_once = true; 2722 2723 /* The operands are now owned and free'd by 'expr' */ 2724 expr->operands[0] = operand1; 2725 expr->operands[1] = operand2; 2726 2727 if (field_op == FIELD_OP_DIV && 2728 operand2_flags & HIST_FIELD_FL_CONST) { 2729 u64 divisor = var2 ? var2->constant : operand2->constant; 2730 2731 if (!divisor) { 2732 hist_err(file->tr, HIST_ERR_DIVISION_BY_ZERO, errpos(str)); 2733 ret = -EDOM; 2734 goto free_expr; 2735 } 2736 2737 /* 2738 * Copy the divisor here so we don't have to look it up 2739 * later if this is a var ref 2740 */ 2741 operand2->constant = divisor; 2742 op_fn = hist_field_get_div_fn(operand2); 2743 } 2744 2745 expr->fn_num = op_fn; 2746 2747 if (combine_consts) { 2748 if (var1) 2749 expr->operands[0] = var1; 2750 if (var2) 2751 expr->operands[1] = var2; 2752 2753 expr->constant = hist_fn_call(expr, NULL, NULL, NULL, NULL); 2754 expr->fn_num = HIST_FIELD_FN_CONST; 2755 2756 expr->operands[0] = NULL; 2757 expr->operands[1] = NULL; 2758 2759 /* 2760 * var refs won't be destroyed immediately 2761 * See: destroy_hist_field() 2762 */ 2763 destroy_hist_field(operand2, 0); 2764 destroy_hist_field(operand1, 0); 2765 2766 expr->name = expr_str(expr, 0); 2767 } else { 2768 /* The operand sizes should be the same, so just pick one */ 2769 expr->size = operand1->size; 2770 expr->is_signed = operand1->is_signed; 2771 2772 expr->operator = field_op; 2773 expr->type = kstrdup_const(operand1->type, GFP_KERNEL); 2774 if (!expr->type) { 2775 ret = -ENOMEM; 2776 goto free_expr; 2777 } 2778 2779 expr->name = expr_str(expr, 0); 2780 } 2781 2782 return expr; 2783 2784 free_operands: 2785 destroy_hist_field(operand2, 0); 2786 free_op1: 2787 destroy_hist_field(operand1, 0); 2788 return ERR_PTR(ret); 2789 2790 free_expr: 2791 destroy_hist_field(expr, 0); 2792 return ERR_PTR(ret); 2793 } 2794 2795 static char *find_trigger_filter(struct hist_trigger_data *hist_data, 2796 struct trace_event_file *file) 2797 { 2798 struct event_trigger_data *test; 2799 2800 lockdep_assert_held(&event_mutex); 2801 2802 list_for_each_entry(test, &file->triggers, list) { 2803 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 2804 if (test->private_data == hist_data) 2805 return test->filter_str; 2806 } 2807 } 2808 2809 return NULL; 2810 } 2811 2812 static struct event_command trigger_hist_cmd; 2813 static int event_hist_trigger_parse(struct event_command *cmd_ops, 2814 struct trace_event_file *file, 2815 char *glob, char *cmd, 2816 char *param_and_filter); 2817 2818 static bool compatible_keys(struct hist_trigger_data *target_hist_data, 2819 struct hist_trigger_data *hist_data, 2820 unsigned int n_keys) 2821 { 2822 struct hist_field *target_hist_field, *hist_field; 2823 unsigned int n, i, j; 2824 2825 if (hist_data->n_fields - hist_data->n_vals != n_keys) 2826 return false; 2827 2828 i = hist_data->n_vals; 2829 j = target_hist_data->n_vals; 2830 2831 for (n = 0; n < n_keys; n++) { 2832 hist_field = hist_data->fields[i + n]; 2833 target_hist_field = target_hist_data->fields[j + n]; 2834 2835 if (strcmp(hist_field->type, target_hist_field->type) != 0) 2836 return false; 2837 if (hist_field->size != target_hist_field->size) 2838 return false; 2839 if (hist_field->is_signed != target_hist_field->is_signed) 2840 return false; 2841 } 2842 2843 return true; 2844 } 2845 2846 static struct hist_trigger_data * 2847 find_compatible_hist(struct hist_trigger_data *target_hist_data, 2848 struct trace_event_file *file) 2849 { 2850 struct hist_trigger_data *hist_data; 2851 struct event_trigger_data *test; 2852 unsigned int n_keys; 2853 2854 lockdep_assert_held(&event_mutex); 2855 2856 n_keys = target_hist_data->n_fields - target_hist_data->n_vals; 2857 2858 list_for_each_entry(test, &file->triggers, list) { 2859 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 2860 hist_data = test->private_data; 2861 2862 if (compatible_keys(target_hist_data, hist_data, n_keys)) 2863 return hist_data; 2864 } 2865 } 2866 2867 return NULL; 2868 } 2869 2870 static struct trace_event_file *event_file(struct trace_array *tr, 2871 char *system, char *event_name) 2872 { 2873 struct trace_event_file *file; 2874 2875 file = __find_event_file(tr, system, event_name); 2876 if (!file) 2877 return ERR_PTR(-EINVAL); 2878 2879 return file; 2880 } 2881 2882 static struct hist_field * 2883 find_synthetic_field_var(struct hist_trigger_data *target_hist_data, 2884 char *system, char *event_name, char *field_name) 2885 { 2886 struct hist_field *event_var; 2887 char *synthetic_name; 2888 2889 synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 2890 if (!synthetic_name) 2891 return ERR_PTR(-ENOMEM); 2892 2893 strcpy(synthetic_name, "synthetic_"); 2894 strcat(synthetic_name, field_name); 2895 2896 event_var = find_event_var(target_hist_data, system, event_name, synthetic_name); 2897 2898 kfree(synthetic_name); 2899 2900 return event_var; 2901 } 2902 2903 /** 2904 * create_field_var_hist - Automatically create a histogram and var for a field 2905 * @target_hist_data: The target hist trigger 2906 * @subsys_name: Optional subsystem name 2907 * @event_name: Optional event name 2908 * @field_name: The name of the field (and the resulting variable) 2909 * 2910 * Hist trigger actions fetch data from variables, not directly from 2911 * events. However, for convenience, users are allowed to directly 2912 * specify an event field in an action, which will be automatically 2913 * converted into a variable on their behalf. 2914 * 2915 * If a user specifies a field on an event that isn't the event the 2916 * histogram currently being defined (the target event histogram), the 2917 * only way that can be accomplished is if a new hist trigger is 2918 * created and the field variable defined on that. 2919 * 2920 * This function creates a new histogram compatible with the target 2921 * event (meaning a histogram with the same key as the target 2922 * histogram), and creates a variable for the specified field, but 2923 * with 'synthetic_' prepended to the variable name in order to avoid 2924 * collision with normal field variables. 2925 * 2926 * Return: The variable created for the field. 2927 */ 2928 static struct hist_field * 2929 create_field_var_hist(struct hist_trigger_data *target_hist_data, 2930 char *subsys_name, char *event_name, char *field_name) 2931 { 2932 struct trace_array *tr = target_hist_data->event_file->tr; 2933 struct hist_trigger_data *hist_data; 2934 unsigned int i, n, first = true; 2935 struct field_var_hist *var_hist; 2936 struct trace_event_file *file; 2937 struct hist_field *key_field; 2938 struct hist_field *event_var; 2939 char *saved_filter; 2940 char *cmd; 2941 int ret; 2942 2943 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) { 2944 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); 2945 return ERR_PTR(-EINVAL); 2946 } 2947 2948 file = event_file(tr, subsys_name, event_name); 2949 2950 if (IS_ERR(file)) { 2951 hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name)); 2952 ret = PTR_ERR(file); 2953 return ERR_PTR(ret); 2954 } 2955 2956 /* 2957 * Look for a histogram compatible with target. We'll use the 2958 * found histogram specification to create a new matching 2959 * histogram with our variable on it. target_hist_data is not 2960 * yet a registered histogram so we can't use that. 2961 */ 2962 hist_data = find_compatible_hist(target_hist_data, file); 2963 if (!hist_data) { 2964 hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name)); 2965 return ERR_PTR(-EINVAL); 2966 } 2967 2968 /* See if a synthetic field variable has already been created */ 2969 event_var = find_synthetic_field_var(target_hist_data, subsys_name, 2970 event_name, field_name); 2971 if (!IS_ERR_OR_NULL(event_var)) 2972 return event_var; 2973 2974 var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL); 2975 if (!var_hist) 2976 return ERR_PTR(-ENOMEM); 2977 2978 cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL); 2979 if (!cmd) { 2980 kfree(var_hist); 2981 return ERR_PTR(-ENOMEM); 2982 } 2983 2984 /* Use the same keys as the compatible histogram */ 2985 strcat(cmd, "keys="); 2986 2987 for_each_hist_key_field(i, hist_data) { 2988 key_field = hist_data->fields[i]; 2989 if (!first) 2990 strcat(cmd, ","); 2991 strcat(cmd, key_field->field->name); 2992 first = false; 2993 } 2994 2995 /* Create the synthetic field variable specification */ 2996 strcat(cmd, ":synthetic_"); 2997 strcat(cmd, field_name); 2998 strcat(cmd, "="); 2999 strcat(cmd, field_name); 3000 3001 /* Use the same filter as the compatible histogram */ 3002 saved_filter = find_trigger_filter(hist_data, file); 3003 if (saved_filter) { 3004 strcat(cmd, " if "); 3005 strcat(cmd, saved_filter); 3006 } 3007 3008 var_hist->cmd = kstrdup(cmd, GFP_KERNEL); 3009 if (!var_hist->cmd) { 3010 kfree(cmd); 3011 kfree(var_hist); 3012 return ERR_PTR(-ENOMEM); 3013 } 3014 3015 /* Save the compatible histogram information */ 3016 var_hist->hist_data = hist_data; 3017 3018 /* Create the new histogram with our variable */ 3019 ret = event_hist_trigger_parse(&trigger_hist_cmd, file, 3020 "", "hist", cmd); 3021 if (ret) { 3022 kfree(cmd); 3023 kfree(var_hist->cmd); 3024 kfree(var_hist); 3025 hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name)); 3026 return ERR_PTR(ret); 3027 } 3028 3029 kfree(cmd); 3030 3031 /* If we can't find the variable, something went wrong */ 3032 event_var = find_synthetic_field_var(target_hist_data, subsys_name, 3033 event_name, field_name); 3034 if (IS_ERR_OR_NULL(event_var)) { 3035 kfree(var_hist->cmd); 3036 kfree(var_hist); 3037 hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name)); 3038 return ERR_PTR(-EINVAL); 3039 } 3040 3041 n = target_hist_data->n_field_var_hists; 3042 target_hist_data->field_var_hists[n] = var_hist; 3043 target_hist_data->n_field_var_hists++; 3044 3045 return event_var; 3046 } 3047 3048 static struct hist_field * 3049 find_target_event_var(struct hist_trigger_data *hist_data, 3050 char *subsys_name, char *event_name, char *var_name) 3051 { 3052 struct trace_event_file *file = hist_data->event_file; 3053 struct hist_field *hist_field = NULL; 3054 3055 if (subsys_name) { 3056 struct trace_event_call *call; 3057 3058 if (!event_name) 3059 return NULL; 3060 3061 call = file->event_call; 3062 3063 if (strcmp(subsys_name, call->class->system) != 0) 3064 return NULL; 3065 3066 if (strcmp(event_name, trace_event_name(call)) != 0) 3067 return NULL; 3068 } 3069 3070 hist_field = find_var_field(hist_data, var_name); 3071 3072 return hist_field; 3073 } 3074 3075 static inline void __update_field_vars(struct tracing_map_elt *elt, 3076 struct trace_buffer *buffer, 3077 struct ring_buffer_event *rbe, 3078 void *rec, 3079 struct field_var **field_vars, 3080 unsigned int n_field_vars, 3081 unsigned int field_var_str_start) 3082 { 3083 struct hist_elt_data *elt_data = elt->private_data; 3084 unsigned int i, j, var_idx; 3085 u64 var_val; 3086 3087 for (i = 0, j = field_var_str_start; i < n_field_vars; i++) { 3088 struct field_var *field_var = field_vars[i]; 3089 struct hist_field *var = field_var->var; 3090 struct hist_field *val = field_var->val; 3091 3092 var_val = hist_fn_call(val, elt, buffer, rbe, rec); 3093 var_idx = var->var.idx; 3094 3095 if (val->flags & HIST_FIELD_FL_STRING) { 3096 char *str = elt_data->field_var_str[j++]; 3097 char *val_str = (char *)(uintptr_t)var_val; 3098 unsigned int size; 3099 3100 size = min(val->size, STR_VAR_LEN_MAX); 3101 strscpy(str, val_str, size); 3102 var_val = (u64)(uintptr_t)str; 3103 } 3104 tracing_map_set_var(elt, var_idx, var_val); 3105 } 3106 } 3107 3108 static void update_field_vars(struct hist_trigger_data *hist_data, 3109 struct tracing_map_elt *elt, 3110 struct trace_buffer *buffer, 3111 struct ring_buffer_event *rbe, 3112 void *rec) 3113 { 3114 __update_field_vars(elt, buffer, rbe, rec, hist_data->field_vars, 3115 hist_data->n_field_vars, 0); 3116 } 3117 3118 static void save_track_data_vars(struct hist_trigger_data *hist_data, 3119 struct tracing_map_elt *elt, 3120 struct trace_buffer *buffer, void *rec, 3121 struct ring_buffer_event *rbe, void *key, 3122 struct action_data *data, u64 *var_ref_vals) 3123 { 3124 __update_field_vars(elt, buffer, rbe, rec, hist_data->save_vars, 3125 hist_data->n_save_vars, hist_data->n_field_var_str); 3126 } 3127 3128 static struct hist_field *create_var(struct hist_trigger_data *hist_data, 3129 struct trace_event_file *file, 3130 char *name, int size, const char *type) 3131 { 3132 struct hist_field *var; 3133 int idx; 3134 3135 if (find_var(hist_data, file, name) && !hist_data->remove) { 3136 var = ERR_PTR(-EINVAL); 3137 goto out; 3138 } 3139 3140 var = kzalloc(sizeof(struct hist_field), GFP_KERNEL); 3141 if (!var) { 3142 var = ERR_PTR(-ENOMEM); 3143 goto out; 3144 } 3145 3146 idx = tracing_map_add_var(hist_data->map); 3147 if (idx < 0) { 3148 kfree(var); 3149 var = ERR_PTR(-EINVAL); 3150 goto out; 3151 } 3152 3153 var->ref = 1; 3154 var->flags = HIST_FIELD_FL_VAR; 3155 var->var.idx = idx; 3156 var->var.hist_data = var->hist_data = hist_data; 3157 var->size = size; 3158 var->var.name = kstrdup(name, GFP_KERNEL); 3159 var->type = kstrdup_const(type, GFP_KERNEL); 3160 if (!var->var.name || !var->type) { 3161 kfree_const(var->type); 3162 kfree(var->var.name); 3163 kfree(var); 3164 var = ERR_PTR(-ENOMEM); 3165 } 3166 out: 3167 return var; 3168 } 3169 3170 static struct field_var *create_field_var(struct hist_trigger_data *hist_data, 3171 struct trace_event_file *file, 3172 char *field_name) 3173 { 3174 struct hist_field *val = NULL, *var = NULL; 3175 unsigned long flags = HIST_FIELD_FL_VAR; 3176 struct trace_array *tr = file->tr; 3177 struct field_var *field_var; 3178 int ret = 0; 3179 3180 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) { 3181 hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); 3182 ret = -EINVAL; 3183 goto err; 3184 } 3185 3186 val = parse_atom(hist_data, file, field_name, &flags, NULL); 3187 if (IS_ERR(val)) { 3188 hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name)); 3189 ret = PTR_ERR(val); 3190 goto err; 3191 } 3192 3193 var = create_var(hist_data, file, field_name, val->size, val->type); 3194 if (IS_ERR(var)) { 3195 hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name)); 3196 kfree(val); 3197 ret = PTR_ERR(var); 3198 goto err; 3199 } 3200 3201 field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL); 3202 if (!field_var) { 3203 kfree(val); 3204 kfree(var); 3205 ret = -ENOMEM; 3206 goto err; 3207 } 3208 3209 field_var->var = var; 3210 field_var->val = val; 3211 out: 3212 return field_var; 3213 err: 3214 field_var = ERR_PTR(ret); 3215 goto out; 3216 } 3217 3218 /** 3219 * create_target_field_var - Automatically create a variable for a field 3220 * @target_hist_data: The target hist trigger 3221 * @subsys_name: Optional subsystem name 3222 * @event_name: Optional event name 3223 * @var_name: The name of the field (and the resulting variable) 3224 * 3225 * Hist trigger actions fetch data from variables, not directly from 3226 * events. However, for convenience, users are allowed to directly 3227 * specify an event field in an action, which will be automatically 3228 * converted into a variable on their behalf. 3229 3230 * This function creates a field variable with the name var_name on 3231 * the hist trigger currently being defined on the target event. If 3232 * subsys_name and event_name are specified, this function simply 3233 * verifies that they do in fact match the target event subsystem and 3234 * event name. 3235 * 3236 * Return: The variable created for the field. 3237 */ 3238 static struct field_var * 3239 create_target_field_var(struct hist_trigger_data *target_hist_data, 3240 char *subsys_name, char *event_name, char *var_name) 3241 { 3242 struct trace_event_file *file = target_hist_data->event_file; 3243 3244 if (subsys_name) { 3245 struct trace_event_call *call; 3246 3247 if (!event_name) 3248 return NULL; 3249 3250 call = file->event_call; 3251 3252 if (strcmp(subsys_name, call->class->system) != 0) 3253 return NULL; 3254 3255 if (strcmp(event_name, trace_event_name(call)) != 0) 3256 return NULL; 3257 } 3258 3259 return create_field_var(target_hist_data, file, var_name); 3260 } 3261 3262 static bool check_track_val_max(u64 track_val, u64 var_val) 3263 { 3264 if (var_val <= track_val) 3265 return false; 3266 3267 return true; 3268 } 3269 3270 static bool check_track_val_changed(u64 track_val, u64 var_val) 3271 { 3272 if (var_val == track_val) 3273 return false; 3274 3275 return true; 3276 } 3277 3278 static u64 get_track_val(struct hist_trigger_data *hist_data, 3279 struct tracing_map_elt *elt, 3280 struct action_data *data) 3281 { 3282 unsigned int track_var_idx = data->track_data.track_var->var.idx; 3283 u64 track_val; 3284 3285 track_val = tracing_map_read_var(elt, track_var_idx); 3286 3287 return track_val; 3288 } 3289 3290 static void save_track_val(struct hist_trigger_data *hist_data, 3291 struct tracing_map_elt *elt, 3292 struct action_data *data, u64 var_val) 3293 { 3294 unsigned int track_var_idx = data->track_data.track_var->var.idx; 3295 3296 tracing_map_set_var(elt, track_var_idx, var_val); 3297 } 3298 3299 static void save_track_data(struct hist_trigger_data *hist_data, 3300 struct tracing_map_elt *elt, 3301 struct trace_buffer *buffer, void *rec, 3302 struct ring_buffer_event *rbe, void *key, 3303 struct action_data *data, u64 *var_ref_vals) 3304 { 3305 if (data->track_data.save_data) 3306 data->track_data.save_data(hist_data, elt, buffer, rec, rbe, 3307 key, data, var_ref_vals); 3308 } 3309 3310 static bool check_track_val(struct tracing_map_elt *elt, 3311 struct action_data *data, 3312 u64 var_val) 3313 { 3314 struct hist_trigger_data *hist_data; 3315 u64 track_val; 3316 3317 hist_data = data->track_data.track_var->hist_data; 3318 track_val = get_track_val(hist_data, elt, data); 3319 3320 return data->track_data.check_val(track_val, var_val); 3321 } 3322 3323 #ifdef CONFIG_TRACER_SNAPSHOT 3324 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) 3325 { 3326 /* called with tr->max_lock held */ 3327 struct track_data *track_data = tr->cond_snapshot->cond_data; 3328 struct hist_elt_data *elt_data, *track_elt_data; 3329 struct snapshot_context *context = cond_data; 3330 struct action_data *action; 3331 u64 track_val; 3332 3333 if (!track_data) 3334 return false; 3335 3336 action = track_data->action_data; 3337 3338 track_val = get_track_val(track_data->hist_data, context->elt, 3339 track_data->action_data); 3340 3341 if (!action->track_data.check_val(track_data->track_val, track_val)) 3342 return false; 3343 3344 track_data->track_val = track_val; 3345 memcpy(track_data->key, context->key, track_data->key_len); 3346 3347 elt_data = context->elt->private_data; 3348 track_elt_data = track_data->elt.private_data; 3349 if (elt_data->comm) 3350 strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN); 3351 3352 track_data->updated = true; 3353 3354 return true; 3355 } 3356 3357 static void save_track_data_snapshot(struct hist_trigger_data *hist_data, 3358 struct tracing_map_elt *elt, 3359 struct trace_buffer *buffer, void *rec, 3360 struct ring_buffer_event *rbe, void *key, 3361 struct action_data *data, 3362 u64 *var_ref_vals) 3363 { 3364 struct trace_event_file *file = hist_data->event_file; 3365 struct snapshot_context context; 3366 3367 context.elt = elt; 3368 context.key = key; 3369 3370 tracing_snapshot_cond(file->tr, &context); 3371 } 3372 3373 static void hist_trigger_print_key(struct seq_file *m, 3374 struct hist_trigger_data *hist_data, 3375 void *key, 3376 struct tracing_map_elt *elt); 3377 3378 static struct action_data *snapshot_action(struct hist_trigger_data *hist_data) 3379 { 3380 unsigned int i; 3381 3382 if (!hist_data->n_actions) 3383 return NULL; 3384 3385 for (i = 0; i < hist_data->n_actions; i++) { 3386 struct action_data *data = hist_data->actions[i]; 3387 3388 if (data->action == ACTION_SNAPSHOT) 3389 return data; 3390 } 3391 3392 return NULL; 3393 } 3394 3395 static void track_data_snapshot_print(struct seq_file *m, 3396 struct hist_trigger_data *hist_data) 3397 { 3398 struct trace_event_file *file = hist_data->event_file; 3399 struct track_data *track_data; 3400 struct action_data *action; 3401 3402 track_data = tracing_cond_snapshot_data(file->tr); 3403 if (!track_data) 3404 return; 3405 3406 if (!track_data->updated) 3407 return; 3408 3409 action = snapshot_action(hist_data); 3410 if (!action) 3411 return; 3412 3413 seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n"); 3414 seq_printf(m, "\ttriggering value { %s(%s) }: %10llu", 3415 action->handler == HANDLER_ONMAX ? "onmax" : "onchange", 3416 action->track_data.var_str, track_data->track_val); 3417 3418 seq_puts(m, "\ttriggered by event with key: "); 3419 hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt); 3420 seq_putc(m, '\n'); 3421 } 3422 #else 3423 static bool cond_snapshot_update(struct trace_array *tr, void *cond_data) 3424 { 3425 return false; 3426 } 3427 static void save_track_data_snapshot(struct hist_trigger_data *hist_data, 3428 struct tracing_map_elt *elt, 3429 struct trace_buffer *buffer, void *rec, 3430 struct ring_buffer_event *rbe, void *key, 3431 struct action_data *data, 3432 u64 *var_ref_vals) {} 3433 static void track_data_snapshot_print(struct seq_file *m, 3434 struct hist_trigger_data *hist_data) {} 3435 #endif /* CONFIG_TRACER_SNAPSHOT */ 3436 3437 static void track_data_print(struct seq_file *m, 3438 struct hist_trigger_data *hist_data, 3439 struct tracing_map_elt *elt, 3440 struct action_data *data) 3441 { 3442 u64 track_val = get_track_val(hist_data, elt, data); 3443 unsigned int i, save_var_idx; 3444 3445 if (data->handler == HANDLER_ONMAX) 3446 seq_printf(m, "\n\tmax: %10llu", track_val); 3447 else if (data->handler == HANDLER_ONCHANGE) 3448 seq_printf(m, "\n\tchanged: %10llu", track_val); 3449 3450 if (data->action == ACTION_SNAPSHOT) 3451 return; 3452 3453 for (i = 0; i < hist_data->n_save_vars; i++) { 3454 struct hist_field *save_val = hist_data->save_vars[i]->val; 3455 struct hist_field *save_var = hist_data->save_vars[i]->var; 3456 u64 val; 3457 3458 save_var_idx = save_var->var.idx; 3459 3460 val = tracing_map_read_var(elt, save_var_idx); 3461 3462 if (save_val->flags & HIST_FIELD_FL_STRING) { 3463 seq_printf(m, " %s: %-32s", save_var->var.name, 3464 (char *)(uintptr_t)(val)); 3465 } else 3466 seq_printf(m, " %s: %10llu", save_var->var.name, val); 3467 } 3468 } 3469 3470 static void ontrack_action(struct hist_trigger_data *hist_data, 3471 struct tracing_map_elt *elt, 3472 struct trace_buffer *buffer, void *rec, 3473 struct ring_buffer_event *rbe, void *key, 3474 struct action_data *data, u64 *var_ref_vals) 3475 { 3476 u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx]; 3477 3478 if (check_track_val(elt, data, var_val)) { 3479 save_track_val(hist_data, elt, data, var_val); 3480 save_track_data(hist_data, elt, buffer, rec, rbe, 3481 key, data, var_ref_vals); 3482 } 3483 } 3484 3485 static void action_data_destroy(struct action_data *data) 3486 { 3487 unsigned int i; 3488 3489 lockdep_assert_held(&event_mutex); 3490 3491 kfree(data->action_name); 3492 3493 for (i = 0; i < data->n_params; i++) 3494 kfree(data->params[i]); 3495 3496 if (data->synth_event) 3497 data->synth_event->ref--; 3498 3499 kfree(data->synth_event_name); 3500 3501 kfree(data); 3502 } 3503 3504 static void track_data_destroy(struct hist_trigger_data *hist_data, 3505 struct action_data *data) 3506 { 3507 struct trace_event_file *file = hist_data->event_file; 3508 3509 destroy_hist_field(data->track_data.track_var, 0); 3510 3511 if (data->action == ACTION_SNAPSHOT) { 3512 struct track_data *track_data; 3513 3514 track_data = tracing_cond_snapshot_data(file->tr); 3515 if (track_data && track_data->hist_data == hist_data) { 3516 tracing_snapshot_cond_disable(file->tr); 3517 track_data_free(track_data); 3518 } 3519 } 3520 3521 kfree(data->track_data.var_str); 3522 3523 action_data_destroy(data); 3524 } 3525 3526 static int action_create(struct hist_trigger_data *hist_data, 3527 struct action_data *data); 3528 3529 static int track_data_create(struct hist_trigger_data *hist_data, 3530 struct action_data *data) 3531 { 3532 struct hist_field *var_field, *ref_field, *track_var = NULL; 3533 struct trace_event_file *file = hist_data->event_file; 3534 struct trace_array *tr = file->tr; 3535 char *track_data_var_str; 3536 int ret = 0; 3537 3538 track_data_var_str = data->track_data.var_str; 3539 if (track_data_var_str[0] != '$') { 3540 hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str)); 3541 return -EINVAL; 3542 } 3543 track_data_var_str++; 3544 3545 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str); 3546 if (!var_field) { 3547 hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str)); 3548 return -EINVAL; 3549 } 3550 3551 ref_field = create_var_ref(hist_data, var_field, NULL, NULL); 3552 if (!ref_field) 3553 return -ENOMEM; 3554 3555 data->track_data.var_ref = ref_field; 3556 3557 if (data->handler == HANDLER_ONMAX) 3558 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64"); 3559 if (IS_ERR(track_var)) { 3560 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3561 ret = PTR_ERR(track_var); 3562 goto out; 3563 } 3564 3565 if (data->handler == HANDLER_ONCHANGE) 3566 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64"); 3567 if (IS_ERR(track_var)) { 3568 hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3569 ret = PTR_ERR(track_var); 3570 goto out; 3571 } 3572 data->track_data.track_var = track_var; 3573 3574 ret = action_create(hist_data, data); 3575 out: 3576 return ret; 3577 } 3578 3579 static int parse_action_params(struct trace_array *tr, char *params, 3580 struct action_data *data) 3581 { 3582 char *param, *saved_param; 3583 bool first_param = true; 3584 int ret = 0; 3585 3586 while (params) { 3587 if (data->n_params >= SYNTH_FIELDS_MAX) { 3588 hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0); 3589 goto out; 3590 } 3591 3592 param = strsep(¶ms, ","); 3593 if (!param) { 3594 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0); 3595 ret = -EINVAL; 3596 goto out; 3597 } 3598 3599 param = strstrip(param); 3600 if (strlen(param) < 2) { 3601 hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param)); 3602 ret = -EINVAL; 3603 goto out; 3604 } 3605 3606 saved_param = kstrdup(param, GFP_KERNEL); 3607 if (!saved_param) { 3608 ret = -ENOMEM; 3609 goto out; 3610 } 3611 3612 if (first_param && data->use_trace_keyword) { 3613 data->synth_event_name = saved_param; 3614 first_param = false; 3615 continue; 3616 } 3617 first_param = false; 3618 3619 data->params[data->n_params++] = saved_param; 3620 } 3621 out: 3622 return ret; 3623 } 3624 3625 static int action_parse(struct trace_array *tr, char *str, struct action_data *data, 3626 enum handler_id handler) 3627 { 3628 char *action_name; 3629 int ret = 0; 3630 3631 strsep(&str, "."); 3632 if (!str) { 3633 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3634 ret = -EINVAL; 3635 goto out; 3636 } 3637 3638 action_name = strsep(&str, "("); 3639 if (!action_name || !str) { 3640 hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3641 ret = -EINVAL; 3642 goto out; 3643 } 3644 3645 if (str_has_prefix(action_name, "save")) { 3646 char *params = strsep(&str, ")"); 3647 3648 if (!params) { 3649 hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0); 3650 ret = -EINVAL; 3651 goto out; 3652 } 3653 3654 ret = parse_action_params(tr, params, data); 3655 if (ret) 3656 goto out; 3657 3658 if (handler == HANDLER_ONMAX) 3659 data->track_data.check_val = check_track_val_max; 3660 else if (handler == HANDLER_ONCHANGE) 3661 data->track_data.check_val = check_track_val_changed; 3662 else { 3663 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3664 ret = -EINVAL; 3665 goto out; 3666 } 3667 3668 data->track_data.save_data = save_track_data_vars; 3669 data->fn = ontrack_action; 3670 data->action = ACTION_SAVE; 3671 } else if (str_has_prefix(action_name, "snapshot")) { 3672 char *params = strsep(&str, ")"); 3673 3674 if (!str) { 3675 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params)); 3676 ret = -EINVAL; 3677 goto out; 3678 } 3679 3680 if (handler == HANDLER_ONMAX) 3681 data->track_data.check_val = check_track_val_max; 3682 else if (handler == HANDLER_ONCHANGE) 3683 data->track_data.check_val = check_track_val_changed; 3684 else { 3685 hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3686 ret = -EINVAL; 3687 goto out; 3688 } 3689 3690 data->track_data.save_data = save_track_data_snapshot; 3691 data->fn = ontrack_action; 3692 data->action = ACTION_SNAPSHOT; 3693 } else { 3694 char *params = strsep(&str, ")"); 3695 3696 if (str_has_prefix(action_name, "trace")) 3697 data->use_trace_keyword = true; 3698 3699 if (params) { 3700 ret = parse_action_params(tr, params, data); 3701 if (ret) 3702 goto out; 3703 } 3704 3705 if (handler == HANDLER_ONMAX) 3706 data->track_data.check_val = check_track_val_max; 3707 else if (handler == HANDLER_ONCHANGE) 3708 data->track_data.check_val = check_track_val_changed; 3709 3710 if (handler != HANDLER_ONMATCH) { 3711 data->track_data.save_data = action_trace; 3712 data->fn = ontrack_action; 3713 } else 3714 data->fn = action_trace; 3715 3716 data->action = ACTION_TRACE; 3717 } 3718 3719 data->action_name = kstrdup(action_name, GFP_KERNEL); 3720 if (!data->action_name) { 3721 ret = -ENOMEM; 3722 goto out; 3723 } 3724 3725 data->handler = handler; 3726 out: 3727 return ret; 3728 } 3729 3730 static struct action_data *track_data_parse(struct hist_trigger_data *hist_data, 3731 char *str, enum handler_id handler) 3732 { 3733 struct action_data *data; 3734 int ret = -EINVAL; 3735 char *var_str; 3736 3737 data = kzalloc(sizeof(*data), GFP_KERNEL); 3738 if (!data) 3739 return ERR_PTR(-ENOMEM); 3740 3741 var_str = strsep(&str, ")"); 3742 if (!var_str || !str) { 3743 ret = -EINVAL; 3744 goto free; 3745 } 3746 3747 data->track_data.var_str = kstrdup(var_str, GFP_KERNEL); 3748 if (!data->track_data.var_str) { 3749 ret = -ENOMEM; 3750 goto free; 3751 } 3752 3753 ret = action_parse(hist_data->event_file->tr, str, data, handler); 3754 if (ret) 3755 goto free; 3756 out: 3757 return data; 3758 free: 3759 track_data_destroy(hist_data, data); 3760 data = ERR_PTR(ret); 3761 goto out; 3762 } 3763 3764 static void onmatch_destroy(struct action_data *data) 3765 { 3766 kfree(data->match_data.event); 3767 kfree(data->match_data.event_system); 3768 3769 action_data_destroy(data); 3770 } 3771 3772 static void destroy_field_var(struct field_var *field_var) 3773 { 3774 if (!field_var) 3775 return; 3776 3777 destroy_hist_field(field_var->var, 0); 3778 destroy_hist_field(field_var->val, 0); 3779 3780 kfree(field_var); 3781 } 3782 3783 static void destroy_field_vars(struct hist_trigger_data *hist_data) 3784 { 3785 unsigned int i; 3786 3787 for (i = 0; i < hist_data->n_field_vars; i++) 3788 destroy_field_var(hist_data->field_vars[i]); 3789 3790 for (i = 0; i < hist_data->n_save_vars; i++) 3791 destroy_field_var(hist_data->save_vars[i]); 3792 } 3793 3794 static void save_field_var(struct hist_trigger_data *hist_data, 3795 struct field_var *field_var) 3796 { 3797 hist_data->field_vars[hist_data->n_field_vars++] = field_var; 3798 3799 if (field_var->val->flags & HIST_FIELD_FL_STRING) 3800 hist_data->n_field_var_str++; 3801 } 3802 3803 3804 static int check_synth_field(struct synth_event *event, 3805 struct hist_field *hist_field, 3806 unsigned int field_pos) 3807 { 3808 struct synth_field *field; 3809 3810 if (field_pos >= event->n_fields) 3811 return -EINVAL; 3812 3813 field = event->fields[field_pos]; 3814 3815 /* 3816 * A dynamic string synth field can accept static or 3817 * dynamic. A static string synth field can only accept a 3818 * same-sized static string, which is checked for later. 3819 */ 3820 if (strstr(hist_field->type, "char[") && field->is_string 3821 && field->is_dynamic) 3822 return 0; 3823 3824 if (strcmp(field->type, hist_field->type) != 0) { 3825 if (field->size != hist_field->size || 3826 (!field->is_string && field->is_signed != hist_field->is_signed)) 3827 return -EINVAL; 3828 } 3829 3830 return 0; 3831 } 3832 3833 static struct hist_field * 3834 trace_action_find_var(struct hist_trigger_data *hist_data, 3835 struct action_data *data, 3836 char *system, char *event, char *var) 3837 { 3838 struct trace_array *tr = hist_data->event_file->tr; 3839 struct hist_field *hist_field; 3840 3841 var++; /* skip '$' */ 3842 3843 hist_field = find_target_event_var(hist_data, system, event, var); 3844 if (!hist_field) { 3845 if (!system && data->handler == HANDLER_ONMATCH) { 3846 system = data->match_data.event_system; 3847 event = data->match_data.event; 3848 } 3849 3850 hist_field = find_event_var(hist_data, system, event, var); 3851 } 3852 3853 if (!hist_field) 3854 hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var)); 3855 3856 return hist_field; 3857 } 3858 3859 static struct hist_field * 3860 trace_action_create_field_var(struct hist_trigger_data *hist_data, 3861 struct action_data *data, char *system, 3862 char *event, char *var) 3863 { 3864 struct hist_field *hist_field = NULL; 3865 struct field_var *field_var; 3866 3867 /* 3868 * First try to create a field var on the target event (the 3869 * currently being defined). This will create a variable for 3870 * unqualified fields on the target event, or if qualified, 3871 * target fields that have qualified names matching the target. 3872 */ 3873 field_var = create_target_field_var(hist_data, system, event, var); 3874 3875 if (field_var && !IS_ERR(field_var)) { 3876 save_field_var(hist_data, field_var); 3877 hist_field = field_var->var; 3878 } else { 3879 field_var = NULL; 3880 /* 3881 * If no explicit system.event is specified, default to 3882 * looking for fields on the onmatch(system.event.xxx) 3883 * event. 3884 */ 3885 if (!system && data->handler == HANDLER_ONMATCH) { 3886 system = data->match_data.event_system; 3887 event = data->match_data.event; 3888 } 3889 3890 if (!event) 3891 goto free; 3892 /* 3893 * At this point, we're looking at a field on another 3894 * event. Because we can't modify a hist trigger on 3895 * another event to add a variable for a field, we need 3896 * to create a new trigger on that event and create the 3897 * variable at the same time. 3898 */ 3899 hist_field = create_field_var_hist(hist_data, system, event, var); 3900 if (IS_ERR(hist_field)) 3901 goto free; 3902 } 3903 out: 3904 return hist_field; 3905 free: 3906 destroy_field_var(field_var); 3907 hist_field = NULL; 3908 goto out; 3909 } 3910 3911 static int trace_action_create(struct hist_trigger_data *hist_data, 3912 struct action_data *data) 3913 { 3914 struct trace_array *tr = hist_data->event_file->tr; 3915 char *event_name, *param, *system = NULL; 3916 struct hist_field *hist_field, *var_ref; 3917 unsigned int i; 3918 unsigned int field_pos = 0; 3919 struct synth_event *event; 3920 char *synth_event_name; 3921 int var_ref_idx, ret = 0; 3922 3923 lockdep_assert_held(&event_mutex); 3924 3925 if (data->use_trace_keyword) 3926 synth_event_name = data->synth_event_name; 3927 else 3928 synth_event_name = data->action_name; 3929 3930 event = find_synth_event(synth_event_name); 3931 if (!event) { 3932 hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name)); 3933 return -EINVAL; 3934 } 3935 3936 event->ref++; 3937 3938 for (i = 0; i < data->n_params; i++) { 3939 char *p; 3940 3941 p = param = kstrdup(data->params[i], GFP_KERNEL); 3942 if (!param) { 3943 ret = -ENOMEM; 3944 goto err; 3945 } 3946 3947 system = strsep(¶m, "."); 3948 if (!param) { 3949 param = (char *)system; 3950 system = event_name = NULL; 3951 } else { 3952 event_name = strsep(¶m, "."); 3953 if (!param) { 3954 kfree(p); 3955 ret = -EINVAL; 3956 goto err; 3957 } 3958 } 3959 3960 if (param[0] == '$') 3961 hist_field = trace_action_find_var(hist_data, data, 3962 system, event_name, 3963 param); 3964 else 3965 hist_field = trace_action_create_field_var(hist_data, 3966 data, 3967 system, 3968 event_name, 3969 param); 3970 3971 if (!hist_field) { 3972 kfree(p); 3973 ret = -EINVAL; 3974 goto err; 3975 } 3976 3977 if (check_synth_field(event, hist_field, field_pos) == 0) { 3978 var_ref = create_var_ref(hist_data, hist_field, 3979 system, event_name); 3980 if (!var_ref) { 3981 kfree(p); 3982 ret = -ENOMEM; 3983 goto err; 3984 } 3985 3986 var_ref_idx = find_var_ref_idx(hist_data, var_ref); 3987 if (WARN_ON(var_ref_idx < 0)) { 3988 kfree(p); 3989 ret = var_ref_idx; 3990 goto err; 3991 } 3992 3993 data->var_ref_idx[i] = var_ref_idx; 3994 3995 field_pos++; 3996 kfree(p); 3997 continue; 3998 } 3999 4000 hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param)); 4001 kfree(p); 4002 ret = -EINVAL; 4003 goto err; 4004 } 4005 4006 if (field_pos != event->n_fields) { 4007 hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name)); 4008 ret = -EINVAL; 4009 goto err; 4010 } 4011 4012 data->synth_event = event; 4013 out: 4014 return ret; 4015 err: 4016 event->ref--; 4017 4018 goto out; 4019 } 4020 4021 static int action_create(struct hist_trigger_data *hist_data, 4022 struct action_data *data) 4023 { 4024 struct trace_event_file *file = hist_data->event_file; 4025 struct trace_array *tr = file->tr; 4026 struct track_data *track_data; 4027 struct field_var *field_var; 4028 unsigned int i; 4029 char *param; 4030 int ret = 0; 4031 4032 if (data->action == ACTION_TRACE) 4033 return trace_action_create(hist_data, data); 4034 4035 if (data->action == ACTION_SNAPSHOT) { 4036 track_data = track_data_alloc(hist_data->key_size, data, hist_data); 4037 if (IS_ERR(track_data)) { 4038 ret = PTR_ERR(track_data); 4039 goto out; 4040 } 4041 4042 ret = tracing_snapshot_cond_enable(file->tr, track_data, 4043 cond_snapshot_update); 4044 if (ret) 4045 track_data_free(track_data); 4046 4047 goto out; 4048 } 4049 4050 if (data->action == ACTION_SAVE) { 4051 if (hist_data->n_save_vars) { 4052 ret = -EEXIST; 4053 hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0); 4054 goto out; 4055 } 4056 4057 for (i = 0; i < data->n_params; i++) { 4058 param = kstrdup(data->params[i], GFP_KERNEL); 4059 if (!param) { 4060 ret = -ENOMEM; 4061 goto out; 4062 } 4063 4064 field_var = create_target_field_var(hist_data, NULL, NULL, param); 4065 if (IS_ERR(field_var)) { 4066 hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL, 4067 errpos(param)); 4068 ret = PTR_ERR(field_var); 4069 kfree(param); 4070 goto out; 4071 } 4072 4073 hist_data->save_vars[hist_data->n_save_vars++] = field_var; 4074 if (field_var->val->flags & HIST_FIELD_FL_STRING) 4075 hist_data->n_save_var_str++; 4076 kfree(param); 4077 } 4078 } 4079 out: 4080 return ret; 4081 } 4082 4083 static int onmatch_create(struct hist_trigger_data *hist_data, 4084 struct action_data *data) 4085 { 4086 return action_create(hist_data, data); 4087 } 4088 4089 static struct action_data *onmatch_parse(struct trace_array *tr, char *str) 4090 { 4091 char *match_event, *match_event_system; 4092 struct action_data *data; 4093 int ret = -EINVAL; 4094 4095 data = kzalloc(sizeof(*data), GFP_KERNEL); 4096 if (!data) 4097 return ERR_PTR(-ENOMEM); 4098 4099 match_event = strsep(&str, ")"); 4100 if (!match_event || !str) { 4101 hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event)); 4102 goto free; 4103 } 4104 4105 match_event_system = strsep(&match_event, "."); 4106 if (!match_event) { 4107 hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system)); 4108 goto free; 4109 } 4110 4111 if (IS_ERR(event_file(tr, match_event_system, match_event))) { 4112 hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event)); 4113 goto free; 4114 } 4115 4116 data->match_data.event = kstrdup(match_event, GFP_KERNEL); 4117 if (!data->match_data.event) { 4118 ret = -ENOMEM; 4119 goto free; 4120 } 4121 4122 data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL); 4123 if (!data->match_data.event_system) { 4124 ret = -ENOMEM; 4125 goto free; 4126 } 4127 4128 ret = action_parse(tr, str, data, HANDLER_ONMATCH); 4129 if (ret) 4130 goto free; 4131 out: 4132 return data; 4133 free: 4134 onmatch_destroy(data); 4135 data = ERR_PTR(ret); 4136 goto out; 4137 } 4138 4139 static int create_hitcount_val(struct hist_trigger_data *hist_data) 4140 { 4141 hist_data->fields[HITCOUNT_IDX] = 4142 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL); 4143 if (!hist_data->fields[HITCOUNT_IDX]) 4144 return -ENOMEM; 4145 4146 hist_data->n_vals++; 4147 hist_data->n_fields++; 4148 4149 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX)) 4150 return -EINVAL; 4151 4152 return 0; 4153 } 4154 4155 static int __create_val_field(struct hist_trigger_data *hist_data, 4156 unsigned int val_idx, 4157 struct trace_event_file *file, 4158 char *var_name, char *field_str, 4159 unsigned long flags) 4160 { 4161 struct hist_field *hist_field; 4162 int ret = 0, n_subexprs = 0; 4163 4164 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, &n_subexprs); 4165 if (IS_ERR(hist_field)) { 4166 ret = PTR_ERR(hist_field); 4167 goto out; 4168 } 4169 4170 hist_data->fields[val_idx] = hist_field; 4171 4172 ++hist_data->n_vals; 4173 ++hist_data->n_fields; 4174 4175 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 4176 ret = -EINVAL; 4177 out: 4178 return ret; 4179 } 4180 4181 static int create_val_field(struct hist_trigger_data *hist_data, 4182 unsigned int val_idx, 4183 struct trace_event_file *file, 4184 char *field_str) 4185 { 4186 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX)) 4187 return -EINVAL; 4188 4189 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0); 4190 } 4191 4192 static const char no_comm[] = "(no comm)"; 4193 4194 static u64 hist_field_execname(struct hist_field *hist_field, 4195 struct tracing_map_elt *elt, 4196 struct trace_buffer *buffer, 4197 struct ring_buffer_event *rbe, 4198 void *event) 4199 { 4200 struct hist_elt_data *elt_data; 4201 4202 if (WARN_ON_ONCE(!elt)) 4203 return (u64)(unsigned long)no_comm; 4204 4205 elt_data = elt->private_data; 4206 4207 if (WARN_ON_ONCE(!elt_data->comm)) 4208 return (u64)(unsigned long)no_comm; 4209 4210 return (u64)(unsigned long)(elt_data->comm); 4211 } 4212 4213 static u64 hist_fn_call(struct hist_field *hist_field, 4214 struct tracing_map_elt *elt, 4215 struct trace_buffer *buffer, 4216 struct ring_buffer_event *rbe, 4217 void *event) 4218 { 4219 switch (hist_field->fn_num) { 4220 case HIST_FIELD_FN_VAR_REF: 4221 return hist_field_var_ref(hist_field, elt, buffer, rbe, event); 4222 case HIST_FIELD_FN_COUNTER: 4223 return hist_field_counter(hist_field, elt, buffer, rbe, event); 4224 case HIST_FIELD_FN_CONST: 4225 return hist_field_const(hist_field, elt, buffer, rbe, event); 4226 case HIST_FIELD_FN_LOG2: 4227 return hist_field_log2(hist_field, elt, buffer, rbe, event); 4228 case HIST_FIELD_FN_BUCKET: 4229 return hist_field_bucket(hist_field, elt, buffer, rbe, event); 4230 case HIST_FIELD_FN_TIMESTAMP: 4231 return hist_field_timestamp(hist_field, elt, buffer, rbe, event); 4232 case HIST_FIELD_FN_CPU: 4233 return hist_field_cpu(hist_field, elt, buffer, rbe, event); 4234 case HIST_FIELD_FN_STRING: 4235 return hist_field_string(hist_field, elt, buffer, rbe, event); 4236 case HIST_FIELD_FN_DYNSTRING: 4237 return hist_field_dynstring(hist_field, elt, buffer, rbe, event); 4238 case HIST_FIELD_FN_RELDYNSTRING: 4239 return hist_field_reldynstring(hist_field, elt, buffer, rbe, event); 4240 case HIST_FIELD_FN_PSTRING: 4241 return hist_field_pstring(hist_field, elt, buffer, rbe, event); 4242 case HIST_FIELD_FN_S64: 4243 return hist_field_s64(hist_field, elt, buffer, rbe, event); 4244 case HIST_FIELD_FN_U64: 4245 return hist_field_u64(hist_field, elt, buffer, rbe, event); 4246 case HIST_FIELD_FN_S32: 4247 return hist_field_s32(hist_field, elt, buffer, rbe, event); 4248 case HIST_FIELD_FN_U32: 4249 return hist_field_u32(hist_field, elt, buffer, rbe, event); 4250 case HIST_FIELD_FN_S16: 4251 return hist_field_s16(hist_field, elt, buffer, rbe, event); 4252 case HIST_FIELD_FN_U16: 4253 return hist_field_u16(hist_field, elt, buffer, rbe, event); 4254 case HIST_FIELD_FN_S8: 4255 return hist_field_s8(hist_field, elt, buffer, rbe, event); 4256 case HIST_FIELD_FN_U8: 4257 return hist_field_u8(hist_field, elt, buffer, rbe, event); 4258 case HIST_FIELD_FN_UMINUS: 4259 return hist_field_unary_minus(hist_field, elt, buffer, rbe, event); 4260 case HIST_FIELD_FN_MINUS: 4261 return hist_field_minus(hist_field, elt, buffer, rbe, event); 4262 case HIST_FIELD_FN_PLUS: 4263 return hist_field_plus(hist_field, elt, buffer, rbe, event); 4264 case HIST_FIELD_FN_DIV: 4265 return hist_field_div(hist_field, elt, buffer, rbe, event); 4266 case HIST_FIELD_FN_MULT: 4267 return hist_field_mult(hist_field, elt, buffer, rbe, event); 4268 case HIST_FIELD_FN_DIV_POWER2: 4269 return div_by_power_of_two(hist_field, elt, buffer, rbe, event); 4270 case HIST_FIELD_FN_DIV_NOT_POWER2: 4271 return div_by_not_power_of_two(hist_field, elt, buffer, rbe, event); 4272 case HIST_FIELD_FN_DIV_MULT_SHIFT: 4273 return div_by_mult_and_shift(hist_field, elt, buffer, rbe, event); 4274 case HIST_FIELD_FN_EXECNAME: 4275 return hist_field_execname(hist_field, elt, buffer, rbe, event); 4276 default: 4277 return 0; 4278 } 4279 } 4280 4281 /* Convert a var that points to common_pid.execname to a string */ 4282 static void update_var_execname(struct hist_field *hist_field) 4283 { 4284 hist_field->flags = HIST_FIELD_FL_STRING | HIST_FIELD_FL_VAR | 4285 HIST_FIELD_FL_EXECNAME; 4286 hist_field->size = MAX_FILTER_STR_VAL; 4287 hist_field->is_signed = 0; 4288 4289 kfree_const(hist_field->type); 4290 hist_field->type = "char[]"; 4291 4292 hist_field->fn_num = HIST_FIELD_FN_EXECNAME; 4293 } 4294 4295 static int create_var_field(struct hist_trigger_data *hist_data, 4296 unsigned int val_idx, 4297 struct trace_event_file *file, 4298 char *var_name, char *expr_str) 4299 { 4300 struct trace_array *tr = hist_data->event_file->tr; 4301 unsigned long flags = 0; 4302 int ret; 4303 4304 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 4305 return -EINVAL; 4306 4307 if (find_var(hist_data, file, var_name) && !hist_data->remove) { 4308 hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name)); 4309 return -EINVAL; 4310 } 4311 4312 flags |= HIST_FIELD_FL_VAR; 4313 hist_data->n_vars++; 4314 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX)) 4315 return -EINVAL; 4316 4317 ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags); 4318 4319 if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_EXECNAME) 4320 update_var_execname(hist_data->fields[val_idx]); 4321 4322 if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_STRING) 4323 hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++; 4324 4325 return ret; 4326 } 4327 4328 static int create_val_fields(struct hist_trigger_data *hist_data, 4329 struct trace_event_file *file) 4330 { 4331 char *fields_str, *field_str; 4332 unsigned int i, j = 1; 4333 int ret; 4334 4335 ret = create_hitcount_val(hist_data); 4336 if (ret) 4337 goto out; 4338 4339 fields_str = hist_data->attrs->vals_str; 4340 if (!fields_str) 4341 goto out; 4342 4343 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX && 4344 j < TRACING_MAP_VALS_MAX; i++) { 4345 field_str = strsep(&fields_str, ","); 4346 if (!field_str) 4347 break; 4348 4349 if (strcmp(field_str, "hitcount") == 0) 4350 continue; 4351 4352 ret = create_val_field(hist_data, j++, file, field_str); 4353 if (ret) 4354 goto out; 4355 } 4356 4357 if (fields_str && (strcmp(fields_str, "hitcount") != 0)) 4358 ret = -EINVAL; 4359 out: 4360 return ret; 4361 } 4362 4363 static int create_key_field(struct hist_trigger_data *hist_data, 4364 unsigned int key_idx, 4365 unsigned int key_offset, 4366 struct trace_event_file *file, 4367 char *field_str) 4368 { 4369 struct trace_array *tr = hist_data->event_file->tr; 4370 struct hist_field *hist_field = NULL; 4371 unsigned long flags = 0; 4372 unsigned int key_size; 4373 int ret = 0, n_subexprs = 0; 4374 4375 if (WARN_ON(key_idx >= HIST_FIELDS_MAX)) 4376 return -EINVAL; 4377 4378 flags |= HIST_FIELD_FL_KEY; 4379 4380 if (strcmp(field_str, "stacktrace") == 0) { 4381 flags |= HIST_FIELD_FL_STACKTRACE; 4382 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH; 4383 hist_field = create_hist_field(hist_data, NULL, flags, NULL); 4384 } else { 4385 hist_field = parse_expr(hist_data, file, field_str, flags, 4386 NULL, &n_subexprs); 4387 if (IS_ERR(hist_field)) { 4388 ret = PTR_ERR(hist_field); 4389 goto out; 4390 } 4391 4392 if (field_has_hist_vars(hist_field, 0)) { 4393 hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str)); 4394 destroy_hist_field(hist_field, 0); 4395 ret = -EINVAL; 4396 goto out; 4397 } 4398 4399 key_size = hist_field->size; 4400 } 4401 4402 hist_data->fields[key_idx] = hist_field; 4403 4404 key_size = ALIGN(key_size, sizeof(u64)); 4405 hist_data->fields[key_idx]->size = key_size; 4406 hist_data->fields[key_idx]->offset = key_offset; 4407 4408 hist_data->key_size += key_size; 4409 4410 if (hist_data->key_size > HIST_KEY_SIZE_MAX) { 4411 ret = -EINVAL; 4412 goto out; 4413 } 4414 4415 hist_data->n_keys++; 4416 hist_data->n_fields++; 4417 4418 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX)) 4419 return -EINVAL; 4420 4421 ret = key_size; 4422 out: 4423 return ret; 4424 } 4425 4426 static int create_key_fields(struct hist_trigger_data *hist_data, 4427 struct trace_event_file *file) 4428 { 4429 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals; 4430 char *fields_str, *field_str; 4431 int ret = -EINVAL; 4432 4433 fields_str = hist_data->attrs->keys_str; 4434 if (!fields_str) 4435 goto out; 4436 4437 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) { 4438 field_str = strsep(&fields_str, ","); 4439 if (!field_str) 4440 break; 4441 ret = create_key_field(hist_data, i, key_offset, 4442 file, field_str); 4443 if (ret < 0) 4444 goto out; 4445 key_offset += ret; 4446 } 4447 if (fields_str) { 4448 ret = -EINVAL; 4449 goto out; 4450 } 4451 ret = 0; 4452 out: 4453 return ret; 4454 } 4455 4456 static int create_var_fields(struct hist_trigger_data *hist_data, 4457 struct trace_event_file *file) 4458 { 4459 unsigned int i, j = hist_data->n_vals; 4460 int ret = 0; 4461 4462 unsigned int n_vars = hist_data->attrs->var_defs.n_vars; 4463 4464 for (i = 0; i < n_vars; i++) { 4465 char *var_name = hist_data->attrs->var_defs.name[i]; 4466 char *expr = hist_data->attrs->var_defs.expr[i]; 4467 4468 ret = create_var_field(hist_data, j++, file, var_name, expr); 4469 if (ret) 4470 goto out; 4471 } 4472 out: 4473 return ret; 4474 } 4475 4476 static void free_var_defs(struct hist_trigger_data *hist_data) 4477 { 4478 unsigned int i; 4479 4480 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) { 4481 kfree(hist_data->attrs->var_defs.name[i]); 4482 kfree(hist_data->attrs->var_defs.expr[i]); 4483 } 4484 4485 hist_data->attrs->var_defs.n_vars = 0; 4486 } 4487 4488 static int parse_var_defs(struct hist_trigger_data *hist_data) 4489 { 4490 struct trace_array *tr = hist_data->event_file->tr; 4491 char *s, *str, *var_name, *field_str; 4492 unsigned int i, j, n_vars = 0; 4493 int ret = 0; 4494 4495 for (i = 0; i < hist_data->attrs->n_assignments; i++) { 4496 str = hist_data->attrs->assignment_str[i]; 4497 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) { 4498 field_str = strsep(&str, ","); 4499 if (!field_str) 4500 break; 4501 4502 var_name = strsep(&field_str, "="); 4503 if (!var_name || !field_str) { 4504 hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT, 4505 errpos(var_name)); 4506 ret = -EINVAL; 4507 goto free; 4508 } 4509 4510 if (n_vars == TRACING_MAP_VARS_MAX) { 4511 hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name)); 4512 ret = -EINVAL; 4513 goto free; 4514 } 4515 4516 s = kstrdup(var_name, GFP_KERNEL); 4517 if (!s) { 4518 ret = -ENOMEM; 4519 goto free; 4520 } 4521 hist_data->attrs->var_defs.name[n_vars] = s; 4522 4523 s = kstrdup(field_str, GFP_KERNEL); 4524 if (!s) { 4525 kfree(hist_data->attrs->var_defs.name[n_vars]); 4526 hist_data->attrs->var_defs.name[n_vars] = NULL; 4527 ret = -ENOMEM; 4528 goto free; 4529 } 4530 hist_data->attrs->var_defs.expr[n_vars++] = s; 4531 4532 hist_data->attrs->var_defs.n_vars = n_vars; 4533 } 4534 } 4535 4536 return ret; 4537 free: 4538 free_var_defs(hist_data); 4539 4540 return ret; 4541 } 4542 4543 static int create_hist_fields(struct hist_trigger_data *hist_data, 4544 struct trace_event_file *file) 4545 { 4546 int ret; 4547 4548 ret = parse_var_defs(hist_data); 4549 if (ret) 4550 return ret; 4551 4552 ret = create_val_fields(hist_data, file); 4553 if (ret) 4554 goto out; 4555 4556 ret = create_var_fields(hist_data, file); 4557 if (ret) 4558 goto out; 4559 4560 ret = create_key_fields(hist_data, file); 4561 4562 out: 4563 free_var_defs(hist_data); 4564 4565 return ret; 4566 } 4567 4568 static int is_descending(struct trace_array *tr, const char *str) 4569 { 4570 if (!str) 4571 return 0; 4572 4573 if (strcmp(str, "descending") == 0) 4574 return 1; 4575 4576 if (strcmp(str, "ascending") == 0) 4577 return 0; 4578 4579 hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str)); 4580 4581 return -EINVAL; 4582 } 4583 4584 static int create_sort_keys(struct hist_trigger_data *hist_data) 4585 { 4586 struct trace_array *tr = hist_data->event_file->tr; 4587 char *fields_str = hist_data->attrs->sort_key_str; 4588 struct tracing_map_sort_key *sort_key; 4589 int descending, ret = 0; 4590 unsigned int i, j, k; 4591 4592 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */ 4593 4594 if (!fields_str) 4595 goto out; 4596 4597 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) { 4598 struct hist_field *hist_field; 4599 char *field_str, *field_name; 4600 const char *test_name; 4601 4602 sort_key = &hist_data->sort_keys[i]; 4603 4604 field_str = strsep(&fields_str, ","); 4605 if (!field_str) 4606 break; 4607 4608 if (!*field_str) { 4609 ret = -EINVAL; 4610 hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort=")); 4611 break; 4612 } 4613 4614 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) { 4615 hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort=")); 4616 ret = -EINVAL; 4617 break; 4618 } 4619 4620 field_name = strsep(&field_str, "."); 4621 if (!field_name || !*field_name) { 4622 ret = -EINVAL; 4623 hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort=")); 4624 break; 4625 } 4626 4627 if (strcmp(field_name, "hitcount") == 0) { 4628 descending = is_descending(tr, field_str); 4629 if (descending < 0) { 4630 ret = descending; 4631 break; 4632 } 4633 sort_key->descending = descending; 4634 continue; 4635 } 4636 4637 for (j = 1, k = 1; j < hist_data->n_fields; j++) { 4638 unsigned int idx; 4639 4640 hist_field = hist_data->fields[j]; 4641 if (hist_field->flags & HIST_FIELD_FL_VAR) 4642 continue; 4643 4644 idx = k++; 4645 4646 test_name = hist_field_name(hist_field, 0); 4647 4648 if (strcmp(field_name, test_name) == 0) { 4649 sort_key->field_idx = idx; 4650 descending = is_descending(tr, field_str); 4651 if (descending < 0) { 4652 ret = descending; 4653 goto out; 4654 } 4655 sort_key->descending = descending; 4656 break; 4657 } 4658 } 4659 if (j == hist_data->n_fields) { 4660 ret = -EINVAL; 4661 hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name)); 4662 break; 4663 } 4664 } 4665 4666 hist_data->n_sort_keys = i; 4667 out: 4668 return ret; 4669 } 4670 4671 static void destroy_actions(struct hist_trigger_data *hist_data) 4672 { 4673 unsigned int i; 4674 4675 for (i = 0; i < hist_data->n_actions; i++) { 4676 struct action_data *data = hist_data->actions[i]; 4677 4678 if (data->handler == HANDLER_ONMATCH) 4679 onmatch_destroy(data); 4680 else if (data->handler == HANDLER_ONMAX || 4681 data->handler == HANDLER_ONCHANGE) 4682 track_data_destroy(hist_data, data); 4683 else 4684 kfree(data); 4685 } 4686 } 4687 4688 static int parse_actions(struct hist_trigger_data *hist_data) 4689 { 4690 struct trace_array *tr = hist_data->event_file->tr; 4691 struct action_data *data; 4692 unsigned int i; 4693 int ret = 0; 4694 char *str; 4695 int len; 4696 4697 for (i = 0; i < hist_data->attrs->n_actions; i++) { 4698 str = hist_data->attrs->action_str[i]; 4699 4700 if ((len = str_has_prefix(str, "onmatch("))) { 4701 char *action_str = str + len; 4702 4703 data = onmatch_parse(tr, action_str); 4704 if (IS_ERR(data)) { 4705 ret = PTR_ERR(data); 4706 break; 4707 } 4708 } else if ((len = str_has_prefix(str, "onmax("))) { 4709 char *action_str = str + len; 4710 4711 data = track_data_parse(hist_data, action_str, 4712 HANDLER_ONMAX); 4713 if (IS_ERR(data)) { 4714 ret = PTR_ERR(data); 4715 break; 4716 } 4717 } else if ((len = str_has_prefix(str, "onchange("))) { 4718 char *action_str = str + len; 4719 4720 data = track_data_parse(hist_data, action_str, 4721 HANDLER_ONCHANGE); 4722 if (IS_ERR(data)) { 4723 ret = PTR_ERR(data); 4724 break; 4725 } 4726 } else { 4727 ret = -EINVAL; 4728 break; 4729 } 4730 4731 hist_data->actions[hist_data->n_actions++] = data; 4732 } 4733 4734 return ret; 4735 } 4736 4737 static int create_actions(struct hist_trigger_data *hist_data) 4738 { 4739 struct action_data *data; 4740 unsigned int i; 4741 int ret = 0; 4742 4743 for (i = 0; i < hist_data->attrs->n_actions; i++) { 4744 data = hist_data->actions[i]; 4745 4746 if (data->handler == HANDLER_ONMATCH) { 4747 ret = onmatch_create(hist_data, data); 4748 if (ret) 4749 break; 4750 } else if (data->handler == HANDLER_ONMAX || 4751 data->handler == HANDLER_ONCHANGE) { 4752 ret = track_data_create(hist_data, data); 4753 if (ret) 4754 break; 4755 } else { 4756 ret = -EINVAL; 4757 break; 4758 } 4759 } 4760 4761 return ret; 4762 } 4763 4764 static void print_actions(struct seq_file *m, 4765 struct hist_trigger_data *hist_data, 4766 struct tracing_map_elt *elt) 4767 { 4768 unsigned int i; 4769 4770 for (i = 0; i < hist_data->n_actions; i++) { 4771 struct action_data *data = hist_data->actions[i]; 4772 4773 if (data->action == ACTION_SNAPSHOT) 4774 continue; 4775 4776 if (data->handler == HANDLER_ONMAX || 4777 data->handler == HANDLER_ONCHANGE) 4778 track_data_print(m, hist_data, elt, data); 4779 } 4780 } 4781 4782 static void print_action_spec(struct seq_file *m, 4783 struct hist_trigger_data *hist_data, 4784 struct action_data *data) 4785 { 4786 unsigned int i; 4787 4788 if (data->action == ACTION_SAVE) { 4789 for (i = 0; i < hist_data->n_save_vars; i++) { 4790 seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name); 4791 if (i < hist_data->n_save_vars - 1) 4792 seq_puts(m, ","); 4793 } 4794 } else if (data->action == ACTION_TRACE) { 4795 if (data->use_trace_keyword) 4796 seq_printf(m, "%s", data->synth_event_name); 4797 for (i = 0; i < data->n_params; i++) { 4798 if (i || data->use_trace_keyword) 4799 seq_puts(m, ","); 4800 seq_printf(m, "%s", data->params[i]); 4801 } 4802 } 4803 } 4804 4805 static void print_track_data_spec(struct seq_file *m, 4806 struct hist_trigger_data *hist_data, 4807 struct action_data *data) 4808 { 4809 if (data->handler == HANDLER_ONMAX) 4810 seq_puts(m, ":onmax("); 4811 else if (data->handler == HANDLER_ONCHANGE) 4812 seq_puts(m, ":onchange("); 4813 seq_printf(m, "%s", data->track_data.var_str); 4814 seq_printf(m, ").%s(", data->action_name); 4815 4816 print_action_spec(m, hist_data, data); 4817 4818 seq_puts(m, ")"); 4819 } 4820 4821 static void print_onmatch_spec(struct seq_file *m, 4822 struct hist_trigger_data *hist_data, 4823 struct action_data *data) 4824 { 4825 seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system, 4826 data->match_data.event); 4827 4828 seq_printf(m, "%s(", data->action_name); 4829 4830 print_action_spec(m, hist_data, data); 4831 4832 seq_puts(m, ")"); 4833 } 4834 4835 static bool actions_match(struct hist_trigger_data *hist_data, 4836 struct hist_trigger_data *hist_data_test) 4837 { 4838 unsigned int i, j; 4839 4840 if (hist_data->n_actions != hist_data_test->n_actions) 4841 return false; 4842 4843 for (i = 0; i < hist_data->n_actions; i++) { 4844 struct action_data *data = hist_data->actions[i]; 4845 struct action_data *data_test = hist_data_test->actions[i]; 4846 char *action_name, *action_name_test; 4847 4848 if (data->handler != data_test->handler) 4849 return false; 4850 if (data->action != data_test->action) 4851 return false; 4852 4853 if (data->n_params != data_test->n_params) 4854 return false; 4855 4856 for (j = 0; j < data->n_params; j++) { 4857 if (strcmp(data->params[j], data_test->params[j]) != 0) 4858 return false; 4859 } 4860 4861 if (data->use_trace_keyword) 4862 action_name = data->synth_event_name; 4863 else 4864 action_name = data->action_name; 4865 4866 if (data_test->use_trace_keyword) 4867 action_name_test = data_test->synth_event_name; 4868 else 4869 action_name_test = data_test->action_name; 4870 4871 if (strcmp(action_name, action_name_test) != 0) 4872 return false; 4873 4874 if (data->handler == HANDLER_ONMATCH) { 4875 if (strcmp(data->match_data.event_system, 4876 data_test->match_data.event_system) != 0) 4877 return false; 4878 if (strcmp(data->match_data.event, 4879 data_test->match_data.event) != 0) 4880 return false; 4881 } else if (data->handler == HANDLER_ONMAX || 4882 data->handler == HANDLER_ONCHANGE) { 4883 if (strcmp(data->track_data.var_str, 4884 data_test->track_data.var_str) != 0) 4885 return false; 4886 } 4887 } 4888 4889 return true; 4890 } 4891 4892 4893 static void print_actions_spec(struct seq_file *m, 4894 struct hist_trigger_data *hist_data) 4895 { 4896 unsigned int i; 4897 4898 for (i = 0; i < hist_data->n_actions; i++) { 4899 struct action_data *data = hist_data->actions[i]; 4900 4901 if (data->handler == HANDLER_ONMATCH) 4902 print_onmatch_spec(m, hist_data, data); 4903 else if (data->handler == HANDLER_ONMAX || 4904 data->handler == HANDLER_ONCHANGE) 4905 print_track_data_spec(m, hist_data, data); 4906 } 4907 } 4908 4909 static void destroy_field_var_hists(struct hist_trigger_data *hist_data) 4910 { 4911 unsigned int i; 4912 4913 for (i = 0; i < hist_data->n_field_var_hists; i++) { 4914 kfree(hist_data->field_var_hists[i]->cmd); 4915 kfree(hist_data->field_var_hists[i]); 4916 } 4917 } 4918 4919 static void destroy_hist_data(struct hist_trigger_data *hist_data) 4920 { 4921 if (!hist_data) 4922 return; 4923 4924 destroy_hist_trigger_attrs(hist_data->attrs); 4925 destroy_hist_fields(hist_data); 4926 tracing_map_destroy(hist_data->map); 4927 4928 destroy_actions(hist_data); 4929 destroy_field_vars(hist_data); 4930 destroy_field_var_hists(hist_data); 4931 4932 kfree(hist_data); 4933 } 4934 4935 static int create_tracing_map_fields(struct hist_trigger_data *hist_data) 4936 { 4937 struct tracing_map *map = hist_data->map; 4938 struct ftrace_event_field *field; 4939 struct hist_field *hist_field; 4940 int i, idx = 0; 4941 4942 for_each_hist_field(i, hist_data) { 4943 hist_field = hist_data->fields[i]; 4944 if (hist_field->flags & HIST_FIELD_FL_KEY) { 4945 tracing_map_cmp_fn_t cmp_fn; 4946 4947 field = hist_field->field; 4948 4949 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE) 4950 cmp_fn = tracing_map_cmp_none; 4951 else if (!field || hist_field->flags & HIST_FIELD_FL_CPU) 4952 cmp_fn = tracing_map_cmp_num(hist_field->size, 4953 hist_field->is_signed); 4954 else if (is_string_field(field)) 4955 cmp_fn = tracing_map_cmp_string; 4956 else 4957 cmp_fn = tracing_map_cmp_num(field->size, 4958 field->is_signed); 4959 idx = tracing_map_add_key_field(map, 4960 hist_field->offset, 4961 cmp_fn); 4962 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR)) 4963 idx = tracing_map_add_sum_field(map); 4964 4965 if (idx < 0) 4966 return idx; 4967 4968 if (hist_field->flags & HIST_FIELD_FL_VAR) { 4969 idx = tracing_map_add_var(map); 4970 if (idx < 0) 4971 return idx; 4972 hist_field->var.idx = idx; 4973 hist_field->var.hist_data = hist_data; 4974 } 4975 } 4976 4977 return 0; 4978 } 4979 4980 static struct hist_trigger_data * 4981 create_hist_data(unsigned int map_bits, 4982 struct hist_trigger_attrs *attrs, 4983 struct trace_event_file *file, 4984 bool remove) 4985 { 4986 const struct tracing_map_ops *map_ops = NULL; 4987 struct hist_trigger_data *hist_data; 4988 int ret = 0; 4989 4990 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL); 4991 if (!hist_data) 4992 return ERR_PTR(-ENOMEM); 4993 4994 hist_data->attrs = attrs; 4995 hist_data->remove = remove; 4996 hist_data->event_file = file; 4997 4998 ret = parse_actions(hist_data); 4999 if (ret) 5000 goto free; 5001 5002 ret = create_hist_fields(hist_data, file); 5003 if (ret) 5004 goto free; 5005 5006 ret = create_sort_keys(hist_data); 5007 if (ret) 5008 goto free; 5009 5010 map_ops = &hist_trigger_elt_data_ops; 5011 5012 hist_data->map = tracing_map_create(map_bits, hist_data->key_size, 5013 map_ops, hist_data); 5014 if (IS_ERR(hist_data->map)) { 5015 ret = PTR_ERR(hist_data->map); 5016 hist_data->map = NULL; 5017 goto free; 5018 } 5019 5020 ret = create_tracing_map_fields(hist_data); 5021 if (ret) 5022 goto free; 5023 out: 5024 return hist_data; 5025 free: 5026 hist_data->attrs = NULL; 5027 5028 destroy_hist_data(hist_data); 5029 5030 hist_data = ERR_PTR(ret); 5031 5032 goto out; 5033 } 5034 5035 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data, 5036 struct tracing_map_elt *elt, 5037 struct trace_buffer *buffer, void *rec, 5038 struct ring_buffer_event *rbe, 5039 u64 *var_ref_vals) 5040 { 5041 struct hist_elt_data *elt_data; 5042 struct hist_field *hist_field; 5043 unsigned int i, var_idx; 5044 u64 hist_val; 5045 5046 elt_data = elt->private_data; 5047 elt_data->var_ref_vals = var_ref_vals; 5048 5049 for_each_hist_val_field(i, hist_data) { 5050 hist_field = hist_data->fields[i]; 5051 hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec); 5052 if (hist_field->flags & HIST_FIELD_FL_VAR) { 5053 var_idx = hist_field->var.idx; 5054 5055 if (hist_field->flags & HIST_FIELD_FL_STRING) { 5056 unsigned int str_start, var_str_idx, idx; 5057 char *str, *val_str; 5058 unsigned int size; 5059 5060 str_start = hist_data->n_field_var_str + 5061 hist_data->n_save_var_str; 5062 var_str_idx = hist_field->var_str_idx; 5063 idx = str_start + var_str_idx; 5064 5065 str = elt_data->field_var_str[idx]; 5066 val_str = (char *)(uintptr_t)hist_val; 5067 5068 size = min(hist_field->size, STR_VAR_LEN_MAX); 5069 strscpy(str, val_str, size); 5070 5071 hist_val = (u64)(uintptr_t)str; 5072 } 5073 tracing_map_set_var(elt, var_idx, hist_val); 5074 continue; 5075 } 5076 tracing_map_update_sum(elt, i, hist_val); 5077 } 5078 5079 for_each_hist_key_field(i, hist_data) { 5080 hist_field = hist_data->fields[i]; 5081 if (hist_field->flags & HIST_FIELD_FL_VAR) { 5082 hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec); 5083 var_idx = hist_field->var.idx; 5084 tracing_map_set_var(elt, var_idx, hist_val); 5085 } 5086 } 5087 5088 update_field_vars(hist_data, elt, buffer, rbe, rec); 5089 } 5090 5091 static inline void add_to_key(char *compound_key, void *key, 5092 struct hist_field *key_field, void *rec) 5093 { 5094 size_t size = key_field->size; 5095 5096 if (key_field->flags & HIST_FIELD_FL_STRING) { 5097 struct ftrace_event_field *field; 5098 5099 field = key_field->field; 5100 if (field->filter_type == FILTER_DYN_STRING || 5101 field->filter_type == FILTER_RDYN_STRING) 5102 size = *(u32 *)(rec + field->offset) >> 16; 5103 else if (field->filter_type == FILTER_STATIC_STRING) 5104 size = field->size; 5105 5106 /* ensure NULL-termination */ 5107 if (size > key_field->size - 1) 5108 size = key_field->size - 1; 5109 5110 strncpy(compound_key + key_field->offset, (char *)key, size); 5111 } else 5112 memcpy(compound_key + key_field->offset, key, size); 5113 } 5114 5115 static void 5116 hist_trigger_actions(struct hist_trigger_data *hist_data, 5117 struct tracing_map_elt *elt, 5118 struct trace_buffer *buffer, void *rec, 5119 struct ring_buffer_event *rbe, void *key, 5120 u64 *var_ref_vals) 5121 { 5122 struct action_data *data; 5123 unsigned int i; 5124 5125 for (i = 0; i < hist_data->n_actions; i++) { 5126 data = hist_data->actions[i]; 5127 data->fn(hist_data, elt, buffer, rec, rbe, key, data, var_ref_vals); 5128 } 5129 } 5130 5131 static void event_hist_trigger(struct event_trigger_data *data, 5132 struct trace_buffer *buffer, void *rec, 5133 struct ring_buffer_event *rbe) 5134 { 5135 struct hist_trigger_data *hist_data = data->private_data; 5136 bool use_compound_key = (hist_data->n_keys > 1); 5137 unsigned long entries[HIST_STACKTRACE_DEPTH]; 5138 u64 var_ref_vals[TRACING_MAP_VARS_MAX]; 5139 char compound_key[HIST_KEY_SIZE_MAX]; 5140 struct tracing_map_elt *elt = NULL; 5141 struct hist_field *key_field; 5142 u64 field_contents; 5143 void *key = NULL; 5144 unsigned int i; 5145 5146 memset(compound_key, 0, hist_data->key_size); 5147 5148 for_each_hist_key_field(i, hist_data) { 5149 key_field = hist_data->fields[i]; 5150 5151 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { 5152 memset(entries, 0, HIST_STACKTRACE_SIZE); 5153 stack_trace_save(entries, HIST_STACKTRACE_DEPTH, 5154 HIST_STACKTRACE_SKIP); 5155 key = entries; 5156 } else { 5157 field_contents = hist_fn_call(key_field, elt, buffer, rbe, rec); 5158 if (key_field->flags & HIST_FIELD_FL_STRING) { 5159 key = (void *)(unsigned long)field_contents; 5160 use_compound_key = true; 5161 } else 5162 key = (void *)&field_contents; 5163 } 5164 5165 if (use_compound_key) 5166 add_to_key(compound_key, key, key_field, rec); 5167 } 5168 5169 if (use_compound_key) 5170 key = compound_key; 5171 5172 if (hist_data->n_var_refs && 5173 !resolve_var_refs(hist_data, key, var_ref_vals, false)) 5174 return; 5175 5176 elt = tracing_map_insert(hist_data->map, key); 5177 if (!elt) 5178 return; 5179 5180 hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, var_ref_vals); 5181 5182 if (resolve_var_refs(hist_data, key, var_ref_vals, true)) 5183 hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals); 5184 } 5185 5186 static void hist_trigger_stacktrace_print(struct seq_file *m, 5187 unsigned long *stacktrace_entries, 5188 unsigned int max_entries) 5189 { 5190 unsigned int spaces = 8; 5191 unsigned int i; 5192 5193 for (i = 0; i < max_entries; i++) { 5194 if (!stacktrace_entries[i]) 5195 return; 5196 5197 seq_printf(m, "%*c", 1 + spaces, ' '); 5198 seq_printf(m, "%pS\n", (void*)stacktrace_entries[i]); 5199 } 5200 } 5201 5202 static void hist_trigger_print_key(struct seq_file *m, 5203 struct hist_trigger_data *hist_data, 5204 void *key, 5205 struct tracing_map_elt *elt) 5206 { 5207 struct hist_field *key_field; 5208 bool multiline = false; 5209 const char *field_name; 5210 unsigned int i; 5211 u64 uval; 5212 5213 seq_puts(m, "{ "); 5214 5215 for_each_hist_key_field(i, hist_data) { 5216 key_field = hist_data->fields[i]; 5217 5218 if (i > hist_data->n_vals) 5219 seq_puts(m, ", "); 5220 5221 field_name = hist_field_name(key_field, 0); 5222 5223 if (key_field->flags & HIST_FIELD_FL_HEX) { 5224 uval = *(u64 *)(key + key_field->offset); 5225 seq_printf(m, "%s: %llx", field_name, uval); 5226 } else if (key_field->flags & HIST_FIELD_FL_SYM) { 5227 uval = *(u64 *)(key + key_field->offset); 5228 seq_printf(m, "%s: [%llx] %-45ps", field_name, 5229 uval, (void *)(uintptr_t)uval); 5230 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) { 5231 uval = *(u64 *)(key + key_field->offset); 5232 seq_printf(m, "%s: [%llx] %-55pS", field_name, 5233 uval, (void *)(uintptr_t)uval); 5234 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) { 5235 struct hist_elt_data *elt_data = elt->private_data; 5236 char *comm; 5237 5238 if (WARN_ON_ONCE(!elt_data)) 5239 return; 5240 5241 comm = elt_data->comm; 5242 5243 uval = *(u64 *)(key + key_field->offset); 5244 seq_printf(m, "%s: %-16s[%10llu]", field_name, 5245 comm, uval); 5246 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) { 5247 const char *syscall_name; 5248 5249 uval = *(u64 *)(key + key_field->offset); 5250 syscall_name = get_syscall_name(uval); 5251 if (!syscall_name) 5252 syscall_name = "unknown_syscall"; 5253 5254 seq_printf(m, "%s: %-30s[%3llu]", field_name, 5255 syscall_name, uval); 5256 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { 5257 seq_puts(m, "stacktrace:\n"); 5258 hist_trigger_stacktrace_print(m, 5259 key + key_field->offset, 5260 HIST_STACKTRACE_DEPTH); 5261 multiline = true; 5262 } else if (key_field->flags & HIST_FIELD_FL_LOG2) { 5263 seq_printf(m, "%s: ~ 2^%-2llu", field_name, 5264 *(u64 *)(key + key_field->offset)); 5265 } else if (key_field->flags & HIST_FIELD_FL_BUCKET) { 5266 unsigned long buckets = key_field->buckets; 5267 uval = *(u64 *)(key + key_field->offset); 5268 seq_printf(m, "%s: ~ %llu-%llu", field_name, 5269 uval, uval + buckets -1); 5270 } else if (key_field->flags & HIST_FIELD_FL_STRING) { 5271 seq_printf(m, "%s: %-50s", field_name, 5272 (char *)(key + key_field->offset)); 5273 } else { 5274 uval = *(u64 *)(key + key_field->offset); 5275 seq_printf(m, "%s: %10llu", field_name, uval); 5276 } 5277 } 5278 5279 if (!multiline) 5280 seq_puts(m, " "); 5281 5282 seq_puts(m, "}"); 5283 } 5284 5285 static void hist_trigger_entry_print(struct seq_file *m, 5286 struct hist_trigger_data *hist_data, 5287 void *key, 5288 struct tracing_map_elt *elt) 5289 { 5290 const char *field_name; 5291 unsigned int i; 5292 5293 hist_trigger_print_key(m, hist_data, key, elt); 5294 5295 seq_printf(m, " hitcount: %10llu", 5296 tracing_map_read_sum(elt, HITCOUNT_IDX)); 5297 5298 for (i = 1; i < hist_data->n_vals; i++) { 5299 field_name = hist_field_name(hist_data->fields[i], 0); 5300 5301 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR || 5302 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR) 5303 continue; 5304 5305 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) { 5306 seq_printf(m, " %s: %10llx", field_name, 5307 tracing_map_read_sum(elt, i)); 5308 } else { 5309 seq_printf(m, " %s: %10llu", field_name, 5310 tracing_map_read_sum(elt, i)); 5311 } 5312 } 5313 5314 print_actions(m, hist_data, elt); 5315 5316 seq_puts(m, "\n"); 5317 } 5318 5319 static int print_entries(struct seq_file *m, 5320 struct hist_trigger_data *hist_data) 5321 { 5322 struct tracing_map_sort_entry **sort_entries = NULL; 5323 struct tracing_map *map = hist_data->map; 5324 int i, n_entries; 5325 5326 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys, 5327 hist_data->n_sort_keys, 5328 &sort_entries); 5329 if (n_entries < 0) 5330 return n_entries; 5331 5332 for (i = 0; i < n_entries; i++) 5333 hist_trigger_entry_print(m, hist_data, 5334 sort_entries[i]->key, 5335 sort_entries[i]->elt); 5336 5337 tracing_map_destroy_sort_entries(sort_entries, n_entries); 5338 5339 return n_entries; 5340 } 5341 5342 static void hist_trigger_show(struct seq_file *m, 5343 struct event_trigger_data *data, int n) 5344 { 5345 struct hist_trigger_data *hist_data; 5346 int n_entries; 5347 5348 if (n > 0) 5349 seq_puts(m, "\n\n"); 5350 5351 seq_puts(m, "# event histogram\n#\n# trigger info: "); 5352 data->ops->print(m, data); 5353 seq_puts(m, "#\n\n"); 5354 5355 hist_data = data->private_data; 5356 n_entries = print_entries(m, hist_data); 5357 if (n_entries < 0) 5358 n_entries = 0; 5359 5360 track_data_snapshot_print(m, hist_data); 5361 5362 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n", 5363 (u64)atomic64_read(&hist_data->map->hits), 5364 n_entries, (u64)atomic64_read(&hist_data->map->drops)); 5365 } 5366 5367 static int hist_show(struct seq_file *m, void *v) 5368 { 5369 struct event_trigger_data *data; 5370 struct trace_event_file *event_file; 5371 int n = 0, ret = 0; 5372 5373 mutex_lock(&event_mutex); 5374 5375 event_file = event_file_data(m->private); 5376 if (unlikely(!event_file)) { 5377 ret = -ENODEV; 5378 goto out_unlock; 5379 } 5380 5381 list_for_each_entry(data, &event_file->triggers, list) { 5382 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) 5383 hist_trigger_show(m, data, n++); 5384 } 5385 5386 out_unlock: 5387 mutex_unlock(&event_mutex); 5388 5389 return ret; 5390 } 5391 5392 static int event_hist_open(struct inode *inode, struct file *file) 5393 { 5394 int ret; 5395 5396 ret = security_locked_down(LOCKDOWN_TRACEFS); 5397 if (ret) 5398 return ret; 5399 5400 return single_open(file, hist_show, file); 5401 } 5402 5403 const struct file_operations event_hist_fops = { 5404 .open = event_hist_open, 5405 .read = seq_read, 5406 .llseek = seq_lseek, 5407 .release = single_release, 5408 }; 5409 5410 #ifdef CONFIG_HIST_TRIGGERS_DEBUG 5411 static void hist_field_debug_show_flags(struct seq_file *m, 5412 unsigned long flags) 5413 { 5414 seq_puts(m, " flags:\n"); 5415 5416 if (flags & HIST_FIELD_FL_KEY) 5417 seq_puts(m, " HIST_FIELD_FL_KEY\n"); 5418 else if (flags & HIST_FIELD_FL_HITCOUNT) 5419 seq_puts(m, " VAL: HIST_FIELD_FL_HITCOUNT\n"); 5420 else if (flags & HIST_FIELD_FL_VAR) 5421 seq_puts(m, " HIST_FIELD_FL_VAR\n"); 5422 else if (flags & HIST_FIELD_FL_VAR_REF) 5423 seq_puts(m, " HIST_FIELD_FL_VAR_REF\n"); 5424 else 5425 seq_puts(m, " VAL: normal u64 value\n"); 5426 5427 if (flags & HIST_FIELD_FL_ALIAS) 5428 seq_puts(m, " HIST_FIELD_FL_ALIAS\n"); 5429 else if (flags & HIST_FIELD_FL_CONST) 5430 seq_puts(m, " HIST_FIELD_FL_CONST\n"); 5431 } 5432 5433 static int hist_field_debug_show(struct seq_file *m, 5434 struct hist_field *field, unsigned long flags) 5435 { 5436 if ((field->flags & flags) != flags) { 5437 seq_printf(m, "ERROR: bad flags - %lx\n", flags); 5438 return -EINVAL; 5439 } 5440 5441 hist_field_debug_show_flags(m, field->flags); 5442 if (field->field) 5443 seq_printf(m, " ftrace_event_field name: %s\n", 5444 field->field->name); 5445 5446 if (field->flags & HIST_FIELD_FL_VAR) { 5447 seq_printf(m, " var.name: %s\n", field->var.name); 5448 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", 5449 field->var.idx); 5450 } 5451 5452 if (field->flags & HIST_FIELD_FL_CONST) 5453 seq_printf(m, " constant: %llu\n", field->constant); 5454 5455 if (field->flags & HIST_FIELD_FL_ALIAS) 5456 seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n", 5457 field->var_ref_idx); 5458 5459 if (field->flags & HIST_FIELD_FL_VAR_REF) { 5460 seq_printf(m, " name: %s\n", field->name); 5461 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", 5462 field->var.idx); 5463 seq_printf(m, " var.hist_data: %p\n", field->var.hist_data); 5464 seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n", 5465 field->var_ref_idx); 5466 if (field->system) 5467 seq_printf(m, " system: %s\n", field->system); 5468 if (field->event_name) 5469 seq_printf(m, " event_name: %s\n", field->event_name); 5470 } 5471 5472 seq_printf(m, " type: %s\n", field->type); 5473 seq_printf(m, " size: %u\n", field->size); 5474 seq_printf(m, " is_signed: %u\n", field->is_signed); 5475 5476 return 0; 5477 } 5478 5479 static int field_var_debug_show(struct seq_file *m, 5480 struct field_var *field_var, unsigned int i, 5481 bool save_vars) 5482 { 5483 const char *vars_name = save_vars ? "save_vars" : "field_vars"; 5484 struct hist_field *field; 5485 int ret = 0; 5486 5487 seq_printf(m, "\n hist_data->%s[%d]:\n", vars_name, i); 5488 5489 field = field_var->var; 5490 5491 seq_printf(m, "\n %s[%d].var:\n", vars_name, i); 5492 5493 hist_field_debug_show_flags(m, field->flags); 5494 seq_printf(m, " var.name: %s\n", field->var.name); 5495 seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n", 5496 field->var.idx); 5497 5498 field = field_var->val; 5499 5500 seq_printf(m, "\n %s[%d].val:\n", vars_name, i); 5501 if (field->field) 5502 seq_printf(m, " ftrace_event_field name: %s\n", 5503 field->field->name); 5504 else { 5505 ret = -EINVAL; 5506 goto out; 5507 } 5508 5509 seq_printf(m, " type: %s\n", field->type); 5510 seq_printf(m, " size: %u\n", field->size); 5511 seq_printf(m, " is_signed: %u\n", field->is_signed); 5512 out: 5513 return ret; 5514 } 5515 5516 static int hist_action_debug_show(struct seq_file *m, 5517 struct action_data *data, int i) 5518 { 5519 int ret = 0; 5520 5521 if (data->handler == HANDLER_ONMAX || 5522 data->handler == HANDLER_ONCHANGE) { 5523 seq_printf(m, "\n hist_data->actions[%d].track_data.var_ref:\n", i); 5524 ret = hist_field_debug_show(m, data->track_data.var_ref, 5525 HIST_FIELD_FL_VAR_REF); 5526 if (ret) 5527 goto out; 5528 5529 seq_printf(m, "\n hist_data->actions[%d].track_data.track_var:\n", i); 5530 ret = hist_field_debug_show(m, data->track_data.track_var, 5531 HIST_FIELD_FL_VAR); 5532 if (ret) 5533 goto out; 5534 } 5535 5536 if (data->handler == HANDLER_ONMATCH) { 5537 seq_printf(m, "\n hist_data->actions[%d].match_data.event_system: %s\n", 5538 i, data->match_data.event_system); 5539 seq_printf(m, " hist_data->actions[%d].match_data.event: %s\n", 5540 i, data->match_data.event); 5541 } 5542 out: 5543 return ret; 5544 } 5545 5546 static int hist_actions_debug_show(struct seq_file *m, 5547 struct hist_trigger_data *hist_data) 5548 { 5549 int i, ret = 0; 5550 5551 if (hist_data->n_actions) 5552 seq_puts(m, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n"); 5553 5554 for (i = 0; i < hist_data->n_actions; i++) { 5555 struct action_data *action = hist_data->actions[i]; 5556 5557 ret = hist_action_debug_show(m, action, i); 5558 if (ret) 5559 goto out; 5560 } 5561 5562 if (hist_data->n_save_vars) 5563 seq_puts(m, "\n save action variables (save() params):\n"); 5564 5565 for (i = 0; i < hist_data->n_save_vars; i++) { 5566 ret = field_var_debug_show(m, hist_data->save_vars[i], i, true); 5567 if (ret) 5568 goto out; 5569 } 5570 out: 5571 return ret; 5572 } 5573 5574 static void hist_trigger_debug_show(struct seq_file *m, 5575 struct event_trigger_data *data, int n) 5576 { 5577 struct hist_trigger_data *hist_data; 5578 int i, ret; 5579 5580 if (n > 0) 5581 seq_puts(m, "\n\n"); 5582 5583 seq_puts(m, "# event histogram\n#\n# trigger info: "); 5584 data->ops->print(m, data); 5585 seq_puts(m, "#\n\n"); 5586 5587 hist_data = data->private_data; 5588 5589 seq_printf(m, "hist_data: %p\n\n", hist_data); 5590 seq_printf(m, " n_vals: %u\n", hist_data->n_vals); 5591 seq_printf(m, " n_keys: %u\n", hist_data->n_keys); 5592 seq_printf(m, " n_fields: %u\n", hist_data->n_fields); 5593 5594 seq_puts(m, "\n val fields:\n\n"); 5595 5596 seq_puts(m, " hist_data->fields[0]:\n"); 5597 ret = hist_field_debug_show(m, hist_data->fields[0], 5598 HIST_FIELD_FL_HITCOUNT); 5599 if (ret) 5600 return; 5601 5602 for (i = 1; i < hist_data->n_vals; i++) { 5603 seq_printf(m, "\n hist_data->fields[%d]:\n", i); 5604 ret = hist_field_debug_show(m, hist_data->fields[i], 0); 5605 if (ret) 5606 return; 5607 } 5608 5609 seq_puts(m, "\n key fields:\n"); 5610 5611 for (i = hist_data->n_vals; i < hist_data->n_fields; i++) { 5612 seq_printf(m, "\n hist_data->fields[%d]:\n", i); 5613 ret = hist_field_debug_show(m, hist_data->fields[i], 5614 HIST_FIELD_FL_KEY); 5615 if (ret) 5616 return; 5617 } 5618 5619 if (hist_data->n_var_refs) 5620 seq_puts(m, "\n variable reference fields:\n"); 5621 5622 for (i = 0; i < hist_data->n_var_refs; i++) { 5623 seq_printf(m, "\n hist_data->var_refs[%d]:\n", i); 5624 ret = hist_field_debug_show(m, hist_data->var_refs[i], 5625 HIST_FIELD_FL_VAR_REF); 5626 if (ret) 5627 return; 5628 } 5629 5630 if (hist_data->n_field_vars) 5631 seq_puts(m, "\n field variables:\n"); 5632 5633 for (i = 0; i < hist_data->n_field_vars; i++) { 5634 ret = field_var_debug_show(m, hist_data->field_vars[i], i, false); 5635 if (ret) 5636 return; 5637 } 5638 5639 ret = hist_actions_debug_show(m, hist_data); 5640 if (ret) 5641 return; 5642 } 5643 5644 static int hist_debug_show(struct seq_file *m, void *v) 5645 { 5646 struct event_trigger_data *data; 5647 struct trace_event_file *event_file; 5648 int n = 0, ret = 0; 5649 5650 mutex_lock(&event_mutex); 5651 5652 event_file = event_file_data(m->private); 5653 if (unlikely(!event_file)) { 5654 ret = -ENODEV; 5655 goto out_unlock; 5656 } 5657 5658 list_for_each_entry(data, &event_file->triggers, list) { 5659 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) 5660 hist_trigger_debug_show(m, data, n++); 5661 } 5662 5663 out_unlock: 5664 mutex_unlock(&event_mutex); 5665 5666 return ret; 5667 } 5668 5669 static int event_hist_debug_open(struct inode *inode, struct file *file) 5670 { 5671 int ret; 5672 5673 ret = security_locked_down(LOCKDOWN_TRACEFS); 5674 if (ret) 5675 return ret; 5676 5677 return single_open(file, hist_debug_show, file); 5678 } 5679 5680 const struct file_operations event_hist_debug_fops = { 5681 .open = event_hist_debug_open, 5682 .read = seq_read, 5683 .llseek = seq_lseek, 5684 .release = single_release, 5685 }; 5686 #endif 5687 5688 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) 5689 { 5690 const char *field_name = hist_field_name(hist_field, 0); 5691 5692 if (hist_field->var.name) 5693 seq_printf(m, "%s=", hist_field->var.name); 5694 5695 if (hist_field->flags & HIST_FIELD_FL_CPU) 5696 seq_puts(m, "common_cpu"); 5697 else if (hist_field->flags & HIST_FIELD_FL_CONST) 5698 seq_printf(m, "%llu", hist_field->constant); 5699 else if (field_name) { 5700 if (hist_field->flags & HIST_FIELD_FL_VAR_REF || 5701 hist_field->flags & HIST_FIELD_FL_ALIAS) 5702 seq_putc(m, '$'); 5703 seq_printf(m, "%s", field_name); 5704 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP) 5705 seq_puts(m, "common_timestamp"); 5706 5707 if (hist_field->flags) { 5708 if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) && 5709 !(hist_field->flags & HIST_FIELD_FL_EXPR)) { 5710 const char *flags = get_hist_field_flags(hist_field); 5711 5712 if (flags) 5713 seq_printf(m, ".%s", flags); 5714 } 5715 } 5716 if (hist_field->buckets) 5717 seq_printf(m, "=%ld", hist_field->buckets); 5718 } 5719 5720 static int event_hist_trigger_print(struct seq_file *m, 5721 struct event_trigger_data *data) 5722 { 5723 struct hist_trigger_data *hist_data = data->private_data; 5724 struct hist_field *field; 5725 bool have_var = false; 5726 unsigned int i; 5727 5728 seq_puts(m, HIST_PREFIX); 5729 5730 if (data->name) 5731 seq_printf(m, "%s:", data->name); 5732 5733 seq_puts(m, "keys="); 5734 5735 for_each_hist_key_field(i, hist_data) { 5736 field = hist_data->fields[i]; 5737 5738 if (i > hist_data->n_vals) 5739 seq_puts(m, ","); 5740 5741 if (field->flags & HIST_FIELD_FL_STACKTRACE) 5742 seq_puts(m, "stacktrace"); 5743 else 5744 hist_field_print(m, field); 5745 } 5746 5747 seq_puts(m, ":vals="); 5748 5749 for_each_hist_val_field(i, hist_data) { 5750 field = hist_data->fields[i]; 5751 if (field->flags & HIST_FIELD_FL_VAR) { 5752 have_var = true; 5753 continue; 5754 } 5755 5756 if (i == HITCOUNT_IDX) 5757 seq_puts(m, "hitcount"); 5758 else { 5759 seq_puts(m, ","); 5760 hist_field_print(m, field); 5761 } 5762 } 5763 5764 if (have_var) { 5765 unsigned int n = 0; 5766 5767 seq_puts(m, ":"); 5768 5769 for_each_hist_val_field(i, hist_data) { 5770 field = hist_data->fields[i]; 5771 5772 if (field->flags & HIST_FIELD_FL_VAR) { 5773 if (n++) 5774 seq_puts(m, ","); 5775 hist_field_print(m, field); 5776 } 5777 } 5778 } 5779 5780 seq_puts(m, ":sort="); 5781 5782 for (i = 0; i < hist_data->n_sort_keys; i++) { 5783 struct tracing_map_sort_key *sort_key; 5784 unsigned int idx, first_key_idx; 5785 5786 /* skip VAR vals */ 5787 first_key_idx = hist_data->n_vals - hist_data->n_vars; 5788 5789 sort_key = &hist_data->sort_keys[i]; 5790 idx = sort_key->field_idx; 5791 5792 if (WARN_ON(idx >= HIST_FIELDS_MAX)) 5793 return -EINVAL; 5794 5795 if (i > 0) 5796 seq_puts(m, ","); 5797 5798 if (idx == HITCOUNT_IDX) 5799 seq_puts(m, "hitcount"); 5800 else { 5801 if (idx >= first_key_idx) 5802 idx += hist_data->n_vars; 5803 hist_field_print(m, hist_data->fields[idx]); 5804 } 5805 5806 if (sort_key->descending) 5807 seq_puts(m, ".descending"); 5808 } 5809 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits)); 5810 if (hist_data->enable_timestamps) 5811 seq_printf(m, ":clock=%s", hist_data->attrs->clock); 5812 5813 print_actions_spec(m, hist_data); 5814 5815 if (data->filter_str) 5816 seq_printf(m, " if %s", data->filter_str); 5817 5818 if (data->paused) 5819 seq_puts(m, " [paused]"); 5820 else 5821 seq_puts(m, " [active]"); 5822 5823 seq_putc(m, '\n'); 5824 5825 return 0; 5826 } 5827 5828 static int event_hist_trigger_init(struct event_trigger_data *data) 5829 { 5830 struct hist_trigger_data *hist_data = data->private_data; 5831 5832 if (!data->ref && hist_data->attrs->name) 5833 save_named_trigger(hist_data->attrs->name, data); 5834 5835 data->ref++; 5836 5837 return 0; 5838 } 5839 5840 static void unregister_field_var_hists(struct hist_trigger_data *hist_data) 5841 { 5842 struct trace_event_file *file; 5843 unsigned int i; 5844 char *cmd; 5845 int ret; 5846 5847 for (i = 0; i < hist_data->n_field_var_hists; i++) { 5848 file = hist_data->field_var_hists[i]->hist_data->event_file; 5849 cmd = hist_data->field_var_hists[i]->cmd; 5850 ret = event_hist_trigger_parse(&trigger_hist_cmd, file, 5851 "!hist", "hist", cmd); 5852 WARN_ON_ONCE(ret < 0); 5853 } 5854 } 5855 5856 static void event_hist_trigger_free(struct event_trigger_data *data) 5857 { 5858 struct hist_trigger_data *hist_data = data->private_data; 5859 5860 if (WARN_ON_ONCE(data->ref <= 0)) 5861 return; 5862 5863 data->ref--; 5864 if (!data->ref) { 5865 if (data->name) 5866 del_named_trigger(data); 5867 5868 trigger_data_free(data); 5869 5870 remove_hist_vars(hist_data); 5871 5872 unregister_field_var_hists(hist_data); 5873 5874 destroy_hist_data(hist_data); 5875 } 5876 } 5877 5878 static struct event_trigger_ops event_hist_trigger_ops = { 5879 .trigger = event_hist_trigger, 5880 .print = event_hist_trigger_print, 5881 .init = event_hist_trigger_init, 5882 .free = event_hist_trigger_free, 5883 }; 5884 5885 static int event_hist_trigger_named_init(struct event_trigger_data *data) 5886 { 5887 data->ref++; 5888 5889 save_named_trigger(data->named_data->name, data); 5890 5891 event_hist_trigger_init(data->named_data); 5892 5893 return 0; 5894 } 5895 5896 static void event_hist_trigger_named_free(struct event_trigger_data *data) 5897 { 5898 if (WARN_ON_ONCE(data->ref <= 0)) 5899 return; 5900 5901 event_hist_trigger_free(data->named_data); 5902 5903 data->ref--; 5904 if (!data->ref) { 5905 del_named_trigger(data); 5906 trigger_data_free(data); 5907 } 5908 } 5909 5910 static struct event_trigger_ops event_hist_trigger_named_ops = { 5911 .trigger = event_hist_trigger, 5912 .print = event_hist_trigger_print, 5913 .init = event_hist_trigger_named_init, 5914 .free = event_hist_trigger_named_free, 5915 }; 5916 5917 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd, 5918 char *param) 5919 { 5920 return &event_hist_trigger_ops; 5921 } 5922 5923 static void hist_clear(struct event_trigger_data *data) 5924 { 5925 struct hist_trigger_data *hist_data = data->private_data; 5926 5927 if (data->name) 5928 pause_named_trigger(data); 5929 5930 tracepoint_synchronize_unregister(); 5931 5932 tracing_map_clear(hist_data->map); 5933 5934 if (data->name) 5935 unpause_named_trigger(data); 5936 } 5937 5938 static bool compatible_field(struct ftrace_event_field *field, 5939 struct ftrace_event_field *test_field) 5940 { 5941 if (field == test_field) 5942 return true; 5943 if (field == NULL || test_field == NULL) 5944 return false; 5945 if (strcmp(field->name, test_field->name) != 0) 5946 return false; 5947 if (strcmp(field->type, test_field->type) != 0) 5948 return false; 5949 if (field->size != test_field->size) 5950 return false; 5951 if (field->is_signed != test_field->is_signed) 5952 return false; 5953 5954 return true; 5955 } 5956 5957 static bool hist_trigger_match(struct event_trigger_data *data, 5958 struct event_trigger_data *data_test, 5959 struct event_trigger_data *named_data, 5960 bool ignore_filter) 5961 { 5962 struct tracing_map_sort_key *sort_key, *sort_key_test; 5963 struct hist_trigger_data *hist_data, *hist_data_test; 5964 struct hist_field *key_field, *key_field_test; 5965 unsigned int i; 5966 5967 if (named_data && (named_data != data_test) && 5968 (named_data != data_test->named_data)) 5969 return false; 5970 5971 if (!named_data && is_named_trigger(data_test)) 5972 return false; 5973 5974 hist_data = data->private_data; 5975 hist_data_test = data_test->private_data; 5976 5977 if (hist_data->n_vals != hist_data_test->n_vals || 5978 hist_data->n_fields != hist_data_test->n_fields || 5979 hist_data->n_sort_keys != hist_data_test->n_sort_keys) 5980 return false; 5981 5982 if (!ignore_filter) { 5983 if ((data->filter_str && !data_test->filter_str) || 5984 (!data->filter_str && data_test->filter_str)) 5985 return false; 5986 } 5987 5988 for_each_hist_field(i, hist_data) { 5989 key_field = hist_data->fields[i]; 5990 key_field_test = hist_data_test->fields[i]; 5991 5992 if (key_field->flags != key_field_test->flags) 5993 return false; 5994 if (!compatible_field(key_field->field, key_field_test->field)) 5995 return false; 5996 if (key_field->offset != key_field_test->offset) 5997 return false; 5998 if (key_field->size != key_field_test->size) 5999 return false; 6000 if (key_field->is_signed != key_field_test->is_signed) 6001 return false; 6002 if (!!key_field->var.name != !!key_field_test->var.name) 6003 return false; 6004 if (key_field->var.name && 6005 strcmp(key_field->var.name, key_field_test->var.name) != 0) 6006 return false; 6007 } 6008 6009 for (i = 0; i < hist_data->n_sort_keys; i++) { 6010 sort_key = &hist_data->sort_keys[i]; 6011 sort_key_test = &hist_data_test->sort_keys[i]; 6012 6013 if (sort_key->field_idx != sort_key_test->field_idx || 6014 sort_key->descending != sort_key_test->descending) 6015 return false; 6016 } 6017 6018 if (!ignore_filter && data->filter_str && 6019 (strcmp(data->filter_str, data_test->filter_str) != 0)) 6020 return false; 6021 6022 if (!actions_match(hist_data, hist_data_test)) 6023 return false; 6024 6025 return true; 6026 } 6027 6028 static bool existing_hist_update_only(char *glob, 6029 struct event_trigger_data *data, 6030 struct trace_event_file *file) 6031 { 6032 struct hist_trigger_data *hist_data = data->private_data; 6033 struct event_trigger_data *test, *named_data = NULL; 6034 bool updated = false; 6035 6036 if (!hist_data->attrs->pause && !hist_data->attrs->cont && 6037 !hist_data->attrs->clear) 6038 goto out; 6039 6040 if (hist_data->attrs->name) { 6041 named_data = find_named_trigger(hist_data->attrs->name); 6042 if (named_data) { 6043 if (!hist_trigger_match(data, named_data, named_data, 6044 true)) 6045 goto out; 6046 } 6047 } 6048 6049 if (hist_data->attrs->name && !named_data) 6050 goto out; 6051 6052 list_for_each_entry(test, &file->triggers, list) { 6053 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6054 if (!hist_trigger_match(data, test, named_data, false)) 6055 continue; 6056 if (hist_data->attrs->pause) 6057 test->paused = true; 6058 else if (hist_data->attrs->cont) 6059 test->paused = false; 6060 else if (hist_data->attrs->clear) 6061 hist_clear(test); 6062 updated = true; 6063 goto out; 6064 } 6065 } 6066 out: 6067 return updated; 6068 } 6069 6070 static int hist_register_trigger(char *glob, 6071 struct event_trigger_data *data, 6072 struct trace_event_file *file) 6073 { 6074 struct hist_trigger_data *hist_data = data->private_data; 6075 struct event_trigger_data *test, *named_data = NULL; 6076 struct trace_array *tr = file->tr; 6077 int ret = 0; 6078 6079 if (hist_data->attrs->name) { 6080 named_data = find_named_trigger(hist_data->attrs->name); 6081 if (named_data) { 6082 if (!hist_trigger_match(data, named_data, named_data, 6083 true)) { 6084 hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name)); 6085 ret = -EINVAL; 6086 goto out; 6087 } 6088 } 6089 } 6090 6091 if (hist_data->attrs->name && !named_data) 6092 goto new; 6093 6094 lockdep_assert_held(&event_mutex); 6095 6096 list_for_each_entry(test, &file->triggers, list) { 6097 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6098 if (hist_trigger_match(data, test, named_data, false)) { 6099 hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0); 6100 ret = -EEXIST; 6101 goto out; 6102 } 6103 } 6104 } 6105 new: 6106 if (hist_data->attrs->cont || hist_data->attrs->clear) { 6107 hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0); 6108 ret = -ENOENT; 6109 goto out; 6110 } 6111 6112 if (hist_data->attrs->pause) 6113 data->paused = true; 6114 6115 if (named_data) { 6116 data->private_data = named_data->private_data; 6117 set_named_trigger_data(data, named_data); 6118 data->ops = &event_hist_trigger_named_ops; 6119 } 6120 6121 if (data->ops->init) { 6122 ret = data->ops->init(data); 6123 if (ret < 0) 6124 goto out; 6125 } 6126 6127 if (hist_data->enable_timestamps) { 6128 char *clock = hist_data->attrs->clock; 6129 6130 ret = tracing_set_clock(file->tr, hist_data->attrs->clock); 6131 if (ret) { 6132 hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock)); 6133 goto out; 6134 } 6135 6136 tracing_set_filter_buffering(file->tr, true); 6137 } 6138 6139 if (named_data) 6140 destroy_hist_data(hist_data); 6141 out: 6142 return ret; 6143 } 6144 6145 static int hist_trigger_enable(struct event_trigger_data *data, 6146 struct trace_event_file *file) 6147 { 6148 int ret = 0; 6149 6150 list_add_tail_rcu(&data->list, &file->triggers); 6151 6152 update_cond_flag(file); 6153 6154 if (trace_event_trigger_enable_disable(file, 1) < 0) { 6155 list_del_rcu(&data->list); 6156 update_cond_flag(file); 6157 ret--; 6158 } 6159 6160 return ret; 6161 } 6162 6163 static bool have_hist_trigger_match(struct event_trigger_data *data, 6164 struct trace_event_file *file) 6165 { 6166 struct hist_trigger_data *hist_data = data->private_data; 6167 struct event_trigger_data *test, *named_data = NULL; 6168 bool match = false; 6169 6170 lockdep_assert_held(&event_mutex); 6171 6172 if (hist_data->attrs->name) 6173 named_data = find_named_trigger(hist_data->attrs->name); 6174 6175 list_for_each_entry(test, &file->triggers, list) { 6176 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6177 if (hist_trigger_match(data, test, named_data, false)) { 6178 match = true; 6179 break; 6180 } 6181 } 6182 } 6183 6184 return match; 6185 } 6186 6187 static bool hist_trigger_check_refs(struct event_trigger_data *data, 6188 struct trace_event_file *file) 6189 { 6190 struct hist_trigger_data *hist_data = data->private_data; 6191 struct event_trigger_data *test, *named_data = NULL; 6192 6193 lockdep_assert_held(&event_mutex); 6194 6195 if (hist_data->attrs->name) 6196 named_data = find_named_trigger(hist_data->attrs->name); 6197 6198 list_for_each_entry(test, &file->triggers, list) { 6199 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6200 if (!hist_trigger_match(data, test, named_data, false)) 6201 continue; 6202 hist_data = test->private_data; 6203 if (check_var_refs(hist_data)) 6204 return true; 6205 break; 6206 } 6207 } 6208 6209 return false; 6210 } 6211 6212 static void hist_unregister_trigger(char *glob, 6213 struct event_trigger_data *data, 6214 struct trace_event_file *file) 6215 { 6216 struct event_trigger_data *test = NULL, *iter, *named_data = NULL; 6217 struct hist_trigger_data *hist_data = data->private_data; 6218 6219 lockdep_assert_held(&event_mutex); 6220 6221 if (hist_data->attrs->name) 6222 named_data = find_named_trigger(hist_data->attrs->name); 6223 6224 list_for_each_entry(iter, &file->triggers, list) { 6225 if (iter->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6226 if (!hist_trigger_match(data, iter, named_data, false)) 6227 continue; 6228 test = iter; 6229 list_del_rcu(&test->list); 6230 trace_event_trigger_enable_disable(file, 0); 6231 update_cond_flag(file); 6232 break; 6233 } 6234 } 6235 6236 if (test && test->ops->free) 6237 test->ops->free(test); 6238 6239 if (hist_data->enable_timestamps) { 6240 if (!hist_data->remove || test) 6241 tracing_set_filter_buffering(file->tr, false); 6242 } 6243 } 6244 6245 static bool hist_file_check_refs(struct trace_event_file *file) 6246 { 6247 struct hist_trigger_data *hist_data; 6248 struct event_trigger_data *test; 6249 6250 lockdep_assert_held(&event_mutex); 6251 6252 list_for_each_entry(test, &file->triggers, list) { 6253 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6254 hist_data = test->private_data; 6255 if (check_var_refs(hist_data)) 6256 return true; 6257 } 6258 } 6259 6260 return false; 6261 } 6262 6263 static void hist_unreg_all(struct trace_event_file *file) 6264 { 6265 struct event_trigger_data *test, *n; 6266 struct hist_trigger_data *hist_data; 6267 struct synth_event *se; 6268 const char *se_name; 6269 6270 lockdep_assert_held(&event_mutex); 6271 6272 if (hist_file_check_refs(file)) 6273 return; 6274 6275 list_for_each_entry_safe(test, n, &file->triggers, list) { 6276 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6277 hist_data = test->private_data; 6278 list_del_rcu(&test->list); 6279 trace_event_trigger_enable_disable(file, 0); 6280 6281 se_name = trace_event_name(file->event_call); 6282 se = find_synth_event(se_name); 6283 if (se) 6284 se->ref--; 6285 6286 update_cond_flag(file); 6287 if (hist_data->enable_timestamps) 6288 tracing_set_filter_buffering(file->tr, false); 6289 if (test->ops->free) 6290 test->ops->free(test); 6291 } 6292 } 6293 } 6294 6295 static int event_hist_trigger_parse(struct event_command *cmd_ops, 6296 struct trace_event_file *file, 6297 char *glob, char *cmd, 6298 char *param_and_filter) 6299 { 6300 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT; 6301 struct event_trigger_data *trigger_data; 6302 struct hist_trigger_attrs *attrs; 6303 struct hist_trigger_data *hist_data; 6304 char *param, *filter, *p, *start; 6305 struct synth_event *se; 6306 const char *se_name; 6307 bool remove; 6308 int ret = 0; 6309 6310 lockdep_assert_held(&event_mutex); 6311 6312 if (WARN_ON(!glob)) 6313 return -EINVAL; 6314 6315 if (glob[0]) { 6316 hist_err_clear(); 6317 last_cmd_set(file, param_and_filter); 6318 } 6319 6320 remove = event_trigger_check_remove(glob); 6321 6322 if (event_trigger_empty_param(param_and_filter)) 6323 return -EINVAL; 6324 6325 /* 6326 * separate the trigger from the filter (k:v [if filter]) 6327 * allowing for whitespace in the trigger 6328 */ 6329 p = param = param_and_filter; 6330 do { 6331 p = strstr(p, "if"); 6332 if (!p) 6333 break; 6334 if (p == param_and_filter) 6335 return -EINVAL; 6336 if (*(p - 1) != ' ' && *(p - 1) != '\t') { 6337 p++; 6338 continue; 6339 } 6340 if (p >= param_and_filter + strlen(param_and_filter) - (sizeof("if") - 1) - 1) 6341 return -EINVAL; 6342 if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') { 6343 p++; 6344 continue; 6345 } 6346 break; 6347 } while (1); 6348 6349 if (!p) 6350 filter = NULL; 6351 else { 6352 *(p - 1) = '\0'; 6353 filter = strstrip(p); 6354 param = strstrip(param); 6355 } 6356 6357 /* 6358 * To simplify arithmetic expression parsing, replace occurrences of 6359 * '.sym-offset' modifier with '.symXoffset' 6360 */ 6361 start = strstr(param, ".sym-offset"); 6362 while (start) { 6363 *(start + 4) = 'X'; 6364 start = strstr(start + 11, ".sym-offset"); 6365 } 6366 6367 attrs = parse_hist_trigger_attrs(file->tr, param); 6368 if (IS_ERR(attrs)) 6369 return PTR_ERR(attrs); 6370 6371 if (attrs->map_bits) 6372 hist_trigger_bits = attrs->map_bits; 6373 6374 hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove); 6375 if (IS_ERR(hist_data)) { 6376 destroy_hist_trigger_attrs(attrs); 6377 return PTR_ERR(hist_data); 6378 } 6379 6380 trigger_data = event_trigger_alloc(cmd_ops, cmd, param, hist_data); 6381 if (!trigger_data) { 6382 ret = -ENOMEM; 6383 goto out_free; 6384 } 6385 6386 ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data); 6387 if (ret < 0) 6388 goto out_free; 6389 6390 if (remove) { 6391 if (!have_hist_trigger_match(trigger_data, file)) 6392 goto out_free; 6393 6394 if (hist_trigger_check_refs(trigger_data, file)) { 6395 ret = -EBUSY; 6396 goto out_free; 6397 } 6398 6399 event_trigger_unregister(cmd_ops, file, glob+1, trigger_data); 6400 se_name = trace_event_name(file->event_call); 6401 se = find_synth_event(se_name); 6402 if (se) 6403 se->ref--; 6404 ret = 0; 6405 goto out_free; 6406 } 6407 6408 if (existing_hist_update_only(glob, trigger_data, file)) 6409 goto out_free; 6410 6411 ret = event_trigger_register(cmd_ops, file, glob, trigger_data); 6412 if (ret < 0) 6413 goto out_free; 6414 6415 if (get_named_trigger_data(trigger_data)) 6416 goto enable; 6417 6418 if (has_hist_vars(hist_data)) 6419 save_hist_vars(hist_data); 6420 6421 ret = create_actions(hist_data); 6422 if (ret) 6423 goto out_unreg; 6424 6425 ret = tracing_map_init(hist_data->map); 6426 if (ret) 6427 goto out_unreg; 6428 enable: 6429 ret = hist_trigger_enable(trigger_data, file); 6430 if (ret) 6431 goto out_unreg; 6432 6433 se_name = trace_event_name(file->event_call); 6434 se = find_synth_event(se_name); 6435 if (se) 6436 se->ref++; 6437 out: 6438 if (ret == 0) 6439 hist_err_clear(); 6440 6441 return ret; 6442 out_unreg: 6443 event_trigger_unregister(cmd_ops, file, glob+1, trigger_data); 6444 out_free: 6445 event_trigger_reset_filter(cmd_ops, trigger_data); 6446 6447 remove_hist_vars(hist_data); 6448 6449 kfree(trigger_data); 6450 6451 destroy_hist_data(hist_data); 6452 goto out; 6453 } 6454 6455 static struct event_command trigger_hist_cmd = { 6456 .name = "hist", 6457 .trigger_type = ETT_EVENT_HIST, 6458 .flags = EVENT_CMD_FL_NEEDS_REC, 6459 .parse = event_hist_trigger_parse, 6460 .reg = hist_register_trigger, 6461 .unreg = hist_unregister_trigger, 6462 .unreg_all = hist_unreg_all, 6463 .get_trigger_ops = event_hist_get_trigger_ops, 6464 .set_filter = set_trigger_filter, 6465 }; 6466 6467 __init int register_trigger_hist_cmd(void) 6468 { 6469 int ret; 6470 6471 ret = register_event_command(&trigger_hist_cmd); 6472 WARN_ON(ret < 0); 6473 6474 return ret; 6475 } 6476 6477 static void 6478 hist_enable_trigger(struct event_trigger_data *data, 6479 struct trace_buffer *buffer, void *rec, 6480 struct ring_buffer_event *event) 6481 { 6482 struct enable_trigger_data *enable_data = data->private_data; 6483 struct event_trigger_data *test; 6484 6485 list_for_each_entry_rcu(test, &enable_data->file->triggers, list, 6486 lockdep_is_held(&event_mutex)) { 6487 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) { 6488 if (enable_data->enable) 6489 test->paused = false; 6490 else 6491 test->paused = true; 6492 } 6493 } 6494 } 6495 6496 static void 6497 hist_enable_count_trigger(struct event_trigger_data *data, 6498 struct trace_buffer *buffer, void *rec, 6499 struct ring_buffer_event *event) 6500 { 6501 if (!data->count) 6502 return; 6503 6504 if (data->count != -1) 6505 (data->count)--; 6506 6507 hist_enable_trigger(data, buffer, rec, event); 6508 } 6509 6510 static struct event_trigger_ops hist_enable_trigger_ops = { 6511 .trigger = hist_enable_trigger, 6512 .print = event_enable_trigger_print, 6513 .init = event_trigger_init, 6514 .free = event_enable_trigger_free, 6515 }; 6516 6517 static struct event_trigger_ops hist_enable_count_trigger_ops = { 6518 .trigger = hist_enable_count_trigger, 6519 .print = event_enable_trigger_print, 6520 .init = event_trigger_init, 6521 .free = event_enable_trigger_free, 6522 }; 6523 6524 static struct event_trigger_ops hist_disable_trigger_ops = { 6525 .trigger = hist_enable_trigger, 6526 .print = event_enable_trigger_print, 6527 .init = event_trigger_init, 6528 .free = event_enable_trigger_free, 6529 }; 6530 6531 static struct event_trigger_ops hist_disable_count_trigger_ops = { 6532 .trigger = hist_enable_count_trigger, 6533 .print = event_enable_trigger_print, 6534 .init = event_trigger_init, 6535 .free = event_enable_trigger_free, 6536 }; 6537 6538 static struct event_trigger_ops * 6539 hist_enable_get_trigger_ops(char *cmd, char *param) 6540 { 6541 struct event_trigger_ops *ops; 6542 bool enable; 6543 6544 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0); 6545 6546 if (enable) 6547 ops = param ? &hist_enable_count_trigger_ops : 6548 &hist_enable_trigger_ops; 6549 else 6550 ops = param ? &hist_disable_count_trigger_ops : 6551 &hist_disable_trigger_ops; 6552 6553 return ops; 6554 } 6555 6556 static void hist_enable_unreg_all(struct trace_event_file *file) 6557 { 6558 struct event_trigger_data *test, *n; 6559 6560 list_for_each_entry_safe(test, n, &file->triggers, list) { 6561 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) { 6562 list_del_rcu(&test->list); 6563 update_cond_flag(file); 6564 trace_event_trigger_enable_disable(file, 0); 6565 if (test->ops->free) 6566 test->ops->free(test); 6567 } 6568 } 6569 } 6570 6571 static struct event_command trigger_hist_enable_cmd = { 6572 .name = ENABLE_HIST_STR, 6573 .trigger_type = ETT_HIST_ENABLE, 6574 .parse = event_enable_trigger_parse, 6575 .reg = event_enable_register_trigger, 6576 .unreg = event_enable_unregister_trigger, 6577 .unreg_all = hist_enable_unreg_all, 6578 .get_trigger_ops = hist_enable_get_trigger_ops, 6579 .set_filter = set_trigger_filter, 6580 }; 6581 6582 static struct event_command trigger_hist_disable_cmd = { 6583 .name = DISABLE_HIST_STR, 6584 .trigger_type = ETT_HIST_ENABLE, 6585 .parse = event_enable_trigger_parse, 6586 .reg = event_enable_register_trigger, 6587 .unreg = event_enable_unregister_trigger, 6588 .unreg_all = hist_enable_unreg_all, 6589 .get_trigger_ops = hist_enable_get_trigger_ops, 6590 .set_filter = set_trigger_filter, 6591 }; 6592 6593 static __init void unregister_trigger_hist_enable_disable_cmds(void) 6594 { 6595 unregister_event_command(&trigger_hist_enable_cmd); 6596 unregister_event_command(&trigger_hist_disable_cmd); 6597 } 6598 6599 __init int register_trigger_hist_enable_disable_cmds(void) 6600 { 6601 int ret; 6602 6603 ret = register_event_command(&trigger_hist_enable_cmd); 6604 if (WARN_ON(ret < 0)) 6605 return ret; 6606 ret = register_event_command(&trigger_hist_disable_cmd); 6607 if (WARN_ON(ret < 0)) 6608 unregister_trigger_hist_enable_disable_cmds(); 6609 6610 return ret; 6611 } 6612