// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_trigger - trace event triggers
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/security.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @buffer: The ring buffer the event is being written to
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event associated with @rec
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command. If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked. If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, buffer, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, buffer, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
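 *
 * A trigger whose command sets EVENT_CMD_FL_POST_TRIGGER (for example
 * traceoff or stacktrace) is deferred by event_triggers_call() and is
 * only invoked here, once the triggering event has been written to the
 * ring buffer.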
98 */ 99 void 100 event_triggers_post_call(struct trace_event_file *file, 101 enum event_trigger_type tt) 102 { 103 struct event_trigger_data *data; 104 105 list_for_each_entry_rcu(data, &file->triggers, list) { 106 if (data->paused) 107 continue; 108 if (data->cmd_ops->trigger_type & tt) 109 data->ops->func(data, NULL, NULL, NULL); 110 } 111 } 112 EXPORT_SYMBOL_GPL(event_triggers_post_call); 113 114 #define SHOW_AVAILABLE_TRIGGERS (void *)(1UL) 115 116 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos) 117 { 118 struct trace_event_file *event_file = event_file_data(m->private); 119 120 if (t == SHOW_AVAILABLE_TRIGGERS) { 121 (*pos)++; 122 return NULL; 123 } 124 return seq_list_next(t, &event_file->triggers, pos); 125 } 126 127 static void *trigger_start(struct seq_file *m, loff_t *pos) 128 { 129 struct trace_event_file *event_file; 130 131 /* ->stop() is called even if ->start() fails */ 132 mutex_lock(&event_mutex); 133 event_file = event_file_data(m->private); 134 if (unlikely(!event_file)) 135 return ERR_PTR(-ENODEV); 136 137 if (list_empty(&event_file->triggers)) 138 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL; 139 140 return seq_list_start(&event_file->triggers, *pos); 141 } 142 143 static void trigger_stop(struct seq_file *m, void *t) 144 { 145 mutex_unlock(&event_mutex); 146 } 147 148 static int trigger_show(struct seq_file *m, void *v) 149 { 150 struct event_trigger_data *data; 151 struct event_command *p; 152 153 if (v == SHOW_AVAILABLE_TRIGGERS) { 154 seq_puts(m, "# Available triggers:\n"); 155 seq_putc(m, '#'); 156 mutex_lock(&trigger_cmd_mutex); 157 list_for_each_entry_reverse(p, &trigger_commands, list) 158 seq_printf(m, " %s", p->name); 159 seq_putc(m, '\n'); 160 mutex_unlock(&trigger_cmd_mutex); 161 return 0; 162 } 163 164 data = list_entry(v, struct event_trigger_data, list); 165 data->ops->print(m, data->ops, data); 166 167 return 0; 168 } 169 170 static const struct seq_operations event_triggers_seq_ops = { 171 .start = trigger_start, 172 .next = trigger_next, 173 .stop = trigger_stop, 174 .show = trigger_show, 175 }; 176 177 static int event_trigger_regex_open(struct inode *inode, struct file *file) 178 { 179 int ret; 180 181 ret = security_locked_down(LOCKDOWN_TRACEFS); 182 if (ret) 183 return ret; 184 185 mutex_lock(&event_mutex); 186 187 if (unlikely(!event_file_data(file))) { 188 mutex_unlock(&event_mutex); 189 return -ENODEV; 190 } 191 192 if ((file->f_mode & FMODE_WRITE) && 193 (file->f_flags & O_TRUNC)) { 194 struct trace_event_file *event_file; 195 struct event_command *p; 196 197 event_file = event_file_data(file); 198 199 list_for_each_entry(p, &trigger_commands, list) { 200 if (p->unreg_all) 201 p->unreg_all(event_file); 202 } 203 } 204 205 if (file->f_mode & FMODE_READ) { 206 ret = seq_open(file, &event_triggers_seq_ops); 207 if (!ret) { 208 struct seq_file *m = file->private_data; 209 m->private = file; 210 } 211 } 212 213 mutex_unlock(&event_mutex); 214 215 return ret; 216 } 217 218 int trigger_process_regex(struct trace_event_file *file, char *buff) 219 { 220 char *command, *next; 221 struct event_command *p; 222 int ret = -EINVAL; 223 224 next = buff = skip_spaces(buff); 225 command = strsep(&next, ": \t"); 226 if (next) { 227 next = skip_spaces(next); 228 if (!*next) 229 next = NULL; 230 } 231 command = (command[0] != '!') ? 
command : command + 1; 232 233 mutex_lock(&trigger_cmd_mutex); 234 list_for_each_entry(p, &trigger_commands, list) { 235 if (strcmp(p->name, command) == 0) { 236 ret = p->func(p, file, buff, command, next); 237 goto out_unlock; 238 } 239 } 240 out_unlock: 241 mutex_unlock(&trigger_cmd_mutex); 242 243 return ret; 244 } 245 246 static ssize_t event_trigger_regex_write(struct file *file, 247 const char __user *ubuf, 248 size_t cnt, loff_t *ppos) 249 { 250 struct trace_event_file *event_file; 251 ssize_t ret; 252 char *buf; 253 254 if (!cnt) 255 return 0; 256 257 if (cnt >= PAGE_SIZE) 258 return -EINVAL; 259 260 buf = memdup_user_nul(ubuf, cnt); 261 if (IS_ERR(buf)) 262 return PTR_ERR(buf); 263 264 strim(buf); 265 266 mutex_lock(&event_mutex); 267 event_file = event_file_data(file); 268 if (unlikely(!event_file)) { 269 mutex_unlock(&event_mutex); 270 kfree(buf); 271 return -ENODEV; 272 } 273 ret = trigger_process_regex(event_file, buf); 274 mutex_unlock(&event_mutex); 275 276 kfree(buf); 277 if (ret < 0) 278 goto out; 279 280 *ppos += cnt; 281 ret = cnt; 282 out: 283 return ret; 284 } 285 286 static int event_trigger_regex_release(struct inode *inode, struct file *file) 287 { 288 mutex_lock(&event_mutex); 289 290 if (file->f_mode & FMODE_READ) 291 seq_release(inode, file); 292 293 mutex_unlock(&event_mutex); 294 295 return 0; 296 } 297 298 static ssize_t 299 event_trigger_write(struct file *filp, const char __user *ubuf, 300 size_t cnt, loff_t *ppos) 301 { 302 return event_trigger_regex_write(filp, ubuf, cnt, ppos); 303 } 304 305 static int 306 event_trigger_open(struct inode *inode, struct file *filp) 307 { 308 /* Checks for tracefs lockdown */ 309 return event_trigger_regex_open(inode, filp); 310 } 311 312 static int 313 event_trigger_release(struct inode *inode, struct file *file) 314 { 315 return event_trigger_regex_release(inode, file); 316 } 317 318 const struct file_operations event_trigger_fops = { 319 .open = event_trigger_open, 320 .read = seq_read, 321 .write = event_trigger_write, 322 .llseek = tracing_lseek, 323 .release = event_trigger_release, 324 }; 325 326 /* 327 * Currently we only register event commands from __init, so mark this 328 * __init too. 329 */ 330 __init int register_event_command(struct event_command *cmd) 331 { 332 struct event_command *p; 333 int ret = 0; 334 335 mutex_lock(&trigger_cmd_mutex); 336 list_for_each_entry(p, &trigger_commands, list) { 337 if (strcmp(cmd->name, p->name) == 0) { 338 ret = -EBUSY; 339 goto out_unlock; 340 } 341 } 342 list_add(&cmd->list, &trigger_commands); 343 out_unlock: 344 mutex_unlock(&trigger_cmd_mutex); 345 346 return ret; 347 } 348 349 /* 350 * Currently we only unregister event commands from __init, so mark 351 * this __init too. 352 */ 353 __init int unregister_event_command(struct event_command *cmd) 354 { 355 struct event_command *p, *n; 356 int ret = -ENODEV; 357 358 mutex_lock(&trigger_cmd_mutex); 359 list_for_each_entry_safe(p, n, &trigger_commands, list) { 360 if (strcmp(cmd->name, p->name) == 0) { 361 ret = 0; 362 list_del_init(&p->list); 363 goto out_unlock; 364 } 365 } 366 out_unlock: 367 mutex_unlock(&trigger_cmd_mutex); 368 369 return ret; 370 } 371 372 /** 373 * event_trigger_print - Generic event_trigger_ops @print implementation 374 * @name: The name of the event trigger 375 * @m: The seq_file being printed to 376 * @data: Trigger-specific data 377 * @filter_str: filter_str to print, if present 378 * 379 * Common implementation for event triggers to print themselves. 
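 * The output is a single line of the form '<name>:unlimited' or
 * '<name>:count=<n>', optionally followed by ' if <filter>', for
 * example 'traceoff:count=5 if common_pid == 0'.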
380 * 381 * Usually wrapped by a function that simply sets the @name of the 382 * trigger command and then invokes this. 383 * 384 * Return: 0 on success, errno otherwise 385 */ 386 static int 387 event_trigger_print(const char *name, struct seq_file *m, 388 void *data, char *filter_str) 389 { 390 long count = (long)data; 391 392 seq_puts(m, name); 393 394 if (count == -1) 395 seq_puts(m, ":unlimited"); 396 else 397 seq_printf(m, ":count=%ld", count); 398 399 if (filter_str) 400 seq_printf(m, " if %s\n", filter_str); 401 else 402 seq_putc(m, '\n'); 403 404 return 0; 405 } 406 407 /** 408 * event_trigger_init - Generic event_trigger_ops @init implementation 409 * @ops: The trigger ops associated with the trigger 410 * @data: Trigger-specific data 411 * 412 * Common implementation of event trigger initialization. 413 * 414 * Usually used directly as the @init method in event trigger 415 * implementations. 416 * 417 * Return: 0 on success, errno otherwise 418 */ 419 int event_trigger_init(struct event_trigger_ops *ops, 420 struct event_trigger_data *data) 421 { 422 data->ref++; 423 return 0; 424 } 425 426 /** 427 * event_trigger_free - Generic event_trigger_ops @free implementation 428 * @ops: The trigger ops associated with the trigger 429 * @data: Trigger-specific data 430 * 431 * Common implementation of event trigger de-initialization. 432 * 433 * Usually used directly as the @free method in event trigger 434 * implementations. 435 */ 436 static void 437 event_trigger_free(struct event_trigger_ops *ops, 438 struct event_trigger_data *data) 439 { 440 if (WARN_ON_ONCE(data->ref <= 0)) 441 return; 442 443 data->ref--; 444 if (!data->ref) 445 trigger_data_free(data); 446 } 447 448 int trace_event_trigger_enable_disable(struct trace_event_file *file, 449 int trigger_enable) 450 { 451 int ret = 0; 452 453 if (trigger_enable) { 454 if (atomic_inc_return(&file->tm_ref) > 1) 455 return ret; 456 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags); 457 ret = trace_event_enable_disable(file, 1, 1); 458 } else { 459 if (atomic_dec_return(&file->tm_ref) > 0) 460 return ret; 461 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags); 462 ret = trace_event_enable_disable(file, 0, 1); 463 } 464 465 return ret; 466 } 467 468 /** 469 * clear_event_triggers - Clear all triggers associated with a trace array 470 * @tr: The trace array to clear 471 * 472 * For each trigger, the triggering event has its tm_ref decremented 473 * via trace_event_trigger_enable_disable(), and any associated event 474 * (in the case of enable/disable_event triggers) will have its sm_ref 475 * decremented via free()->trace_event_enable_disable(). That 476 * combination effectively reverses the soft-mode/trigger state added 477 * by trigger registration. 478 * 479 * Must be called with event_mutex held. 
480 */ 481 void 482 clear_event_triggers(struct trace_array *tr) 483 { 484 struct trace_event_file *file; 485 486 list_for_each_entry(file, &tr->events, list) { 487 struct event_trigger_data *data, *n; 488 list_for_each_entry_safe(data, n, &file->triggers, list) { 489 trace_event_trigger_enable_disable(file, 0); 490 list_del_rcu(&data->list); 491 if (data->ops->free) 492 data->ops->free(data->ops, data); 493 } 494 } 495 } 496 497 /** 498 * update_cond_flag - Set or reset the TRIGGER_COND bit 499 * @file: The trace_event_file associated with the event 500 * 501 * If an event has triggers and any of those triggers has a filter or 502 * a post_trigger, trigger invocation needs to be deferred until after 503 * the current event has logged its data, and the event should have 504 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be 505 * cleared. 506 */ 507 void update_cond_flag(struct trace_event_file *file) 508 { 509 struct event_trigger_data *data; 510 bool set_cond = false; 511 512 lockdep_assert_held(&event_mutex); 513 514 list_for_each_entry(data, &file->triggers, list) { 515 if (data->filter || event_command_post_trigger(data->cmd_ops) || 516 event_command_needs_rec(data->cmd_ops)) { 517 set_cond = true; 518 break; 519 } 520 } 521 522 if (set_cond) 523 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags); 524 else 525 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags); 526 } 527 528 /** 529 * register_trigger - Generic event_command @reg implementation 530 * @glob: The raw string used to register the trigger 531 * @ops: The trigger ops associated with the trigger 532 * @data: Trigger-specific data to associate with the trigger 533 * @file: The trace_event_file associated with the event 534 * 535 * Common implementation for event trigger registration. 536 * 537 * Usually used directly as the @reg method in event command 538 * implementations. 539 * 540 * Return: 0 on success, errno otherwise 541 */ 542 static int register_trigger(char *glob, struct event_trigger_ops *ops, 543 struct event_trigger_data *data, 544 struct trace_event_file *file) 545 { 546 struct event_trigger_data *test; 547 int ret = 0; 548 549 lockdep_assert_held(&event_mutex); 550 551 list_for_each_entry(test, &file->triggers, list) { 552 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) { 553 ret = -EEXIST; 554 goto out; 555 } 556 } 557 558 if (data->ops->init) { 559 ret = data->ops->init(data->ops, data); 560 if (ret < 0) 561 goto out; 562 } 563 564 list_add_rcu(&data->list, &file->triggers); 565 ret++; 566 567 update_cond_flag(file); 568 if (trace_event_trigger_enable_disable(file, 1) < 0) { 569 list_del_rcu(&data->list); 570 update_cond_flag(file); 571 ret--; 572 } 573 out: 574 return ret; 575 } 576 577 /** 578 * unregister_trigger - Generic event_command @unreg implementation 579 * @glob: The raw string used to register the trigger 580 * @ops: The trigger ops associated with the trigger 581 * @test: Trigger-specific data used to find the trigger to remove 582 * @file: The trace_event_file associated with the event 583 * 584 * Common implementation for event trigger unregistration. 585 * 586 * Usually used directly as the @unreg method in event command 587 * implementations. 
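 *
 * The trigger to remove is located by matching @test's
 * cmd_ops->trigger_type against the triggers already registered on
 * @file; the first match is unregistered and, if it provides a @free
 * method, freed.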
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0])) {
		trigger = strsep(&param, " \t");
		if (param) {
			param = skip_spaces(param);
			if (!*param)
				param = NULL;
		}
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_ops, trigger_data);
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
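	 * (For plain event triggers, cmd_ops->reg() is typically
	 * register_trigger(), which returns 1 on success, 0 if the
	 * trigger couldn't be enabled, or a negative error code.)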
704 */ 705 if (!ret) { 706 cmd_ops->unreg(glob, trigger_ops, trigger_data, file); 707 ret = -ENOENT; 708 } else if (ret > 0) 709 ret = 0; 710 711 /* Down the counter of trigger_data or free it if not used anymore */ 712 event_trigger_free(trigger_ops, trigger_data); 713 out: 714 return ret; 715 716 out_free: 717 if (cmd_ops->set_filter) 718 cmd_ops->set_filter(NULL, trigger_data, NULL); 719 kfree(trigger_data); 720 goto out; 721 } 722 723 /** 724 * set_trigger_filter - Generic event_command @set_filter implementation 725 * @filter_str: The filter string for the trigger, NULL to remove filter 726 * @trigger_data: Trigger-specific data 727 * @file: The trace_event_file associated with the event 728 * 729 * Common implementation for event command filter parsing and filter 730 * instantiation. 731 * 732 * Usually used directly as the @set_filter method in event command 733 * implementations. 734 * 735 * Also used to remove a filter (if filter_str = NULL). 736 * 737 * Return: 0 on success, errno otherwise 738 */ 739 int set_trigger_filter(char *filter_str, 740 struct event_trigger_data *trigger_data, 741 struct trace_event_file *file) 742 { 743 struct event_trigger_data *data = trigger_data; 744 struct event_filter *filter = NULL, *tmp; 745 int ret = -EINVAL; 746 char *s; 747 748 if (!filter_str) /* clear the current filter */ 749 goto assign; 750 751 s = strsep(&filter_str, " \t"); 752 753 if (!strlen(s) || strcmp(s, "if") != 0) 754 goto out; 755 756 if (!filter_str) 757 goto out; 758 759 /* The filter is for the 'trigger' event, not the triggered event */ 760 ret = create_event_filter(file->tr, file->event_call, 761 filter_str, false, &filter); 762 /* 763 * If create_event_filter() fails, filter still needs to be freed. 764 * Which the calling code will do with data->filter. 765 */ 766 assign: 767 tmp = rcu_access_pointer(data->filter); 768 769 rcu_assign_pointer(data->filter, filter); 770 771 if (tmp) { 772 /* Make sure the call is done with the filter */ 773 tracepoint_synchronize_unregister(); 774 free_event_filter(tmp); 775 } 776 777 kfree(data->filter_str); 778 data->filter_str = NULL; 779 780 if (filter_str) { 781 data->filter_str = kstrdup(filter_str, GFP_KERNEL); 782 if (!data->filter_str) { 783 free_event_filter(rcu_access_pointer(data->filter)); 784 data->filter = NULL; 785 ret = -ENOMEM; 786 } 787 } 788 out: 789 return ret; 790 } 791 792 static LIST_HEAD(named_triggers); 793 794 /** 795 * find_named_trigger - Find the common named trigger associated with @name 796 * @name: The name of the set of named triggers to find the common data for 797 * 798 * Named triggers are sets of triggers that share a common set of 799 * trigger data. The first named trigger registered with a given name 800 * owns the common trigger data that the others subsequently 801 * registered with the same name will reference. This function 802 * returns the common trigger data associated with that first 803 * registered instance. 804 * 805 * Return: the common trigger data for the given named trigger on 806 * success, NULL otherwise. 
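 *
 * Named triggers are currently created by hist triggers registered
 * with a 'name=...' parameter; this file only provides the common
 * bookkeeping for them.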
807 */ 808 struct event_trigger_data *find_named_trigger(const char *name) 809 { 810 struct event_trigger_data *data; 811 812 if (!name) 813 return NULL; 814 815 list_for_each_entry(data, &named_triggers, named_list) { 816 if (data->named_data) 817 continue; 818 if (strcmp(data->name, name) == 0) 819 return data; 820 } 821 822 return NULL; 823 } 824 825 /** 826 * is_named_trigger - determine if a given trigger is a named trigger 827 * @test: The trigger data to test 828 * 829 * Return: true if 'test' is a named trigger, false otherwise. 830 */ 831 bool is_named_trigger(struct event_trigger_data *test) 832 { 833 struct event_trigger_data *data; 834 835 list_for_each_entry(data, &named_triggers, named_list) { 836 if (test == data) 837 return true; 838 } 839 840 return false; 841 } 842 843 /** 844 * save_named_trigger - save the trigger in the named trigger list 845 * @name: The name of the named trigger set 846 * @data: The trigger data to save 847 * 848 * Return: 0 if successful, negative error otherwise. 849 */ 850 int save_named_trigger(const char *name, struct event_trigger_data *data) 851 { 852 data->name = kstrdup(name, GFP_KERNEL); 853 if (!data->name) 854 return -ENOMEM; 855 856 list_add(&data->named_list, &named_triggers); 857 858 return 0; 859 } 860 861 /** 862 * del_named_trigger - delete a trigger from the named trigger list 863 * @data: The trigger data to delete 864 */ 865 void del_named_trigger(struct event_trigger_data *data) 866 { 867 kfree(data->name); 868 data->name = NULL; 869 870 list_del(&data->named_list); 871 } 872 873 static void __pause_named_trigger(struct event_trigger_data *data, bool pause) 874 { 875 struct event_trigger_data *test; 876 877 list_for_each_entry(test, &named_triggers, named_list) { 878 if (strcmp(test->name, data->name) == 0) { 879 if (pause) { 880 test->paused_tmp = test->paused; 881 test->paused = true; 882 } else { 883 test->paused = test->paused_tmp; 884 } 885 } 886 } 887 } 888 889 /** 890 * pause_named_trigger - Pause all named triggers with the same name 891 * @data: The trigger data of a named trigger to pause 892 * 893 * Pauses a named trigger along with all other triggers having the 894 * same name. Because named triggers share a common set of data, 895 * pausing only one is meaningless, so pausing one named trigger needs 896 * to pause all triggers with the same name. 897 */ 898 void pause_named_trigger(struct event_trigger_data *data) 899 { 900 __pause_named_trigger(data, true); 901 } 902 903 /** 904 * unpause_named_trigger - Un-pause all named triggers with the same name 905 * @data: The trigger data of a named trigger to unpause 906 * 907 * Un-pauses a named trigger along with all other triggers having the 908 * same name. Because named triggers share a common set of data, 909 * unpausing only one is meaningless, so unpausing one named trigger 910 * needs to unpause all triggers with the same name. 911 */ 912 void unpause_named_trigger(struct event_trigger_data *data) 913 { 914 __pause_named_trigger(data, false); 915 } 916 917 /** 918 * set_named_trigger_data - Associate common named trigger data 919 * @data: The trigger data to associate 920 * @named_data: The common named trigger to be associated 921 * 922 * Named triggers are sets of triggers that share a common set of 923 * trigger data. The first named trigger registered with a given name 924 * owns the common trigger data that the others subsequently 925 * registered with the same name will reference. 
This function 926 * associates the common trigger data from the first trigger with the 927 * given trigger. 928 */ 929 void set_named_trigger_data(struct event_trigger_data *data, 930 struct event_trigger_data *named_data) 931 { 932 data->named_data = named_data; 933 } 934 935 struct event_trigger_data * 936 get_named_trigger_data(struct event_trigger_data *data) 937 { 938 return data->named_data; 939 } 940 941 static void 942 traceon_trigger(struct event_trigger_data *data, 943 struct trace_buffer *buffer, void *rec, 944 struct ring_buffer_event *event) 945 { 946 if (tracing_is_on()) 947 return; 948 949 tracing_on(); 950 } 951 952 static void 953 traceon_count_trigger(struct event_trigger_data *data, 954 struct trace_buffer *buffer, void *rec, 955 struct ring_buffer_event *event) 956 { 957 if (tracing_is_on()) 958 return; 959 960 if (!data->count) 961 return; 962 963 if (data->count != -1) 964 (data->count)--; 965 966 tracing_on(); 967 } 968 969 static void 970 traceoff_trigger(struct event_trigger_data *data, 971 struct trace_buffer *buffer, void *rec, 972 struct ring_buffer_event *event) 973 { 974 if (!tracing_is_on()) 975 return; 976 977 tracing_off(); 978 } 979 980 static void 981 traceoff_count_trigger(struct event_trigger_data *data, 982 struct trace_buffer *buffer, void *rec, 983 struct ring_buffer_event *event) 984 { 985 if (!tracing_is_on()) 986 return; 987 988 if (!data->count) 989 return; 990 991 if (data->count != -1) 992 (data->count)--; 993 994 tracing_off(); 995 } 996 997 static int 998 traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, 999 struct event_trigger_data *data) 1000 { 1001 return event_trigger_print("traceon", m, (void *)data->count, 1002 data->filter_str); 1003 } 1004 1005 static int 1006 traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, 1007 struct event_trigger_data *data) 1008 { 1009 return event_trigger_print("traceoff", m, (void *)data->count, 1010 data->filter_str); 1011 } 1012 1013 static struct event_trigger_ops traceon_trigger_ops = { 1014 .func = traceon_trigger, 1015 .print = traceon_trigger_print, 1016 .init = event_trigger_init, 1017 .free = event_trigger_free, 1018 }; 1019 1020 static struct event_trigger_ops traceon_count_trigger_ops = { 1021 .func = traceon_count_trigger, 1022 .print = traceon_trigger_print, 1023 .init = event_trigger_init, 1024 .free = event_trigger_free, 1025 }; 1026 1027 static struct event_trigger_ops traceoff_trigger_ops = { 1028 .func = traceoff_trigger, 1029 .print = traceoff_trigger_print, 1030 .init = event_trigger_init, 1031 .free = event_trigger_free, 1032 }; 1033 1034 static struct event_trigger_ops traceoff_count_trigger_ops = { 1035 .func = traceoff_count_trigger, 1036 .print = traceoff_trigger_print, 1037 .init = event_trigger_init, 1038 .free = event_trigger_free, 1039 }; 1040 1041 static struct event_trigger_ops * 1042 onoff_get_trigger_ops(char *cmd, char *param) 1043 { 1044 struct event_trigger_ops *ops; 1045 1046 /* we register both traceon and traceoff to this callback */ 1047 if (strcmp(cmd, "traceon") == 0) 1048 ops = param ? &traceon_count_trigger_ops : 1049 &traceon_trigger_ops; 1050 else 1051 ops = param ? 
&traceoff_count_trigger_ops : 1052 &traceoff_trigger_ops; 1053 1054 return ops; 1055 } 1056 1057 static struct event_command trigger_traceon_cmd = { 1058 .name = "traceon", 1059 .trigger_type = ETT_TRACE_ONOFF, 1060 .func = event_trigger_callback, 1061 .reg = register_trigger, 1062 .unreg = unregister_trigger, 1063 .get_trigger_ops = onoff_get_trigger_ops, 1064 .set_filter = set_trigger_filter, 1065 }; 1066 1067 static struct event_command trigger_traceoff_cmd = { 1068 .name = "traceoff", 1069 .trigger_type = ETT_TRACE_ONOFF, 1070 .flags = EVENT_CMD_FL_POST_TRIGGER, 1071 .func = event_trigger_callback, 1072 .reg = register_trigger, 1073 .unreg = unregister_trigger, 1074 .get_trigger_ops = onoff_get_trigger_ops, 1075 .set_filter = set_trigger_filter, 1076 }; 1077 1078 #ifdef CONFIG_TRACER_SNAPSHOT 1079 static void 1080 snapshot_trigger(struct event_trigger_data *data, 1081 struct trace_buffer *buffer, void *rec, 1082 struct ring_buffer_event *event) 1083 { 1084 struct trace_event_file *file = data->private_data; 1085 1086 if (file) 1087 tracing_snapshot_instance(file->tr); 1088 else 1089 tracing_snapshot(); 1090 } 1091 1092 static void 1093 snapshot_count_trigger(struct event_trigger_data *data, 1094 struct trace_buffer *buffer, void *rec, 1095 struct ring_buffer_event *event) 1096 { 1097 if (!data->count) 1098 return; 1099 1100 if (data->count != -1) 1101 (data->count)--; 1102 1103 snapshot_trigger(data, buffer, rec, event); 1104 } 1105 1106 static int 1107 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops, 1108 struct event_trigger_data *data, 1109 struct trace_event_file *file) 1110 { 1111 if (tracing_alloc_snapshot_instance(file->tr) != 0) 1112 return 0; 1113 1114 return register_trigger(glob, ops, data, file); 1115 } 1116 1117 static int 1118 snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, 1119 struct event_trigger_data *data) 1120 { 1121 return event_trigger_print("snapshot", m, (void *)data->count, 1122 data->filter_str); 1123 } 1124 1125 static struct event_trigger_ops snapshot_trigger_ops = { 1126 .func = snapshot_trigger, 1127 .print = snapshot_trigger_print, 1128 .init = event_trigger_init, 1129 .free = event_trigger_free, 1130 }; 1131 1132 static struct event_trigger_ops snapshot_count_trigger_ops = { 1133 .func = snapshot_count_trigger, 1134 .print = snapshot_trigger_print, 1135 .init = event_trigger_init, 1136 .free = event_trigger_free, 1137 }; 1138 1139 static struct event_trigger_ops * 1140 snapshot_get_trigger_ops(char *cmd, char *param) 1141 { 1142 return param ? 
&snapshot_count_trigger_ops : &snapshot_trigger_ops; 1143 } 1144 1145 static struct event_command trigger_snapshot_cmd = { 1146 .name = "snapshot", 1147 .trigger_type = ETT_SNAPSHOT, 1148 .func = event_trigger_callback, 1149 .reg = register_snapshot_trigger, 1150 .unreg = unregister_trigger, 1151 .get_trigger_ops = snapshot_get_trigger_ops, 1152 .set_filter = set_trigger_filter, 1153 }; 1154 1155 static __init int register_trigger_snapshot_cmd(void) 1156 { 1157 int ret; 1158 1159 ret = register_event_command(&trigger_snapshot_cmd); 1160 WARN_ON(ret < 0); 1161 1162 return ret; 1163 } 1164 #else 1165 static __init int register_trigger_snapshot_cmd(void) { return 0; } 1166 #endif /* CONFIG_TRACER_SNAPSHOT */ 1167 1168 #ifdef CONFIG_STACKTRACE 1169 #ifdef CONFIG_UNWINDER_ORC 1170 /* Skip 2: 1171 * event_triggers_post_call() 1172 * trace_event_raw_event_xxx() 1173 */ 1174 # define STACK_SKIP 2 1175 #else 1176 /* 1177 * Skip 4: 1178 * stacktrace_trigger() 1179 * event_triggers_post_call() 1180 * trace_event_buffer_commit() 1181 * trace_event_raw_event_xxx() 1182 */ 1183 #define STACK_SKIP 4 1184 #endif 1185 1186 static void 1187 stacktrace_trigger(struct event_trigger_data *data, 1188 struct trace_buffer *buffer, void *rec, 1189 struct ring_buffer_event *event) 1190 { 1191 trace_dump_stack(STACK_SKIP); 1192 } 1193 1194 static void 1195 stacktrace_count_trigger(struct event_trigger_data *data, 1196 struct trace_buffer *buffer, void *rec, 1197 struct ring_buffer_event *event) 1198 { 1199 if (!data->count) 1200 return; 1201 1202 if (data->count != -1) 1203 (data->count)--; 1204 1205 stacktrace_trigger(data, buffer, rec, event); 1206 } 1207 1208 static int 1209 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, 1210 struct event_trigger_data *data) 1211 { 1212 return event_trigger_print("stacktrace", m, (void *)data->count, 1213 data->filter_str); 1214 } 1215 1216 static struct event_trigger_ops stacktrace_trigger_ops = { 1217 .func = stacktrace_trigger, 1218 .print = stacktrace_trigger_print, 1219 .init = event_trigger_init, 1220 .free = event_trigger_free, 1221 }; 1222 1223 static struct event_trigger_ops stacktrace_count_trigger_ops = { 1224 .func = stacktrace_count_trigger, 1225 .print = stacktrace_trigger_print, 1226 .init = event_trigger_init, 1227 .free = event_trigger_free, 1228 }; 1229 1230 static struct event_trigger_ops * 1231 stacktrace_get_trigger_ops(char *cmd, char *param) 1232 { 1233 return param ? 
&stacktrace_count_trigger_ops : &stacktrace_trigger_ops; 1234 } 1235 1236 static struct event_command trigger_stacktrace_cmd = { 1237 .name = "stacktrace", 1238 .trigger_type = ETT_STACKTRACE, 1239 .flags = EVENT_CMD_FL_POST_TRIGGER, 1240 .func = event_trigger_callback, 1241 .reg = register_trigger, 1242 .unreg = unregister_trigger, 1243 .get_trigger_ops = stacktrace_get_trigger_ops, 1244 .set_filter = set_trigger_filter, 1245 }; 1246 1247 static __init int register_trigger_stacktrace_cmd(void) 1248 { 1249 int ret; 1250 1251 ret = register_event_command(&trigger_stacktrace_cmd); 1252 WARN_ON(ret < 0); 1253 1254 return ret; 1255 } 1256 #else 1257 static __init int register_trigger_stacktrace_cmd(void) { return 0; } 1258 #endif /* CONFIG_STACKTRACE */ 1259 1260 static __init void unregister_trigger_traceon_traceoff_cmds(void) 1261 { 1262 unregister_event_command(&trigger_traceon_cmd); 1263 unregister_event_command(&trigger_traceoff_cmd); 1264 } 1265 1266 static void 1267 event_enable_trigger(struct event_trigger_data *data, 1268 struct trace_buffer *buffer, void *rec, 1269 struct ring_buffer_event *event) 1270 { 1271 struct enable_trigger_data *enable_data = data->private_data; 1272 1273 if (enable_data->enable) 1274 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); 1275 else 1276 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); 1277 } 1278 1279 static void 1280 event_enable_count_trigger(struct event_trigger_data *data, 1281 struct trace_buffer *buffer, void *rec, 1282 struct ring_buffer_event *event) 1283 { 1284 struct enable_trigger_data *enable_data = data->private_data; 1285 1286 if (!data->count) 1287 return; 1288 1289 /* Skip if the event is in a state we want to switch to */ 1290 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) 1291 return; 1292 1293 if (data->count != -1) 1294 (data->count)--; 1295 1296 event_enable_trigger(data, buffer, rec, event); 1297 } 1298 1299 int event_enable_trigger_print(struct seq_file *m, 1300 struct event_trigger_ops *ops, 1301 struct event_trigger_data *data) 1302 { 1303 struct enable_trigger_data *enable_data = data->private_data; 1304 1305 seq_printf(m, "%s:%s:%s", 1306 enable_data->hist ? 1307 (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) : 1308 (enable_data->enable ? 
ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;
	if (param) {
		param = skip_spaces(param);
		if (!*param)
			param = NULL;
	}

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}
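
	/*
	 * Set up the trigger data: a count of -1 means 'unlimited' (no
	 * count was given), and enable_data records which event to
	 * soft-enable or soft-disable when this trigger fires.
	 */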
1433 1434 trigger_data->count = -1; 1435 trigger_data->ops = trigger_ops; 1436 trigger_data->cmd_ops = cmd_ops; 1437 INIT_LIST_HEAD(&trigger_data->list); 1438 RCU_INIT_POINTER(trigger_data->filter, NULL); 1439 1440 enable_data->hist = hist; 1441 enable_data->enable = enable; 1442 enable_data->file = event_enable_file; 1443 trigger_data->private_data = enable_data; 1444 1445 if (glob[0] == '!') { 1446 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file); 1447 kfree(trigger_data); 1448 kfree(enable_data); 1449 ret = 0; 1450 goto out; 1451 } 1452 1453 /* Up the trigger_data count to make sure nothing frees it on failure */ 1454 event_trigger_init(trigger_ops, trigger_data); 1455 1456 if (trigger) { 1457 number = strsep(&trigger, ":"); 1458 1459 ret = -EINVAL; 1460 if (!strlen(number)) 1461 goto out_free; 1462 1463 /* 1464 * We use the callback data field (which is a pointer) 1465 * as our counter. 1466 */ 1467 ret = kstrtoul(number, 0, &trigger_data->count); 1468 if (ret) 1469 goto out_free; 1470 } 1471 1472 if (!param) /* if param is non-empty, it's supposed to be a filter */ 1473 goto out_reg; 1474 1475 if (!cmd_ops->set_filter) 1476 goto out_reg; 1477 1478 ret = cmd_ops->set_filter(param, trigger_data, file); 1479 if (ret < 0) 1480 goto out_free; 1481 1482 out_reg: 1483 /* Don't let event modules unload while probe registered */ 1484 ret = try_module_get(event_enable_file->event_call->mod); 1485 if (!ret) { 1486 ret = -EBUSY; 1487 goto out_free; 1488 } 1489 1490 ret = trace_event_enable_disable(event_enable_file, 1, 1); 1491 if (ret < 0) 1492 goto out_put; 1493 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); 1494 /* 1495 * The above returns on success the # of functions enabled, 1496 * but if it didn't find any functions it returns zero. 1497 * Consider no functions a failure too. 
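	 * (Here cmd_ops->reg() is event_enable_register_trigger(), which
	 * follows the same convention: 1 on success, 0 if the trigger
	 * couldn't be enabled, or a negative error code.)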
1498 */ 1499 if (!ret) { 1500 ret = -ENOENT; 1501 goto out_disable; 1502 } else if (ret < 0) 1503 goto out_disable; 1504 /* Just return zero, not the number of enabled functions */ 1505 ret = 0; 1506 event_trigger_free(trigger_ops, trigger_data); 1507 out: 1508 return ret; 1509 1510 out_disable: 1511 trace_event_enable_disable(event_enable_file, 0, 1); 1512 out_put: 1513 module_put(event_enable_file->event_call->mod); 1514 out_free: 1515 if (cmd_ops->set_filter) 1516 cmd_ops->set_filter(NULL, trigger_data, NULL); 1517 event_trigger_free(trigger_ops, trigger_data); 1518 kfree(enable_data); 1519 goto out; 1520 } 1521 1522 int event_enable_register_trigger(char *glob, 1523 struct event_trigger_ops *ops, 1524 struct event_trigger_data *data, 1525 struct trace_event_file *file) 1526 { 1527 struct enable_trigger_data *enable_data = data->private_data; 1528 struct enable_trigger_data *test_enable_data; 1529 struct event_trigger_data *test; 1530 int ret = 0; 1531 1532 lockdep_assert_held(&event_mutex); 1533 1534 list_for_each_entry(test, &file->triggers, list) { 1535 test_enable_data = test->private_data; 1536 if (test_enable_data && 1537 (test->cmd_ops->trigger_type == 1538 data->cmd_ops->trigger_type) && 1539 (test_enable_data->file == enable_data->file)) { 1540 ret = -EEXIST; 1541 goto out; 1542 } 1543 } 1544 1545 if (data->ops->init) { 1546 ret = data->ops->init(data->ops, data); 1547 if (ret < 0) 1548 goto out; 1549 } 1550 1551 list_add_rcu(&data->list, &file->triggers); 1552 ret++; 1553 1554 update_cond_flag(file); 1555 if (trace_event_trigger_enable_disable(file, 1) < 0) { 1556 list_del_rcu(&data->list); 1557 update_cond_flag(file); 1558 ret--; 1559 } 1560 out: 1561 return ret; 1562 } 1563 1564 void event_enable_unregister_trigger(char *glob, 1565 struct event_trigger_ops *ops, 1566 struct event_trigger_data *test, 1567 struct trace_event_file *file) 1568 { 1569 struct enable_trigger_data *test_enable_data = test->private_data; 1570 struct enable_trigger_data *enable_data; 1571 struct event_trigger_data *data; 1572 bool unregistered = false; 1573 1574 lockdep_assert_held(&event_mutex); 1575 1576 list_for_each_entry(data, &file->triggers, list) { 1577 enable_data = data->private_data; 1578 if (enable_data && 1579 (data->cmd_ops->trigger_type == 1580 test->cmd_ops->trigger_type) && 1581 (enable_data->file == test_enable_data->file)) { 1582 unregistered = true; 1583 list_del_rcu(&data->list); 1584 trace_event_trigger_enable_disable(file, 0); 1585 update_cond_flag(file); 1586 break; 1587 } 1588 } 1589 1590 if (unregistered && data->ops->free) 1591 data->ops->free(data->ops, data); 1592 } 1593 1594 static struct event_trigger_ops * 1595 event_enable_get_trigger_ops(char *cmd, char *param) 1596 { 1597 struct event_trigger_ops *ops; 1598 bool enable; 1599 1600 #ifdef CONFIG_HIST_TRIGGERS 1601 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) || 1602 (strcmp(cmd, ENABLE_HIST_STR) == 0)); 1603 #else 1604 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; 1605 #endif 1606 if (enable) 1607 ops = param ? &event_enable_count_trigger_ops : 1608 &event_enable_trigger_ops; 1609 else 1610 ops = param ? 
&event_disable_count_trigger_ops : 1611 &event_disable_trigger_ops; 1612 1613 return ops; 1614 } 1615 1616 static struct event_command trigger_enable_cmd = { 1617 .name = ENABLE_EVENT_STR, 1618 .trigger_type = ETT_EVENT_ENABLE, 1619 .func = event_enable_trigger_func, 1620 .reg = event_enable_register_trigger, 1621 .unreg = event_enable_unregister_trigger, 1622 .get_trigger_ops = event_enable_get_trigger_ops, 1623 .set_filter = set_trigger_filter, 1624 }; 1625 1626 static struct event_command trigger_disable_cmd = { 1627 .name = DISABLE_EVENT_STR, 1628 .trigger_type = ETT_EVENT_ENABLE, 1629 .func = event_enable_trigger_func, 1630 .reg = event_enable_register_trigger, 1631 .unreg = event_enable_unregister_trigger, 1632 .get_trigger_ops = event_enable_get_trigger_ops, 1633 .set_filter = set_trigger_filter, 1634 }; 1635 1636 static __init void unregister_trigger_enable_disable_cmds(void) 1637 { 1638 unregister_event_command(&trigger_enable_cmd); 1639 unregister_event_command(&trigger_disable_cmd); 1640 } 1641 1642 static __init int register_trigger_enable_disable_cmds(void) 1643 { 1644 int ret; 1645 1646 ret = register_event_command(&trigger_enable_cmd); 1647 if (WARN_ON(ret < 0)) 1648 return ret; 1649 ret = register_event_command(&trigger_disable_cmd); 1650 if (WARN_ON(ret < 0)) 1651 unregister_trigger_enable_disable_cmds(); 1652 1653 return ret; 1654 } 1655 1656 static __init int register_trigger_traceon_traceoff_cmds(void) 1657 { 1658 int ret; 1659 1660 ret = register_event_command(&trigger_traceon_cmd); 1661 if (WARN_ON(ret < 0)) 1662 return ret; 1663 ret = register_event_command(&trigger_traceoff_cmd); 1664 if (WARN_ON(ret < 0)) 1665 unregister_trigger_traceon_traceoff_cmds(); 1666 1667 return ret; 1668 } 1669 1670 __init int register_trigger_cmds(void) 1671 { 1672 register_trigger_traceon_traceoff_cmds(); 1673 register_trigger_snapshot_cmd(); 1674 register_trigger_stacktrace_cmd(); 1675 register_trigger_enable_disable_cmds(); 1676 register_trigger_hist_enable_disable_cmds(); 1677 register_trigger_hist_cmd(); 1678 1679 return 0; 1680 } 1681
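
/*
 * Illustrative usage of the trigger files backed by this code (paths,
 * events and fields below are examples only; see
 * Documentation/trace/events.rst for the authoritative syntax):
 *
 *   # echo 'traceoff:5 if prev_prio < 100' > \
 *         /sys/kernel/tracing/events/sched/sched_switch/trigger
 *   # echo 'enable_event:kmem:kmalloc:10' > \
 *         /sys/kernel/tracing/events/sched/sched_switch/trigger
 *   # echo '!traceoff' > \
 *         /sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * The first command arms a count-limited, filtered post trigger; the
 * second soft-enables the kmem:kmalloc event when sched_switch is hit
 * (at most 10 times); the '!' form removes a previously set trigger.
 */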