xref: /openbmc/linux/kernel/events/callchain.c (revision 3723c63247854c97fe044c12a40e29043e9bbc1b)
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"

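/*
 * One callchain buffer per possible CPU, each sized to hold one
 * perf_callchain_entry for every recursion context.  The container is
 * freed via RCU once the last callchain event goes away.
 */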
struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

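/*
 * Size of one callchain entry: the fixed header plus one __u64 slot for
 * every stack frame and every PERF_CONTEXT_* marker that may be stored.
 * With the default limits (PERF_MAX_STACK_DEPTH == 127,
 * PERF_MAX_CONTEXTS_PER_STACK == 8) this is
 * sizeof(struct perf_callchain_entry) + 135 * sizeof(__u64).
 */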
static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}

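/*
 * callchain_recursion is a per-CPU recursion guard, one counter per
 * context (task, softirq, hardirq, NMI).  The buffers themselves are
 * refcounted by nr_callchain_events, with callchain_mutex serializing
 * allocation and release and RCU protecting lookups.
 */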
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

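/*
 * Weak stubs, overridden by architectures that support callchain
 * sampling; the generic versions record nothing.
 */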
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

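/*
 * Called with callchain_mutex held once the last callchain event is gone.
 * The buffers are looked up with rcu_dereference(), possibly from NMI
 * context, so publish the NULL pointer first and free the memory only
 * after a grace period.
 */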
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

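/*
 * Take a reference on the shared callchain buffers; the first user
 * allocates them.  Each successful call must be paired with
 * put_callchain_buffers().  Illustrative usage (sketch only, not taken
 * verbatim from a caller in this tree):
 *
 *	err = get_callchain_buffers(attr->sample_max_stack);
 *	if (err)
 *		return err;
 *	...
 *	put_callchain_buffers();
 */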
int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	/*
	 * If the event requests a deeper stack than the global cap
	 * allows, return a distinct error (-EOVERFLOW) so userspace can
	 * tell why the request failed.
	 *
	 * Do the check here, under callchain_mutex, so the limit cannot
	 * change underneath us.
	 */
	if (event_max_stack > sysctl_perf_event_max_stack) {
		err = -EOVERFLOW;
		goto exit;
	}

	if (count == 1)
		err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

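/*
 * Grab this CPU's callchain entry for the current recursion context.
 * Returns NULL with *rctx == -1 if this context is already unwinding, or
 * NULL with a valid *rctx if the buffers are gone; in the latter case the
 * caller must still call put_callchain_entry(*rctx).
 */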
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

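/*
 * Unwind into this CPU's entry for the current recursion context.  Returns
 * NULL if recursion is detected or if no buffers are allocated (callers are
 * expected to hold a reference via get_callchain_buffers()).  Kernel and/or
 * user frames are captured according to @kernel and @user; when @add_mark is
 * set, a PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER marker is stored before each
 * part.  For a sample taken in kernel mode the user unwind starts from
 * task_pt_regs(current), and it is skipped entirely for cross-task events
 * (@crosstask).
 */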
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	ctx.entry          = entry;
	ctx.max_stack      = max_stack;
	ctx.nr             = entry->nr = init_nr;
	ctx.contexts       = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			mm_segment_t fs;

			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

			fs = get_fs();
			set_fs(USER_DS);
			perf_callchain_user(&ctx, regs);
			set_fs(fs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 *
 * Writes are rejected with -EBUSY while any callchain event exists,
 * because the per-CPU buffers are sized from these values when they are
 * allocated.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}