/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include "internal.h"

struct callchain_cpus_entries {
	struct rcu_head rcu_head;
	struct perf_callchain_entry *cpu_entries[0];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;


__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}
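/*
 * Illustrative sketch (not part of this file): the reader-side discipline
 * that pairs with the RCU teardown above. release_callchain_buffers()
 * unpublishes the pointer first and frees the per-CPU buffers only after a
 * grace period, so a reader that dereferenced callchain_cpus_entries under
 * RCU protection can keep using its entry until it is done. A plain-context
 * reader would look roughly like this; use_entry() is a made-up consumer.
 * The sampling paths run in NMI/IRQ context, which presumably acts as an
 * implicit read-side section and would explain why get_callchain_entry()
 * below takes no explicit rcu_read_lock():
 *
 *	struct callchain_cpus_entries *entries;
 *
 *	rcu_read_lock();
 *	entries = rcu_dereference(callchain_cpus_entries);
 *	if (entries)
 *		use_entry(entries->cpu_entries[smp_processor_id()]);
 *	rcu_read_unlock();
 */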
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}
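/*
 * Usage sketch for the get/put pair above, mirroring what
 * get_perf_callchain() below does; record_sample() is a made-up consumer.
 * Note the asymmetry: a NULL entry with rctx == -1 means the recursion
 * counter was never taken, while a NULL entry with a valid rctx (buffers
 * already torn down) still requires put_callchain_entry():
 *
 *	int rctx;
 *	struct perf_callchain_entry *entry;
 *
 *	entry = get_callchain_entry(&rctx);
 *	if (rctx == -1)
 *		return;
 *	if (entry)
 *		record_sample(entry);
 *	put_callchain_entry(rctx);
 */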
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	bool kernel = !event->attr.exclude_callchain_kernel;
	bool user = !event->attr.exclude_callchain_user;
	/* Disallow cross-task user callchains. */
	bool crosstask = event->ctx->task && event->ctx->task != current;

	if (!kernel && !user)
		return NULL;

	return get_perf_callchain(regs, 0, kernel, user,
				  sysctl_perf_event_max_stack,
				  crosstask, true);
}

struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	ctx.entry = entry;
	ctx.max_stack = max_stack;
	ctx.nr = entry->nr = init_nr;
	ctx.contexts = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
			perf_callchain_user(&ctx, regs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}
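/*
 * Illustrative sketch of what an architecture override of the __weak
 * perf_callchain_kernel() stub above might look like: a minimal frame
 * pointer walk. struct stack_frame, its next_fp/ret layout and the
 * regs->ip/regs->bp register names are assumptions made for illustration,
 * not any particular architecture's ABI; a real unwinder also validates
 * every frame address before dereferencing it. perf_callchain_store()
 * returns 0 until ctx->max_stack entries have been recorded:
 *
 *	struct stack_frame {
 *		struct stack_frame *next_fp;
 *		unsigned long ret;
 *	};
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 *				   struct pt_regs *regs)
 *	{
 *		struct stack_frame *fp = (struct stack_frame *)regs->bp;
 *
 *		if (perf_callchain_store(entry, regs->ip))
 *			return;
 *		while (fp && !perf_callchain_store(entry, fp->ret))
 *			fp = fp->next_fp;
 *	}
 */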