/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"

struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;


__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		/*
		 * If requesting per event more than the global cap,
		 * return a different error to help userspace figure
		 * this out.
		 *
		 * And also do it here so that we have &callchain_mutex held.
		 */
		if (event_max_stack > sysctl_perf_event_max_stack)
			err = -EOVERFLOW;
		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	bool kernel = !event->attr.exclude_callchain_kernel;
	bool user   = !event->attr.exclude_callchain_user;
	/* Disallow cross-task user callchains. */
	bool crosstask = event->ctx->task && event->ctx->task != current;
	const u32 max_stack = event->attr.sample_max_stack;

	if (!kernel && !user)
		return NULL;

	return get_perf_callchain(regs, 0, kernel, user, max_stack, crosstask, true);
}

struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	ctx.entry	   = entry;
	ctx.max_stack	   = max_stack;
	ctx.nr		   = entry->nr = init_nr;
	ctx.contexts	   = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			mm_segment_t fs;

			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

			fs = get_fs();
			set_fs(USER_DS);
			perf_callchain_user(&ctx, regs);
			set_fs(fs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}
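
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): how the perf core typically drives this API. The call sites below
 * are simplified assumptions rather than verbatim core.c code. An event
 * that requests callchains pins the per-CPU buffers at creation time,
 * resolves the chain when a sample fires, and drops its reference on
 * teardown:
 *
 *	// at event creation, if PERF_SAMPLE_CALLCHAIN was requested
 *	err = get_callchain_buffers(event->attr.sample_max_stack);
 *	if (err)
 *		return err;
 *
 *	// in the sampling path, possibly from NMI context
 *	data->callchain = perf_callchain(event, regs);
 *
 *	// when the last callchain-sampling event is destroyed
 *	put_callchain_buffers();
 */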