/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

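/*
 * Check whether the given perf event is allowed to bind to this trace
 * event: honour any event-specific ->perf_perm() hook, restrict the
 * function trace event and raw tracepoint data to privileged users, and
 * reject user space callchains/stack dumps for the function trace event.
 */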
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for the function trace
		 * event, due to issues with page faults while tracing the page
		 * fault handler and its overall trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to access it.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

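/*
 * Register the trace event for perf use. The first perf event attached to
 * a given trace event allocates the per-cpu hlist heads; the first trace
 * event used by perf at all also allocates the per-context scratch
 * buffers. Everything is rolled back on failure.
 */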
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

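/*
 * Drop the perf reference on the trace event. The last perf event using it
 * unregisters the class callback, waits for in-flight tracepoint handlers
 * to finish and then frees the per-cpu hlist heads; the last user of perf
 * tracing overall also frees the per-context scratch buffers. The module
 * reference taken in perf_trace_init() is always dropped.
 */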
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

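/*
 * Full setup for a new perf event: permission check, registration of the
 * trace event for perf use, then the per-event open callback. The
 * registration is undone if the open fails.
 */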
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

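/*
 * Bind a new perf event to the trace event whose id matches
 * p_event->attr.config. The lookup walks ftrace_events under event_mutex
 * and takes a reference on the owning module, which is dropped again on
 * failure or when the event is unregistered.
 */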
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

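/*
 * Tear down a perf event: close the per-event state and drop the perf
 * reference on the trace event, both under event_mutex.
 */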
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

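/*
 * Schedule the event in on this CPU: link it into the trace event's
 * per-cpu hlist so the tracepoint handler can find it, then let the event
 * class do any per-cpu enabling via TRACE_REG_PERF_ADD. Without
 * PERF_EF_START the event begins in the stopped state.
 */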
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

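/*
 * Schedule the event out on this CPU: unlink it from the per-cpu hlist and
 * let the event class undo whatever TRACE_REG_PERF_ADD set up.
 */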
void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

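/*
 * Carve out space in the per-cpu scratch buffer for the current recursion
 * context and initialize the common trace_entry header. Returns NULL if
 * the requested size is too big or no recursion context is available;
 * otherwise *rctxp holds the context the caller must release once the
 * record has been submitted.
 */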
void *perf_trace_buf_prepare(int size, unsigned short type,
			     struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
			"perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* Zero the alignment padding so we don't leak stack data to user space */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
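/*
 * ftrace callback for perf function-trace events: if any event is active
 * on this CPU, build an ftrace_entry in the perf scratch buffer and submit
 * it together with a register snapshot taken via perf_fetch_caller_regs().
 */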
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

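/*
 * The ftrace_ops embedded in the perf event is registered on open and
 * unregistered on close; FTRACE_OPS_FL_CONTROL lets the add/del callbacks
 * below enable and disable it on the local CPU without a full
 * (un)registration.
 */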
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

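/*
 * ->reg() callback for the function trace event: map the generic
 * TRACE_REG_PERF_* requests onto the ftrace_ops helpers above. The
 * non-perf register/unregister requests are not supported here.
 */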
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */