/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;
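
/*
 * There is one scratch buffer per perf recursion context (task, softirq,
 * hardirq and NMI), each of them per-CPU, so an event firing from an
 * interrupt that nests inside another event cannot corrupt its data.
 * perf_trace_buf_prepare() selects the right one through
 * perf_swevent_get_recursion_context().
 */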

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while
		 * tracing the page fault handler itself, and because of
		 * its overall trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
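
/*
 * Note the two levels of reference counting above: perf_refcount counts
 * the perf users of this particular event and owns the per-CPU hlist
 * heads, while total_ref_count counts perf users across all trace events
 * and owns the shared perf_trace_buf scratch buffers.
 */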

static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}
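
/*
 * Note that perf_trace_event_unreg() drops the perf_refcount taken by
 * perf_trace_event_reg(), so the open failure path above needs no
 * explicit decrement of its own.
 */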

int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
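
/*
 * For illustration only (this is userspace, not part of this file): the
 * path above is reached by opening a perf event of type
 * PERF_TYPE_TRACEPOINT whose config is the id exported in the event's
 * "id" file under the tracing events directory, roughly:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_TRACEPOINT,
 *		.config		= id,	// from events/<subsys>/<event>/id
 *		.sample_type	= PERF_SAMPLE_RAW,
 *	};
 *	fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
 */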

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}
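
/*
 * The hlist manipulation in perf_trace_add()/perf_trace_del() is what the
 * event delivery side relies on: handlers look up
 * this_cpu_ptr(tp_event->perf_events) and walk it under RCU, which is why
 * the list is only freed after tracepoint_synchronize_unregister() in
 * perf_trace_event_unreg().
 */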

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
			"perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* Zero the trailing alignment padding so stale data is not leaked to user space */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
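
/*
 * A rough sketch of how callers are expected to use the helper above; the
 * real callers are generated by the TRACE_EVENT() machinery, and
 * perf_ftrace_function_call() below is a hand-written one:
 *
 *	entry = perf_trace_buf_prepare(size, event_type, regs, &rctx);
 *	if (!entry)
 *		return;
 *	// fill in the fields that follow the struct trace_entry header
 *	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head, task);
 *
 * where size must not exceed PERF_MAX_TRACE_SIZE and is rounded the way
 * ENTRY_SIZE is computed below.
 */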

#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))
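	/*
	 * A perf raw sample carries a u32 size field in front of the data
	 * and the whole record must stay u64 aligned, hence the round-up
	 * including that header and the subtraction of it again.
	 */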

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}
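
/*
 * The FTRACE_OPS_FL_CONTROL flag set in perf_ftrace_function_register()
 * is what makes the add/del path cheap: a control ops can be enabled and
 * disabled per CPU via ftrace_function_local_enable()/_disable() without
 * registering or unregistering the ftrace_ops, so only open/close pay for
 * the full (un)registration.
 */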

int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */