xref: /openbmc/linux/kernel/trace/trace_stat.c (revision b04b4f78)
/*
 * Infrastructure for statistic tracing (histogram output).
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Based on the code from trace_branch.c which is
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */


#include <linux/list.h>
#include <linux/debugfs.h>
#include "trace_stat.h"
#include "trace.h"


/* List of stat entries from a tracer */
struct trace_stat_list {
	struct list_head	list;
	void			*stat;
};

/* A stat session is the stat output of one file */
struct tracer_stat_session {
	struct list_head	session_list;
	struct tracer_stat	*ts;
	struct list_head	stat_list;
	struct mutex		stat_mutex;
	struct dentry		*file;
};

/* All of the sessions currently in use. Each stat file embeds one session */
static LIST_HEAD(all_stat_sessions);
static DEFINE_MUTEX(all_stat_sessions_mutex);

/* The root directory for all stat files */
static struct dentry		*stat_dir;

static void reset_stat_session(struct tracer_stat_session *session)
{
	struct trace_stat_list *node, *next;

	list_for_each_entry_safe(node, next, &session->stat_list, list)
		kfree(node);

	INIT_LIST_HEAD(&session->stat_list);
}

static void destroy_session(struct tracer_stat_session *session)
{
	debugfs_remove(session->file);
	reset_stat_session(session);
	mutex_destroy(&session->stat_mutex);
	kfree(session);
}

/*
 * For tracers that don't provide a stat_cmp callback.
 * This one will force an immediate insertion at the tail of
 * the list.
 */
static int dummy_cmp(void *p1, void *p2)
{
	return 1;
}
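
/*
 * Illustrative sketch only (not part of this file): what a tracer-supplied
 * stat_cmp might look like.  The insertion loop in stat_seq_init() below
 * keeps an already-placed entry 'a' ahead of a new entry 'b' whenever
 * stat_cmp(a, b) >= 0, so returning a positive value when p1 holds the
 * larger count yields a descending (largest-first) listing.  The
 * example_hit_stat type is hypothetical.
 */
#if 0	/* example only, never compiled */
struct example_hit_stat {
	unsigned long	hits;
};

static int example_hits_cmp(void *p1, void *p2)
{
	struct example_hit_stat *a = p1;
	struct example_hit_stat *b = p2;

	if (a->hits > b->hits)
		return 1;
	if (a->hits < b->hits)
		return -1;
	return 0;
}
#endif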

/*
 * Initialize the stat list at each trace_stat file opening.
 * All of this copying and sorting is required at every open,
 * since the stats could have changed between two file sessions.
 */
static int stat_seq_init(struct tracer_stat_session *session)
{
	struct trace_stat_list *iter_entry, *new_entry;
	struct tracer_stat *ts = session->ts;
	void *stat;
	int ret = 0;
	int i;

	mutex_lock(&session->stat_mutex);
	reset_stat_session(session);

	if (!ts->stat_cmp)
		ts->stat_cmp = dummy_cmp;

	stat = ts->stat_start();
	if (!stat)
		goto exit;

	/*
	 * The first entry. Actually this is the second, but the first
	 * one (the stat_list head) is pointless.
	 */
	new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
	if (!new_entry) {
		ret = -ENOMEM;
		goto exit;
	}

	INIT_LIST_HEAD(&new_entry->list);

	list_add(&new_entry->list, &session->stat_list);

	new_entry->stat = stat;

	/*
	 * Iterate over the tracer stat entries and store them in a sorted
	 * list.
	 */
	for (i = 1; ; i++) {
		stat = ts->stat_next(stat, i);

		/* End of insertion */
		if (!stat)
			break;

		new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
		if (!new_entry) {
			ret = -ENOMEM;
			goto exit_free_list;
		}

		INIT_LIST_HEAD(&new_entry->list);
		new_entry->stat = stat;

		list_for_each_entry_reverse(iter_entry, &session->stat_list,
				list) {

			/* Insertion in descending sort order */
			if (ts->stat_cmp(iter_entry->stat,
					new_entry->stat) >= 0) {

				list_add(&new_entry->list, &iter_entry->list);
				break;
			}
		}

		/* The new entry is the largest seen so far: link it at the head */
		if (list_empty(&new_entry->list))
			list_add(&new_entry->list, &session->stat_list);
	}
exit:
	mutex_unlock(&session->stat_mutex);
	return ret;

exit_free_list:
	reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);
	return ret;
}


static void *stat_seq_start(struct seq_file *s, loff_t *pos)
{
	struct tracer_stat_session *session = s->private;

	/* Prevent a tracer switch or a stat_list modification while we read */
	mutex_lock(&session->stat_mutex);

	/* If we are at the beginning of the file, print the headers */
	if (!*pos && session->ts->stat_headers)
		return SEQ_START_TOKEN;

	return seq_list_start(&session->stat_list, *pos);
}

static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
{
	struct tracer_stat_session *session = s->private;

	if (p == SEQ_START_TOKEN)
		return seq_list_start(&session->stat_list, *pos);

	return seq_list_next(p, &session->stat_list, pos);
}

static void stat_seq_stop(struct seq_file *s, void *p)
{
	struct tracer_stat_session *session = s->private;

	mutex_unlock(&session->stat_mutex);
}

static int stat_seq_show(struct seq_file *s, void *v)
{
	struct tracer_stat_session *session = s->private;
	struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);

	if (v == SEQ_START_TOKEN)
		return session->ts->stat_headers(s);

	return session->ts->stat_show(s, l->stat);
}

static const struct seq_operations trace_stat_seq_ops = {
	.start		= stat_seq_start,
	.next		= stat_seq_next,
	.stop		= stat_seq_stop,
	.show		= stat_seq_show
};

/* The session stat is refilled and resorted at each stat file opening */
static int tracing_stat_open(struct inode *inode, struct file *file)
{
	int ret;

	struct tracer_stat_session *session = inode->i_private;

	ret = seq_open(file, &trace_stat_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = session;
		ret = stat_seq_init(session);
	}

	return ret;
}

/*
 * Avoid consuming memory with our now useless list.
 */
static int tracing_stat_release(struct inode *i, struct file *f)
{
	struct tracer_stat_session *session = i->i_private;

	mutex_lock(&session->stat_mutex);
	reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);

	return 0;
}

static const struct file_operations tracing_stat_fops = {
	.open		= tracing_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_stat_release
};

static int tracing_stat_init(void)
{
	struct dentry *d_tracing;

	d_tracing = tracing_init_dentry();

	stat_dir = debugfs_create_dir("trace_stat", d_tracing);
	if (!stat_dir)
		pr_warning("Could not create debugfs "
			   "'trace_stat' entry\n");
	return 0;
}

static int init_stat_file(struct tracer_stat_session *session)
{
	if (!stat_dir && tracing_stat_init())
		return -ENODEV;

	session->file = debugfs_create_file(session->ts->name, 0644,
					    stat_dir,
					    session, &tracing_stat_fops);
	if (!session->file)
		return -ENOMEM;
	return 0;
}

int register_stat_tracer(struct tracer_stat *trace)
{
	struct tracer_stat_session *session, *node, *tmp;
	int ret;

	if (!trace)
		return -EINVAL;

	if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
		return -EINVAL;

	/* Already registered? */
	mutex_lock(&all_stat_sessions_mutex);
	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
		if (node->ts == trace) {
			mutex_unlock(&all_stat_sessions_mutex);
			return -EINVAL;
		}
	}
	mutex_unlock(&all_stat_sessions_mutex);

	/* Init the session */
	session = kmalloc(sizeof(struct tracer_stat_session), GFP_KERNEL);
	if (!session)
		return -ENOMEM;

	session->ts = trace;
	INIT_LIST_HEAD(&session->session_list);
	INIT_LIST_HEAD(&session->stat_list);
	mutex_init(&session->stat_mutex);
	session->file = NULL;

	ret = init_stat_file(session);
	if (ret) {
		destroy_session(session);
		return ret;
	}

	/* Register */
	mutex_lock(&all_stat_sessions_mutex);
	list_add_tail(&session->session_list, &all_stat_sessions);
	mutex_unlock(&all_stat_sessions_mutex);

	return 0;
}

void unregister_stat_tracer(struct tracer_stat *trace)
{
	struct tracer_stat_session *node, *tmp;

	mutex_lock(&all_stat_sessions_mutex);
	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
		if (node->ts == trace) {
			list_del(&node->session_list);
			destroy_session(node);
			break;
		}
	}
	mutex_unlock(&all_stat_sessions_mutex);
}
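
/*
 * Illustrative sketch only (not part of this file): how a tracer might plug
 * into this facility.  The callback names follow the calls made above
 * (ts->name, stat_start, stat_next, stat_cmp, stat_show, stat_headers); the
 * actual struct tracer_stat declaration lives in trace_stat.h.  Everything
 * named example_* below is hypothetical.
 */
#if 0	/* example only, never compiled */
struct example_entry {
	const char	*what;
	unsigned long	count;
};

static struct example_entry example_table[] = {
	{ "foo", 42 },
	{ "bar",  7 },
};

/* Return the first entry to dump, or NULL if there is nothing to show */
static void *example_stat_start(void)
{
	return &example_table[0];
}

/* Return the idx-th entry, or NULL once the table is exhausted */
static void *example_stat_next(void *prev, int idx)
{
	if (idx >= ARRAY_SIZE(example_table))
		return NULL;
	return &example_table[idx];
}

static int example_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# name      count\n");
	return 0;
}

static int example_stat_show(struct seq_file *s, void *p)
{
	struct example_entry *entry = p;

	seq_printf(s, "%-10s  %lu\n", entry->what, entry->count);
	return 0;
}

static struct tracer_stat example_stats = {
	/* appears as <debugfs>/tracing/trace_stat/example */
	.name		= "example",
	.stat_start	= example_stat_start,
	.stat_next	= example_stat_next,
	/* no stat_cmp: fall back to dummy_cmp(), i.e. keep stat_next() order */
	.stat_show	= example_stat_show,
	.stat_headers	= example_stat_headers,
};

/* A tracer would typically register from its init path ... */
static int __init example_stat_init(void)
{
	return register_stat_tracer(&example_stats);
}

/* ... and unregister when it is torn down */
static void example_stat_exit(void)
{
	unregister_stat_tracer(&example_stats);
}
#endif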
327