1773c1670SSteven Rostedt (VMware) // SPDX-License-Identifier: GPL-2.0
2773c1670SSteven Rostedt (VMware) 
3773c1670SSteven Rostedt (VMware) #include <linux/seq_file.h>
4773c1670SSteven Rostedt (VMware) #include <linux/kallsyms.h>
5773c1670SSteven Rostedt (VMware) #include <linux/module.h>
6773c1670SSteven Rostedt (VMware) #include <linux/ftrace.h>
7773c1670SSteven Rostedt (VMware) #include <linux/fs.h>
8773c1670SSteven Rostedt (VMware) 
9773c1670SSteven Rostedt (VMware) #include "trace_output.h"
10773c1670SSteven Rostedt (VMware) 
/* One recorded recursion event: the recursing function and its caller. */
struct recursed_functions {
	unsigned long		ip;		/* function that triggered the recursion check */
	unsigned long		parent_ip;	/* address of its caller */
};

/* Fixed-size table of unique recursing functions, filled locklessly. */
static struct recursed_functions recursed_functions[CONFIG_FTRACE_RECORD_RECURSION_SIZE];
/* Count of valid entries above; temporarily -1 while the table is cleared. */
static atomic_t nr_records;

/*
 * Cache the last found function. Yes, updates to this is racy, but
 * so is memory cache ;-)
 */
static unsigned long cached_function;
24773c1670SSteven Rostedt (VMware) 
/*
 * ftrace_record_recursion - record a function that hit recursion protection
 * @ip:        address of the function that recursed
 * @parent_ip: address of its caller
 *
 * Appends @ip/@parent_ip to the recursed_functions table (at most once
 * per unique @ip).  Uses only atomic reads and cmpxchg() — no locks —
 * because this can be called from any context, including from within
 * the recursion protection itself.
 */
void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
{
	int index = 0;
	int i;
	unsigned long old;

 again:
	/* First check the last one recorded */
	if (ip == cached_function)
		return;

	i = atomic_read(&nr_records);
	/* nr_records is -1 when clearing records */
	smp_mb__after_atomic();
	if (i < 0)
		return;

	/*
	 * If there's two writers and this writer comes in second,
	 * the cmpxchg() below to update the ip will fail. Then this
	 * writer will try again. It is possible that index will now
	 * be greater than nr_records. This is because the writer
	 * that succeeded has not updated the nr_records yet.
	 * This writer could keep trying again until the other writer
	 * updates nr_records. But if the other writer takes an
	 * interrupt, and that interrupt locks up that CPU, we do
	 * not want this CPU to lock up due to the recursion protection,
	 * and have a bug report showing this CPU as the cause of
	 * locking up the computer. To not lose this record, this
	 * writer will simply use the next position to update the
	 * recursed_functions, and it will update the nr_records
	 * accordingly.
	 */
	if (index < i)
		index = i;
	if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
		return;

	/* Scan existing entries; bail (and cache) if @ip is already recorded. */
	for (i = index - 1; i >= 0; i--) {
		if (recursed_functions[i].ip == ip) {
			cached_function = ip;
			return;
		}
	}

	/* Not found: remember it so repeat hits short-circuit above. */
	cached_function = ip;

	/*
	 * We only want to add a function if it hasn't been added before.
	 * Add to the current location before incrementing the count.
	 * If it fails to add, then increment the index (save in i)
	 * and try again.
	 */
	old = cmpxchg(&recursed_functions[index].ip, 0, ip);
	if (old != 0) {
		/* Did something else already added this for us? */
		if (old == ip)
			return;
		/* Try the next location (use i for the next index) */
		index++;
		goto again;
	}

	recursed_functions[index].parent_ip = parent_ip;

	/*
	 * It's still possible that we could race with the clearing
	 *    CPU0                                    CPU1
	 *    ----                                    ----
	 *                                       ip = func
	 *  nr_records = -1;
	 *  recursed_functions[0] = 0;
	 *                                       i = -1
	 *                                       if (i < 0)
	 *  nr_records = 0;
	 *  (new recursion detected)
	 *      recursed_functions[0] = func
	 *                                            cmpxchg(recursed_functions[0],
	 *                                                    func, 0)
	 *
	 * But the worse that could happen is that we get a zero in
	 * the recursed_functions array, and it's likely that "func" will
	 * be recorded again.
	 */
	i = atomic_read(&nr_records);
	smp_mb__after_atomic();
	if (i < 0)
		cmpxchg(&recursed_functions[index].ip, ip, 0);
	else if (i <= index)
		atomic_cmpxchg(&nr_records, i, index + 1);
}
EXPORT_SYMBOL_GPL(ftrace_record_recursion);
116773c1670SSteven Rostedt (VMware) EXPORT_SYMBOL_GPL(ftrace_record_recursion);
117773c1670SSteven Rostedt (VMware) 
118773c1670SSteven Rostedt (VMware) static DEFINE_MUTEX(recursed_function_lock);
119773c1670SSteven Rostedt (VMware) static struct trace_seq *tseq;
120773c1670SSteven Rostedt (VMware) 
/*
 * seq_file ->start: take the lock for the whole read session and
 * allocate the trace_seq used by ->show.  On allocation failure the
 * mutex is intentionally left held — seq_file invokes ->stop() even
 * when ->start() returns an error, and ->stop() releases it.
 */
static void *recursed_function_seq_start(struct seq_file *m, loff_t *pos)
{
	void *rec = NULL;
	int count;

	mutex_lock(&recursed_function_lock);
	count = atomic_read(&nr_records);
	if (*pos < count)
		rec = &recursed_functions[*pos];

	tseq = kzalloc(sizeof(*tseq), GFP_KERNEL);
	if (!tseq)
		return ERR_PTR(-ENOMEM);

	trace_seq_init(tseq);

	return rec;
}
140773c1670SSteven Rostedt (VMware) 
/* seq_file ->next: advance *pos and return the next record, or NULL at end. */
static void *recursed_function_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	int next = ++(*pos);

	if (next >= atomic_read(&nr_records))
		return NULL;

	return &recursed_functions[next];
}
151773c1670SSteven Rostedt (VMware) 
recursed_function_seq_stop(struct seq_file * m,void * v)152773c1670SSteven Rostedt (VMware) static void recursed_function_seq_stop(struct seq_file *m, void *v)
153773c1670SSteven Rostedt (VMware) {
154773c1670SSteven Rostedt (VMware) 	kfree(tseq);
155773c1670SSteven Rostedt (VMware) 	mutex_unlock(&recursed_function_lock);
156773c1670SSteven Rostedt (VMware) }
157773c1670SSteven Rostedt (VMware) 
recursed_function_seq_show(struct seq_file * m,void * v)158773c1670SSteven Rostedt (VMware) static int recursed_function_seq_show(struct seq_file *m, void *v)
159773c1670SSteven Rostedt (VMware) {
160773c1670SSteven Rostedt (VMware) 	struct recursed_functions *record = v;
161773c1670SSteven Rostedt (VMware) 	int ret = 0;
162773c1670SSteven Rostedt (VMware) 
163773c1670SSteven Rostedt (VMware) 	if (record) {
164773c1670SSteven Rostedt (VMware) 		trace_seq_print_sym(tseq, record->parent_ip, true);
165773c1670SSteven Rostedt (VMware) 		trace_seq_puts(tseq, ":\t");
166773c1670SSteven Rostedt (VMware) 		trace_seq_print_sym(tseq, record->ip, true);
167773c1670SSteven Rostedt (VMware) 		trace_seq_putc(tseq, '\n');
168773c1670SSteven Rostedt (VMware) 		ret = trace_print_seq(m, tseq);
169773c1670SSteven Rostedt (VMware) 	}
170773c1670SSteven Rostedt (VMware) 
171773c1670SSteven Rostedt (VMware) 	return ret;
172773c1670SSteven Rostedt (VMware) }
173773c1670SSteven Rostedt (VMware) 
/* Iterator over the recursed_functions table for reads of the tracefs file. */
static const struct seq_operations recursed_function_seq_ops = {
	.start  = recursed_function_seq_start,
	.next   = recursed_function_seq_next,
	.stop   = recursed_function_seq_stop,
	.show   = recursed_function_seq_show
};
180773c1670SSteven Rostedt (VMware) 
recursed_function_open(struct inode * inode,struct file * file)181773c1670SSteven Rostedt (VMware) static int recursed_function_open(struct inode *inode, struct file *file)
182773c1670SSteven Rostedt (VMware) {
183773c1670SSteven Rostedt (VMware) 	int ret = 0;
184773c1670SSteven Rostedt (VMware) 
185773c1670SSteven Rostedt (VMware) 	mutex_lock(&recursed_function_lock);
186773c1670SSteven Rostedt (VMware) 	/* If this file was opened for write, then erase contents */
187773c1670SSteven Rostedt (VMware) 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
188773c1670SSteven Rostedt (VMware) 		/* disable updating records */
189773c1670SSteven Rostedt (VMware) 		atomic_set(&nr_records, -1);
190773c1670SSteven Rostedt (VMware) 		smp_mb__after_atomic();
191773c1670SSteven Rostedt (VMware) 		memset(recursed_functions, 0, sizeof(recursed_functions));
192773c1670SSteven Rostedt (VMware) 		smp_wmb();
193773c1670SSteven Rostedt (VMware) 		/* enable them again */
194773c1670SSteven Rostedt (VMware) 		atomic_set(&nr_records, 0);
195773c1670SSteven Rostedt (VMware) 	}
196773c1670SSteven Rostedt (VMware) 	if (file->f_mode & FMODE_READ)
197773c1670SSteven Rostedt (VMware) 		ret = seq_open(file, &recursed_function_seq_ops);
198773c1670SSteven Rostedt (VMware) 	mutex_unlock(&recursed_function_lock);
199773c1670SSteven Rostedt (VMware) 
200773c1670SSteven Rostedt (VMware) 	return ret;
201773c1670SSteven Rostedt (VMware) }
202773c1670SSteven Rostedt (VMware) 
/*
 * Writes are accepted (and the data discarded) only so that opening
 * with O_TRUNC can be used to clear the recorded table; the actual
 * clearing happens in recursed_function_open().
 */
static ssize_t recursed_function_write(struct file *file,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return count;
}
209773c1670SSteven Rostedt (VMware) 
/* ->release: tear down the seq_file state created for readers in ->open. */
static int recursed_function_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	return 0;
}
216773c1670SSteven Rostedt (VMware) 
/* File operations for the "recursed_functions" tracefs file. */
static const struct file_operations recursed_functions_fops = {
	.open           = recursed_function_open,
	.write		= recursed_function_write,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = recursed_function_release,
};
224773c1670SSteven Rostedt (VMware) 
create_recursed_functions(void)225773c1670SSteven Rostedt (VMware) __init static int create_recursed_functions(void)
226773c1670SSteven Rostedt (VMware) {
227773c1670SSteven Rostedt (VMware) 
228*e4931b82SYuntao Wang 	trace_create_file("recursed_functions", TRACE_MODE_WRITE,
22921ccc9cdSSteven Rostedt (VMware) 			  NULL, NULL, &recursed_functions_fops);
230773c1670SSteven Rostedt (VMware) 	return 0;
231773c1670SSteven Rostedt (VMware) }
232773c1670SSteven Rostedt (VMware) 
233773c1670SSteven Rostedt (VMware) fs_initcall(create_recursed_functions);
234