// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012  Google, Inc.
 */

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <asm/barrier.h>
#include "internal.h"

/* This doesn't need to be atomic: speed is chosen over correctness here. */
static u64 pstore_ftrace_stamp;

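/*
 * ftrace callback: for every traced function call, build one
 * pstore_ftrace_record (ip, parent ip, timestamp, CPU) and hand it to the
 * registered pstore backend.
 */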
static void notrace pstore_ftrace_call(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *op,
				       struct ftrace_regs *fregs)
{
	int bit;
	unsigned long flags;
	struct pstore_ftrace_record rec = {};
	struct pstore_record record = {
		.type = PSTORE_TYPE_FTRACE,
		.buf = (char *)&rec,
		.size = sizeof(rec),
		.psi = psinfo,
	};

	if (unlikely(oops_in_progress))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

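	/* Fill in the record and write it out with local interrupts disabled. */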
	local_irq_save(flags);

	rec.ip = ip;
	rec.parent_ip = parent_ip;
	pstore_ftrace_write_timestamp(&rec, pstore_ftrace_stamp++);
	pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id());
	psinfo->write(&record);

	local_irq_restore(flags);
	ftrace_test_recursion_unlock(bit);
}

static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
	.func	= pstore_ftrace_call,
};

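/* Serializes enabling/disabling below and protects pstore_ftrace_enabled. */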
static DEFINE_MUTEX(pstore_ftrace_lock);
static bool pstore_ftrace_enabled;

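/*
 * debugfs "record_ftrace" write handler: writing "1" registers
 * pstore_ftrace_ops with ftrace (using the global function filter),
 * writing "0" unregisters it. Typically toggled with e.g.
 * "echo 1 > /sys/kernel/debug/pstore/record_ftrace", assuming debugfs is
 * mounted at the usual /sys/kernel/debug.
 */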
static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	u8 on;
	ssize_t ret;

	ret = kstrtou8_from_user(buf, count, 2, &on);
	if (ret)
		return ret;

	mutex_lock(&pstore_ftrace_lock);

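	/* Already in the requested state; nothing to (un)register. */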
	if (!on ^ pstore_ftrace_enabled)
		goto out;

	if (on) {
		ftrace_ops_set_global_filter(&pstore_ftrace_ops);
		ret = register_ftrace_function(&pstore_ftrace_ops);
	} else {
		ret = unregister_ftrace_function(&pstore_ftrace_ops);
	}

	if (ret) {
		pr_err("%s: unable to %sregister ftrace ops: %zd\n",
		       __func__, on ? "" : "un", ret);
		goto err;
	}

	pstore_ftrace_enabled = on;
out:
	ret = count;
err:
	mutex_unlock(&pstore_ftrace_lock);

	return ret;
}

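/* debugfs read handler: reports "0\n" or "1\n" depending on the enable flag. */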
static ssize_t pstore_ftrace_knob_read(struct file *f, char __user *buf,
				       size_t count, loff_t *ppos)
{
	char val[] = { '0' + pstore_ftrace_enabled, '\n' };

	return simple_read_from_buffer(buf, count, ppos, val, sizeof(val));
}

static const struct file_operations pstore_knob_fops = {
	.open	= simple_open,
	.read	= pstore_ftrace_knob_read,
	.write	= pstore_ftrace_knob_write,
};

static struct dentry *pstore_ftrace_dir;

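/*
 * Create the debugfs control file. Skipped when the registered backend
 * cannot write records (no psinfo->write callback).
 */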
void pstore_register_ftrace(void)
{
	if (!psinfo->write)
		return;

	pstore_ftrace_dir = debugfs_create_dir("pstore", NULL);

	debugfs_create_file("record_ftrace", 0600, pstore_ftrace_dir, NULL,
			    &pstore_knob_fops);
}

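/* Stop tracing if it is still enabled and remove the debugfs entries. */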
void pstore_unregister_ftrace(void)
{
	mutex_lock(&pstore_ftrace_lock);
	if (pstore_ftrace_enabled) {
		unregister_ftrace_function(&pstore_ftrace_ops);
		pstore_ftrace_enabled = false;
	}
	mutex_unlock(&pstore_ftrace_lock);

	debugfs_remove_recursive(pstore_ftrace_dir);
}

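/*
 * Merge a source ftrace log into *dest_log, keeping records ordered by
 * timestamp. On success the old destination buffer is freed and replaced
 * with a freshly allocated merged buffer; on allocation failure -ENOMEM is
 * returned and both inputs are left untouched.
 */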
ssize_t pstore_ftrace_combine_log(char **dest_log, size_t *dest_log_size,
				  const char *src_log, size_t src_log_size)
{
	size_t dest_size, src_size, total, dest_off, src_off;
	size_t dest_idx = 0, src_idx = 0, merged_idx = 0;
	void *merged_buf;
	struct pstore_ftrace_record *drec, *srec, *mrec;
	size_t record_size = sizeof(struct pstore_ftrace_record);

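	/*
	 * Skip any leading bytes that do not form a whole record so both
	 * logs can be treated as arrays of pstore_ftrace_record.
	 */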
	dest_off = *dest_log_size % record_size;
	dest_size = *dest_log_size - dest_off;

	src_off = src_log_size % record_size;
	src_size = src_log_size - src_off;

	total = dest_size + src_size;
	merged_buf = kmalloc(total, GFP_KERNEL);
	if (!merged_buf)
		return -ENOMEM;

	drec = (struct pstore_ftrace_record *)(*dest_log + dest_off);
	srec = (struct pstore_ftrace_record *)(src_log + src_off);
	mrec = (struct pstore_ftrace_record *)(merged_buf);

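	/* Standard two-way merge: take the record with the older timestamp first. */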
	while (dest_size > 0 && src_size > 0) {
		if (pstore_ftrace_read_timestamp(&drec[dest_idx]) <
		    pstore_ftrace_read_timestamp(&srec[src_idx])) {
			mrec[merged_idx++] = drec[dest_idx++];
			dest_size -= record_size;
		} else {
			mrec[merged_idx++] = srec[src_idx++];
			src_size -= record_size;
		}
	}

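	/* Copy whatever remains from the log that was not exhausted. */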
	while (dest_size > 0) {
		mrec[merged_idx++] = drec[dest_idx++];
		dest_size -= record_size;
	}

	while (src_size > 0) {
		mrec[merged_idx++] = srec[src_idx++];
		src_size -= record_size;
	}

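	/* Hand the merged buffer back to the caller and free the old one. */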
	kfree(*dest_log);
	*dest_log = merged_buf;
	*dest_log_size = total;

	return 0;
}
EXPORT_SYMBOL_GPL(pstore_ftrace_combine_log);