/* xref: /openbmc/linux/kernel/trace/trace_mmiotrace.c (revision f0868d1e23a8efec33beb3aa688aab7fdb1ae093) */
/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#define DEBUG 1

#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>

#include "trace.h"
#include "trace_output.h"

struct header_iter {
	struct pci_dev *dev;
};

static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;

static void mmio_reset_data(struct trace_array *tr)
{
	overrun_detected = false;
	prev_overruns = 0;

	tracing_reset_online_cpus(tr);
}

static int mmio_trace_init(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_trace_array = tr;

	mmio_reset_data(tr);
	enable_mmiotrace();
	return 0;
}

static void mmio_trace_reset(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);

	disable_mmiotrace();
	mmio_reset_data(tr);
	mmio_trace_array = NULL;
}

static void mmio_trace_start(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_reset_data(tr);
}

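/*
 * Emit one "PCIDEV" header line for @dev: bus number and devfn, vendor and
 * device IDs, IRQ, then the user-visible base address and size of each of
 * the seven PCI resources, and finally the name of the bound driver, if any.
 */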
static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
	int ret = 0;
	int i;
	resource_size_t start, end;
	const struct pci_driver *drv = pci_dev_driver(dev);

	/* XXX: incomplete checks for trace_seq_printf() return value */
	ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
				dev->bus->number, dev->devfn,
				dev->vendor, dev->device, dev->irq);
	/*
	 * XXX: is pci_resource_to_user() appropriate, since we are
	 * supposed to interpret the __ioremap() phys_addr argument based on
	 * these printed values?
	 */
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			(unsigned long long)(start |
			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	}
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			dev->resource[i].start < dev->resource[i].end ?
			(unsigned long long)(end - start) + 1 : 0);
	}
	if (drv)
		ret += trace_seq_printf(s, " %s\n", drv->name);
	else
		ret += trace_seq_printf(s, " \n");
	return ret;
}

static void destroy_header_iter(struct header_iter *hiter)
{
	if (!hiter)
		return;
	pci_dev_put(hiter->dev);
	kfree(hiter);
}

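/*
 * pipe_open callback: print the trace format version line and start walking
 * the PCI device list, so that mmio_read() can emit one PCIDEV header line
 * per device before any trace data.
 */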
static void mmio_pipe_open(struct trace_iterator *iter)
{
	struct header_iter *hiter;
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "VERSION 20070824\n");

	hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
	if (!hiter)
		return;

	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
	iter->private = hiter;
}

/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
	struct header_iter *hiter = iter->private;
	destroy_header_iter(hiter);
	iter->private = NULL;
}

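/*
 * Return how many events the ring buffer has dropped since the previous
 * call, by comparing the current overrun count against prev_overruns.
 */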
static unsigned long count_overruns(struct trace_iterator *iter)
{
	unsigned long cnt = 0;
	unsigned long over = ring_buffer_overruns(iter->tr->buffer);

	if (over > prev_overruns)
		cnt = over - prev_overruns;
	prev_overruns = over;
	return cnt;
}

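/*
 * read callback for the trace pipe: report lost events as a MARK line, or
 * emit the next PCIDEV header line, then copy the formatted text to user
 * space with trace_seq_to_user().
 */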
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
				char __user *ubuf, size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	struct header_iter *hiter = iter->private;
	struct trace_seq *s = &iter->seq;
	unsigned long n;

	n = count_overruns(iter);
	if (n) {
		/* XXX: This is later than where events were lost. */
		trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
		if (!overrun_detected)
			pr_warning("mmiotrace has lost events.\n");
		overrun_detected = true;
		goto print_out;
	}

	if (!hiter)
		return 0;

	mmio_print_pcidev(s, hiter->dev);
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

	if (!hiter->dev) {
		destroy_header_iter(hiter);
		iter->private = NULL;
	}

print_out:
	ret = trace_seq_to_user(s, ubuf, cnt);
	return (ret == -EBUSY) ? 0 : ret;
}

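/*
 * Format an MMIO access entry as an "R", "W" or "UNKNOWN" line of the
 * mmiotrace text protocol, with the timestamp split into seconds and
 * microseconds.
 */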
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_rw *field;
	struct mmiotrace_rw *rw;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, 1000000ULL);
	unsigned long secs	= (unsigned long)t;
	int ret = 1;

	trace_assign_type(field, entry);
	rw = &field->rw;

	switch (rw->opcode) {
	case MMIO_READ:
		ret = trace_seq_printf(s,
			"R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_WRITE:
		ret = trace_seq_printf(s,
			"W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_UNKNOWN_OP:
		ret = trace_seq_printf(s,
			"UNKNOWN %lu.%06lu %d 0x%llx %02lx,%02lx,%02lx 0x%lx %d\n",
			secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
			(rw->value >> 0) & 0xff, rw->pc, 0);
		break;
	default:
		ret = trace_seq_printf(s, "rw what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}

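/*
 * Format a mapping event as a "MAP" or "UNMAP" line of the mmiotrace text
 * protocol.
 */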
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_map *field;
	struct mmiotrace_map *m;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, 1000000ULL);
	unsigned long secs	= (unsigned long)t;
	int ret;

	trace_assign_type(field, entry);
	m = &field->map;

	switch (m->opcode) {
	case MMIO_PROBE:
		ret = trace_seq_printf(s,
			"MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
			secs, usec_rem, m->map_id,
			(unsigned long long)m->phys, m->virt, m->len,
			0UL, 0);
		break;
	case MMIO_UNPROBE:
		ret = trace_seq_printf(s,
			"UNMAP %lu.%06lu %d 0x%lx %d\n",
			secs, usec_rem, m->map_id, 0UL, 0);
		break;
	default:
		ret = trace_seq_printf(s, "map what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}

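/*
 * Format a user marker (a TRACE_PRINT entry) as a "MARK" line; the message
 * itself is expected to carry the trailing newline.
 */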
static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct print_entry *print = (struct print_entry *)entry;
	const char *msg		= print->buf;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, 1000000ULL);
	unsigned long secs	= (unsigned long)t;
	int ret;

	/* The trailing newline must be in the message. */
	ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

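/*
 * print_line callback: dispatch on the trace entry type; entries that do
 * not belong to mmiotrace are consumed silently.
 */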
static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
	switch (iter->ent->type) {
	case TRACE_MMIO_RW:
		return mmio_print_rw(iter);
	case TRACE_MMIO_MAP:
		return mmio_print_map(iter);
	case TRACE_PRINT:
		return mmio_print_mark(iter);
	default:
		return TRACE_TYPE_HANDLED; /* ignore unknown entries */
	}
}

static struct tracer mmio_tracer __read_mostly =
{
	.name		= "mmiotrace",
	.init		= mmio_trace_init,
	.reset		= mmio_trace_reset,
	.start		= mmio_trace_start,
	.pipe_open	= mmio_pipe_open,
	.close		= mmio_close,
	.read		= mmio_read,
	.print_line	= mmio_print_line,
};

__init static int init_mmio_trace(void)
{
	return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);

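/*
 * Reserve a TRACE_MMIO_RW event in the ring buffer, copy the access record
 * into it, commit it and wake up any readers waiting on the trace pipe.
 */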
static void __trace_mmiotrace_rw(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_rw *rw)
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	unsigned long irq_flags;

	event	= ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					   &irq_flags);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
	entry->ent.type			= TRACE_MMIO_RW;
	entry->rw			= *rw;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
}

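/*
 * Record a single MMIO read or write access in the trace buffer of the
 * current CPU.
 */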
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data = tr->data[smp_processor_id()];
	__trace_mmiotrace_rw(tr, data, rw);
}

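/*
 * Same as __trace_mmiotrace_rw(), but records a TRACE_MMIO_MAP entry for a
 * mapping event.
 */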
static void __trace_mmiotrace_map(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_map *map)
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	unsigned long irq_flags;

	event	= ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					   &irq_flags);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
	entry->ent.type			= TRACE_MMIO_MAP;
	entry->map			= *map;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
}

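/*
 * Record an ioremap/iounmap event; preemption is disabled so that the
 * per-CPU trace data of the current CPU is used consistently.
 */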
void mmio_trace_mapping(struct mmiotrace_map *map)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data;

	preempt_disable();
	data = tr->data[smp_processor_id()];
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}

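/*
 * Record a user marker string as a TRACE_PRINT entry via trace_vprintk().
 */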
int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, -1, fmt, args);
}