/*
 * Memory mapped I/O tracing
 *
 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
 */

#define DEBUG 1	/* enable pr_debug() output */

#include <linux/kernel.h>
#include <linux/mmiotrace.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/atomic.h>

#include "trace.h"

struct header_iter {
	struct pci_dev *dev;
};

static struct trace_array *mmio_trace_array;
static bool overrun_detected;
/*
 * prev_overruns remembers the ring buffer overrun count seen at the
 * previous read; dropped_count counts events we failed to reserve
 * buffer space for.
 */
static unsigned long prev_overruns;
static atomic_t dropped_count;

static void mmio_reset_data(struct trace_array *tr)
{
	overrun_detected = false;
	prev_overruns = 0;

	tracing_reset_online_cpus(tr);
}

static int mmio_trace_init(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_trace_array = tr;

	mmio_reset_data(tr);
	enable_mmiotrace();
	return 0;
}

static void mmio_trace_reset(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);

	disable_mmiotrace();
	mmio_reset_data(tr);
	mmio_trace_array = NULL;
}

static void mmio_trace_start(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_reset_data(tr);
}

static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
	int ret = 0;
	int i;
	resource_size_t start, end;
	const struct pci_driver *drv = pci_dev_driver(dev);

	/* XXX: incomplete checks for trace_seq_printf() return value */
	ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
				dev->bus->number, dev->devfn,
				dev->vendor, dev->device, dev->irq);
	/*
	 * XXX: is pci_resource_to_user() appropriate, since we are
	 * supposed to interpret the __ioremap() phys_addr argument based on
	 * these printed values?
	 */
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			(unsigned long long)(start |
			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	}
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			dev->resource[i].start < dev->resource[i].end ?
			(unsigned long long)(end - start) + 1 : 0);
	}
	if (drv)
		ret += trace_seq_printf(s, " %s\n", drv->name);
	else
		ret += trace_seq_printf(s, " \n");
	return ret;
}

static void destroy_header_iter(struct header_iter *hiter)
{
	if (!hiter)
		return;
	pci_dev_put(hiter->dev);
	kfree(hiter);
}

static void mmio_pipe_open(struct trace_iterator *iter)
{
	struct header_iter *hiter;
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "VERSION 20070824\n");

	hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
	if (!hiter)
		return;

	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
	iter->private = hiter;
}
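/*
 * Illustrative sample of the dump header: mmio_pipe_open() above emits
 * the VERSION line, and mmio_read() below emits one PCIDEV line per
 * device via mmio_print_pcidev() — bus/devfn, vendor/device ids, irq,
 * seven base addresses, seven region sizes, and the bound driver's
 * name. Values here are made up, not from a real device:
 *
 *   VERSION 20070824
 *   PCIDEV 0010 8086293e 10 fdff4000 0 0 0 0 0 0 4000 0 0 0 0 0 0 snd_hda_intel
 */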
/* XXX: This is not called when the pipe is closed! */
static void mmio_close(struct trace_iterator *iter)
{
	struct header_iter *hiter = iter->private;
	destroy_header_iter(hiter);
	iter->private = NULL;
}

/*
 * Number of events lost since the last read: explicit drops recorded
 * in dropped_count, plus any new ring buffer overruns.
 */
static unsigned long count_overruns(struct trace_iterator *iter)
{
	unsigned long cnt = atomic_xchg(&dropped_count, 0);
	unsigned long over = ring_buffer_overruns(iter->tr->buffer);

	if (over > prev_overruns)
		cnt += over - prev_overruns;
	prev_overruns = over;
	return cnt;
}

static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
			 char __user *ubuf, size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	struct header_iter *hiter = iter->private;
	struct trace_seq *s = &iter->seq;
	unsigned long n;

	n = count_overruns(iter);
	if (n) {
		/* XXX: This is later than where events were lost. */
		trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
		if (!overrun_detected)
			pr_warning("mmiotrace has lost events.\n");
		overrun_detected = true;
		goto print_out;
	}

	if (!hiter)
		return 0;

	mmio_print_pcidev(s, hiter->dev);
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

	if (!hiter->dev) {
		destroy_header_iter(hiter);
		iter->private = NULL;
	}

print_out:
	ret = trace_seq_to_user(s, ubuf, cnt);
	return (ret == -EBUSY) ? 0 : ret;
}

static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_rw *field;
	struct mmiotrace_rw *rw;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, 1000000ULL);
	unsigned long secs = (unsigned long)t;
	int ret = 1;

	trace_assign_type(field, entry);
	rw = &field->rw;

	switch (rw->opcode) {
	case MMIO_READ:
		ret = trace_seq_printf(s,
			"R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_WRITE:
		ret = trace_seq_printf(s,
			"W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_UNKNOWN_OP:
		ret = trace_seq_printf(s,
			"UNKNOWN %lu.%06lu %d 0x%llx %02x,%02x,%02x 0x%lx %d\n",
			secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
			(rw->value >> 0) & 0xff, rw->pc, 0);
		break;
	default:
		ret = trace_seq_printf(s, "rw what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}
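/*
 * Illustrative samples of the event lines produced by mmio_print_rw()
 * above and mmio_print_map() below (all values made up):
 *
 *   R 4 2.356026 1 0xfd524000 0x12345678 0xc01eaf30 0
 *   W 2 2.356039 1 0xfd524002 0xbeef 0xc01eaf58 0
 *   MAP 2.001211 1 0xfd000000 0xf1000000 0x80000 0x0 0
 *   UNMAP 2.998003 1 0x0 0
 *
 * For R/W the fields are: access width, timestamp, map id, physical
 * address, value, caller PC, and a trailing reserved 0.
 */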
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_map *field;
	struct mmiotrace_map *m;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, 1000000ULL);
	unsigned long secs = (unsigned long)t;
	int ret;

	trace_assign_type(field, entry);
	m = &field->map;

	switch (m->opcode) {
	case MMIO_PROBE:
		ret = trace_seq_printf(s,
			"MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
			secs, usec_rem, m->map_id,
			(unsigned long long)m->phys, m->virt, m->len,
			0UL, 0);
		break;
	case MMIO_UNPROBE:
		ret = trace_seq_printf(s,
			"UNMAP %lu.%06lu %d 0x%lx %d\n",
			secs, usec_rem, m->map_id, 0UL, 0);
		break;
	default:
		ret = trace_seq_printf(s, "map what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct print_entry *print = (struct print_entry *)entry;
	const char *msg = print->buf;
	struct trace_seq *s = &iter->seq;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, 1000000ULL);
	unsigned long secs = (unsigned long)t;
	int ret;

	/* The trailing newline must be in the message. */
	ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (entry->flags & TRACE_FLAG_CONT)
		trace_seq_print_cont(s, iter);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t mmio_print_line(struct trace_iterator *iter)
{
	switch (iter->ent->type) {
	case TRACE_MMIO_RW:
		return mmio_print_rw(iter);
	case TRACE_MMIO_MAP:
		return mmio_print_map(iter);
	case TRACE_PRINT:
		return mmio_print_mark(iter);
	default:
		return TRACE_TYPE_HANDLED; /* ignore unknown entries */
	}
}

static struct tracer mmio_tracer __read_mostly =
{
	.name		= "mmiotrace",
	.init		= mmio_trace_init,
	.reset		= mmio_trace_reset,
	.start		= mmio_trace_start,
	.pipe_open	= mmio_pipe_open,
	.close		= mmio_close,
	.read		= mmio_read,
	.print_line	= mmio_print_line,
};

static int __init init_mmio_trace(void)
{
	return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);

static void __trace_mmiotrace_rw(struct trace_array *tr,
				 struct trace_array_cpu *data,
				 struct mmiotrace_rw *rw)
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event) {
		/* The ring buffer is full; account the lost event. */
		atomic_inc(&dropped_count);
		return;
	}
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
	entry->ent.type = TRACE_MMIO_RW;
	entry->rw = *rw;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
}

/*
 * Called via kmmio from the fault handling path, with preemption
 * already disabled, so smp_processor_id() is stable here without an
 * explicit preempt_disable().
 */
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data = tr->data[smp_processor_id()];
	__trace_mmiotrace_rw(tr, data, rw);
}

static void __trace_mmiotrace_map(struct trace_array *tr,
				  struct trace_array_cpu *data,
				  struct mmiotrace_map *map)
{
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	unsigned long irq_flags;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event) {
		/* The ring buffer is full; account the lost event. */
		atomic_inc(&dropped_count);
		return;
	}
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
	entry->ent.type = TRACE_MMIO_MAP;
	entry->map = *map;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();
}

void mmio_trace_mapping(struct mmiotrace_map *map)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data;

	/* May be called from preemptible context, so pin the CPU. */
	preempt_disable();
	data = tr->data[smp_processor_id()];
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}
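/*
 * Markers injected from user space arrive via mmio_trace_printk()
 * below and are rendered by mmio_print_mark() as MARK lines, e.g.
 * (illustrative text, not from a real trace):
 *
 *   MARK 3.141593 hardware hang reproduced
 *
 * Note that mmio_print_mark() relies on the message carrying its own
 * trailing newline.
 */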
int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, -1, fmt, args);
}
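/*
 * Minimal usage sketch, assuming debugfs is mounted at /debug as in
 * the mmiotrace documentation of this era:
 *
 *   mount -t debugfs debugfs /debug
 *   echo mmiotrace > /debug/tracing/current_tracer
 *   cat /debug/tracing/trace_pipe > mydump.txt &
 *   ... exercise the driver under test ...
 *   echo nop > /debug/tracing/current_tracer
 */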