/*
 * Support for MMIO probes.
 * Borrows much code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/mmiotrace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page; /* location of the fault page */
	pteval_t old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */
	int count;

	/* true while on a release list; prevents double scheduling */
	bool scheduled_for_release;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem:
 * Could use the existing prio tree code, or possibly better
 * implementations:
 * The Interval Skip List: A Data Structure for Finding All Intervals That
 * Overlap a Point (might be simple)
 * Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}
/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(f, head, list) {
		if (f->page == page)
			return f;
	}
	return NULL;
}

static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmdval_t v = pmd_val(*pmd);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true previously */
		v |= *old;
	set_pmd(pmd, __pmd(v));
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true previously */
		v |= *old;
	set_pte_atomic(pte, __pte(v));
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->page, &level);

	if (!pte) {
		pr_err("no pte for page 0x%08lx\n", f->page);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(f->page);
	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the struct
 * will not disappear unexpectedly. Furthermore, the caller must guarantee
 * that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming on the other hand is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
	if (f->armed) {
		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
			   f->page, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
		  f->page);
	f->armed = true;
	return ret;
}

/* Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);
	WARN_ONCE(ret < 0,
		  KERN_ERR pr_fmt("disarming 0x%08lx failed.\n"), f->page);
	f->armed = false;
}
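
/*
 * How a single trace event unfolds, tying together the helpers above and
 * the two fault handlers below:
 *
 *  1. An access to an armed (not-present) page faults into kmmio_handler().
 *  2. The matching probe's pre_handler() is called with the faulting regs.
 *  3. TF is set and IF cleared in the faulting context, and the page is
 *     disarmed, so that re-executing the access can succeed.
 *  4. The re-executed access completes and raises a debug trap, which
 *     reaches post_kmmio_handler() via the die notifier at the end of
 *     this file.
 *  5. The probe's post_handler() is called, the page is re-armed if still
 *     registered, and the saved TF/IF flags are restored.
 */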
/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could be executing inside a
 * kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as the page fault uses an interrupt
 * gate, and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (addr == ctx->addr) {
			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(); the
			 * page really not being present is the most common.
			 */
			pr_debug("secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
					addr, smp_processor_id());
		} else {
			/*
			 * Prevent overwriting an already in-flight context.
			 * This should not happen, let's hope disarming at
			 * least prevents a panic.
			 */
			pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}
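
/*
 * Between kmmio_handler() returning 1 and the debug trap arriving in
 * post_kmmio_handler(), the faulting instruction re-executes against the
 * now-present page. Preemption stays disabled and the RCU read lock stays
 * held across that window; both are dropped in post_kmmio_handler().
 */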
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		/*
		 * Debug traps without an active context are due to either
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled), or erroneous behaviour.
		 */
		pr_warning("unexpected debug trap on CPU %d.\n",
			   smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->page));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				     struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		if (!f->scheduled_for_release) {
			f->release_next = *release_list;
			*release_list = f;
			f->scheduled_for_release = true;
		}
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. The events may result only from programming
 * mistakes by accessing addresses before the beginning or past the end of a
 * mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
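
/*
 * A worked example of the page rounding in register_kmmio_probe(), with
 * illustrative numbers: on 4 KiB pages, a probe with p->addr == 0xf001f00
 * and p->len == 0x300 gives size_lim == 0x300 + 0xf00 == 0x1200, so the
 * loop arms the two pages 0xf001000 and 0xf002000, because the probed
 * byte range 0xf001f00..0xf0021ff straddles a page boundary.
 */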
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	while (f) {
		struct kmmio_fault_page *next = f->release_next;
		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			/*
			 * The page was re-registered after being scheduled
			 * for release; take it off this release list so it
			 * can be scheduled again later.
			 */
			*prevp = f->release_next;
			f->release_next = NULL;
			f->scheduled_for_release = false;
		}
		f = *prevp;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after another grace period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective kmmio_fault_page and would
	 * determine that it is not a kmmio fault, when it actually is.
	 * This would lead to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
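
/*
 * Note that the teardown above chains two RCU grace periods: the first,
 * before remove_kmmio_fault_pages() runs, guarantees that no fault handler
 * still holds a pointer to a disarmed page, and only then are the pages
 * unlinked from kmmio_page_table; the second, before
 * rcu_free_kmmio_fault_pages() runs, guarantees that no RCU reader can
 * still be traversing the hash lists, and only then is the memory freed.
 */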
static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;
	unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
			/*
			 * Reset the BS bit in dr6 (pointed by args->err) to
			 * denote completion of processing.
			 */
			*dr6_p &= ~DR_STEP;
			return NOTIFY_STOP;
		}

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			  KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}
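
/*
 * Example usage: a minimal sketch, not part of this file, of how a client
 * such as mmiotrace might drive the interface above. It assumes the
 * kmmio_probe fields and handler signatures declared in
 * <linux/mmiotrace.h>; the my_* names and the traced range are
 * hypothetical. The synchronize_rcu() before kfree() follows the rule
 * documented above unregister_kmmio_probe().
 */
#if 0
static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
		   unsigned long addr)
{
	/* Runs in the page fault, before the access is single-stepped. */
	pr_info("pre: access at 0x%08lx\n", addr);
}

static void my_post(struct kmmio_probe *p, unsigned long condition,
		    struct pt_regs *regs)
{
	/* Runs in the debug trap, after the access has completed. */
	pr_info("post: dr6 condition 0x%08lx\n", condition);
}

static int my_trace_range(unsigned long mmio_addr, unsigned long mmio_len)
{
	struct kmmio_probe *p = kzalloc(sizeof(*p), GFP_KERNEL);
	int ret;

	if (!p)
		return -ENOMEM;
	p->addr = mmio_addr;
	p->len = mmio_len;
	p->pre_handler = my_pre;
	p->post_handler = my_post;
	ret = register_kmmio_probe(p);
	if (ret) {
		kfree(p);
		return ret;
	}
	/* ... accesses to the range are now traced ... */
	unregister_kmmio_probe(p);
	synchronize_rcu();	/* let in-flight handlers finish */
	kfree(p);
	return 0;
}
#endif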