/* Support for MMIO probes.
 * Borrows much code from kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page; /* location of the fault page */
	pteval_t old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */
	int count;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}
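
/*
 * With KMMIO_PAGE_HASH_BITS = 4, every armed page hashes into one of 16
 * buckets. Pages that collide simply chain on the bucket's list and are
 * told apart by the exact-match walk in get_kmmio_fault_page() below.
 */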

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem. We could use the existing
 * prio tree code, but possibly better implementations exist:
 * "The Interval Skip List: A Data Structure for Finding All Intervals That
 * Overlap a Point" (might be simple)
 * "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(f, head, list) {
		if (f->page == page)
			return f;
	}
	return NULL;
}

static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmdval_t v = pmd_val(*pmd);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true previously */
		v |= *old;
	set_pmd(pmd, __pmd(v));
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);
	if (clear) {
		*old = v & _PAGE_PRESENT;
		v &= ~_PAGE_PRESENT;
	} else	/* presume this has been called with clear==true previously */
		v |= *old;
	set_pte_atomic(pte, __pte(v));
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->page, &level);

	if (!pte) {
		pr_err("kmmio: no pte for page 0x%08lx\n", f->page);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("kmmio: unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(f->page);
	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. The RCU read lock is assumed held, so the
 * struct will not disappear unexpectedly. Furthermore, the caller must
 * guarantee that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming on the other hand is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
	if (f->armed) {
		pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
					f->page, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
	f->armed = true;
	return ret;
}

/* Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);
	WARN_ONCE(ret < 0,
			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
	f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could already be executing inside
 * a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as the page fault (trap 14) uses an
 * interrupt gate, and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (addr == ctx->addr) {
			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(), the
			 * page really not being present is the most common.
			 */
			pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
					addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("kmmio: unexpected secondary hit for "
					"address 0x%08lx on CPU %d.\n", addr,
					smp_processor_id());
		} else {
			/*
			 * Prevent overwriting an already in-flight context.
			 * This should not happen, let's hope disarming at
			 * least prevents a panic.
			 */
			pr_emerg("kmmio: recursive probe hit on CPU %d, "
					"for address 0x%08lx. Ignoring.\n",
					smp_processor_id(), addr);
			pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
						ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}
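
/*
 * Life cycle of an armed page, as implemented above and below: arming
 * clears _PAGE_PRESENT, so the next access faults into kmmio_handler(),
 * which calls the probe's pre_handler, disarms the page and sets TF to
 * single-step the faulting instruction. The resulting debug trap lands
 * in post_kmmio_handler(), which calls the post_handler and re-arms the
 * page if it is still registered (count > 0).
 */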

/*
 * Interrupts are disabled on entry as trap 1 (debug) uses an interrupt
 * gate, and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		/*
		 * Debug traps without an active context are due to either
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled), or erroneous behaviour.
		 */
		pr_warning("kmmio: unexpected debug trap on CPU %d.\n",
							smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->page));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		f->release_next = *release_list;
		*release_list = f;
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. Such events can only result from
 * programming mistakes: accessing addresses before the beginning or past
 * the end of a mapping.
 */
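
/*
 * Worked example of the page-span arithmetic below (illustrative values):
 * with 4 KiB pages, p->addr = 0xd0001ffc and p->len = 8 give an offset of
 * p->addr & ~PAGE_MASK = 0xffc, so size_lim = 8 + 0xffc = 0x1004 and the
 * loop arms two pages, 0xd0001000 and 0xd0002000, covering the 8-byte
 * window that straddles the page boundary.
 */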
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("kmmio: Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
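
/*
 * Usage sketch (not part of this file; a minimal, hypothetical client).
 * The handler signatures follow the way pre_handler and post_handler are
 * invoked from kmmio_handler() and post_kmmio_handler() above; the names
 * my_pre, my_post, my_probe, my_iobase and MY_IOLEN are made up for
 * illustration:
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *						unsigned long addr)
 *	{
 *		pr_info("mmio access at 0x%08lx\n", addr);
 *	}
 *
 *	static void my_post(struct kmmio_probe *p, unsigned long condition,
 *						struct pt_regs *regs)
 *	{
 *		pr_info("mmio access completed\n");
 *	}
 *
 *	static struct kmmio_probe my_probe = {
 *		.addr		= (unsigned long)my_iobase,
 *		.len		= MY_IOLEN,
 *		.pre_handler	= my_pre,
 *		.post_handler	= my_post,
 *	};
 *
 *	if (register_kmmio_probe(&my_probe))
 *		pr_err("kmmio probe registration failed\n");
 */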

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *f = dr->release_list;
	while (f) {
		struct kmmio_fault_page *next = f->release_next;
		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			*prevp = f->release_next;
		}
		f = f->release_next;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after the RCU grace
 *    period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. An RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective kmmio_fault_page and would wrongly
	 * decide that the fault is not a kmmio fault, when it actually is.
	 * This would lead to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
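
/*
 * Teardown sketch (hypothetical, continuing the registration example
 * above). As the comment before unregister_kmmio_probe() explains, an
 * RCU grace period must pass before the probe may be released:
 *
 *	unregister_kmmio_probe(&my_probe);
 *	synchronize_rcu();
 *
 * Only after synchronize_rcu() returns is it guaranteed that the
 * handlers are no longer called and my_probe may be freed or reused.
 */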

static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;

	if (val == DIE_DEBUG && (arg->err & DR_STEP))
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}
573