xref: /openbmc/linux/arch/x86/kernel/ftrace.c (revision 3d0833953e1b98b79ddf491dd49229eef9baeac1)
1*3d083395SSteven Rostedt /*
2*3d083395SSteven Rostedt  * Code for replacing ftrace calls with jumps.
3*3d083395SSteven Rostedt  *
4*3d083395SSteven Rostedt  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5*3d083395SSteven Rostedt  *
6*3d083395SSteven Rostedt  * Thanks goes to Ingo Molnar, for suggesting the idea.
7*3d083395SSteven Rostedt  * Mathieu Desnoyers, for suggesting postponing the modifications.
8*3d083395SSteven Rostedt  * Arjan van de Ven, for keeping me straight, and explaining to me
9*3d083395SSteven Rostedt  * the dangers of modifying code on the run.
10*3d083395SSteven Rostedt  */
11*3d083395SSteven Rostedt 
12*3d083395SSteven Rostedt #include <linux/spinlock.h>
13*3d083395SSteven Rostedt #include <linux/hardirq.h>
14*3d083395SSteven Rostedt #include <linux/ftrace.h>
15*3d083395SSteven Rostedt #include <linux/percpu.h>
16*3d083395SSteven Rostedt #include <linux/init.h>
17*3d083395SSteven Rostedt #include <linux/list.h>
18*3d083395SSteven Rostedt 
19*3d083395SSteven Rostedt #define CALL_BACK		5
20*3d083395SSteven Rostedt 
21*3d083395SSteven Rostedt #define JMPFWD			0x03eb
22*3d083395SSteven Rostedt 
23*3d083395SSteven Rostedt static unsigned short ftrace_jmp = JMPFWD;
24*3d083395SSteven Rostedt 
/*
 * Bookkeeping wrapper around a dyn_ftrace record: pairs the recorded
 * mcount call site with a per-site failure flag so sites that faulted
 * during patching are skipped on later passes.
 */
struct ftrace_record {
	struct dyn_ftrace	rec;	/* the recorded mcount call site */
	int			failed;	/* non-zero: ftrace_modify_code() failed here */
} __attribute__((packed));
29*3d083395SSteven Rostedt 
/*
 * One page worth of ftrace_records.  Pages are chained into a singly
 * linked list; ENTRIES_PER_PAGE records fit after the header.
 */
struct ftrace_page {
	struct ftrace_page	*next;	/* next page in the chain, NULL at the tail */
	int			index;	/* number of records in use on this page */
	struct ftrace_record	records[];	/* flexible array filling the rest of the page */
} __attribute__((packed));
35*3d083395SSteven Rostedt 
36*3d083395SSteven Rostedt #define ENTRIES_PER_PAGE \
37*3d083395SSteven Rostedt   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct ftrace_record))
38*3d083395SSteven Rostedt 
39*3d083395SSteven Rostedt /* estimate from running different kernels */
40*3d083395SSteven Rostedt #define NR_TO_INIT		10000
41*3d083395SSteven Rostedt 
42*3d083395SSteven Rostedt #define MCOUNT_ADDR ((long)(&mcount))
43*3d083395SSteven Rostedt 
/*
 * Overlay of the 5-byte x86 near-call instruction: one opcode byte
 * (0xe8) followed by a 32-bit relative displacement.  Lets the code
 * build a call instruction field-by-field and read it back as bytes.
 */
union ftrace_code_union {
	char code[5];			/* raw instruction bytes */
	struct {
		char e8;		/* call opcode byte */
		int offset;		/* signed displacement to the target */
	} __attribute__((packed));
};
51*3d083395SSteven Rostedt 
52*3d083395SSteven Rostedt static struct ftrace_page	*ftrace_pages_start;
53*3d083395SSteven Rostedt static struct ftrace_page	*ftrace_pages;
54*3d083395SSteven Rostedt 
55*3d083395SSteven Rostedt notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
56*3d083395SSteven Rostedt {
57*3d083395SSteven Rostedt 	struct ftrace_record *rec;
58*3d083395SSteven Rostedt 	unsigned short save;
59*3d083395SSteven Rostedt 
60*3d083395SSteven Rostedt 	ip -= CALL_BACK;
61*3d083395SSteven Rostedt 	save = *(short *)ip;
62*3d083395SSteven Rostedt 
63*3d083395SSteven Rostedt 	/* If this was already converted, skip it */
64*3d083395SSteven Rostedt 	if (save == JMPFWD)
65*3d083395SSteven Rostedt 		return NULL;
66*3d083395SSteven Rostedt 
67*3d083395SSteven Rostedt 	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
68*3d083395SSteven Rostedt 		if (!ftrace_pages->next)
69*3d083395SSteven Rostedt 			return NULL;
70*3d083395SSteven Rostedt 		ftrace_pages = ftrace_pages->next;
71*3d083395SSteven Rostedt 	}
72*3d083395SSteven Rostedt 
73*3d083395SSteven Rostedt 	rec = &ftrace_pages->records[ftrace_pages->index++];
74*3d083395SSteven Rostedt 
75*3d083395SSteven Rostedt 	return &rec->rec;
76*3d083395SSteven Rostedt }
77*3d083395SSteven Rostedt 
78*3d083395SSteven Rostedt static int notrace
79*3d083395SSteven Rostedt ftrace_modify_code(unsigned long ip, unsigned char *old_code,
80*3d083395SSteven Rostedt 		   unsigned char *new_code)
81*3d083395SSteven Rostedt {
82*3d083395SSteven Rostedt 	unsigned short old = *(unsigned short *)old_code;
83*3d083395SSteven Rostedt 	unsigned short new = *(unsigned short *)new_code;
84*3d083395SSteven Rostedt 	unsigned short replaced;
85*3d083395SSteven Rostedt 	int faulted = 0;
86*3d083395SSteven Rostedt 
87*3d083395SSteven Rostedt 	/*
88*3d083395SSteven Rostedt 	 * Note: Due to modules and __init, code can
89*3d083395SSteven Rostedt 	 *  disappear and change, we need to protect against faulting
90*3d083395SSteven Rostedt 	 *  as well as code changing.
91*3d083395SSteven Rostedt 	 *
92*3d083395SSteven Rostedt 	 * No real locking needed, this code is run through
93*3d083395SSteven Rostedt 	 * kstop_machine.
94*3d083395SSteven Rostedt 	 */
95*3d083395SSteven Rostedt 	asm volatile (
96*3d083395SSteven Rostedt 		"1: lock\n"
97*3d083395SSteven Rostedt 		"   cmpxchg %w3, (%2)\n"
98*3d083395SSteven Rostedt 		"2:\n"
99*3d083395SSteven Rostedt 		".section .fixup, \"ax\"\n"
100*3d083395SSteven Rostedt 		"	movl $1, %0\n"
101*3d083395SSteven Rostedt 		"3:	jmp 2b\n"
102*3d083395SSteven Rostedt 		".previous\n"
103*3d083395SSteven Rostedt 		_ASM_EXTABLE(1b, 3b)
104*3d083395SSteven Rostedt 		: "=r"(faulted), "=a"(replaced)
105*3d083395SSteven Rostedt 		: "r"(ip), "r"(new), "0"(faulted), "a"(old)
106*3d083395SSteven Rostedt 		: "memory");
107*3d083395SSteven Rostedt 	sync_core();
108*3d083395SSteven Rostedt 
109*3d083395SSteven Rostedt 	if (replaced != old)
110*3d083395SSteven Rostedt 		faulted = 2;
111*3d083395SSteven Rostedt 
112*3d083395SSteven Rostedt 	return faulted;
113*3d083395SSteven Rostedt }
114*3d083395SSteven Rostedt 
115*3d083395SSteven Rostedt static int notrace ftrace_calc_offset(long ip)
116*3d083395SSteven Rostedt {
117*3d083395SSteven Rostedt 	return (int)(MCOUNT_ADDR - ip);
118*3d083395SSteven Rostedt }
119*3d083395SSteven Rostedt 
120*3d083395SSteven Rostedt notrace void ftrace_code_disable(struct dyn_ftrace *rec)
121*3d083395SSteven Rostedt {
122*3d083395SSteven Rostedt 	unsigned long ip;
123*3d083395SSteven Rostedt 	union ftrace_code_union save;
124*3d083395SSteven Rostedt 	struct ftrace_record *r =
125*3d083395SSteven Rostedt 		container_of(rec, struct ftrace_record, rec);
126*3d083395SSteven Rostedt 
127*3d083395SSteven Rostedt 	ip = rec->ip;
128*3d083395SSteven Rostedt 
129*3d083395SSteven Rostedt 	save.e8		= 0xe8;
130*3d083395SSteven Rostedt 	save.offset 	= ftrace_calc_offset(ip);
131*3d083395SSteven Rostedt 
132*3d083395SSteven Rostedt 	/* move the IP back to the start of the call */
133*3d083395SSteven Rostedt 	ip -= CALL_BACK;
134*3d083395SSteven Rostedt 
135*3d083395SSteven Rostedt 	r->failed = ftrace_modify_code(ip, save.code, (char *)&ftrace_jmp);
136*3d083395SSteven Rostedt }
137*3d083395SSteven Rostedt 
138*3d083395SSteven Rostedt static void notrace ftrace_replace_code(int saved)
139*3d083395SSteven Rostedt {
140*3d083395SSteven Rostedt 	unsigned char *new = NULL, *old = NULL;
141*3d083395SSteven Rostedt 	struct ftrace_record *rec;
142*3d083395SSteven Rostedt 	struct ftrace_page *pg;
143*3d083395SSteven Rostedt 	unsigned long ip;
144*3d083395SSteven Rostedt 	int i;
145*3d083395SSteven Rostedt 
146*3d083395SSteven Rostedt 	if (saved)
147*3d083395SSteven Rostedt 		old = (char *)&ftrace_jmp;
148*3d083395SSteven Rostedt 	else
149*3d083395SSteven Rostedt 		new = (char *)&ftrace_jmp;
150*3d083395SSteven Rostedt 
151*3d083395SSteven Rostedt 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
152*3d083395SSteven Rostedt 		for (i = 0; i < pg->index; i++) {
153*3d083395SSteven Rostedt 			union ftrace_code_union calc;
154*3d083395SSteven Rostedt 			rec = &pg->records[i];
155*3d083395SSteven Rostedt 
156*3d083395SSteven Rostedt 			/* don't modify code that has already faulted */
157*3d083395SSteven Rostedt 			if (rec->failed)
158*3d083395SSteven Rostedt 				continue;
159*3d083395SSteven Rostedt 
160*3d083395SSteven Rostedt 			ip = rec->rec.ip;
161*3d083395SSteven Rostedt 
162*3d083395SSteven Rostedt 			calc.e8		= 0xe8;
163*3d083395SSteven Rostedt 			calc.offset	= ftrace_calc_offset(ip);
164*3d083395SSteven Rostedt 
165*3d083395SSteven Rostedt 			if (saved)
166*3d083395SSteven Rostedt 				new = calc.code;
167*3d083395SSteven Rostedt 			else
168*3d083395SSteven Rostedt 				old = calc.code;
169*3d083395SSteven Rostedt 
170*3d083395SSteven Rostedt 			ip -= CALL_BACK;
171*3d083395SSteven Rostedt 
172*3d083395SSteven Rostedt 			rec->failed = ftrace_modify_code(ip, old, new);
173*3d083395SSteven Rostedt 		}
174*3d083395SSteven Rostedt 	}
175*3d083395SSteven Rostedt 
176*3d083395SSteven Rostedt }
177*3d083395SSteven Rostedt 
/*
 * Enable tracing: restore the call to mcount at every recorded site
 * (runs through kstop_machine per the note in ftrace_modify_code).
 */
notrace void ftrace_startup_code(void)
{
	ftrace_replace_code(1);
}
182*3d083395SSteven Rostedt 
/*
 * Disable tracing: patch every recorded call site back to the
 * two-byte forward jump.
 */
notrace void ftrace_shutdown_code(void)
{
	ftrace_replace_code(0);
}
187*3d083395SSteven Rostedt 
188*3d083395SSteven Rostedt notrace void ftrace_shutdown_replenish(void)
189*3d083395SSteven Rostedt {
190*3d083395SSteven Rostedt 	if (ftrace_pages->next)
191*3d083395SSteven Rostedt 		return;
192*3d083395SSteven Rostedt 
193*3d083395SSteven Rostedt 	/* allocate another page */
194*3d083395SSteven Rostedt 	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
195*3d083395SSteven Rostedt }
196*3d083395SSteven Rostedt 
197*3d083395SSteven Rostedt notrace int ftrace_shutdown_arch_init(void)
198*3d083395SSteven Rostedt {
199*3d083395SSteven Rostedt 	struct ftrace_page *pg;
200*3d083395SSteven Rostedt 	int cnt;
201*3d083395SSteven Rostedt 	int i;
202*3d083395SSteven Rostedt 
203*3d083395SSteven Rostedt 	/* allocate a few pages */
204*3d083395SSteven Rostedt 	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
205*3d083395SSteven Rostedt 	if (!ftrace_pages_start)
206*3d083395SSteven Rostedt 		return -1;
207*3d083395SSteven Rostedt 
208*3d083395SSteven Rostedt 	/*
209*3d083395SSteven Rostedt 	 * Allocate a few more pages.
210*3d083395SSteven Rostedt 	 *
211*3d083395SSteven Rostedt 	 * TODO: have some parser search vmlinux before
212*3d083395SSteven Rostedt 	 *   final linking to find all calls to ftrace.
213*3d083395SSteven Rostedt 	 *   Then we can:
214*3d083395SSteven Rostedt 	 *    a) know how many pages to allocate.
215*3d083395SSteven Rostedt 	 *     and/or
216*3d083395SSteven Rostedt 	 *    b) set up the table then.
217*3d083395SSteven Rostedt 	 *
218*3d083395SSteven Rostedt 	 *  The dynamic code is still necessary for
219*3d083395SSteven Rostedt 	 *  modules.
220*3d083395SSteven Rostedt 	 */
221*3d083395SSteven Rostedt 
222*3d083395SSteven Rostedt 	pg = ftrace_pages = ftrace_pages_start;
223*3d083395SSteven Rostedt 
224*3d083395SSteven Rostedt 	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
225*3d083395SSteven Rostedt 
226*3d083395SSteven Rostedt 	for (i = 0; i < cnt; i++) {
227*3d083395SSteven Rostedt 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
228*3d083395SSteven Rostedt 
229*3d083395SSteven Rostedt 		/* If we fail, we'll try later anyway */
230*3d083395SSteven Rostedt 		if (!pg->next)
231*3d083395SSteven Rostedt 			break;
232*3d083395SSteven Rostedt 
233*3d083395SSteven Rostedt 		pg = pg->next;
234*3d083395SSteven Rostedt 	}
235*3d083395SSteven Rostedt 
236*3d083395SSteven Rostedt 	return 0;
237*3d083395SSteven Rostedt }
238