1e81dc127SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2e81dc127SThomas Gleixner
3e81dc127SThomas Gleixner #define pr_fmt(fmt) "callthunks: " fmt
4e81dc127SThomas Gleixner
5f5c1bb2aSThomas Gleixner #include <linux/debugfs.h>
6e81dc127SThomas Gleixner #include <linux/kallsyms.h>
7e81dc127SThomas Gleixner #include <linux/memory.h>
8e81dc127SThomas Gleixner #include <linux/moduleloader.h>
97825451fSPeter Zijlstra #include <linux/static_call.h>
10e81dc127SThomas Gleixner
11e81dc127SThomas Gleixner #include <asm/alternative.h>
12bbaceb18SThomas Gleixner #include <asm/asm-offsets.h>
13e81dc127SThomas Gleixner #include <asm/cpu.h>
14e81dc127SThomas Gleixner #include <asm/ftrace.h>
15e81dc127SThomas Gleixner #include <asm/insn.h>
16e81dc127SThomas Gleixner #include <asm/kexec.h>
17e81dc127SThomas Gleixner #include <asm/nospec-branch.h>
18e81dc127SThomas Gleixner #include <asm/paravirt.h>
19e81dc127SThomas Gleixner #include <asm/sections.h>
20e81dc127SThomas Gleixner #include <asm/switch_to.h>
21e81dc127SThomas Gleixner #include <asm/sync_core.h>
22e81dc127SThomas Gleixner #include <asm/text-patching.h>
23e81dc127SThomas Gleixner #include <asm/xen/hypercall.h>
24e81dc127SThomas Gleixner
/* Set via the "debug-callthunks" boot parameter; gates prdbg() output. */
static int __initdata_or_module debug_callthunks;

/* Debug printk wrapper: emits only when "debug-callthunks" was given. */
#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while(0)

/* __setup() handler for "debug-callthunks"; the argument string is unused. */
static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);
39e81dc127SThomas Gleixner
#ifdef CONFIG_CALL_THUNKS_DEBUG
/*
 * Per-CPU accounting counters, incremented from the call/return thunk
 * paths (see the debugfs interface at the bottom of this file).
 * __x86_call_count and __x86_ctxsw_count are exported as modules
 * (e.g. context switch code paths) can reference them.
 */
DEFINE_PER_CPU(u64, __x86_call_count);
DEFINE_PER_CPU(u64, __x86_ret_count);
DEFINE_PER_CPU(u64, __x86_stuffs_count);
DEFINE_PER_CPU(u64, __x86_ctxsw_count);
EXPORT_SYMBOL_GPL(__x86_ctxsw_count);
EXPORT_SYMBOL_GPL(__x86_call_count);
#endif
48f5c1bb2aSThomas Gleixner
/* Compiler emitted table of direct call sites (relative s32 offsets). */
extern s32 __call_sites[], __call_sites_end[];

/*
 * NOTE(review): struct thunk_desc appears to be unreferenced in this
 * file — presumably a leftover from an earlier template scheme; confirm
 * against the rest of the tree before removing.
 */
struct thunk_desc {
	void *template;
	unsigned int template_size;
};

/* Describes one contiguous text range which may contain call sites. */
struct core_text {
	unsigned long base;	/* inclusive start address */
	unsigned long end;	/* exclusive end address */
	const char *name;	/* for debug output only */
};

/* Set once the builtin text was patched; gates all later patching. */
static bool thunks_initialized __ro_after_init;

/* The builtin kernel text range [_text, _etext) */
static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end = (unsigned long)_etext,
	.name = "builtin",
};

/*
 * The accounting thunk template: the INCREMENT_CALL_DEPTH sequence
 * which gets copied into the padding area in front of a function.
 * Emitted into .rodata so its size can be computed at runtime below.
 */
asm (
	".pushsection .rodata \n"
	".global skl_call_thunk_template \n"
	"skl_call_thunk_template: \n"
	__stringify(INCREMENT_CALL_DEPTH)" \n"
	".global skl_call_thunk_tail \n"
	"skl_call_thunk_tail: \n"
	".popsection \n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

/* Byte size of the template sequence above */
#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))
85e81dc127SThomas Gleixner
86e81dc127SThomas Gleixner extern void error_entry(void);
87e81dc127SThomas Gleixner extern void xen_error_entry(void);
88e81dc127SThomas Gleixner extern void paranoid_entry(void);
89e81dc127SThomas Gleixner
within_coretext(const struct core_text * ct,void * addr)90e81dc127SThomas Gleixner static inline bool within_coretext(const struct core_text *ct, void *addr)
91e81dc127SThomas Gleixner {
92e81dc127SThomas Gleixner unsigned long p = (unsigned long)addr;
93e81dc127SThomas Gleixner
94e81dc127SThomas Gleixner return ct->base <= p && p < ct->end;
95e81dc127SThomas Gleixner }
96e81dc127SThomas Gleixner
/*
 * Check whether @addr lies within the core text of any loaded module.
 * Always false when CONFIG_MODULES is disabled.
 */
static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	/* __module_address() must be called with preemption disabled */
	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}
112e81dc127SThomas Gleixner
is_coretext(const struct core_text * ct,void * addr)113e81dc127SThomas Gleixner static bool is_coretext(const struct core_text *ct, void *addr)
114e81dc127SThomas Gleixner {
115e81dc127SThomas Gleixner if (ct && within_coretext(ct, addr))
116e81dc127SThomas Gleixner return true;
117e81dc127SThomas Gleixner if (within_coretext(&builtin_coretext, addr))
118e81dc127SThomas Gleixner return true;
119e81dc127SThomas Gleixner return within_module_coretext(addr);
120e81dc127SThomas Gleixner }
121e81dc127SThomas Gleixner
/*
 * Filter out destinations which must not get an accounting thunk:
 * low level ASM entry points which either handle RSB state themselves
 * or run in contexts where the thunk would be unsafe.
 */
static bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
	if (dest == soft_restart_cpu)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	/* NOTE(review): presumably because the relocation code runs from a copy — confirm */
	if (dest >= (void *)relocate_kernel &&
	    dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
#ifdef CONFIG_XEN
	if (dest >= (void *)hypercall_page &&
	    dest < (void*)hypercall_page + PAGE_SIZE)
		return true;
#endif
	return false;
}
156e81dc127SThomas Gleixner
/*
 * Decode the instruction at @addr and return the destination of a
 * direct CALL. Returns NULL when the site is not (or no longer) a
 * direct call or the destination must be skipped, and an ERR_PTR()
 * when the instruction cannot be decoded.
 */
static __init_or_module void *call_get_dest(void *addr)
{
	struct insn insn;
	void *dest;
	int ret;

	ret = insn_decode_kernel(&insn, addr);
	if (ret)
		return ERR_PTR(ret);

	/* Patched out call? */
	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
		return NULL;

	/* Destination = next instruction + relative displacement */
	dest = addr + insn.length + insn.immediate.value;
	if (skip_addr(dest))
		return NULL;
	return dest;
}
176e81dc127SThomas Gleixner
/* 32 single-byte NOPs (0x90): the expected content of an unpatched padding area. */
static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};
183e81dc127SThomas Gleixner
/*
 * Install the accounting thunk template into the padding area in
 * front of @dest. Returns the thunk address (the padding start) or
 * NULL when the padding does not contain the expected NOPs.
 *
 * @direct: true when the text is not yet live and can be written to
 *	    directly, false when it must go through text poking.
 */
static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 *pad = dest - tsize;

	/* Already patched? */
	if (!bcmp(pad, skl_call_thunk_template, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, skl_call_thunk_template, tsize);
	else
		text_poke_copy_locked(pad, skl_call_thunk_template, tsize, true);
	return pad;
}
205e81dc127SThomas Gleixner
/*
 * Redirect one compiler generated call site at @addr to the
 * accounting thunk in front of its destination. Both the call site
 * and the destination must be core text; anything else is left alone.
 */
static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	/* Ensure/install the thunk; direct write only within the same range */
	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px \n", addr, addr,
		dest, dest, pad);
	/* Rewrite the CALL to target the thunk instead of the function */
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}
230e81dc127SThomas Gleixner
231e81dc127SThomas Gleixner static __init_or_module void
patch_call_sites(s32 * start,s32 * end,const struct core_text * ct)232e81dc127SThomas Gleixner patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
233e81dc127SThomas Gleixner {
234e81dc127SThomas Gleixner s32 *s;
235e81dc127SThomas Gleixner
236e81dc127SThomas Gleixner for (s = start; s < end; s++)
237e81dc127SThomas Gleixner patch_call((void *)s + *s, ct);
238e81dc127SThomas Gleixner }
239e81dc127SThomas Gleixner
240e81dc127SThomas Gleixner static __init_or_module void
patch_paravirt_call_sites(struct paravirt_patch_site * start,struct paravirt_patch_site * end,const struct core_text * ct)241e81dc127SThomas Gleixner patch_paravirt_call_sites(struct paravirt_patch_site *start,
242e81dc127SThomas Gleixner struct paravirt_patch_site *end,
243e81dc127SThomas Gleixner const struct core_text *ct)
244e81dc127SThomas Gleixner {
245e81dc127SThomas Gleixner struct paravirt_patch_site *p;
246e81dc127SThomas Gleixner
247e81dc127SThomas Gleixner for (p = start; p < end; p++)
248e81dc127SThomas Gleixner patch_call(p->instr, ct);
249e81dc127SThomas Gleixner }
250e81dc127SThomas Gleixner
251e81dc127SThomas Gleixner static __init_or_module void
callthunks_setup(struct callthunk_sites * cs,const struct core_text * ct)252e81dc127SThomas Gleixner callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
253e81dc127SThomas Gleixner {
254e81dc127SThomas Gleixner prdbg("Patching call sites %s\n", ct->name);
255e81dc127SThomas Gleixner patch_call_sites(cs->call_start, cs->call_end, ct);
256e81dc127SThomas Gleixner patch_paravirt_call_sites(cs->pv_start, cs->pv_end, ct);
257e81dc127SThomas Gleixner prdbg("Patching call sites done%s\n", ct->name);
258e81dc127SThomas Gleixner }
259e81dc127SThomas Gleixner
/*
 * Boot time entry point: patch all call sites of the builtin kernel
 * text when call depth tracking is enabled, then mark the thunk
 * machinery as initialized so modules/static calls can use it.
 */
void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start = __call_sites,
		.call_end = __call_sites_end,
		.pv_start = __parainstructions,
		.pv_end = __parainstructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	/* text_mutex serializes against other text patching users */
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}
278eaf44c81SThomas Gleixner
/*
 * Translate a call destination to the corresponding accounting thunk
 * if one exists or can be installed. Returns @dest unchanged when no
 * thunk applies. Caller must hold text_mutex.
 */
void *callthunks_translate_call_dest(void *dest)
{
	void *target;

	lockdep_assert_held(&text_mutex);

	if (!thunks_initialized || skip_addr(dest))
		return dest;

	if (!is_coretext(NULL, dest))
		return dest;

	/* patch_dest() returns NULL when there is no usable padding */
	target = patch_dest(dest, false);
	return target ? : dest;
}
2947825451fSPeter Zijlstra
295*301cf77eSIngo Molnar #ifdef CONFIG_BPF_JIT
/*
 * Check whether @addr points into an installed call thunk, i.e. the
 * template copied into the padding right before the next function
 * alignment boundary.
 */
static bool is_callthunk(void *addr)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	void *tmpl = skl_call_thunk_template;
	unsigned long dest;

	/* Round up to the function start the padding precedes */
	dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
	if (!thunks_initialized || skip_addr((void *)dest))
		return false;

	return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size);
}
308396e0b8eSPeter Zijlstra
/*
 * Emit the call depth accounting template into a BPF JIT program
 * buffer at *pprog. Returns the number of bytes emitted (0 when
 * tracking is disabled or @func already is a thunk) and advances
 * *pprog past the emitted bytes.
 */
int x86_call_depth_emit_accounting(u8 **pprog, void *func)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	void *tmpl = skl_call_thunk_template;

	if (!thunks_initialized)
		return 0;

	/* Is function call target a thunk? */
	if (func && is_callthunk(func))
		return 0;

	memcpy(*pprog, tmpl, tmpl_size);
	*pprog += tmpl_size;
	return tmpl_size;
}
325b2e9dfe5SThomas Gleixner #endif
326b2e9dfe5SThomas Gleixner
327eaf44c81SThomas Gleixner #ifdef CONFIG_MODULES
/*
 * Patch the call sites of a freshly loaded module. Invoked from the
 * module loader with the module's callthunk site tables in @cs.
 * No-op until the builtin text has been patched at boot.
 */
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->mem[MOD_TEXT].base,
		.end = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
344eaf44c81SThomas Gleixner #endif /* CONFIG_MODULES */
345f5c1bb2aSThomas Gleixner
346f5c1bb2aSThomas Gleixner #if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
callthunks_debug_show(struct seq_file * m,void * p)347f5c1bb2aSThomas Gleixner static int callthunks_debug_show(struct seq_file *m, void *p)
348f5c1bb2aSThomas Gleixner {
349f5c1bb2aSThomas Gleixner unsigned long cpu = (unsigned long)m->private;
350f5c1bb2aSThomas Gleixner
351f5c1bb2aSThomas Gleixner seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n,",
352f5c1bb2aSThomas Gleixner per_cpu(__x86_call_count, cpu),
353f5c1bb2aSThomas Gleixner per_cpu(__x86_ret_count, cpu),
354f5c1bb2aSThomas Gleixner per_cpu(__x86_stuffs_count, cpu),
355f5c1bb2aSThomas Gleixner per_cpu(__x86_ctxsw_count, cpu));
356f5c1bb2aSThomas Gleixner return 0;
357f5c1bb2aSThomas Gleixner }
358f5c1bb2aSThomas Gleixner
/* seq_file open: pass the per-file CPU number through to the show op. */
static int callthunks_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, callthunks_debug_show, inode->i_private);
}

/* File operations for the per-CPU debugfs counter files */
static const struct file_operations dfs_ops = {
	.open		= callthunks_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
370f5c1bb2aSThomas Gleixner
callthunks_debugfs_init(void)371f5c1bb2aSThomas Gleixner static int __init callthunks_debugfs_init(void)
372f5c1bb2aSThomas Gleixner {
373f5c1bb2aSThomas Gleixner struct dentry *dir;
374f5c1bb2aSThomas Gleixner unsigned long cpu;
375f5c1bb2aSThomas Gleixner
376f5c1bb2aSThomas Gleixner dir = debugfs_create_dir("callthunks", NULL);
377f5c1bb2aSThomas Gleixner for_each_possible_cpu(cpu) {
378f5c1bb2aSThomas Gleixner void *arg = (void *)cpu;
379f5c1bb2aSThomas Gleixner char name [10];
380f5c1bb2aSThomas Gleixner
381f5c1bb2aSThomas Gleixner sprintf(name, "cpu%lu", cpu);
382f5c1bb2aSThomas Gleixner debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
383f5c1bb2aSThomas Gleixner }
384f5c1bb2aSThomas Gleixner return 0;
385f5c1bb2aSThomas Gleixner }
386f5c1bb2aSThomas Gleixner __initcall(callthunks_debugfs_init);
387f5c1bb2aSThomas Gleixner #endif
388