// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "callthunks: " fmt

#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/moduleloader.h>
#include <linux/static_call.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/switch_to.h>
#include <asm/sync_core.h>
#include <asm/text-patching.h>
#include <asm/xen/hypercall.h>

static int __initdata_or_module debug_callthunks;

#define prdbg(fmt, args...)                                     \
do {                                                            \
        if (debug_callthunks)                                   \
                printk(KERN_DEBUG pr_fmt(fmt), ##args);         \
} while(0)

static int __init debug_thunks(char *str)
{
        debug_callthunks = 1;
        return 1;
}
__setup("debug-callthunks", debug_thunks);

#ifdef CONFIG_CALL_THUNKS_DEBUG
DEFINE_PER_CPU(u64, __x86_call_count);
DEFINE_PER_CPU(u64, __x86_ret_count);
DEFINE_PER_CPU(u64, __x86_stuffs_count);
DEFINE_PER_CPU(u64, __x86_ctxsw_count);
EXPORT_SYMBOL_GPL(__x86_ctxsw_count);
EXPORT_SYMBOL_GPL(__x86_call_count);
#endif

extern s32 __call_sites[], __call_sites_end[];

struct thunk_desc {
        void            *template;
        unsigned int    template_size;
};

struct core_text {
        unsigned long   base;
        unsigned long   end;
        const char      *name;
};

static bool thunks_initialized __ro_after_init;

static const struct core_text builtin_coretext = {
        .base = (unsigned long)_text,
        .end  = (unsigned long)_etext,
        .name = "builtin",
};

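/*
 * Call depth accounting template. It is copied into the padding area in
 * front of a patched function, so that a call which is redirected into the
 * padding first runs INCREMENT_CALL_DEPTH and then falls through into the
 * function proper.
 */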
asm (
        ".pushsection .rodata                   \n"
        ".global skl_call_thunk_template        \n"
        "skl_call_thunk_template:               \n"
                __stringify(INCREMENT_CALL_DEPTH)" \n"
        ".global skl_call_thunk_tail            \n"
        "skl_call_thunk_tail:                   \n"
        ".popsection                            \n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

#define SKL_TMPL_SIZE \
        ((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))

extern void error_entry(void);
extern void xen_error_entry(void);
extern void paranoid_entry(void);

static inline bool within_coretext(const struct core_text *ct, void *addr)
{
        unsigned long p = (unsigned long)addr;

        return ct->base <= p && p < ct->end;
}

static inline bool within_module_coretext(void *addr)
{
        bool ret = false;

#ifdef CONFIG_MODULES
        struct module *mod;

        preempt_disable();
        mod = __module_address((unsigned long)addr);
        if (mod && within_module_core((unsigned long)addr, mod))
                ret = true;
        preempt_enable();
#endif
        return ret;
}

static bool is_coretext(const struct core_text *ct, void *addr)
{
        if (ct && within_coretext(ct, addr))
                return true;
        if (within_coretext(&builtin_coretext, addr))
                return true;
        return within_module_coretext(addr);
}

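/*
 * Call destinations which must not be redirected through the call depth
 * accounting thunk.
 */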
static bool skip_addr(void *dest)
{
        if (dest == error_entry)
                return true;
        if (dest == paranoid_entry)
                return true;
        if (dest == xen_error_entry)
                return true;
        /* Does FILL_RSB... */
        if (dest == __switch_to_asm)
                return true;
        /* Accounts directly */
        if (dest == ret_from_fork)
                return true;
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
        if (dest == soft_restart_cpu)
                return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
        if (dest == __fentry__)
                return true;
#endif
#ifdef CONFIG_KEXEC_CORE
        if (dest >= (void *)relocate_kernel &&
            dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
                return true;
#endif
        return false;
}

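/*
 * Decode the call instruction at @addr and return its destination.
 * Returns NULL when the call has been patched out or targets an address
 * which must be skipped, and an ERR_PTR() when decoding fails.
 */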
static __init_or_module void *call_get_dest(void *addr)
{
        struct insn insn;
        void *dest;
        int ret;

        ret = insn_decode_kernel(&insn, addr);
        if (ret)
                return ERR_PTR(ret);

        /* Patched out call? */
        if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
                return NULL;

        dest = addr + insn.length + insn.immediate.value;
        if (skip_addr(dest))
                return NULL;
        return dest;
}

static const u8 nops[] = {
        0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
        0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
        0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
        0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

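/*
 * Install the accounting template into the padding area in front of @dest.
 * With @direct set the template is written with a plain memcpy(), otherwise
 * via text_poke_copy_locked(). Returns the start of the thunk (the padding
 * area), or NULL when the padding does not consist of NOPs.
 */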
static void *patch_dest(void *dest, bool direct)
{
        unsigned int tsize = SKL_TMPL_SIZE;
        u8 *pad = dest - tsize;

        /* Already patched? */
        if (!bcmp(pad, skl_call_thunk_template, tsize))
                return pad;

        /* Ensure there are nops */
        if (bcmp(pad, nops, tsize)) {
                pr_warn_once("Invalid padding area for %pS\n", dest);
                return NULL;
        }

        if (direct)
                memcpy(pad, skl_call_thunk_template, tsize);
        else
                text_poke_copy_locked(pad, skl_call_thunk_template, tsize, true);
        return pad;
}

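/*
 * Redirect the call instruction at @addr to the accounting thunk in front
 * of its destination, provided both the call site and the destination are
 * builtin or module core text.
 */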
static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
        void *pad, *dest;
        u8 bytes[8];

        if (!within_coretext(ct, addr))
                return;

        dest = call_get_dest(addr);
        if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
                return;

        if (!is_coretext(ct, dest))
                return;

        pad = patch_dest(dest, within_coretext(ct, dest));
        if (!pad)
                return;

        prdbg("Patch call at: %pS %px to %pS %px -> %px \n", addr, addr,
                dest, dest, pad);
        __text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
        text_poke_early(addr, bytes, CALL_INSN_SIZE);
}

static __init_or_module void
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
{
        s32 *s;

        for (s = start; s < end; s++)
                patch_call((void *)s + *s, ct);
}

static __init_or_module void
patch_paravirt_call_sites(struct paravirt_patch_site *start,
                          struct paravirt_patch_site *end,
                          const struct core_text *ct)
{
        struct paravirt_patch_site *p;

        for (p = start; p < end; p++)
                patch_call(p->instr, ct);
}

static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
        prdbg("Patching call sites %s\n", ct->name);
        patch_call_sites(cs->call_start, cs->call_end, ct);
        patch_paravirt_call_sites(cs->pv_start, cs->pv_end, ct);
        prdbg("Patching call sites done %s\n", ct->name);
}

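/*
 * Patch all call sites in the builtin kernel text so that they go through
 * the call depth accounting thunks. Invoked once during boot when
 * X86_FEATURE_CALL_DEPTH is enabled.
 */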
void __init callthunks_patch_builtin_calls(void)
{
        struct callthunk_sites cs = {
                .call_start     = __call_sites,
                .call_end       = __call_sites_end,
                .pv_start       = __parainstructions,
                .pv_end         = __parainstructions_end
        };

        if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
                return;

        pr_info("Setting up call depth tracking\n");
        mutex_lock(&text_mutex);
        callthunks_setup(&cs, &builtin_coretext);
        thunks_initialized = true;
        mutex_unlock(&text_mutex);
}

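/*
 * Translate a call destination into its accounting thunk so that text
 * patching code (e.g. static calls) keeps the call depth accounting
 * intact. Returns @dest unchanged when thunks are not initialized or
 * @dest must not be redirected.
 */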
void *callthunks_translate_call_dest(void *dest)
{
        void *target;

        lockdep_assert_held(&text_mutex);

        if (!thunks_initialized || skip_addr(dest))
                return dest;

        if (!is_coretext(NULL, dest))
                return dest;

        target = patch_dest(dest, false);
        return target ? : dest;
}

#ifdef CONFIG_BPF_JIT
static bool is_callthunk(void *addr)
{
        unsigned int tmpl_size = SKL_TMPL_SIZE;
        void *tmpl = skl_call_thunk_template;
        unsigned long dest;

        dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
        if (!thunks_initialized || skip_addr((void *)dest))
                return false;

        return !bcmp((void *)(dest - tmpl_size), tmpl, tmpl_size);
}

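/*
 * Emit the accounting template into a BPF JIT buffer at *pprog unless the
 * call target @func is already preceded by a call thunk. Returns the
 * number of bytes emitted.
 */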
int x86_call_depth_emit_accounting(u8 **pprog, void *func)
{
        unsigned int tmpl_size = SKL_TMPL_SIZE;
        void *tmpl = skl_call_thunk_template;

        if (!thunks_initialized)
                return 0;

        /* Is function call target a thunk? */
        if (func && is_callthunk(func))
                return 0;

        memcpy(*pprog, tmpl, tmpl_size);
        *pprog += tmpl_size;
        return tmpl_size;
}
#endif

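/*
 * Patch the call sites of a freshly loaded module so that they go through
 * the accounting thunks as well.
 */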
#ifdef CONFIG_MODULES
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
                                            struct module *mod)
{
        struct core_text ct = {
                .base = (unsigned long)mod->mem[MOD_TEXT].base,
                .end  = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
                .name = mod->name,
        };

        if (!thunks_initialized)
                return;

        mutex_lock(&text_mutex);
        callthunks_setup(cs, &ct);
        mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */

#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
static int callthunks_debug_show(struct seq_file *m, void *p)
{
        unsigned long cpu = (unsigned long)m->private;

        seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n",
                   per_cpu(__x86_call_count, cpu),
                   per_cpu(__x86_ret_count, cpu),
                   per_cpu(__x86_stuffs_count, cpu),
                   per_cpu(__x86_ctxsw_count, cpu));
        return 0;
}

static int callthunks_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, callthunks_debug_show, inode->i_private);
}

static const struct file_operations dfs_ops = {
        .open           = callthunks_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

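/*
 * Expose the per-CPU counters under debugfs as callthunks/cpu<N>, one file
 * per possible CPU.
 */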
static int __init callthunks_debugfs_init(void)
{
        struct dentry *dir;
        unsigned long cpu;

        dir = debugfs_create_dir("callthunks", NULL);
        for_each_possible_cpu(cpu) {
                void *arg = (void *)cpu;
                char name[10];

                sprintf(name, "cpu%lu", cpu);
                debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
        }
        return 0;
}
__initcall(callthunks_debugfs_init);
#endif