/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _POWERPC_PERF_CALLCHAIN_H
#define _POWERPC_PERF_CALLCHAIN_H

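/*
 * 64-bit slow path: read from the user stack via the page tables when
 * the fast copy in __read_user_stack() below faults.
 */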
int read_user_stack_slow(const void __user *ptr, void *buf, int nb);

/* Capture a user-mode callchain for a 64-bit or 32-bit task, respectively. */
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs);
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs);

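/*
 * A user stack pointer is bogus if it is NULL, not aligned for the
 * task's ABI (4 bytes for 32-bit, 8 bytes for 64-bit), or so close to
 * STACK_TOP that a minimal stack frame (16/32 bytes) cannot fit.
 */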
static inline bool invalid_user_sp(unsigned long sp)
{
	unsigned long mask = is_32bit_task() ? 3 : 7;
	unsigned long top = STACK_TOP - (is_32bit_task() ? 16 : 32);

	return (!sp || (sp & mask) || (sp > top));
}

/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables.  Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static inline int __read_user_stack(const void __user *ptr, void *ret,
				    size_t size)
{
	unsigned long addr = (unsigned long)ptr;
	int rc;

	/* Reject reads beyond TASK_SIZE or not naturally aligned to size. */
	if (addr > TASK_SIZE - size || (addr & (size - 1)))
		return -EFAULT;

	rc = copy_from_user_nofault(ret, ptr, size);

	/* On 64-bit, fall back to reading through the page tables on a fault. */
	if (IS_ENABLED(CONFIG_PPC64) && rc)
		return read_user_stack_slow(ptr, ret, size);

	return rc;
}
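
/*
 * Illustrative sketch only (the real walkers live in callchain_32.c and
 * callchain_64.c): a user-stack walker is expected to combine the two
 * helpers above roughly like this, where "entry" and "regs" come from
 * the perf callchain hooks, sizes are shown for a 64-bit task, and the
 * back chain is assumed to sit at offset 0 of each ABI stack frame:
 *
 *	unsigned long sp = regs->gpr[1];
 *	unsigned long next_sp;
 *
 *	while (entry->nr < entry->max_stack) {
 *		if (invalid_user_sp(sp))
 *			break;
 *		if (__read_user_stack((const void __user *)sp, &next_sp,
 *				      sizeof(next_sp)))
 *			break;
 *		... record the return address saved in this frame ...
 *		sp = next_sp;
 *	}
 */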

#endif /* _POWERPC_PERF_CALLCHAIN_H */