/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

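/*
 * A perf_buffer is the backing store that perf_mmap() exposes to user
 * space: the perf_event_mmap_page control page, nr_pages data pages for
 * the regular sample stream, and an optional AUX area (set up via
 * rb_alloc_aux()) for high-bandwidth data such as hardware trace.
 */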
struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll() support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};

extern void rb_free(struct perf_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	rb_free(rb);
}

static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif

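/*
 * Sizing helpers: the data area spans nr_pages << page_order() pages of
 * PAGE_SIZE each, so data_page_nr() counts pages while perf_data_size()
 * and perf_aux_size() give total sizes in bytes.
 */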
static inline int data_page_nr(struct perf_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

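/*
 * Shared copy loop for the __output_*() helpers below: copy up to 'len'
 * bytes to handle->addr in chunks, wrapping to the next data page when
 * the current one fills up.  memcpy_func() returns the number of bytes
 * it failed to copy; the generated function returns how many bytes of
 * 'len' remain uncopied (0 on full success).
 */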
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

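/*
 * __output_custom() hands copy_func the unadvanced source pointer plus
 * the running offset (orig_len - len) instead of bumping 'buf', so the
 * callback can compute its own source position for each chunk.
 */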
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

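/*
 * Default user-space copy for __output_copy_user(): page faults are
 * disabled so the copy is safe in atomic context, and
 * __copy_from_user_inatomic() already returns the number of bytes not
 * copied, matching the memcpy_func() contract above.
 */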
static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

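/*
 * Recursion protection for perf callbacks: one counter per interrupt
 * context level (task, softirq, hardirq, NMI).  get_recursion_context()
 * returns -1 if we are already inside a perf callback at the current
 * level, otherwise it marks the level busy and returns its index for the
 * matching put_recursion_context().
 */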
static inline int get_recursion_context(int *recursion)
{
	unsigned char rctx = interrupt_context_level();

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

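/*
 * Architectures that select CONFIG_HAVE_PERF_USER_STACK_DUMP can sample
 * the user stack; everywhere else the feature reports as unavailable and
 * the user stack pointer reads as 0.
 */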
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */