#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	/* poll support */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;	/* AUX write position   */
	local_t				aux_nest;	/* nested AUX writers   */
	unsigned long			aux_pgoff;	/* first AUX page offset */
	int				aux_nr_pages;	/* nr of AUX pages      */
	int				aux_overwrite;	/* AUX overwrite mode   */
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;	/* PMU private data     */

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);
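
/*
 * Lifecycle sketch (hypothetical caller; in the tree the real users are
 * the mmap and output paths in kernel/events/):
 *
 *	struct ring_buffer *rb;
 *
 *	rb = rb_alloc(nr_pages, watermark, cpu, RING_BUFFER_WRITABLE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...publish rb, write records...
 *	ring_buffer_put(rb);	// drop the refcount; rb_free() runs last
 */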

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
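
/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): with
 * nr_pages == 8 and page_order(rb) == 0, perf_data_size() returns
 * 8 << 12 == 32768 bytes; perf_aux_size() works the same way on
 * aux_nr_pages.
 */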

/*
 * The memcpy_func below must return the number of bytes it failed to
 * copy, so "size - written" is the number of bytes actually written.
 * The generated function returns the number of bytes left uncopied
 * (0 on full success).
 */
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;	/* copied everything; no bytes failed */
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;	/* copy nothing, but advance the handle by n */
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;	/* bytes that could not be copied */
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
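
/*
 * Usage sketch (modeled on the output path in kernel/events/ring_buffer.c):
 *
 *	unsigned int perf_output_copy(struct perf_output_handle *handle,
 *				      const void *buf, unsigned int len)
 *	{
 *		return __output_copy(handle, buf, len);
 *	}
 *
 * __output_copy_user runs the same loop against user memory (e.g. for
 * user-stack dumps), and __output_skip merely advances the handle.
 */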

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

/*
 * Map the current execution context to a recursion slot:
 * 3 - NMI, 2 - hardirq, 1 - softirq, 0 - task.
 */
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
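
/*
 * Typical pairing, a sketch modeled on the swevent recursion guard in
 * kernel/events/core.c (the per-CPU "recursion" array is assumed):
 *
 *	int rctx = get_recursion_context(recursion);
 *
 *	if (rctx < 0)
 *		return;			// already active in this context
 *	...do work that must not recurse...
 *	put_recursion_context(recursion, rctx);
 */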

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */