xref: /openbmc/linux/kernel/events/internal.h (revision 0a196848)
176369139SFrederic Weisbecker #ifndef _KERNEL_EVENTS_INTERNAL_H
276369139SFrederic Weisbecker #define _KERNEL_EVENTS_INTERNAL_H
376369139SFrederic Weisbecker 
49251f904SBorislav Petkov #include <linux/hardirq.h>
591d7753aSFrederic Weisbecker #include <linux/uaccess.h>
69251f904SBorislav Petkov 
79251f904SBorislav Petkov /* Buffer handling */
89251f904SBorislav Petkov 
976369139SFrederic Weisbecker #define RING_BUFFER_WRITABLE		0x01
1076369139SFrederic Weisbecker 
1176369139SFrederic Weisbecker struct ring_buffer {
1276369139SFrederic Weisbecker 	atomic_t			refcount;
1376369139SFrederic Weisbecker 	struct rcu_head			rcu_head;
1476369139SFrederic Weisbecker #ifdef CONFIG_PERF_USE_VMALLOC
1576369139SFrederic Weisbecker 	struct work_struct		work;
1676369139SFrederic Weisbecker 	int				page_order;	/* allocation order  */
1776369139SFrederic Weisbecker #endif
1876369139SFrederic Weisbecker 	int				nr_pages;	/* nr of data pages  */
19dd9c086dSStephane Eranian 	int				overwrite;	/* can overwrite itself */
2076369139SFrederic Weisbecker 
2176369139SFrederic Weisbecker 	atomic_t			poll;		/* POLL_ for wakeups */
2276369139SFrederic Weisbecker 
2376369139SFrederic Weisbecker 	local_t				head;		/* write position    */
2476369139SFrederic Weisbecker 	local_t				nest;		/* nested writers    */
2576369139SFrederic Weisbecker 	local_t				events;		/* event limit       */
2676369139SFrederic Weisbecker 	local_t				wakeup;		/* wakeup stamp      */
2776369139SFrederic Weisbecker 	local_t				lost;		/* nr records lost   */
2876369139SFrederic Weisbecker 
2976369139SFrederic Weisbecker 	long				watermark;	/* wakeup watermark  */
3010c6db11SPeter Zijlstra 	/* poll crap */
3110c6db11SPeter Zijlstra 	spinlock_t			event_lock;
3210c6db11SPeter Zijlstra 	struct list_head		event_list;
3376369139SFrederic Weisbecker 
349bb5d40cSPeter Zijlstra 	atomic_t			mmap_count;
359bb5d40cSPeter Zijlstra 	unsigned long			mmap_locked;
3626cb63adSPeter Zijlstra 	struct user_struct		*mmap_user;
3726cb63adSPeter Zijlstra 
3876369139SFrederic Weisbecker 	struct perf_event_mmap_page	*user_page;
3976369139SFrederic Weisbecker 	void				*data_pages[0];
4076369139SFrederic Weisbecker };
4176369139SFrederic Weisbecker 
4276369139SFrederic Weisbecker extern void rb_free(struct ring_buffer *rb);
4376369139SFrederic Weisbecker extern struct ring_buffer *
4476369139SFrederic Weisbecker rb_alloc(int nr_pages, long watermark, int cpu, int flags);
4576369139SFrederic Weisbecker extern void perf_event_wakeup(struct perf_event *event);
4676369139SFrederic Weisbecker 
4776369139SFrederic Weisbecker extern void
4876369139SFrederic Weisbecker perf_event_header__init_id(struct perf_event_header *header,
4976369139SFrederic Weisbecker 			   struct perf_sample_data *data,
5076369139SFrederic Weisbecker 			   struct perf_event *event);
5176369139SFrederic Weisbecker extern void
5276369139SFrederic Weisbecker perf_event__output_id_sample(struct perf_event *event,
5376369139SFrederic Weisbecker 			     struct perf_output_handle *handle,
5476369139SFrederic Weisbecker 			     struct perf_sample_data *sample);
5576369139SFrederic Weisbecker 
5676369139SFrederic Weisbecker extern struct page *
5776369139SFrederic Weisbecker perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
5876369139SFrederic Weisbecker 
#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

/* Allocation order of the data area (see ring_buffer::page_order). */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

/* Individually page-backed buffers are always order-0. */
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
7876369139SFrederic Weisbecker 
799251f904SBorislav Petkov static inline unsigned long perf_data_size(struct ring_buffer *rb)
8076369139SFrederic Weisbecker {
8176369139SFrederic Weisbecker 	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
8276369139SFrederic Weisbecker }
8376369139SFrederic Weisbecker 
/*
 * DEFINE_OUTPUT_COPY - generate a copy routine for a perf_output_handle
 * @func_name:   name of the generated function
 * @memcpy_func: copy primitive; must return the number of bytes NOT copied
 *
 * The generated function copies @len bytes from @buf into the ring
 * buffer behind @handle, page by page: handle->size bytes remain in the
 * current page; when it fills up, advance to the next data page.  The
 * "&= nr_pages - 1" wrap assumes nr_pages is a power of two.  The loop
 * stops early when @memcpy_func reports a partial copy (written < size,
 * e.g. a faulting user-space access).
 *
 * Returns the number of bytes that could NOT be copied (0 on success).
 */
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
11291d7753aSFrederic Weisbecker 
/*
 * Kernel-to-buffer copy primitive for DEFINE_OUTPUT_COPY().
 * A plain memcpy() cannot fail, so the "bytes not copied" result
 * is always zero.
 */
static inline unsigned long
memcpy_common(void *dest, const void *source, unsigned long count)
{
	memcpy(dest, source, count);
	return 0;
}
11991d7753aSFrederic Weisbecker 
/* __output_copy(): copy from kernel memory into the ring buffer. */
DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
12191d7753aSFrederic Weisbecker 
/*
 * No-op "copy" primitive: touches nothing and reports zero bytes
 * failed, so the generated __output_skip() advances through the
 * buffer without writing any data.
 */
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	(void)dst;
	(void)src;
	(void)n;

	return 0;
}
1275685e0ffSJiri Olsa 
/* __output_skip(): advance the output handle without writing data. */
DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
1295685e0ffSJiri Olsa 
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

/*
 * Default copy-from-user primitive for __output_copy_user(), used
 * when the architecture does not supply its own override.
 *
 * Page faults are disabled around the access so a faulting copy
 * fails immediately instead of sleeping — this path can run in
 * atomic context (see get_recursion_context(): up to NMI level).
 * Returns the number of bytes that could NOT be copied.
 */
static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif
14591d7753aSFrederic Weisbecker 
/* __output_copy_user(): copy from user memory into the ring buffer. */
DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
14776369139SFrederic Weisbecker 
1489251f904SBorislav Petkov /* Callchain handling */
149e6dab5ffSAndrew Vagin extern struct perf_callchain_entry *
150e6dab5ffSAndrew Vagin perf_callchain(struct perf_event *event, struct pt_regs *regs);
1519251f904SBorislav Petkov extern int get_callchain_buffers(void);
1529251f904SBorislav Petkov extern void put_callchain_buffers(void);
1539251f904SBorislav Petkov 
/*
 * Claim the recursion slot for the current execution context:
 * 0 = task, 1 = softirq, 2 = hardirq, 3 = NMI.
 *
 * Returns the claimed slot index, or -1 if an event is already being
 * handled in this context (recursion).  The barrier() keeps the
 * compiler from reordering the protected work before the slot is
 * marked busy.
 */
static inline int get_recursion_context(int *recursion)
{
	int rctx = in_nmi() ? 3 :
		   in_irq() ? 2 :
		   in_softirq() ? 1 : 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}
1759251f904SBorislav Petkov 
/*
 * Release the slot claimed by get_recursion_context().  The barrier()
 * keeps the compiler from sinking protected work past the release.
 */
static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	--recursion[rctx];
}
1819251f904SBorislav Petkov 
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
/*
 * The architecture can dump the user stack: it provides
 * user_stack_pointer() to locate it from pt_regs.
 */
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
/* No arch support: no user stack dumps, stack pointer reads as 0. */
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */
197c5ebcedbSJiri Olsa 
19876369139SFrederic Weisbecker #endif /* _KERNEL_EVENTS_INTERNAL_H */
199