xref: /openbmc/linux/kernel/locking/lock_events.h (revision c4f7ac64)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#ifndef __LOCKING_LOCK_EVENTS_H
#define __LOCKING_LOCK_EVENTS_H

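/*
 * The LOCKEVENT_<name> enumerators below come from "lock_events_list.h",
 * which is assumed to expand an x-macro style LOCK_EVENT() list when it
 * is included inside the enum.  LOCKEVENT_reset_cnts sits just past the
 * last real event and can serve as a sentinel id (e.g. to request a
 * reset of all counters).
 */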
enum lock_events {

#include "lock_events_list.h"

	lockevent_num,	/* Total number of lock event counts */
	LOCKEVENT_reset_cnts = lockevent_num,
};

#ifdef CONFIG_LOCK_EVENT_COUNTS
/*
 * Per-cpu counters
 */
DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);
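/*
 * The matching DEFINE_PER_CPU() and the code that sums these per-CPU
 * counts for reporting are assumed to live in lock_events.c rather
 * than in this header.
 */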

/*
 * Increment the statistical counters. Use raw_cpu_inc() because of its
 * lower overhead; we don't care if we lose the occasional update.
 */
static inline void __lockevent_inc(enum lock_events event, bool cond)
{
	if (cond)
		raw_cpu_inc(lockevents[event]);
}

#define lockevent_inc(ev)	  __lockevent_inc(LOCKEVENT_ ##ev, true)
#define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c)
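/*
 * Usage sketch (the event names are only illustrative; real names are
 * whatever lock_events_list.h defines):
 *
 *	lockevent_inc(lock_pending);		    - always count the event
 *	lockevent_cond_inc(lock_slowpath, taken);   - count only if 'taken'
 */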

static inline void __lockevent_add(enum lock_events event, int inc)
{
	raw_cpu_add(lockevents[event], inc);
}

#define lockevent_add(ev, c)	__lockevent_add(LOCKEVENT_ ##ev, c)
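/*
 * Usage sketch (illustrative event name):
 *
 *	lockevent_add(lock_spins, nr_spins);	- bump the counter by nr_spins
 */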

#else  /* CONFIG_LOCK_EVENT_COUNTS */

#define lockevent_inc(ev)
#define lockevent_add(ev, c)
#define lockevent_cond_inc(ev, c)
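/*
 * With CONFIG_LOCK_EVENT_COUNTS disabled, the macros above expand to
 * nothing, so call sites compile away and add no runtime overhead.
 */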

#endif /* CONFIG_LOCK_EVENT_COUNTS */
#endif /* __LOCKING_LOCK_EVENTS_H */