/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#ifndef __LOCKING_LOCK_EVENTS_H
#define __LOCKING_LOCK_EVENTS_H

enum lock_events {

#include "lock_events_list.h"

	lockevent_num,	/* Total number of lock event counts */
	LOCKEVENT_reset_cnts = lockevent_num,
};
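
/*
 * Illustrative sketch (not part of the original header): lock_events_list.h
 * is expected to wrap each event in a LOCK_EVENT() macro that, by default,
 * expands into an enum member, e.g.
 *
 *	#define LOCK_EVENT(name)	LOCKEVENT_ ## name,
 *
 * so that a hypothetical entry LOCK_EVENT(lock_pending) becomes
 * LOCKEVENT_lock_pending in the enum above. The real event names are
 * defined in lock_events_list.h.
 */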

#ifdef CONFIG_LOCK_EVENT_COUNTS
/*
 * Per-cpu counters
 */
DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

/*
 * The purpose of the lock event counting subsystem is to provide a low
 * overhead way to record the number of specific locking events by using
 * percpu counters. It is the percpu sum that matters, not specifically
 * how many of them happen on each cpu.
 *
 * It is possible that the same percpu counter may be modified in both
 * the process and interrupt contexts. For architectures that perform
 * percpu operations with multiple instructions, it is possible to lose
 * a count if a process context percpu update is interrupted in the middle
 * and the same counter is updated in the interrupt context. Therefore,
 * the generated percpu sum may not be precise. The error, if any, should
 * be small and insignificant.
 *
 * For those architectures that do multi-instruction percpu operations,
 * preemption in the middle and migration of the task to another cpu may
 * cause a larger error in the count. Again, such occurrences will be few
 * and far between. Given the imprecise nature of the count and the
 * possibility of resetting the count and doing the measurement again,
 * this is not really a big problem.
 *
 * To get a better picture of what is happening under the hood, it is
 * suggested that a few measurements be taken with the counts reset in
 * between to stamp out outliers caused by these possible error conditions.
 *
 * To minimize overhead, we use __this_cpu_*() in all cases except when
 * CONFIG_DEBUG_PREEMPT is defined. In this particular case, this_cpu_*()
 * will be used to avoid the appearance of unwanted BUG messages.
 */
#ifdef CONFIG_DEBUG_PREEMPT
#define lockevent_percpu_inc(x)		this_cpu_inc(x)
#define lockevent_percpu_add(x, v)	this_cpu_add(x, v)
#else
#define lockevent_percpu_inc(x)		__this_cpu_inc(x)
#define lockevent_percpu_add(x, v)	__this_cpu_add(x, v)
#endif

/*
 * Increment the statistical counters.
 */
static inline void __lockevent_inc(enum lock_events event, bool cond)
{
	if (cond)
		lockevent_percpu_inc(lockevents[event]);
}

#define lockevent_inc(ev)	  __lockevent_inc(LOCKEVENT_ ##ev, true)
#define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c)

static inline void __lockevent_add(enum lock_events event, int inc)
{
	lockevent_percpu_add(lockevents[event], inc);
}

#define lockevent_add(ev, c)	__lockevent_add(LOCKEVENT_ ##ev, c)

#else /* CONFIG_LOCK_EVENT_COUNTS */

#define lockevent_inc(ev)
#define lockevent_add(ev, c)
#define lockevent_cond_inc(ev, c)

#endif /* CONFIG_LOCK_EVENT_COUNTS */
#endif /* __LOCKING_LOCK_EVENTS_H */
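
/*
 * Usage sketch (illustrative only; the event names below are examples, the
 * real ones come from lock_events_list.h): a lock slowpath built on top of
 * this header could bump its counters like this:
 *
 *	lockevent_inc(lock_slowpath);			   unconditional count
 *	lockevent_cond_inc(lock_pending, !in_fastpath);	   count only if true
 *	lockevent_add(lock_spurious_wakeups, nwakeups);	   add an arbitrary value
 *
 * When CONFIG_LOCK_EVENT_COUNTS is not set, all of these expand to nothing,
 * so callers do not need any #ifdef guards of their own.
 */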