/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#include "lock_events.h"

#ifdef CONFIG_LOCK_EVENT_COUNTS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Collect pvqspinlock locking event counts
 */
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>

/* Shorthand for indexing the per-cpu lockevents[] array by event name */
#define EVENT_COUNT(ev)	lockevents[LOCKEVENT_ ## ev]

/*
 * PV specific per-cpu counter
 *
 * Timestamp (sched_clock) taken by __pv_kick() just before kicking a CPU;
 * read back by that CPU in __pv_wait() to compute wakeup latency.
 */
static DEFINE_PER_CPU(u64, pv_kick_time);

/*
 * Function to read and return the PV qspinlock counts.
 *
 * Sums the per-cpu counter selected by the debugfs file's inode i_private
 * across all possible CPUs and formats it into a small user-visible string.
 *
 * The following counters are handled specially:
 * 1. pv_latency_kick
 *    Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
 * 2. pv_latency_wake
 *    Average wake latency (ns) = pv_latency_wake/pv_kick_wake
 * 3. pv_hash_hops
 *    Average hops/hash = pv_hash_hops/pv_kick_unlock
 */
ssize_t lockevent_read(struct file *file, char __user *user_buf,
		       size_t count, loff_t *ppos)
{
	char buf[64];
	int cpu, id, len;
	u64 sum = 0, kicks = 0;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	id = (long)file_inode(file)->i_private;

	if (id >= lockevent_num)
		return -EBADF;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(lockevents[id], cpu);
		/*
		 * Need to sum additional counters for some of them
		 * (the divisor used when reporting an average, see above).
		 */
		switch (id) {

		case LOCKEVENT_pv_latency_kick:
		case LOCKEVENT_pv_hash_hops:
			kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
			break;

		case LOCKEVENT_pv_latency_wake:
			kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
			break;
		}
	}

	if (id == LOCKEVENT_pv_hash_hops) {
		u64 frac = 0;

		if (kicks) {
			/*
			 * do_div() divides sum in place and returns the
			 * remainder; scale it by 100 and round to get two
			 * decimal digits.
			 */
			frac = 100ULL * do_div(sum, kicks);
			frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
		}

		/*
		 * Return a X.XX decimal number
		 */
		len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n",
			       sum, frac);
	} else {
		/*
		 * Round to the nearest ns
		 */
		if ((id == LOCKEVENT_pv_latency_kick) ||
		    (id == LOCKEVENT_pv_latency_wake)) {
			if (kicks)
				sum = DIV_ROUND_CLOSEST_ULL(sum, kicks);
		}
		len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
	}

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/*
 * PV hash hop count
 */
static inline void lockevent_pv_hop(int hopcnt)
{
	this_cpu_add(EVENT_COUNT(pv_hash_hops), hopcnt);
}

/*
 * Replacement function for pv_kick()
 *
 * Records the kick timestamp in the target CPU's pv_kick_time (consumed by
 * __pv_wait() on that CPU) and accumulates the time spent in the real
 * pv_kick() call into pv_latency_kick.
 */
static inline void __pv_kick(int cpu)
{
	u64 start = sched_clock();

	per_cpu(pv_kick_time, cpu) = start;
	pv_kick(cpu);
	this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start);
}

/*
 * Replacement function for pv_wait()
 *
 * Clears this CPU's pv_kick_time before waiting; if it is non-zero after
 * pv_wait() returns, a kicker filled it in via __pv_kick(), so the elapsed
 * time since that kick is the wakeup latency.
 */
static inline void __pv_wait(u8 *ptr, u8 val)
{
	u64 *pkick_time = this_cpu_ptr(&pv_kick_time);

	*pkick_time = 0;
	pv_wait(ptr, val);
	if (*pkick_time) {
		this_cpu_add(EVENT_COUNT(pv_latency_wake),
			     sched_clock() - *pkick_time);
		lockevent_inc(pv_kick_wake);
	}
}

/*
 * These defines are placed AFTER the wrapper bodies on purpose: the wrappers
 * above call the real pv_kick()/pv_wait(), while any code included after this
 * point that uses pv_kick()/pv_wait() gets the instrumented versions.
 */
#define pv_kick(c)	__pv_kick(c)
#define pv_wait(p, v)	__pv_wait(p, v)

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_LOCK_EVENT_COUNTS */

/* Event counting disabled: hop accounting compiles away to nothing. */
static inline void lockevent_pv_hop(int hopcnt) { }

#endif /* CONFIG_LOCK_EVENT_COUNTS */