/*
 * include/linux/ktime.h
 *
 * ktime_t - nanosecond-resolution time format.
 *
 *  Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
 *
 * data type definitions, declarations, prototypes and macros.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * Credits:
 *
 *	Roman Zippel provided the ideas and primary code snippets of
 *	the ktime_t union and further simplifications of the original
 *	code.
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H

#include <linux/time.h>
#include <linux/jiffies.h>

/* Nanosecond scalar representation for kernel time values */
typedef s64	ktime_t;

/**
 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
 * @secs:	seconds to set
 * @nsecs:	nanoseconds to set
 *
 * Return: The ktime_t representation of the value.
 */
static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
	if (unlikely(secs >= KTIME_SEC_MAX))
		return KTIME_MAX;

	return secs * NSEC_PER_SEC + (s64)nsecs;
}

/* Subtract two ktime_t variables. res = lhs - rhs: */
#define ktime_sub(lhs, rhs)	((lhs) - (rhs))

/* Add two ktime_t variables. res = lhs + rhs: */
#define ktime_add(lhs, rhs)	((lhs) + (rhs))

/*
 * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
 * this means that you must check the result for overflow yourself.
 */
#define ktime_add_unsafe(lhs, rhs)	((u64) (lhs) + (rhs))

/*
 * Add a ktime_t variable and a scalar nanosecond value.
 * res = kt + nsval:
 */
#define ktime_add_ns(kt, nsval)		((kt) + (nsval))

/*
 * Subtract a scalar nanosecond value from a ktime_t variable.
 * res = kt - nsval:
 */
#define ktime_sub_ns(kt, nsval)		((kt) - (nsval))

/* convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
	return ktime_set(ts.tv_sec, ts.tv_nsec);
}

/* Map the ktime_t to timespec64 conversion to the ns_to_timespec64() function */
#define ktime_to_timespec64(kt)		ns_to_timespec64((kt))

/* Convert ktime_t to nanoseconds */
static inline s64 ktime_to_ns(const ktime_t kt)
{
	return kt;
}

/**
 * ktime_compare - Compares two ktime_t variables for less, greater or equal
 * @cmp1: comparable1
 * @cmp2: comparable2
 *
 * Return: ...
 *   cmp1  < cmp2: return <0
 *   cmp1 == cmp2: return 0
 *   cmp1  > cmp2: return >0
 */
static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
{
	if (cmp1 < cmp2)
		return -1;
	if (cmp1 > cmp2)
		return 1;
	return 0;
}

/**
 * ktime_after - Compare if a ktime_t value is bigger than another one.
 * @cmp1: comparable1
 * @cmp2: comparable2
 *
 * Return: true if cmp1 happened after cmp2.
 */
static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) > 0;
}

/**
 * ktime_before - Compare if a ktime_t value is smaller than another one.
 * @cmp1: comparable1
 * @cmp2: comparable2
 *
 * Return: true if cmp1 happened before cmp2.
 */
static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
{
	return ktime_compare(cmp1, cmp2) < 0;
}
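/*
 * Illustrative sketch, not part of the upstream header: shows how the
 * arithmetic and comparison helpers above compose into a simple deadline
 * check. The function name and parameters are hypothetical, for
 * documentation purposes only.
 */
static inline bool ktime_example_deadline_passed(const ktime_t now,
						 const ktime_t start,
						 const u64 timeout_ns)
{
	/* deadline = start + timeout, both expressed in nanoseconds */
	ktime_t deadline = ktime_add_ns(start, timeout_ns);

	/* true once 'now' is strictly later than the deadline */
	return ktime_after(now, deadline);
}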
#if BITS_PER_LONG < 64
extern s64 __ktime_divns(const ktime_t kt, s64 div);
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
	/*
	 * Negative divisors could cause an infinite loop,
	 * so bug out here.
	 */
	BUG_ON(div < 0);
	if (__builtin_constant_p(div) && !(div >> 32)) {
		s64 ns = kt;
		u64 tmp = ns < 0 ? -ns : ns;

		do_div(tmp, div);
		return ns < 0 ? -tmp : tmp;
	} else {
		return __ktime_divns(kt, div);
	}
}
#else /* BITS_PER_LONG < 64 */
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
	/*
	 * The 32-bit implementation cannot handle negative divisors,
	 * so catch them on 64-bit as well.
	 */
	WARN_ON(div < 0);
	return kt / div;
}
#endif

static inline s64 ktime_to_us(const ktime_t kt)
{
	return ktime_divns(kt, NSEC_PER_USEC);
}

static inline s64 ktime_to_ms(const ktime_t kt)
{
	return ktime_divns(kt, NSEC_PER_MSEC);
}

static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
	return ktime_to_us(ktime_sub(later, earlier));
}

static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
{
	return ktime_to_ms(ktime_sub(later, earlier));
}

static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
	return ktime_add_ns(kt, usec * NSEC_PER_USEC);
}

static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
{
	return ktime_add_ns(kt, msec * NSEC_PER_MSEC);
}

static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
{
	return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}

static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
{
	return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
}

extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);

/**
 * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
 *			      format only if the variable contains data
 * @kt:		the ktime_t variable to convert
 * @ts:		the timespec64 variable to store the result in
 *
 * Return: %true if there was a successful conversion, %false if kt was 0.
 */
static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
							 struct timespec64 *ts)
{
	if (kt) {
		*ts = ktime_to_timespec64(kt);
		return true;
	} else {
		return false;
	}
}

#include <vdso/ktime.h>

static inline ktime_t ns_to_ktime(u64 ns)
{
	return ns;
}

static inline ktime_t ms_to_ktime(u64 ms)
{
	return ms * NSEC_PER_MSEC;
}

# include <linux/timekeeping.h>
# include <linux/timekeeping32.h>

#endif
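/*
 * Usage sketch (illustrative, not part of the upstream header): a typical
 * way to measure an elapsed interval with the helpers above, assuming a
 * caller that may use ktime_get() from <linux/timekeeping.h>:
 *
 *	ktime_t start = ktime_get();
 *	... do some work ...
 *	pr_info("took %lld ms\n", ktime_ms_delta(ktime_get(), start));
 */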