1 /* 2 * Copyright © 2016 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 */ 24 25 #ifndef __I915_UTILS_H 26 #define __I915_UTILS_H 27 28 #include <linux/list.h> 29 #include <linux/overflow.h> 30 #include <linux/sched.h> 31 #include <linux/types.h> 32 #include <linux/workqueue.h> 33 34 #undef WARN_ON 35 /* Many gcc seem to no see through this and fall over :( */ 36 #if 0 37 #define WARN_ON(x) ({ \ 38 bool __i915_warn_cond = (x); \ 39 if (__builtin_constant_p(__i915_warn_cond)) \ 40 BUILD_BUG_ON(__i915_warn_cond); \ 41 WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) 42 #else 43 #define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")") 44 #endif 45 46 #undef WARN_ON_ONCE 47 #define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")") 48 49 #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ 50 __stringify(x), (long)(x)) 51 52 #if defined(GCC_VERSION) && GCC_VERSION >= 70000 53 #define add_overflows_t(T, A, B) \ 54 __builtin_add_overflow_p((A), (B), (T)0) 55 #else 56 #define add_overflows_t(T, A, B) ({ \ 57 typeof(A) a = (A); \ 58 typeof(B) b = (B); \ 59 (T)(a + b) < a; \ 60 }) 61 #endif 62 63 #define add_overflows(A, B) \ 64 add_overflows_t(typeof((A) + (B)), (A), (B)) 65 66 #define range_overflows(start, size, max) ({ \ 67 typeof(start) start__ = (start); \ 68 typeof(size) size__ = (size); \ 69 typeof(max) max__ = (max); \ 70 (void)(&start__ == &size__); \ 71 (void)(&start__ == &max__); \ 72 start__ > max__ || size__ > max__ - start__; \ 73 }) 74 75 #define range_overflows_t(type, start, size, max) \ 76 range_overflows((type)(start), (type)(size), (type)(max)) 77 78 /* Note we don't consider signbits :| */ 79 #define overflows_type(x, T) \ 80 (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T)) 81 82 static inline bool 83 __check_struct_size(size_t base, size_t arr, size_t count, size_t *size) 84 { 85 size_t sz; 86 87 if (check_mul_overflow(count, arr, &sz)) 88 return false; 89 90 if (check_add_overflow(sz, base, &sz)) 91 return false; 92 93 *size = sz; 94 return true; 95 } 96 97 /** 98 * check_struct_size() - Calculate size of structure with trailing array. 99 * @p: Pointer to the structure. 100 * @member: Name of the array member. 101 * @n: Number of elements in the array. 102 * @sz: Total size of structure and array 103 * 104 * Calculates size of memory needed for structure @p followed by an 105 * array of @n @member elements, like struct_size() but reports 106 * whether it overflowed, and the resultant size in @sz 107 * 108 * Return: false if the calculation overflowed. 109 */ 110 #define check_struct_size(p, member, n, sz) \ 111 likely(__check_struct_size(sizeof(*(p)), \ 112 sizeof(*(p)->member) + __must_be_array((p)->member), \ 113 n, sz)) 114 115 #define ptr_mask_bits(ptr, n) ({ \ 116 unsigned long __v = (unsigned long)(ptr); \ 117 (typeof(ptr))(__v & -BIT(n)); \ 118 }) 119 120 #define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1)) 121 122 #define ptr_unpack_bits(ptr, bits, n) ({ \ 123 unsigned long __v = (unsigned long)(ptr); \ 124 *(bits) = __v & (BIT(n) - 1); \ 125 (typeof(ptr))(__v & -BIT(n)); \ 126 }) 127 128 #define ptr_pack_bits(ptr, bits, n) ({ \ 129 unsigned long __bits = (bits); \ 130 GEM_BUG_ON(__bits & -BIT(n)); \ 131 ((typeof(ptr))((unsigned long)(ptr) | __bits)); \ 132 }) 133 134 #define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT) 135 #define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT) 136 #define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT) 137 #define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT) 138 139 #define struct_member(T, member) (((T *)0)->member) 140 141 #define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member) 142 143 #define fetch_and_zero(ptr) ({ \ 144 typeof(*ptr) __T = *(ptr); \ 145 *(ptr) = (typeof(*ptr))0; \ 146 __T; \ 147 }) 148 149 /* 150 * container_of_user: Extract the superclass from a pointer to a member. 151 * 152 * Exactly like container_of() with the exception that it plays nicely 153 * with sparse for __user @ptr. 154 */ 155 #define container_of_user(ptr, type, member) ({ \ 156 void __user *__mptr = (void __user *)(ptr); \ 157 BUILD_BUG_ON_MSG(!__same_type(*(ptr), struct_member(type, member)) && \ 158 !__same_type(*(ptr), void), \ 159 "pointer type mismatch in container_of()"); \ 160 ((type __user *)(__mptr - offsetof(type, member))); }) 161 162 /* 163 * check_user_mbz: Check that a user value exists and is zero 164 * 165 * Frequently in our uABI we reserve space for future extensions, and 166 * two ensure that userspace is prepared we enforce that space must 167 * be zero. (Then any future extension can safely assume a default value 168 * of 0.) 169 * 170 * check_user_mbz() combines checking that the user pointer is accessible 171 * and that the contained value is zero. 172 * 173 * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success. 174 */ 175 #define check_user_mbz(U) ({ \ 176 typeof(*(U)) mbz__; \ 177 get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \ 178 }) 179 180 static inline u64 ptr_to_u64(const void *ptr) 181 { 182 return (uintptr_t)ptr; 183 } 184 185 #define u64_to_ptr(T, x) ({ \ 186 typecheck(u64, x); \ 187 (T *)(uintptr_t)(x); \ 188 }) 189 190 #define __mask_next_bit(mask) ({ \ 191 int __idx = ffs(mask) - 1; \ 192 mask &= ~BIT(__idx); \ 193 __idx; \ 194 }) 195 196 static inline void __list_del_many(struct list_head *head, 197 struct list_head *first) 198 { 199 first->prev = head; 200 WRITE_ONCE(head->next, first); 201 } 202 203 /* 204 * Wait until the work is finally complete, even if it tries to postpone 205 * by requeueing itself. Note, that if the worker never cancels itself, 206 * we will spin forever. 207 */ 208 static inline void drain_delayed_work(struct delayed_work *dw) 209 { 210 do { 211 while (flush_delayed_work(dw)) 212 ; 213 } while (delayed_work_pending(dw)); 214 } 215 216 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) 217 { 218 unsigned long j = msecs_to_jiffies(m); 219 220 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 221 } 222 223 /* 224 * If you need to wait X milliseconds between events A and B, but event B 225 * doesn't happen exactly after event A, you record the timestamp (jiffies) of 226 * when event A happened, then just before event B you call this function and 227 * pass the timestamp as the first argument, and X as the second argument. 228 */ 229 static inline void 230 wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) 231 { 232 unsigned long target_jiffies, tmp_jiffies, remaining_jiffies; 233 234 /* 235 * Don't re-read the value of "jiffies" every time since it may change 236 * behind our back and break the math. 237 */ 238 tmp_jiffies = jiffies; 239 target_jiffies = timestamp_jiffies + 240 msecs_to_jiffies_timeout(to_wait_ms); 241 242 if (time_after(target_jiffies, tmp_jiffies)) { 243 remaining_jiffies = target_jiffies - tmp_jiffies; 244 while (remaining_jiffies) 245 remaining_jiffies = 246 schedule_timeout_uninterruptible(remaining_jiffies); 247 } 248 } 249 250 /** 251 * __wait_for - magic wait macro 252 * 253 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's 254 * important that we check the condition again after having timed out, since the 255 * timeout could be due to preemption or similar and we've never had a chance to 256 * check the condition before the timeout. 257 */ 258 #define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ 259 const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \ 260 long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ 261 int ret__; \ 262 might_sleep(); \ 263 for (;;) { \ 264 const bool expired__ = ktime_after(ktime_get_raw(), end__); \ 265 OP; \ 266 /* Guarantee COND check prior to timeout */ \ 267 barrier(); \ 268 if (COND) { \ 269 ret__ = 0; \ 270 break; \ 271 } \ 272 if (expired__) { \ 273 ret__ = -ETIMEDOUT; \ 274 break; \ 275 } \ 276 usleep_range(wait__, wait__ * 2); \ 277 if (wait__ < (Wmax)) \ 278 wait__ <<= 1; \ 279 } \ 280 ret__; \ 281 }) 282 283 #define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \ 284 (Wmax)) 285 #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) 286 287 /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */ 288 #if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) 289 # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic()) 290 #else 291 # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0) 292 #endif 293 294 #define _wait_for_atomic(COND, US, ATOMIC) \ 295 ({ \ 296 int cpu, ret, timeout = (US) * 1000; \ 297 u64 base; \ 298 _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \ 299 if (!(ATOMIC)) { \ 300 preempt_disable(); \ 301 cpu = smp_processor_id(); \ 302 } \ 303 base = local_clock(); \ 304 for (;;) { \ 305 u64 now = local_clock(); \ 306 if (!(ATOMIC)) \ 307 preempt_enable(); \ 308 /* Guarantee COND check prior to timeout */ \ 309 barrier(); \ 310 if (COND) { \ 311 ret = 0; \ 312 break; \ 313 } \ 314 if (now - base >= timeout) { \ 315 ret = -ETIMEDOUT; \ 316 break; \ 317 } \ 318 cpu_relax(); \ 319 if (!(ATOMIC)) { \ 320 preempt_disable(); \ 321 if (unlikely(cpu != smp_processor_id())) { \ 322 timeout -= now - base; \ 323 cpu = smp_processor_id(); \ 324 base = local_clock(); \ 325 } \ 326 } \ 327 } \ 328 ret; \ 329 }) 330 331 #define wait_for_us(COND, US) \ 332 ({ \ 333 int ret__; \ 334 BUILD_BUG_ON(!__builtin_constant_p(US)); \ 335 if ((US) > 10) \ 336 ret__ = _wait_for((COND), (US), 10, 10); \ 337 else \ 338 ret__ = _wait_for_atomic((COND), (US), 0); \ 339 ret__; \ 340 }) 341 342 #define wait_for_atomic_us(COND, US) \ 343 ({ \ 344 BUILD_BUG_ON(!__builtin_constant_p(US)); \ 345 BUILD_BUG_ON((US) > 50000); \ 346 _wait_for_atomic((COND), (US), 1); \ 347 }) 348 349 #define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000) 350 351 #define KHz(x) (1000 * (x)) 352 #define MHz(x) KHz(1000 * (x)) 353 354 #define KBps(x) (1000 * (x)) 355 #define MBps(x) KBps(1000 * (x)) 356 #define GBps(x) ((u64)1000 * MBps((x))) 357 358 static inline const char *yesno(bool v) 359 { 360 return v ? "yes" : "no"; 361 } 362 363 static inline const char *onoff(bool v) 364 { 365 return v ? "on" : "off"; 366 } 367 368 static inline const char *enableddisabled(bool v) 369 { 370 return v ? "enabled" : "disabled"; 371 } 372 373 #endif /* !__I915_UTILS_H */ 374