/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_UTILS_H
#define __I915_UTILS_H

#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#undef WARN_ON
/* Many gcc versions seem not to see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
			     __stringify(x), (long)(x))

#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows_t(T, A, B) \
	__builtin_add_overflow_p((A), (B), (T)0)
#else
#define add_overflows_t(T, A, B) ({ \
	typeof(A) a = (A); \
	typeof(B) b = (B); \
	(T)(a + b) < a; \
})
#endif

#define add_overflows(A, B) \
	add_overflows_t(typeof((A) + (B)), (A), (B))

/* The pointer comparisons below merely force a compile-time type check. */
#define range_overflows(start, size, max) ({ \
	typeof(start) start__ = (start); \
	typeof(size) size__ = (size); \
	typeof(max) max__ = (max); \
	(void)(&start__ == &size__); \
	(void)(&start__ == &max__); \
	start__ > max__ || size__ > max__ - start__; \
})

#define range_overflows_t(type, start, size, max) \
	range_overflows((type)(start), (type)(size), (type)(max))

/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))

static inline bool
__check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
{
	size_t sz;

	if (check_mul_overflow(count, arr, &sz))
		return false;

	if (check_add_overflow(sz, base, &sz))
		return false;

	*size = sz;
	return true;
}

/**
 * check_struct_size() - Calculate size of structure with trailing array.
 * @p: Pointer to the structure.
 * @member: Name of the array member.
 * @n: Number of elements in the array.
 * @sz: Total size of structure and array.
 *
 * Calculates the size of memory needed for structure @p followed by an
 * array of @n @member elements, like struct_size(), but reports whether
 * the calculation overflowed and stores the resulting size in @sz.
 *
 * Return: false if the calculation overflowed.
 */
#define check_struct_size(p, member, n, sz) \
	likely(__check_struct_size(sizeof(*(p)), \
				   sizeof(*(p)->member) + __must_be_array((p)->member), \
				   n, sz))
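/*
 * Example usage (an illustrative sketch only; the structure, field and
 * variable names below are hypothetical): size a trailing-array allocation
 * with check_struct_size() so that an overflowing request is rejected
 * instead of silently wrapping.
 *
 *	struct example_relocs {
 *		unsigned int count;
 *		u64 offsets[];
 *	} *er;
 *	size_t size;
 *
 *	if (!check_struct_size(er, offsets, n_relocs, &size))
 *		return -EINVAL;
 *	er = kmalloc(size, GFP_KERNEL);
 */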
/*
 * Helpers for packing a few flag bits into the otherwise unused low bits
 * of a suitably aligned pointer, and for unpacking them again.
 */
#define ptr_mask_bits(ptr, n) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))

#define ptr_unpack_bits(ptr, bits, n) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	*(bits) = __v & (BIT(n) - 1); \
	(typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_pack_bits(ptr, bits, n) ({ \
	unsigned long __bits = (bits); \
	GEM_BUG_ON(__bits & -BIT(n)); \
	((typeof(ptr))((unsigned long)(ptr) | __bits)); \
})

#define ptr_count_dec(p_ptr) do { \
	typeof(p_ptr) __p = (p_ptr); \
	unsigned long __v = (unsigned long)(*__p); \
	*__p = (typeof(*p_ptr))(--__v); \
} while (0)

#define ptr_count_inc(p_ptr) do { \
	typeof(p_ptr) __p = (p_ptr); \
	unsigned long __v = (unsigned long)(*__p); \
	*__p = (typeof(*p_ptr))(++__v); \
} while (0)

#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)

#define struct_member(T, member) (((T *)0)->member)

#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)

/* Return the current value of *ptr and reset it to zero (not atomic). */
#define fetch_and_zero(ptr) ({ \
	typeof(*ptr) __T = *(ptr); \
	*(ptr) = (typeof(*ptr))0; \
	__T; \
})

/*
 * container_of_user: Extract the superclass from a pointer to a member.
 *
 * Exactly like container_of() with the exception that it plays nicely
 * with sparse for __user @ptr.
 */
#define container_of_user(ptr, type, member) ({ \
	void __user *__mptr = (void __user *)(ptr); \
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), struct_member(type, member)) && \
			 !__same_type(*(ptr), void), \
			 "pointer type mismatch in container_of()"); \
	((type __user *)(__mptr - offsetof(type, member))); })

/*
 * check_user_mbz: Check that a user value exists and is zero
 *
 * Frequently in our uABI we reserve space for future extensions, and to
 * ensure that userspace is prepared we enforce that this space must be
 * zero. (Then any future extension can safely assume a default value
 * of 0.)
 *
 * check_user_mbz() combines checking that the user pointer is accessible
 * and that the contained value is zero.
 *
 * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
 */
#define check_user_mbz(U) ({ \
	typeof(*(U)) mbz__; \
	get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})
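/*
 * Example usage (an illustrative sketch only; the argument structure and
 * its rsvd field are hypothetical): reject an ioctl argument whose
 * reserved field is not zero, so that the field can later be given a
 * meaning with 0 as the backwards-compatible default.
 *
 *	struct drm_example_arg __user *arg;
 *	int err;
 *
 *	err = check_user_mbz(&arg->rsvd);
 *	if (err)
 *		return err;
 */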
static inline u64 ptr_to_u64(const void *ptr)
{
	return (uintptr_t)ptr;
}

#define u64_to_ptr(T, x) ({ \
	typecheck(u64, x); \
	(T *)(uintptr_t)(x); \
})

/* Find the lowest set bit in @mask, clear it and return its index. */
#define __mask_next_bit(mask) ({ \
	int __idx = ffs(mask) - 1; \
	mask &= ~BIT(__idx); \
	__idx; \
})

/* Remove all entries between @head and @first from the list. */
static inline void __list_del_many(struct list_head *head,
				   struct list_head *first)
{
	first->prev = head;
	WRITE_ONCE(head->next, first);
}

/*
 * Wait until the work is finally complete, even if it tries to postpone
 * itself by requeueing. Note that if the worker never cancels itself,
 * we will spin forever.
 */
static inline void drain_delayed_work(struct delayed_work *dw)
{
	do {
		while (flush_delayed_work(dw))
			;
	} while (delayed_work_pending(dw));
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since
 * the timeout could be due to preemption or similar and we may never have had
 * a chance to check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
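/*
 * Example usage (an illustrative sketch only; the register, bit and
 * variable names are hypothetical): poll a status register until an idle
 * bit is set, giving up after 10 ms. wait_for() may sleep, so it is only
 * suitable for process context; the atomic variants below cover atomic
 * context.
 *
 *	if (wait_for(intel_uncore_read(uncore, EXAMPLE_STATUS) &
 *		     EXAMPLE_IDLE, 10))
 *		DRM_ERROR("timed out waiting for idle\n");
 */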
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif

#define _wait_for_atomic(COND, US, ATOMIC) \
({ \
	int cpu, ret, timeout = (US) * 1000; \
	u64 base; \
	_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
	if (!(ATOMIC)) { \
		preempt_disable(); \
		cpu = smp_processor_id(); \
	} \
	base = local_clock(); \
	for (;;) { \
		u64 now = local_clock(); \
		if (!(ATOMIC)) \
			preempt_enable(); \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret = 0; \
			break; \
		} \
		if (now - base >= timeout) { \
			ret = -ETIMEDOUT; \
			break; \
		} \
		cpu_relax(); \
		if (!(ATOMIC)) { \
			preempt_disable(); \
			if (unlikely(cpu != smp_processor_id())) { \
				timeout -= now - base; \
				cpu = smp_processor_id(); \
				base = local_clock(); \
			} \
		} \
	} \
	ret; \
})

#define wait_for_us(COND, US) \
({ \
	int ret__; \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	if ((US) > 10) \
		ret__ = _wait_for((COND), (US), 10, 10); \
	else \
		ret__ = _wait_for_atomic((COND), (US), 0); \
	ret__; \
})

#define wait_for_atomic_us(COND, US) \
({ \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	BUILD_BUG_ON((US) > 50000); \
	_wait_for_atomic((COND), (US), 1); \
})

#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)

#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))

#define KBps(x) (1000 * (x))
#define MBps(x) KBps(1000 * (x))
#define GBps(x) ((u64)1000 * MBps((x)))

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

static inline const char *enableddisabled(bool v)
{
	return v ? "enabled" : "disabled";
}

#endif /* !__I915_UTILS_H */