/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_UTILS_H
#define __I915_UTILS_H

#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#undef WARN_ON
/* Many versions of gcc do not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
			     __stringify(x), (long)(x))

#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows_t(T, A, B) \
	__builtin_add_overflow_p((A), (B), (T)0)
#else
#define add_overflows_t(T, A, B) ({ \
	typeof(A) a = (A); \
	typeof(B) b = (B); \
	(T)(a + b) < a; \
})
#endif

#define add_overflows(A, B) \
	add_overflows_t(typeof((A) + (B)), (A), (B))

#define range_overflows(start, size, max) ({ \
	typeof(start) start__ = (start); \
	typeof(size) size__ = (size); \
	typeof(max) max__ = (max); \
	(void)(&start__ == &size__); \
	(void)(&start__ == &max__); \
	start__ > max__ || size__ > max__ - start__; \
})

#define range_overflows_t(type, start, size, max) \
	range_overflows((type)(start), (type)(size), (type)(max))

/* Note we don't consider sign bits :| */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))

static inline bool
__check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
{
	size_t sz;

	if (check_mul_overflow(count, arr, &sz))
		return false;

	if (check_add_overflow(sz, base, &sz))
		return false;

	*size = sz;
	return true;
}

/**
 * check_struct_size() - Calculate size of structure with trailing array.
 * @p: Pointer to the structure.
 * @member: Name of the array member.
 * @n: Number of elements in the array.
 * @sz: Total size of structure and array.
 *
 * Calculates the size of memory needed for structure @p followed by an
 * array of @n @member elements, like struct_size(), but reports whether
 * the calculation overflowed and returns the resultant size in @sz.
 *
 * Return: false if the calculation overflowed.
 */
#define check_struct_size(p, member, n, sz) \
	likely(__check_struct_size(sizeof(*(p)), \
				   sizeof(*(p)->member) + __must_be_array((p)->member), \
				   n, sz))
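
/*
 * Usage sketch for check_struct_size() (illustrative only; the structure,
 * field and variable names below are hypothetical, not part of this header):
 *
 *	struct hypothetical_report {
 *		u32 count;
 *		u64 samples[];
 *	};
 *	struct hypothetical_report *report;
 *	size_t size;
 *
 *	if (!check_struct_size(report, samples, nsamples, &size))
 *		return -E2BIG;
 *
 *	report = kzalloc(size, GFP_KERNEL);
 *	if (!report)
 *		return -ENOMEM;
 *	report->count = nsamples;
 */
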
#define ptr_mask_bits(ptr, n) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))

#define ptr_unpack_bits(ptr, bits, n) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	*(bits) = __v & (BIT(n) - 1); \
	(typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_pack_bits(ptr, bits, n) ({ \
	unsigned long __bits = (bits); \
	GEM_BUG_ON(__bits & -BIT(n)); \
	((typeof(ptr))((unsigned long)(ptr) | __bits)); \
})

#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)

#define struct_member(T, member) (((T *)0)->member)

#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)

#define fetch_and_zero(ptr) ({ \
	typeof(*ptr) __T = *(ptr); \
	*(ptr) = (typeof(*ptr))0; \
	__T; \
})

/*
 * container_of_user: Extract the superclass from a pointer to a member.
 *
 * Exactly like container_of() with the exception that it plays nicely
 * with sparse for __user @ptr.
 */
#define container_of_user(ptr, type, member) ({ \
	void __user *__mptr = (void __user *)(ptr); \
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), struct_member(type, member)) && \
			 !__same_type(*(ptr), void), \
			 "pointer type mismatch in container_of()"); \
	((type __user *)(__mptr - offsetof(type, member))); })

/*
 * check_user_mbz: Check that a user value exists and is zero
 *
 * Frequently in our uABI we reserve space for future extensions, and
 * to ensure that userspace is prepared we enforce that space must
 * be zero. (Then any future extension can safely assume a default value
 * of 0.)
 *
 * check_user_mbz() combines checking that the user pointer is accessible
 * and that the contained value is zero.
 *
 * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
 */
#define check_user_mbz(U) ({ \
	typeof(*(U)) mbz__; \
	get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})
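
/*
 * Usage sketch for check_user_mbz() (illustrative only; the extension
 * struct, its reserved field and the surrounding ioctl argument are
 * hypothetical):
 *
 *	struct hypothetical_ext __user *ext =
 *		u64_to_user_ptr(args->extension);
 *	int err;
 *
 *	err = check_user_mbz(&ext->rsvd);
 *	if (err)
 *		return err;
 */
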
static inline u64 ptr_to_u64(const void *ptr)
{
	return (uintptr_t)ptr;
}

#define u64_to_ptr(T, x) ({ \
	typecheck(u64, x); \
	(T *)(uintptr_t)(x); \
})

#define __mask_next_bit(mask) ({ \
	int __idx = ffs(mask) - 1; \
	mask &= ~BIT(__idx); \
	__idx; \
})

static inline void __list_del_many(struct list_head *head,
				   struct list_head *first)
{
	first->prev = head;
	WRITE_ONCE(head->next, first);
}

/*
 * Wait until the work is finally complete, even if it tries to postpone
 * by requeueing itself. Note that if the worker never cancels itself,
 * we will spin forever.
 */
static inline void drain_delayed_work(struct delayed_work *dw)
{
	do {
		while (flush_delayed_work(dw))
			;
	} while (delayed_work_pending(dw));
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since
 * the timeout could be due to preemption or similar and we may never have had
 * a chance to check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax)	__wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS)		_wait_for((COND), (MS) * 1000, 10, 1000)
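
/*
 * Usage sketch for wait_for() (illustrative only; the register and bit
 * names are hypothetical). wait_for() may sleep between checks, so it
 * must not be called from atomic context; the *_atomic variants below
 * busy-wait instead.
 *
 *	if (wait_for(intel_uncore_read(uncore, HYPOTHETICAL_STATUS) &
 *		     HYPOTHETICAL_DONE, 10))
 *		return -ETIMEDOUT;
 */
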
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif

#define _wait_for_atomic(COND, US, ATOMIC) \
({ \
	int cpu, ret, timeout = (US) * 1000; \
	u64 base; \
	_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
	if (!(ATOMIC)) { \
		preempt_disable(); \
		cpu = smp_processor_id(); \
	} \
	base = local_clock(); \
	for (;;) { \
		u64 now = local_clock(); \
		if (!(ATOMIC)) \
			preempt_enable(); \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret = 0; \
			break; \
		} \
		if (now - base >= timeout) { \
			ret = -ETIMEDOUT; \
			break; \
		} \
		cpu_relax(); \
		if (!(ATOMIC)) { \
			preempt_disable(); \
			if (unlikely(cpu != smp_processor_id())) { \
				timeout -= now - base; \
				cpu = smp_processor_id(); \
				base = local_clock(); \
			} \
		} \
	} \
	ret; \
})

#define wait_for_us(COND, US) \
({ \
	int ret__; \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	if ((US) > 10) \
		ret__ = _wait_for((COND), (US), 10, 10); \
	else \
		ret__ = _wait_for_atomic((COND), (US), 0); \
	ret__; \
})

#define wait_for_atomic_us(COND, US) \
({ \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	BUILD_BUG_ON((US) > 50000); \
	_wait_for_atomic((COND), (US), 1); \
})

#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)

#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))

#define KBps(x) (1000 * (x))
#define MBps(x) KBps(1000 * (x))
#define GBps(x) ((u64)1000 * MBps((x)))

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

static inline const char *enableddisabled(bool v)
{
	return v ? "enabled" : "disabled";
}

#endif /* !__I915_UTILS_H */