1 2 #ifndef _BCACHE_UTIL_H 3 #define _BCACHE_UTIL_H 4 5 #include <linux/errno.h> 6 #include <linux/kernel.h> 7 #include <linux/llist.h> 8 #include <linux/ratelimit.h> 9 #include <linux/vmalloc.h> 10 #include <linux/workqueue.h> 11 12 #include "closure.h" 13 14 #define PAGE_SECTORS (PAGE_SIZE / 512) 15 16 struct closure; 17 18 #ifdef CONFIG_BCACHE_DEBUG 19 20 #define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0) 21 #define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i) 22 23 #else /* DEBUG */ 24 25 #define atomic_dec_bug(v) atomic_dec(v) 26 #define atomic_inc_bug(v, i) atomic_inc(v) 27 28 #endif 29 30 #define DECLARE_HEAP(type, name) \ 31 struct { \ 32 size_t size, used; \ 33 type *data; \ 34 } name 35 36 #define init_heap(heap, _size, gfp) \ 37 ({ \ 38 size_t _bytes; \ 39 (heap)->used = 0; \ 40 (heap)->size = (_size); \ 41 _bytes = (heap)->size * sizeof(*(heap)->data); \ 42 (heap)->data = NULL; \ 43 if (_bytes < KMALLOC_MAX_SIZE) \ 44 (heap)->data = kmalloc(_bytes, (gfp)); \ 45 if ((!(heap)->data) && ((gfp) & GFP_KERNEL)) \ 46 (heap)->data = vmalloc(_bytes); \ 47 (heap)->data; \ 48 }) 49 50 #define free_heap(heap) \ 51 do { \ 52 if (is_vmalloc_addr((heap)->data)) \ 53 vfree((heap)->data); \ 54 else \ 55 kfree((heap)->data); \ 56 (heap)->data = NULL; \ 57 } while (0) 58 59 #define heap_swap(h, i, j) swap((h)->data[i], (h)->data[j]) 60 61 #define heap_sift(h, i, cmp) \ 62 do { \ 63 size_t _r, _j = i; \ 64 \ 65 for (; _j * 2 + 1 < (h)->used; _j = _r) { \ 66 _r = _j * 2 + 1; \ 67 if (_r + 1 < (h)->used && \ 68 cmp((h)->data[_r], (h)->data[_r + 1])) \ 69 _r++; \ 70 \ 71 if (cmp((h)->data[_r], (h)->data[_j])) \ 72 break; \ 73 heap_swap(h, _r, _j); \ 74 } \ 75 } while (0) 76 77 #define heap_sift_down(h, i, cmp) \ 78 do { \ 79 while (i) { \ 80 size_t p = (i - 1) / 2; \ 81 if (cmp((h)->data[i], (h)->data[p])) \ 82 break; \ 83 heap_swap(h, i, p); \ 84 i = p; \ 85 } \ 86 } while (0) 87 88 #define heap_add(h, d, cmp) \ 89 ({ \ 90 bool _r = !heap_full(h); \ 91 if (_r) { \ 92 size_t _i = (h)->used++; \ 93 (h)->data[_i] = d; \ 94 \ 95 heap_sift_down(h, _i, cmp); \ 96 heap_sift(h, _i, cmp); \ 97 } \ 98 _r; \ 99 }) 100 101 #define heap_pop(h, d, cmp) \ 102 ({ \ 103 bool _r = (h)->used; \ 104 if (_r) { \ 105 (d) = (h)->data[0]; \ 106 (h)->used--; \ 107 heap_swap(h, 0, (h)->used); \ 108 heap_sift(h, 0, cmp); \ 109 } \ 110 _r; \ 111 }) 112 113 #define heap_peek(h) ((h)->size ? (h)->data[0] : NULL) 114 115 #define heap_full(h) ((h)->used == (h)->size) 116 117 #define DECLARE_FIFO(type, name) \ 118 struct { \ 119 size_t front, back, size, mask; \ 120 type *data; \ 121 } name 122 123 #define fifo_for_each(c, fifo, iter) \ 124 for (iter = (fifo)->front; \ 125 c = (fifo)->data[iter], iter != (fifo)->back; \ 126 iter = (iter + 1) & (fifo)->mask) 127 128 #define __init_fifo(fifo, gfp) \ 129 ({ \ 130 size_t _allocated_size, _bytes; \ 131 BUG_ON(!(fifo)->size); \ 132 \ 133 _allocated_size = roundup_pow_of_two((fifo)->size + 1); \ 134 _bytes = _allocated_size * sizeof(*(fifo)->data); \ 135 \ 136 (fifo)->mask = _allocated_size - 1; \ 137 (fifo)->front = (fifo)->back = 0; \ 138 (fifo)->data = NULL; \ 139 \ 140 if (_bytes < KMALLOC_MAX_SIZE) \ 141 (fifo)->data = kmalloc(_bytes, (gfp)); \ 142 if ((!(fifo)->data) && ((gfp) & GFP_KERNEL)) \ 143 (fifo)->data = vmalloc(_bytes); \ 144 (fifo)->data; \ 145 }) 146 147 #define init_fifo_exact(fifo, _size, gfp) \ 148 ({ \ 149 (fifo)->size = (_size); \ 150 __init_fifo(fifo, gfp); \ 151 }) 152 153 #define init_fifo(fifo, _size, gfp) \ 154 ({ \ 155 (fifo)->size = (_size); \ 156 if ((fifo)->size > 4) \ 157 (fifo)->size = roundup_pow_of_two((fifo)->size) - 1; \ 158 __init_fifo(fifo, gfp); \ 159 }) 160 161 #define free_fifo(fifo) \ 162 do { \ 163 if (is_vmalloc_addr((fifo)->data)) \ 164 vfree((fifo)->data); \ 165 else \ 166 kfree((fifo)->data); \ 167 (fifo)->data = NULL; \ 168 } while (0) 169 170 #define fifo_used(fifo) (((fifo)->back - (fifo)->front) & (fifo)->mask) 171 #define fifo_free(fifo) ((fifo)->size - fifo_used(fifo)) 172 173 #define fifo_empty(fifo) (!fifo_used(fifo)) 174 #define fifo_full(fifo) (!fifo_free(fifo)) 175 176 #define fifo_front(fifo) ((fifo)->data[(fifo)->front]) 177 #define fifo_back(fifo) \ 178 ((fifo)->data[((fifo)->back - 1) & (fifo)->mask]) 179 180 #define fifo_idx(fifo, p) (((p) - &fifo_front(fifo)) & (fifo)->mask) 181 182 #define fifo_push_back(fifo, i) \ 183 ({ \ 184 bool _r = !fifo_full((fifo)); \ 185 if (_r) { \ 186 (fifo)->data[(fifo)->back++] = (i); \ 187 (fifo)->back &= (fifo)->mask; \ 188 } \ 189 _r; \ 190 }) 191 192 #define fifo_pop_front(fifo, i) \ 193 ({ \ 194 bool _r = !fifo_empty((fifo)); \ 195 if (_r) { \ 196 (i) = (fifo)->data[(fifo)->front++]; \ 197 (fifo)->front &= (fifo)->mask; \ 198 } \ 199 _r; \ 200 }) 201 202 #define fifo_push_front(fifo, i) \ 203 ({ \ 204 bool _r = !fifo_full((fifo)); \ 205 if (_r) { \ 206 --(fifo)->front; \ 207 (fifo)->front &= (fifo)->mask; \ 208 (fifo)->data[(fifo)->front] = (i); \ 209 } \ 210 _r; \ 211 }) 212 213 #define fifo_pop_back(fifo, i) \ 214 ({ \ 215 bool _r = !fifo_empty((fifo)); \ 216 if (_r) { \ 217 --(fifo)->back; \ 218 (fifo)->back &= (fifo)->mask; \ 219 (i) = (fifo)->data[(fifo)->back] \ 220 } \ 221 _r; \ 222 }) 223 224 #define fifo_push(fifo, i) fifo_push_back(fifo, (i)) 225 #define fifo_pop(fifo, i) fifo_pop_front(fifo, (i)) 226 227 #define fifo_swap(l, r) \ 228 do { \ 229 swap((l)->front, (r)->front); \ 230 swap((l)->back, (r)->back); \ 231 swap((l)->size, (r)->size); \ 232 swap((l)->mask, (r)->mask); \ 233 swap((l)->data, (r)->data); \ 234 } while (0) 235 236 #define fifo_move(dest, src) \ 237 do { \ 238 typeof(*((dest)->data)) _t; \ 239 while (!fifo_full(dest) && \ 240 fifo_pop(src, _t)) \ 241 fifo_push(dest, _t); \ 242 } while (0) 243 244 /* 245 * Simple array based allocator - preallocates a number of elements and you can 246 * never allocate more than that, also has no locking. 247 * 248 * Handy because if you know you only need a fixed number of elements you don't 249 * have to worry about memory allocation failure, and sometimes a mempool isn't 250 * what you want. 251 * 252 * We treat the free elements as entries in a singly linked list, and the 253 * freelist as a stack - allocating and freeing push and pop off the freelist. 254 */ 255 256 #define DECLARE_ARRAY_ALLOCATOR(type, name, size) \ 257 struct { \ 258 type *freelist; \ 259 type data[size]; \ 260 } name 261 262 #define array_alloc(array) \ 263 ({ \ 264 typeof((array)->freelist) _ret = (array)->freelist; \ 265 \ 266 if (_ret) \ 267 (array)->freelist = *((typeof((array)->freelist) *) _ret);\ 268 \ 269 _ret; \ 270 }) 271 272 #define array_free(array, ptr) \ 273 do { \ 274 typeof((array)->freelist) _ptr = ptr; \ 275 \ 276 *((typeof((array)->freelist) *) _ptr) = (array)->freelist; \ 277 (array)->freelist = _ptr; \ 278 } while (0) 279 280 #define array_allocator_init(array) \ 281 do { \ 282 typeof((array)->freelist) _i; \ 283 \ 284 BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *)); \ 285 (array)->freelist = NULL; \ 286 \ 287 for (_i = (array)->data; \ 288 _i < (array)->data + ARRAY_SIZE((array)->data); \ 289 _i++) \ 290 array_free(array, _i); \ 291 } while (0) 292 293 #define array_freelist_empty(array) ((array)->freelist == NULL) 294 295 #define ANYSINT_MAX(t) \ 296 ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1) 297 298 int bch_strtoint_h(const char *, int *); 299 int bch_strtouint_h(const char *, unsigned int *); 300 int bch_strtoll_h(const char *, long long *); 301 int bch_strtoull_h(const char *, unsigned long long *); 302 303 static inline int bch_strtol_h(const char *cp, long *res) 304 { 305 #if BITS_PER_LONG == 32 306 return bch_strtoint_h(cp, (int *) res); 307 #else 308 return bch_strtoll_h(cp, (long long *) res); 309 #endif 310 } 311 312 static inline int bch_strtoul_h(const char *cp, long *res) 313 { 314 #if BITS_PER_LONG == 32 315 return bch_strtouint_h(cp, (unsigned int *) res); 316 #else 317 return bch_strtoull_h(cp, (unsigned long long *) res); 318 #endif 319 } 320 321 #define strtoi_h(cp, res) \ 322 (__builtin_types_compatible_p(typeof(*res), int) \ 323 ? bch_strtoint_h(cp, (void *) res) \ 324 : __builtin_types_compatible_p(typeof(*res), long) \ 325 ? bch_strtol_h(cp, (void *) res) \ 326 : __builtin_types_compatible_p(typeof(*res), long long) \ 327 ? bch_strtoll_h(cp, (void *) res) \ 328 : __builtin_types_compatible_p(typeof(*res), unsigned int) \ 329 ? bch_strtouint_h(cp, (void *) res) \ 330 : __builtin_types_compatible_p(typeof(*res), unsigned long) \ 331 ? bch_strtoul_h(cp, (void *) res) \ 332 : __builtin_types_compatible_p(typeof(*res), unsigned long long)\ 333 ? bch_strtoull_h(cp, (void *) res) : -EINVAL) 334 335 #define strtoul_safe(cp, var) \ 336 ({ \ 337 unsigned long _v; \ 338 int _r = kstrtoul(cp, 10, &_v); \ 339 if (!_r) \ 340 var = _v; \ 341 _r; \ 342 }) 343 344 #define strtoul_safe_clamp(cp, var, min, max) \ 345 ({ \ 346 unsigned long _v; \ 347 int _r = kstrtoul(cp, 10, &_v); \ 348 if (!_r) \ 349 var = clamp_t(typeof(var), _v, min, max); \ 350 _r; \ 351 }) 352 353 #define snprint(buf, size, var) \ 354 snprintf(buf, size, \ 355 __builtin_types_compatible_p(typeof(var), int) \ 356 ? "%i\n" : \ 357 __builtin_types_compatible_p(typeof(var), unsigned) \ 358 ? "%u\n" : \ 359 __builtin_types_compatible_p(typeof(var), long) \ 360 ? "%li\n" : \ 361 __builtin_types_compatible_p(typeof(var), unsigned long)\ 362 ? "%lu\n" : \ 363 __builtin_types_compatible_p(typeof(var), int64_t) \ 364 ? "%lli\n" : \ 365 __builtin_types_compatible_p(typeof(var), uint64_t) \ 366 ? "%llu\n" : \ 367 __builtin_types_compatible_p(typeof(var), const char *) \ 368 ? "%s\n" : "%i\n", var) 369 370 ssize_t bch_hprint(char *buf, int64_t v); 371 372 bool bch_is_zero(const char *p, size_t n); 373 int bch_parse_uuid(const char *s, char *uuid); 374 375 ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[], 376 size_t selected); 377 378 ssize_t bch_read_string_list(const char *buf, const char * const list[]); 379 380 struct time_stats { 381 spinlock_t lock; 382 /* 383 * all fields are in nanoseconds, averages are ewmas stored left shifted 384 * by 8 385 */ 386 uint64_t max_duration; 387 uint64_t average_duration; 388 uint64_t average_frequency; 389 uint64_t last; 390 }; 391 392 void bch_time_stats_update(struct time_stats *stats, uint64_t time); 393 394 #define NSEC_PER_ns 1L 395 #define NSEC_PER_us NSEC_PER_USEC 396 #define NSEC_PER_ms NSEC_PER_MSEC 397 #define NSEC_PER_sec NSEC_PER_SEC 398 399 #define __print_time_stat(stats, name, stat, units) \ 400 sysfs_print(name ## _ ## stat ## _ ## units, \ 401 div_u64((stats)->stat >> 8, NSEC_PER_ ## units)) 402 403 #define sysfs_print_time_stats(stats, name, \ 404 frequency_units, \ 405 duration_units) \ 406 do { \ 407 __print_time_stat(stats, name, \ 408 average_frequency, frequency_units); \ 409 __print_time_stat(stats, name, \ 410 average_duration, duration_units); \ 411 __print_time_stat(stats, name, \ 412 max_duration, duration_units); \ 413 \ 414 sysfs_print(name ## _last_ ## frequency_units, (stats)->last \ 415 ? div_s64(local_clock() - (stats)->last, \ 416 NSEC_PER_ ## frequency_units) \ 417 : -1LL); \ 418 } while (0) 419 420 #define sysfs_time_stats_attribute(name, \ 421 frequency_units, \ 422 duration_units) \ 423 read_attribute(name ## _average_frequency_ ## frequency_units); \ 424 read_attribute(name ## _average_duration_ ## duration_units); \ 425 read_attribute(name ## _max_duration_ ## duration_units); \ 426 read_attribute(name ## _last_ ## frequency_units) 427 428 #define sysfs_time_stats_attribute_list(name, \ 429 frequency_units, \ 430 duration_units) \ 431 &sysfs_ ## name ## _average_frequency_ ## frequency_units, \ 432 &sysfs_ ## name ## _average_duration_ ## duration_units, \ 433 &sysfs_ ## name ## _max_duration_ ## duration_units, \ 434 &sysfs_ ## name ## _last_ ## frequency_units, 435 436 #define ewma_add(ewma, val, weight, factor) \ 437 ({ \ 438 (ewma) *= (weight) - 1; \ 439 (ewma) += (val) << factor; \ 440 (ewma) /= (weight); \ 441 (ewma) >> factor; \ 442 }) 443 444 struct bch_ratelimit { 445 /* Next time we want to do some work, in nanoseconds */ 446 uint64_t next; 447 448 /* 449 * Rate at which we want to do work, in units per nanosecond 450 * The units here correspond to the units passed to bch_next_delay() 451 */ 452 unsigned rate; 453 }; 454 455 static inline void bch_ratelimit_reset(struct bch_ratelimit *d) 456 { 457 d->next = local_clock(); 458 } 459 460 uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done); 461 462 #define __DIV_SAFE(n, d, zero) \ 463 ({ \ 464 typeof(n) _n = (n); \ 465 typeof(d) _d = (d); \ 466 _d ? _n / _d : zero; \ 467 }) 468 469 #define DIV_SAFE(n, d) __DIV_SAFE(n, d, 0) 470 471 #define container_of_or_null(ptr, type, member) \ 472 ({ \ 473 typeof(ptr) _ptr = ptr; \ 474 _ptr ? container_of(_ptr, type, member) : NULL; \ 475 }) 476 477 #define RB_INSERT(root, new, member, cmp) \ 478 ({ \ 479 __label__ dup; \ 480 struct rb_node **n = &(root)->rb_node, *parent = NULL; \ 481 typeof(new) this; \ 482 int res, ret = -1; \ 483 \ 484 while (*n) { \ 485 parent = *n; \ 486 this = container_of(*n, typeof(*(new)), member); \ 487 res = cmp(new, this); \ 488 if (!res) \ 489 goto dup; \ 490 n = res < 0 \ 491 ? &(*n)->rb_left \ 492 : &(*n)->rb_right; \ 493 } \ 494 \ 495 rb_link_node(&(new)->member, parent, n); \ 496 rb_insert_color(&(new)->member, root); \ 497 ret = 0; \ 498 dup: \ 499 ret; \ 500 }) 501 502 #define RB_SEARCH(root, search, member, cmp) \ 503 ({ \ 504 struct rb_node *n = (root)->rb_node; \ 505 typeof(&(search)) this, ret = NULL; \ 506 int res; \ 507 \ 508 while (n) { \ 509 this = container_of(n, typeof(search), member); \ 510 res = cmp(&(search), this); \ 511 if (!res) { \ 512 ret = this; \ 513 break; \ 514 } \ 515 n = res < 0 \ 516 ? n->rb_left \ 517 : n->rb_right; \ 518 } \ 519 ret; \ 520 }) 521 522 #define RB_GREATER(root, search, member, cmp) \ 523 ({ \ 524 struct rb_node *n = (root)->rb_node; \ 525 typeof(&(search)) this, ret = NULL; \ 526 int res; \ 527 \ 528 while (n) { \ 529 this = container_of(n, typeof(search), member); \ 530 res = cmp(&(search), this); \ 531 if (res < 0) { \ 532 ret = this; \ 533 n = n->rb_left; \ 534 } else \ 535 n = n->rb_right; \ 536 } \ 537 ret; \ 538 }) 539 540 #define RB_FIRST(root, type, member) \ 541 container_of_or_null(rb_first(root), type, member) 542 543 #define RB_LAST(root, type, member) \ 544 container_of_or_null(rb_last(root), type, member) 545 546 #define RB_NEXT(ptr, member) \ 547 container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member) 548 549 #define RB_PREV(ptr, member) \ 550 container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member) 551 552 /* Does linear interpolation between powers of two */ 553 static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits) 554 { 555 unsigned fract = x & ~(~0 << fract_bits); 556 557 x >>= fract_bits; 558 x = 1 << x; 559 x += (x * fract) >> fract_bits; 560 561 return x; 562 } 563 564 void bch_bio_map(struct bio *bio, void *base); 565 566 static inline sector_t bdev_sectors(struct block_device *bdev) 567 { 568 return bdev->bd_inode->i_size >> 9; 569 } 570 571 #define closure_bio_submit(bio, cl, dev) \ 572 do { \ 573 closure_get(cl); \ 574 bch_generic_make_request(bio, &(dev)->bio_split_hook); \ 575 } while (0) 576 577 uint64_t bch_crc64_update(uint64_t, const void *, size_t); 578 uint64_t bch_crc64(const void *, size_t); 579 580 #endif /* _BCACHE_UTIL_H */ 581