#ifndef _BCACHE_UTIL_H
#define _BCACHE_UTIL_H

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "closure.h"

#define PAGE_SECTORS		(PAGE_SIZE / 512)

struct closure;

#ifdef CONFIG_BCACHE_EDEBUG

#define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)

#else /* EDEBUG */

#define atomic_dec_bug(v)	atomic_dec(v)
#define atomic_inc_bug(v, i)	atomic_inc(v)

#endif

#define BITMASK(name, type, field, offset, size)			\
static inline uint64_t name(const type *k)				\
{ return (k->field >> offset) & ~(((uint64_t) ~0) << size); }		\
									\
static inline void SET_##name(type *k, uint64_t v)			\
{									\
	k->field &= ~(~((uint64_t) ~0 << size) << offset);		\
	k->field |= v << offset;					\
}

#define DECLARE_HEAP(type, name)					\
	struct {							\
		size_t size, used;					\
		type *data;						\
	} name

#define init_heap(heap, _size, gfp)					\
({									\
	size_t _bytes;							\
	(heap)->used = 0;						\
	(heap)->size = (_size);						\
	_bytes = (heap)->size * sizeof(*(heap)->data);			\
	(heap)->data = NULL;						\
	if (_bytes < KMALLOC_MAX_SIZE)					\
		(heap)->data = kmalloc(_bytes, (gfp));			\
	if ((!(heap)->data) && ((gfp) & GFP_KERNEL))			\
		(heap)->data = vmalloc(_bytes);				\
	(heap)->data;							\
})

#define free_heap(heap)							\
do {									\
	if (is_vmalloc_addr((heap)->data))				\
		vfree((heap)->data);					\
	else								\
		kfree((heap)->data);					\
	(heap)->data = NULL;						\
} while (0)

#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])

#define heap_sift(h, i, cmp)						\
do {									\
	size_t _r, _j = i;						\
									\
	for (; _j * 2 + 1 < (h)->used; _j = _r) {			\
		_r = _j * 2 + 1;					\
		if (_r + 1 < (h)->used &&				\
		    cmp((h)->data[_r], (h)->data[_r + 1]))		\
			_r++;						\
									\
		if (cmp((h)->data[_r], (h)->data[_j]))			\
			break;						\
		heap_swap(h, _r, _j);					\
	}								\
} while (0)

#define heap_sift_down(h, i, cmp)					\
do {									\
	while (i) {							\
		size_t p = (i - 1) / 2;					\
		if (cmp((h)->data[i], (h)->data[p]))			\
			break;						\
		heap_swap(h, i, p);					\
		i = p;							\
	}								\
} while (0)

#define heap_add(h, d, cmp)						\
({									\
	bool _r = !heap_full(h);					\
	if (_r) {							\
		size_t _i = (h)->used++;				\
		(h)->data[_i] = d;					\
									\
		heap_sift_down(h, _i, cmp);				\
		heap_sift(h, _i, cmp);					\
	}								\
	_r;								\
})

#define heap_pop(h, d, cmp)						\
({									\
	bool _r = (h)->used;						\
	if (_r) {							\
		(d) = (h)->data[0];					\
		(h)->used--;						\
		heap_swap(h, 0, (h)->used);				\
		heap_sift(h, 0, cmp);					\
	}								\
	_r;								\
})

#define heap_peek(h)	((h)->size ? (h)->data[0] : NULL)

#define heap_full(h)	((h)->used == (h)->size)

#define DECLARE_FIFO(type, name)					\
	struct {							\
		size_t front, back, size, mask;				\
		type *data;						\
	} name

#define fifo_for_each(c, fifo, iter)					\
	for (iter = (fifo)->front;					\
	     c = (fifo)->data[iter], iter != (fifo)->back;		\
	     iter = (iter + 1) & (fifo)->mask)

#define __init_fifo(fifo, gfp)						\
({									\
	size_t _allocated_size, _bytes;					\
	BUG_ON(!(fifo)->size);						\
									\
	_allocated_size = roundup_pow_of_two((fifo)->size + 1);	\
	_bytes = _allocated_size * sizeof(*(fifo)->data);		\
									\
	(fifo)->mask = _allocated_size - 1;				\
	(fifo)->front = (fifo)->back = 0;				\
	(fifo)->data = NULL;						\
									\
	if (_bytes < KMALLOC_MAX_SIZE)					\
		(fifo)->data = kmalloc(_bytes, (gfp));			\
	if ((!(fifo)->data) && ((gfp) & GFP_KERNEL))			\
		(fifo)->data = vmalloc(_bytes);				\
	(fifo)->data;							\
})

#define init_fifo_exact(fifo, _size, gfp)				\
({									\
	(fifo)->size = (_size);						\
	__init_fifo(fifo, gfp);						\
})

#define init_fifo(fifo, _size, gfp)					\
({									\
	(fifo)->size = (_size);						\
	if ((fifo)->size > 4)						\
		(fifo)->size = roundup_pow_of_two((fifo)->size) - 1;	\
	__init_fifo(fifo, gfp);						\
})

#define free_fifo(fifo)							\
do {									\
	if (is_vmalloc_addr((fifo)->data))				\
		vfree((fifo)->data);					\
	else								\
		kfree((fifo)->data);					\
	(fifo)->data = NULL;						\
} while (0)

#define fifo_used(fifo)		(((fifo)->back - (fifo)->front) & (fifo)->mask)
#define fifo_free(fifo)		((fifo)->size - fifo_used(fifo))

#define fifo_empty(fifo)	(!fifo_used(fifo))
#define fifo_full(fifo)		(!fifo_free(fifo))

#define fifo_front(fifo)	((fifo)->data[(fifo)->front])
#define fifo_back(fifo)							\
	((fifo)->data[((fifo)->back - 1) & (fifo)->mask])

#define fifo_idx(fifo, p)	(((p) - &fifo_front(fifo)) & (fifo)->mask)

#define fifo_push_back(fifo, i)						\
({									\
	bool _r = !fifo_full((fifo));					\
	if (_r) {							\
		(fifo)->data[(fifo)->back++] = (i);			\
		(fifo)->back &= (fifo)->mask;				\
	}								\
	_r;								\
})

#define fifo_pop_front(fifo, i)						\
({									\
	bool _r = !fifo_empty((fifo));					\
	if (_r) {							\
		(i) = (fifo)->data[(fifo)->front++];			\
		(fifo)->front &= (fifo)->mask;				\
	}								\
	_r;								\
})

#define fifo_push_front(fifo, i)					\
({									\
	bool _r = !fifo_full((fifo));					\
	if (_r) {							\
		--(fifo)->front;					\
		(fifo)->front &= (fifo)->mask;				\
		(fifo)->data[(fifo)->front] = (i);			\
	}								\
	_r;								\
})

#define fifo_pop_back(fifo, i)						\
({									\
	bool _r = !fifo_empty((fifo));					\
	if (_r) {							\
		--(fifo)->back;						\
		(fifo)->back &= (fifo)->mask;				\
		(i) = (fifo)->data[(fifo)->back];			\
	}								\
	_r;								\
})

#define fifo_push(fifo, i)	fifo_push_back(fifo, (i))
#define fifo_pop(fifo, i)	fifo_pop_front(fifo, (i))

#define fifo_swap(l, r)							\
do {									\
	swap((l)->front, (r)->front);					\
	swap((l)->back, (r)->back);					\
	swap((l)->size, (r)->size);					\
	swap((l)->mask, (r)->mask);					\
	swap((l)->data, (r)->data);					\
} while (0)

#define fifo_move(dest, src)						\
do {									\
	typeof(*((dest)->data)) _t;					\
	while (!fifo_full(dest) &&					\
	       fifo_pop(src, _t))					\
		fifo_push(dest, _t);					\
} while (0)

/*
 * Simple array based allocator - preallocates a number of elements and you can
 * never allocate more than that; it also does no locking.
 *
 * Handy because if you know you only need a fixed number of elements you don't
 * have to worry about memory allocation failure, and sometimes a mempool isn't
 * what you want.
 *
 * We treat the free elements as entries in a singly linked list, and the
 * freelist as a stack - allocating and freeing push and pop off the freelist.
 */

#define DECLARE_ARRAY_ALLOCATOR(type, name, size)			\
	struct {							\
		type	*freelist;					\
		type	data[size];					\
	} name

#define array_alloc(array)						\
({									\
	typeof((array)->freelist) _ret = (array)->freelist;		\
									\
	if (_ret)							\
		(array)->freelist = *((typeof((array)->freelist) *) _ret);\
									\
	_ret;								\
})

#define array_free(array, ptr)						\
do {									\
	typeof((array)->freelist) _ptr = ptr;				\
									\
	*((typeof((array)->freelist) *) _ptr) = (array)->freelist;	\
	(array)->freelist = _ptr;					\
} while (0)

#define array_allocator_init(array)					\
do {									\
	typeof((array)->freelist) _i;					\
									\
	BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *));	\
	(array)->freelist = NULL;					\
									\
	for (_i = (array)->data;					\
	     _i < (array)->data + ARRAY_SIZE((array)->data);		\
	     _i++)							\
		array_free(array, _i);					\
} while (0)

#define array_freelist_empty(array)	((array)->freelist == NULL)
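/*
 * Illustrative sketch (not part of this header): typical wiring of the array
 * allocator macros above.  The names "foo" and "foo_pool" are made up for the
 * example; any element type works provided it is at least pointer-sized,
 * which array_allocator_init() enforces with its BUILD_BUG_ON.
 *
 *	struct foo {
 *		uint64_t payload[4];
 *	};
 *
 *	DECLARE_ARRAY_ALLOCATOR(struct foo, foo_pool, 16);
 *
 *	array_allocator_init(&foo_pool);	// push all 16 elements onto the freelist
 *
 *	struct foo *f = array_alloc(&foo_pool);	// pop one element, NULL if exhausted
 *	if (f) {
 *		// ... use f ...
 *		array_free(&foo_pool, f);	// push it back onto the freelist
 *	}
 */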
#define ANYSINT_MAX(t)							\
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

int bch_strtoint_h(const char *, int *);
int bch_strtouint_h(const char *, unsigned int *);
int bch_strtoll_h(const char *, long long *);
int bch_strtoull_h(const char *, unsigned long long *);

static inline int bch_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch_strtoint_h(cp, (int *) res);
#else
	return bch_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch_strtouint_h(cp, (unsigned int *) res);
#else
	return bch_strtoull_h(cp, (unsigned long long *) res);
#endif
}

#define strtoi_h(cp, res)						\
	(__builtin_types_compatible_p(typeof(*res), int)		\
	? bch_strtoint_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), long)		\
	? bch_strtol_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), long long)	\
	? bch_strtoll_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), unsigned int)	\
	? bch_strtouint_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), unsigned long)	\
	? bch_strtoul_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), unsigned long long)\
	? bch_strtoull_h(cp, (void *) res) : -EINVAL)

#define strtoul_safe(cp, var)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = _v;						\
	_r;								\
})

#define strtoul_safe_clamp(cp, var, min, max)				\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = clamp_t(typeof(var), _v, min, max);		\
	_r;								\
})

#define snprint(buf, size, var)						\
	snprintf(buf, size,						\
		__builtin_types_compatible_p(typeof(var), int)		\
		     ? "%i\n" :						\
		__builtin_types_compatible_p(typeof(var), unsigned)	\
		     ? "%u\n" :						\
		__builtin_types_compatible_p(typeof(var), long)		\
		     ? "%li\n" :					\
		__builtin_types_compatible_p(typeof(var), unsigned long)\
		     ? "%lu\n" :					\
		__builtin_types_compatible_p(typeof(var), int64_t)	\
		     ? "%lli\n" :					\
		__builtin_types_compatible_p(typeof(var), uint64_t)	\
		     ? "%llu\n" :					\
		__builtin_types_compatible_p(typeof(var), const char *)\
		     ? "%s\n" : "%i\n", var)

ssize_t bch_hprint(char *buf, int64_t v);

bool bch_is_zero(const char *p, size_t n);
int bch_parse_uuid(const char *s, char *uuid);

ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
				size_t selected);

ssize_t bch_read_string_list(const char *buf, const char * const list[]);

struct time_stats {
	/*
	 * all fields are in nanoseconds, averages are ewmas stored left shifted
	 * by 8
	 */
	uint64_t	max_duration;
	uint64_t	average_duration;
	uint64_t	average_frequency;
	uint64_t	last;
};

void bch_time_stats_update(struct time_stats *stats, uint64_t time);

#define NSEC_PER_ns			1L
#define NSEC_PER_us			NSEC_PER_USEC
#define NSEC_PER_ms			NSEC_PER_MSEC
#define NSEC_PER_sec			NSEC_PER_SEC

#define __print_time_stat(stats, name, stat, units)			\
	sysfs_print(name ## _ ## stat ## _ ## units,			\
		    div_u64((stats)->stat >> 8, NSEC_PER_ ## units))

#define sysfs_print_time_stats(stats, name,				\
			       frequency_units,				\
			       duration_units)				\
do {									\
	__print_time_stat(stats, name,					\
			  average_frequency, frequency_units);		\
	__print_time_stat(stats, name,					\
			  average_duration, duration_units);		\
	__print_time_stat(stats, name,					\
			  max_duration, duration_units);		\
									\
	sysfs_print(name ## _last_ ## frequency_units, (stats)->last	\
		    ? div_s64(local_clock() - (stats)->last,		\
			      NSEC_PER_ ## frequency_units)		\
		    : -1LL);						\
} while (0)

#define sysfs_time_stats_attribute(name,				\
				   frequency_units,			\
				   duration_units)			\
read_attribute(name ## _average_frequency_ ## frequency_units);	\
read_attribute(name ## _average_duration_ ## duration_units);		\
read_attribute(name ## _max_duration_ ## duration_units);		\
read_attribute(name ## _last_ ## frequency_units)

#define sysfs_time_stats_attribute_list(name,				\
					frequency_units,		\
					duration_units)			\
&sysfs_ ## name ## _average_frequency_ ## frequency_units,		\
&sysfs_ ## name ## _average_duration_ ## duration_units,		\
&sysfs_ ## name ## _max_duration_ ## duration_units,			\
&sysfs_ ## name ## _last_ ## frequency_units,

#define ewma_add(ewma, val, weight, factor)				\
({									\
	(ewma) *= (weight) - 1;						\
	(ewma) += (val) << factor;					\
	(ewma) /= (weight);						\
	(ewma) >> factor;						\
})
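/*
 * Worked example (illustrative, not part of the original source): ewma_add()
 * keeps an exponentially weighted moving average in fixed point, scaled up by
 * 2^factor for precision - which is why struct time_stats above stores its
 * averages "left shifted by 8" and __print_time_stat() shifts right by 8
 * before printing.  The variable name "avg" is made up for the example.
 *
 *	uint64_t avg = 100 << 8;	// average of 100, kept scaled by 2^8
 *
 *	ewma_add(avg, 200, 8, 8);	// fold in a sample of 200 with weight 8:
 *					// (100 * 7 + 200) / 8 = 112.5
 *
 *	// avg >> 8 is now 112; the statement expression itself evaluates to
 *	// the same unscaled value.
 */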
struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	uint64_t		next;

	/*
	 * Rate at which we want to do work, in units per nanosecond
	 * The units here correspond to the units passed to bch_next_delay()
	 */
	unsigned		rate;
};

static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);

#define __DIV_SAFE(n, d, zero)						\
({									\
	typeof(n) _n = (n);						\
	typeof(d) _d = (d);						\
	_d ? _n / _d : zero;						\
})

#define DIV_SAFE(n, d)	__DIV_SAFE(n, d, 0)

#define container_of_or_null(ptr, type, member)				\
({									\
	typeof(ptr) _ptr = ptr;						\
	_ptr ? container_of(_ptr, type, member) : NULL;			\
})

#define RB_INSERT(root, new, member, cmp)				\
({									\
	__label__ dup;							\
	struct rb_node **n = &(root)->rb_node, *parent = NULL;		\
	typeof(new) this;						\
	int res, ret = -1;						\
									\
	while (*n) {							\
		parent = *n;						\
		this = container_of(*n, typeof(*(new)), member);	\
		res = cmp(new, this);					\
		if (!res)						\
			goto dup;					\
		n = res < 0						\
			? &(*n)->rb_left				\
			: &(*n)->rb_right;				\
	}								\
									\
	rb_link_node(&(new)->member, parent, n);			\
	rb_insert_color(&(new)->member, root);				\
	ret = 0;							\
dup:									\
	ret;								\
})

#define RB_SEARCH(root, search, member, cmp)				\
({									\
	struct rb_node *n = (root)->rb_node;				\
	typeof(&(search)) this, ret = NULL;				\
	int res;							\
									\
	while (n) {							\
		this = container_of(n, typeof(search), member);		\
		res = cmp(&(search), this);				\
		if (!res) {						\
			ret = this;					\
			break;						\
		}							\
		n = res < 0						\
			? n->rb_left					\
			: n->rb_right;					\
	}								\
	ret;								\
})

#define RB_GREATER(root, search, member, cmp)				\
({									\
	struct rb_node *n = (root)->rb_node;				\
	typeof(&(search)) this, ret = NULL;				\
	int res;							\
									\
	while (n) {							\
		this = container_of(n, typeof(search), member);		\
		res = cmp(&(search), this);				\
		if (res < 0) {						\
			ret = this;					\
			n = n->rb_left;					\
		} else							\
			n = n->rb_right;				\
	}								\
	ret;								\
})

#define RB_FIRST(root, type, member)					\
	container_of_or_null(rb_first(root), type, member)

#define RB_LAST(root, type, member)					\
	container_of_or_null(rb_last(root), type, member)

#define RB_NEXT(ptr, member)						\
	container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)

#define RB_PREV(ptr, member)						\
	container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)

/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ~(~0 << fract_bits);

	x >>= fract_bits;
	x = 1 << x;
	x += (x * fract) >> fract_bits;

	return x;
}
void bch_bio_map(struct bio *bio, void *base);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> 9;
}

#define closure_bio_submit(bio, cl, dev)				\
do {									\
	closure_get(cl);						\
	bch_generic_make_request(bio, &(dev)->bio_split_hook);		\
} while (0)

uint64_t bch_crc64_update(uint64_t, const void *, size_t);
uint64_t bch_crc64(const void *, size_t);

#endif /* _BCACHE_UTIL_H */