1 /* SPDX-License-Identifier: GPL-2.0 */ 2 3 #ifndef _BCACHE_UTIL_H 4 #define _BCACHE_UTIL_H 5 6 #include <linux/blkdev.h> 7 #include <linux/errno.h> 8 #include <linux/kernel.h> 9 #include <linux/sched/clock.h> 10 #include <linux/llist.h> 11 #include <linux/ratelimit.h> 12 #include <linux/vmalloc.h> 13 #include <linux/workqueue.h> 14 15 #include "closure.h" 16 17 #define PAGE_SECTORS (PAGE_SIZE / 512) 18 19 struct closure; 20 21 #ifdef CONFIG_BCACHE_DEBUG 22 23 #define EBUG_ON(cond) BUG_ON(cond) 24 #define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0) 25 #define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i) 26 27 #else /* DEBUG */ 28 29 #define EBUG_ON(cond) do { if (cond); } while (0) 30 #define atomic_dec_bug(v) atomic_dec(v) 31 #define atomic_inc_bug(v, i) atomic_inc(v) 32 33 #endif 34 35 #define DECLARE_HEAP(type, name) \ 36 struct { \ 37 size_t size, used; \ 38 type *data; \ 39 } name 40 41 #define init_heap(heap, _size, gfp) \ 42 ({ \ 43 size_t _bytes; \ 44 (heap)->used = 0; \ 45 (heap)->size = (_size); \ 46 _bytes = (heap)->size * sizeof(*(heap)->data); \ 47 (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \ 48 (heap)->data; \ 49 }) 50 51 #define free_heap(heap) \ 52 do { \ 53 kvfree((heap)->data); \ 54 (heap)->data = NULL; \ 55 } while (0) 56 57 #define heap_swap(h, i, j) swap((h)->data[i], (h)->data[j]) 58 59 #define heap_sift(h, i, cmp) \ 60 do { \ 61 size_t _r, _j = i; \ 62 \ 63 for (; _j * 2 + 1 < (h)->used; _j = _r) { \ 64 _r = _j * 2 + 1; \ 65 if (_r + 1 < (h)->used && \ 66 cmp((h)->data[_r], (h)->data[_r + 1])) \ 67 _r++; \ 68 \ 69 if (cmp((h)->data[_r], (h)->data[_j])) \ 70 break; \ 71 heap_swap(h, _r, _j); \ 72 } \ 73 } while (0) 74 75 #define heap_sift_down(h, i, cmp) \ 76 do { \ 77 while (i) { \ 78 size_t p = (i - 1) / 2; \ 79 if (cmp((h)->data[i], (h)->data[p])) \ 80 break; \ 81 heap_swap(h, i, p); \ 82 i = p; \ 83 } \ 84 } while (0) 85 86 #define heap_add(h, d, cmp) \ 87 ({ \ 88 bool _r = !heap_full(h); \ 89 if (_r) { \ 90 size_t _i = (h)->used++; \ 91 (h)->data[_i] = d; \ 92 \ 93 heap_sift_down(h, _i, cmp); \ 94 heap_sift(h, _i, cmp); \ 95 } \ 96 _r; \ 97 }) 98 99 #define heap_pop(h, d, cmp) \ 100 ({ \ 101 bool _r = (h)->used; \ 102 if (_r) { \ 103 (d) = (h)->data[0]; \ 104 (h)->used--; \ 105 heap_swap(h, 0, (h)->used); \ 106 heap_sift(h, 0, cmp); \ 107 } \ 108 _r; \ 109 }) 110 111 #define heap_peek(h) ((h)->used ? (h)->data[0] : NULL) 112 113 #define heap_full(h) ((h)->used == (h)->size) 114 115 #define heap_empty(h) ((h)->used == 0) 116 117 #define DECLARE_FIFO(type, name) \ 118 struct { \ 119 size_t front, back, size, mask; \ 120 type *data; \ 121 } name 122 123 #define fifo_for_each(c, fifo, iter) \ 124 for (iter = (fifo)->front; \ 125 c = (fifo)->data[iter], iter != (fifo)->back; \ 126 iter = (iter + 1) & (fifo)->mask) 127 128 #define __init_fifo(fifo, gfp) \ 129 ({ \ 130 size_t _allocated_size, _bytes; \ 131 BUG_ON(!(fifo)->size); \ 132 \ 133 _allocated_size = roundup_pow_of_two((fifo)->size + 1); \ 134 _bytes = _allocated_size * sizeof(*(fifo)->data); \ 135 \ 136 (fifo)->mask = _allocated_size - 1; \ 137 (fifo)->front = (fifo)->back = 0; \ 138 \ 139 (fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \ 140 (fifo)->data; \ 141 }) 142 143 #define init_fifo_exact(fifo, _size, gfp) \ 144 ({ \ 145 (fifo)->size = (_size); \ 146 __init_fifo(fifo, gfp); \ 147 }) 148 149 #define init_fifo(fifo, _size, gfp) \ 150 ({ \ 151 (fifo)->size = (_size); \ 152 if ((fifo)->size > 4) \ 153 (fifo)->size = roundup_pow_of_two((fifo)->size) - 1; \ 154 __init_fifo(fifo, gfp); \ 155 }) 156 157 #define free_fifo(fifo) \ 158 do { \ 159 kvfree((fifo)->data); \ 160 (fifo)->data = NULL; \ 161 } while (0) 162 163 #define fifo_used(fifo) (((fifo)->back - (fifo)->front) & (fifo)->mask) 164 #define fifo_free(fifo) ((fifo)->size - fifo_used(fifo)) 165 166 #define fifo_empty(fifo) (!fifo_used(fifo)) 167 #define fifo_full(fifo) (!fifo_free(fifo)) 168 169 #define fifo_front(fifo) ((fifo)->data[(fifo)->front]) 170 #define fifo_back(fifo) \ 171 ((fifo)->data[((fifo)->back - 1) & (fifo)->mask]) 172 173 #define fifo_idx(fifo, p) (((p) - &fifo_front(fifo)) & (fifo)->mask) 174 175 #define fifo_push_back(fifo, i) \ 176 ({ \ 177 bool _r = !fifo_full((fifo)); \ 178 if (_r) { \ 179 (fifo)->data[(fifo)->back++] = (i); \ 180 (fifo)->back &= (fifo)->mask; \ 181 } \ 182 _r; \ 183 }) 184 185 #define fifo_pop_front(fifo, i) \ 186 ({ \ 187 bool _r = !fifo_empty((fifo)); \ 188 if (_r) { \ 189 (i) = (fifo)->data[(fifo)->front++]; \ 190 (fifo)->front &= (fifo)->mask; \ 191 } \ 192 _r; \ 193 }) 194 195 #define fifo_push_front(fifo, i) \ 196 ({ \ 197 bool _r = !fifo_full((fifo)); \ 198 if (_r) { \ 199 --(fifo)->front; \ 200 (fifo)->front &= (fifo)->mask; \ 201 (fifo)->data[(fifo)->front] = (i); \ 202 } \ 203 _r; \ 204 }) 205 206 #define fifo_pop_back(fifo, i) \ 207 ({ \ 208 bool _r = !fifo_empty((fifo)); \ 209 if (_r) { \ 210 --(fifo)->back; \ 211 (fifo)->back &= (fifo)->mask; \ 212 (i) = (fifo)->data[(fifo)->back] \ 213 } \ 214 _r; \ 215 }) 216 217 #define fifo_push(fifo, i) fifo_push_back(fifo, (i)) 218 #define fifo_pop(fifo, i) fifo_pop_front(fifo, (i)) 219 220 #define fifo_swap(l, r) \ 221 do { \ 222 swap((l)->front, (r)->front); \ 223 swap((l)->back, (r)->back); \ 224 swap((l)->size, (r)->size); \ 225 swap((l)->mask, (r)->mask); \ 226 swap((l)->data, (r)->data); \ 227 } while (0) 228 229 #define fifo_move(dest, src) \ 230 do { \ 231 typeof(*((dest)->data)) _t; \ 232 while (!fifo_full(dest) && \ 233 fifo_pop(src, _t)) \ 234 fifo_push(dest, _t); \ 235 } while (0) 236 237 /* 238 * Simple array based allocator - preallocates a number of elements and you can 239 * never allocate more than that, also has no locking. 240 * 241 * Handy because if you know you only need a fixed number of elements you don't 242 * have to worry about memory allocation failure, and sometimes a mempool isn't 243 * what you want. 244 * 245 * We treat the free elements as entries in a singly linked list, and the 246 * freelist as a stack - allocating and freeing push and pop off the freelist. 247 */ 248 249 #define DECLARE_ARRAY_ALLOCATOR(type, name, size) \ 250 struct { \ 251 type *freelist; \ 252 type data[size]; \ 253 } name 254 255 #define array_alloc(array) \ 256 ({ \ 257 typeof((array)->freelist) _ret = (array)->freelist; \ 258 \ 259 if (_ret) \ 260 (array)->freelist = *((typeof((array)->freelist) *) _ret);\ 261 \ 262 _ret; \ 263 }) 264 265 #define array_free(array, ptr) \ 266 do { \ 267 typeof((array)->freelist) _ptr = ptr; \ 268 \ 269 *((typeof((array)->freelist) *) _ptr) = (array)->freelist; \ 270 (array)->freelist = _ptr; \ 271 } while (0) 272 273 #define array_allocator_init(array) \ 274 do { \ 275 typeof((array)->freelist) _i; \ 276 \ 277 BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *)); \ 278 (array)->freelist = NULL; \ 279 \ 280 for (_i = (array)->data; \ 281 _i < (array)->data + ARRAY_SIZE((array)->data); \ 282 _i++) \ 283 array_free(array, _i); \ 284 } while (0) 285 286 #define array_freelist_empty(array) ((array)->freelist == NULL) 287 288 #define ANYSINT_MAX(t) \ 289 ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1) 290 291 int bch_strtoint_h(const char *, int *); 292 int bch_strtouint_h(const char *, unsigned int *); 293 int bch_strtoll_h(const char *, long long *); 294 int bch_strtoull_h(const char *, unsigned long long *); 295 296 static inline int bch_strtol_h(const char *cp, long *res) 297 { 298 #if BITS_PER_LONG == 32 299 return bch_strtoint_h(cp, (int *) res); 300 #else 301 return bch_strtoll_h(cp, (long long *) res); 302 #endif 303 } 304 305 static inline int bch_strtoul_h(const char *cp, long *res) 306 { 307 #if BITS_PER_LONG == 32 308 return bch_strtouint_h(cp, (unsigned int *) res); 309 #else 310 return bch_strtoull_h(cp, (unsigned long long *) res); 311 #endif 312 } 313 314 #define strtoi_h(cp, res) \ 315 (__builtin_types_compatible_p(typeof(*res), int) \ 316 ? bch_strtoint_h(cp, (void *) res) \ 317 : __builtin_types_compatible_p(typeof(*res), long) \ 318 ? bch_strtol_h(cp, (void *) res) \ 319 : __builtin_types_compatible_p(typeof(*res), long long) \ 320 ? bch_strtoll_h(cp, (void *) res) \ 321 : __builtin_types_compatible_p(typeof(*res), unsigned int) \ 322 ? bch_strtouint_h(cp, (void *) res) \ 323 : __builtin_types_compatible_p(typeof(*res), unsigned long) \ 324 ? bch_strtoul_h(cp, (void *) res) \ 325 : __builtin_types_compatible_p(typeof(*res), unsigned long long)\ 326 ? bch_strtoull_h(cp, (void *) res) : -EINVAL) 327 328 #define strtoul_safe(cp, var) \ 329 ({ \ 330 unsigned long _v; \ 331 int _r = kstrtoul(cp, 10, &_v); \ 332 if (!_r) \ 333 var = _v; \ 334 _r; \ 335 }) 336 337 #define strtoul_safe_clamp(cp, var, min, max) \ 338 ({ \ 339 unsigned long _v; \ 340 int _r = kstrtoul(cp, 10, &_v); \ 341 if (!_r) \ 342 var = clamp_t(typeof(var), _v, min, max); \ 343 _r; \ 344 }) 345 346 #define snprint(buf, size, var) \ 347 snprintf(buf, size, \ 348 __builtin_types_compatible_p(typeof(var), int) \ 349 ? "%i\n" : \ 350 __builtin_types_compatible_p(typeof(var), unsigned) \ 351 ? "%u\n" : \ 352 __builtin_types_compatible_p(typeof(var), long) \ 353 ? "%li\n" : \ 354 __builtin_types_compatible_p(typeof(var), unsigned long)\ 355 ? "%lu\n" : \ 356 __builtin_types_compatible_p(typeof(var), int64_t) \ 357 ? "%lli\n" : \ 358 __builtin_types_compatible_p(typeof(var), uint64_t) \ 359 ? "%llu\n" : \ 360 __builtin_types_compatible_p(typeof(var), const char *) \ 361 ? "%s\n" : "%i\n", var) 362 363 ssize_t bch_hprint(char *buf, int64_t v); 364 365 bool bch_is_zero(const char *p, size_t n); 366 int bch_parse_uuid(const char *s, char *uuid); 367 368 ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[], 369 size_t selected); 370 371 ssize_t bch_read_string_list(const char *buf, const char * const list[]); 372 373 struct time_stats { 374 spinlock_t lock; 375 /* 376 * all fields are in nanoseconds, averages are ewmas stored left shifted 377 * by 8 378 */ 379 uint64_t max_duration; 380 uint64_t average_duration; 381 uint64_t average_frequency; 382 uint64_t last; 383 }; 384 385 void bch_time_stats_update(struct time_stats *stats, uint64_t time); 386 387 static inline unsigned local_clock_us(void) 388 { 389 return local_clock() >> 10; 390 } 391 392 #define NSEC_PER_ns 1L 393 #define NSEC_PER_us NSEC_PER_USEC 394 #define NSEC_PER_ms NSEC_PER_MSEC 395 #define NSEC_PER_sec NSEC_PER_SEC 396 397 #define __print_time_stat(stats, name, stat, units) \ 398 sysfs_print(name ## _ ## stat ## _ ## units, \ 399 div_u64((stats)->stat >> 8, NSEC_PER_ ## units)) 400 401 #define sysfs_print_time_stats(stats, name, \ 402 frequency_units, \ 403 duration_units) \ 404 do { \ 405 __print_time_stat(stats, name, \ 406 average_frequency, frequency_units); \ 407 __print_time_stat(stats, name, \ 408 average_duration, duration_units); \ 409 sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \ 410 div_u64((stats)->max_duration, NSEC_PER_ ## duration_units));\ 411 \ 412 sysfs_print(name ## _last_ ## frequency_units, (stats)->last \ 413 ? div_s64(local_clock() - (stats)->last, \ 414 NSEC_PER_ ## frequency_units) \ 415 : -1LL); \ 416 } while (0) 417 418 #define sysfs_time_stats_attribute(name, \ 419 frequency_units, \ 420 duration_units) \ 421 read_attribute(name ## _average_frequency_ ## frequency_units); \ 422 read_attribute(name ## _average_duration_ ## duration_units); \ 423 read_attribute(name ## _max_duration_ ## duration_units); \ 424 read_attribute(name ## _last_ ## frequency_units) 425 426 #define sysfs_time_stats_attribute_list(name, \ 427 frequency_units, \ 428 duration_units) \ 429 &sysfs_ ## name ## _average_frequency_ ## frequency_units, \ 430 &sysfs_ ## name ## _average_duration_ ## duration_units, \ 431 &sysfs_ ## name ## _max_duration_ ## duration_units, \ 432 &sysfs_ ## name ## _last_ ## frequency_units, 433 434 #define ewma_add(ewma, val, weight, factor) \ 435 ({ \ 436 (ewma) *= (weight) - 1; \ 437 (ewma) += (val) << factor; \ 438 (ewma) /= (weight); \ 439 (ewma) >> factor; \ 440 }) 441 442 struct bch_ratelimit { 443 /* Next time we want to do some work, in nanoseconds */ 444 uint64_t next; 445 446 /* 447 * Rate at which we want to do work, in units per second 448 * The units here correspond to the units passed to bch_next_delay() 449 */ 450 uint32_t rate; 451 }; 452 453 static inline void bch_ratelimit_reset(struct bch_ratelimit *d) 454 { 455 d->next = local_clock(); 456 } 457 458 uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done); 459 460 #define __DIV_SAFE(n, d, zero) \ 461 ({ \ 462 typeof(n) _n = (n); \ 463 typeof(d) _d = (d); \ 464 _d ? _n / _d : zero; \ 465 }) 466 467 #define DIV_SAFE(n, d) __DIV_SAFE(n, d, 0) 468 469 #define container_of_or_null(ptr, type, member) \ 470 ({ \ 471 typeof(ptr) _ptr = ptr; \ 472 _ptr ? container_of(_ptr, type, member) : NULL; \ 473 }) 474 475 #define RB_INSERT(root, new, member, cmp) \ 476 ({ \ 477 __label__ dup; \ 478 struct rb_node **n = &(root)->rb_node, *parent = NULL; \ 479 typeof(new) this; \ 480 int res, ret = -1; \ 481 \ 482 while (*n) { \ 483 parent = *n; \ 484 this = container_of(*n, typeof(*(new)), member); \ 485 res = cmp(new, this); \ 486 if (!res) \ 487 goto dup; \ 488 n = res < 0 \ 489 ? &(*n)->rb_left \ 490 : &(*n)->rb_right; \ 491 } \ 492 \ 493 rb_link_node(&(new)->member, parent, n); \ 494 rb_insert_color(&(new)->member, root); \ 495 ret = 0; \ 496 dup: \ 497 ret; \ 498 }) 499 500 #define RB_SEARCH(root, search, member, cmp) \ 501 ({ \ 502 struct rb_node *n = (root)->rb_node; \ 503 typeof(&(search)) this, ret = NULL; \ 504 int res; \ 505 \ 506 while (n) { \ 507 this = container_of(n, typeof(search), member); \ 508 res = cmp(&(search), this); \ 509 if (!res) { \ 510 ret = this; \ 511 break; \ 512 } \ 513 n = res < 0 \ 514 ? n->rb_left \ 515 : n->rb_right; \ 516 } \ 517 ret; \ 518 }) 519 520 #define RB_GREATER(root, search, member, cmp) \ 521 ({ \ 522 struct rb_node *n = (root)->rb_node; \ 523 typeof(&(search)) this, ret = NULL; \ 524 int res; \ 525 \ 526 while (n) { \ 527 this = container_of(n, typeof(search), member); \ 528 res = cmp(&(search), this); \ 529 if (res < 0) { \ 530 ret = this; \ 531 n = n->rb_left; \ 532 } else \ 533 n = n->rb_right; \ 534 } \ 535 ret; \ 536 }) 537 538 #define RB_FIRST(root, type, member) \ 539 container_of_or_null(rb_first(root), type, member) 540 541 #define RB_LAST(root, type, member) \ 542 container_of_or_null(rb_last(root), type, member) 543 544 #define RB_NEXT(ptr, member) \ 545 container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member) 546 547 #define RB_PREV(ptr, member) \ 548 container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member) 549 550 /* Does linear interpolation between powers of two */ 551 static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits) 552 { 553 unsigned fract = x & ~(~0 << fract_bits); 554 555 x >>= fract_bits; 556 x = 1 << x; 557 x += (x * fract) >> fract_bits; 558 559 return x; 560 } 561 562 void bch_bio_map(struct bio *bio, void *base); 563 int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask); 564 565 static inline sector_t bdev_sectors(struct block_device *bdev) 566 { 567 return bdev->bd_inode->i_size >> 9; 568 } 569 570 #define closure_bio_submit(bio, cl) \ 571 do { \ 572 closure_get(cl); \ 573 generic_make_request(bio); \ 574 } while (0) 575 576 uint64_t bch_crc64_update(uint64_t, const void *, size_t); 577 uint64_t bch_crc64(const void *, size_t); 578 579 #endif /* _BCACHE_UTIL_H */ 580