/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _BCACHE_UTIL_H
#define _BCACHE_UTIL_H

#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "closure.h"

#define PAGE_SECTORS		(PAGE_SIZE / 512)

struct closure;

#ifdef CONFIG_BCACHE_DEBUG

#define EBUG_ON(cond)		BUG_ON(cond)
#define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)

#else /* DEBUG */

#define EBUG_ON(cond)		do { if (cond); } while (0)
#define atomic_dec_bug(v)	atomic_dec(v)
#define atomic_inc_bug(v, i)	atomic_inc(v)

#endif

#define DECLARE_HEAP(type, name)					\
	struct {							\
		size_t size, used;					\
		type *data;						\
	} name

#define init_heap(heap, _size, gfp)					\
({									\
	size_t _bytes;							\
	(heap)->used = 0;						\
	(heap)->size = (_size);						\
	_bytes = (heap)->size * sizeof(*(heap)->data);			\
	(heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL);		\
	(heap)->data;							\
})

#define free_heap(heap)							\
do {									\
	kvfree((heap)->data);						\
	(heap)->data = NULL;						\
} while (0)

#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])

#define heap_sift(h, i, cmp)						\
do {									\
	size_t _r, _j = i;						\
									\
	for (; _j * 2 + 1 < (h)->used; _j = _r) {			\
		_r = _j * 2 + 1;					\
		if (_r + 1 < (h)->used &&				\
		    cmp((h)->data[_r], (h)->data[_r + 1]))		\
			_r++;						\
									\
		if (cmp((h)->data[_r], (h)->data[_j]))			\
			break;						\
		heap_swap(h, _r, _j);					\
	}								\
} while (0)

#define heap_sift_down(h, i, cmp)					\
do {									\
	while (i) {							\
		size_t p = (i - 1) / 2;					\
		if (cmp((h)->data[i], (h)->data[p]))			\
			break;						\
		heap_swap(h, i, p);					\
		i = p;							\
	}								\
} while (0)

#define heap_add(h, d, cmp)						\
({									\
	bool _r = !heap_full(h);					\
	if (_r) {							\
		size_t _i = (h)->used++;				\
		(h)->data[_i] = d;					\
									\
		heap_sift_down(h, _i, cmp);				\
		heap_sift(h, _i, cmp);					\
	}								\
	_r;								\
})

#define heap_pop(h, d, cmp)						\
({									\
	bool _r = (h)->used;						\
	if (_r) {							\
		(d) = (h)->data[0];					\
		(h)->used--;						\
		heap_swap(h, 0, (h)->used);				\
		heap_sift(h, 0, cmp);					\
	}								\
	_r;								\
})

#define heap_peek(h)	((h)->used ? (h)->data[0] : NULL)

#define heap_full(h)	((h)->used == (h)->size)
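/*
 * Example usage of the heap macros above (an illustrative sketch only;
 * nothing here is part of bcache).  cmp(l, r) must return true when l may
 * sit below r in the heap, so with "<" the root holds the largest element,
 * i.e. this builds a max-heap:
 *
 *	#define example_cmp(l, r)	((l) < (r))
 *
 *	DECLARE_HEAP(int, heap);
 *	int v;
 *
 *	if (!init_heap(&heap, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	heap_add(&heap, 7, example_cmp);
 *	heap_add(&heap, 42, example_cmp);
 *
 *	heap_pop(&heap, v, example_cmp);	// v == 42, the maximum
 *	free_heap(&heap);
 */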
#define DECLARE_FIFO(type, name)					\
	struct {							\
		size_t front, back, size, mask;				\
		type *data;						\
	} name

#define fifo_for_each(c, fifo, iter)					\
	for (iter = (fifo)->front;					\
	     c = (fifo)->data[iter], iter != (fifo)->back;		\
	     iter = (iter + 1) & (fifo)->mask)

#define __init_fifo(fifo, gfp)						\
({									\
	size_t _allocated_size, _bytes;					\
	BUG_ON(!(fifo)->size);						\
									\
	_allocated_size = roundup_pow_of_two((fifo)->size + 1);	\
	_bytes = _allocated_size * sizeof(*(fifo)->data);		\
									\
	(fifo)->mask = _allocated_size - 1;				\
	(fifo)->front = (fifo)->back = 0;				\
									\
	(fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL);		\
	(fifo)->data;							\
})

#define init_fifo_exact(fifo, _size, gfp)				\
({									\
	(fifo)->size = (_size);						\
	__init_fifo(fifo, gfp);						\
})

#define init_fifo(fifo, _size, gfp)					\
({									\
	(fifo)->size = (_size);						\
	if ((fifo)->size > 4)						\
		(fifo)->size = roundup_pow_of_two((fifo)->size) - 1;	\
	__init_fifo(fifo, gfp);						\
})

#define free_fifo(fifo)							\
do {									\
	kvfree((fifo)->data);						\
	(fifo)->data = NULL;						\
} while (0)

#define fifo_used(fifo)		(((fifo)->back - (fifo)->front) & (fifo)->mask)
#define fifo_free(fifo)		((fifo)->size - fifo_used(fifo))

#define fifo_empty(fifo)	(!fifo_used(fifo))
#define fifo_full(fifo)		(!fifo_free(fifo))

#define fifo_front(fifo)	((fifo)->data[(fifo)->front])
#define fifo_back(fifo)							\
	((fifo)->data[((fifo)->back - 1) & (fifo)->mask])

#define fifo_idx(fifo, p)	(((p) - &fifo_front(fifo)) & (fifo)->mask)

#define fifo_push_back(fifo, i)						\
({									\
	bool _r = !fifo_full((fifo));					\
	if (_r) {							\
		(fifo)->data[(fifo)->back++] = (i);			\
		(fifo)->back &= (fifo)->mask;				\
	}								\
	_r;								\
})

#define fifo_pop_front(fifo, i)						\
({									\
	bool _r = !fifo_empty((fifo));					\
	if (_r) {							\
		(i) = (fifo)->data[(fifo)->front++];			\
		(fifo)->front &= (fifo)->mask;				\
	}								\
	_r;								\
})

#define fifo_push_front(fifo, i)					\
({									\
	bool _r = !fifo_full((fifo));					\
	if (_r) {							\
		--(fifo)->front;					\
		(fifo)->front &= (fifo)->mask;				\
		(fifo)->data[(fifo)->front] = (i);			\
	}								\
	_r;								\
})

#define fifo_pop_back(fifo, i)						\
({									\
	bool _r = !fifo_empty((fifo));					\
	if (_r) {							\
		--(fifo)->back;						\
		(fifo)->back &= (fifo)->mask;				\
		(i) = (fifo)->data[(fifo)->back];			\
	}								\
	_r;								\
})

#define fifo_push(fifo, i)	fifo_push_back(fifo, (i))
#define fifo_pop(fifo, i)	fifo_pop_front(fifo, (i))

#define fifo_swap(l, r)							\
do {									\
	swap((l)->front, (r)->front);					\
	swap((l)->back, (r)->back);					\
	swap((l)->size, (r)->size);					\
	swap((l)->mask, (r)->mask);					\
	swap((l)->data, (r)->data);					\
} while (0)

#define fifo_move(dest, src)						\
do {									\
	typeof(*((dest)->data)) _t;					\
	while (!fifo_full(dest) &&					\
	       fifo_pop(src, _t))					\
		fifo_push(dest, _t);					\
} while (0)
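/*
 * Example usage of the fifo macros above (an illustrative sketch only, not
 * part of bcache).  The fifo is backed by a power-of-two ring buffer, and
 * init_fifo() may shrink the requested size to a power of two minus one
 * (asking for 8 yields a capacity of 7); use init_fifo_exact() to get at
 * least the requested capacity:
 *
 *	DECLARE_FIFO(int, fifo);
 *	int v;
 *
 *	if (!init_fifo(&fifo, 8, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	fifo_push(&fifo, 1);		// enqueue at the back
 *	fifo_push(&fifo, 2);
 *
 *	fifo_pop(&fifo, v);		// v == 1: elements come out in order
 *
 *	free_fifo(&fifo);
 */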
/*
 * Simple array based allocator - preallocates a number of elements and you can
 * never allocate more than that, also has no locking.
 *
 * Handy because if you know you only need a fixed number of elements you don't
 * have to worry about memory allocation failure, and sometimes a mempool isn't
 * what you want.
 *
 * We treat the free elements as entries in a singly linked list, and the
 * freelist as a stack - allocating and freeing push and pop off the freelist.
 */

#define DECLARE_ARRAY_ALLOCATOR(type, name, size)			\
	struct {							\
		type	*freelist;					\
		type	data[size];					\
	} name

#define array_alloc(array)						\
({									\
	typeof((array)->freelist) _ret = (array)->freelist;		\
									\
	if (_ret)							\
		(array)->freelist = *((typeof((array)->freelist) *) _ret);\
									\
	_ret;								\
})

#define array_free(array, ptr)						\
do {									\
	typeof((array)->freelist) _ptr = ptr;				\
									\
	*((typeof((array)->freelist) *) _ptr) = (array)->freelist;	\
	(array)->freelist = _ptr;					\
} while (0)

#define array_allocator_init(array)					\
do {									\
	typeof((array)->freelist) _i;					\
									\
	BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *));	\
	(array)->freelist = NULL;					\
									\
	for (_i = (array)->data;					\
	     _i < (array)->data + ARRAY_SIZE((array)->data);		\
	     _i++)							\
		array_free(array, _i);					\
} while (0)

#define array_freelist_empty(array)	((array)->freelist == NULL)
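/*
 * Example usage of the array allocator (an illustrative sketch only, not
 * part of bcache).  Element types must be at least pointer-sized, since a
 * free element is overlaid with the freelist link:
 *
 *	struct example_obj {
 *		uint64_t a, b;
 *	};
 *
 *	DECLARE_ARRAY_ALLOCATOR(struct example_obj, pool, 16);
 *	struct example_obj *obj;
 *
 *	array_allocator_init(&pool);
 *
 *	obj = array_alloc(&pool);	// NULL once all 16 are in use
 *	if (obj)
 *		array_free(&pool, obj);
 */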
"%s\n" : "%i\n", var) 360 361 ssize_t bch_hprint(char *buf, int64_t v); 362 363 bool bch_is_zero(const char *p, size_t n); 364 int bch_parse_uuid(const char *s, char *uuid); 365 366 ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[], 367 size_t selected); 368 369 ssize_t bch_read_string_list(const char *buf, const char * const list[]); 370 371 struct time_stats { 372 spinlock_t lock; 373 /* 374 * all fields are in nanoseconds, averages are ewmas stored left shifted 375 * by 8 376 */ 377 uint64_t max_duration; 378 uint64_t average_duration; 379 uint64_t average_frequency; 380 uint64_t last; 381 }; 382 383 void bch_time_stats_update(struct time_stats *stats, uint64_t time); 384 385 static inline unsigned local_clock_us(void) 386 { 387 return local_clock() >> 10; 388 } 389 390 #define NSEC_PER_ns 1L 391 #define NSEC_PER_us NSEC_PER_USEC 392 #define NSEC_PER_ms NSEC_PER_MSEC 393 #define NSEC_PER_sec NSEC_PER_SEC 394 395 #define __print_time_stat(stats, name, stat, units) \ 396 sysfs_print(name ## _ ## stat ## _ ## units, \ 397 div_u64((stats)->stat >> 8, NSEC_PER_ ## units)) 398 399 #define sysfs_print_time_stats(stats, name, \ 400 frequency_units, \ 401 duration_units) \ 402 do { \ 403 __print_time_stat(stats, name, \ 404 average_frequency, frequency_units); \ 405 __print_time_stat(stats, name, \ 406 average_duration, duration_units); \ 407 sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \ 408 div_u64((stats)->max_duration, NSEC_PER_ ## duration_units));\ 409 \ 410 sysfs_print(name ## _last_ ## frequency_units, (stats)->last \ 411 ? div_s64(local_clock() - (stats)->last, \ 412 NSEC_PER_ ## frequency_units) \ 413 : -1LL); \ 414 } while (0) 415 416 #define sysfs_time_stats_attribute(name, \ 417 frequency_units, \ 418 duration_units) \ 419 read_attribute(name ## _average_frequency_ ## frequency_units); \ 420 read_attribute(name ## _average_duration_ ## duration_units); \ 421 read_attribute(name ## _max_duration_ ## duration_units); \ 422 read_attribute(name ## _last_ ## frequency_units) 423 424 #define sysfs_time_stats_attribute_list(name, \ 425 frequency_units, \ 426 duration_units) \ 427 &sysfs_ ## name ## _average_frequency_ ## frequency_units, \ 428 &sysfs_ ## name ## _average_duration_ ## duration_units, \ 429 &sysfs_ ## name ## _max_duration_ ## duration_units, \ 430 &sysfs_ ## name ## _last_ ## frequency_units, 431 432 #define ewma_add(ewma, val, weight, factor) \ 433 ({ \ 434 (ewma) *= (weight) - 1; \ 435 (ewma) += (val) << factor; \ 436 (ewma) /= (weight); \ 437 (ewma) >> factor; \ 438 }) 439 440 struct bch_ratelimit { 441 /* Next time we want to do some work, in nanoseconds */ 442 uint64_t next; 443 444 /* 445 * Rate at which we want to do work, in units per second 446 * The units here correspond to the units passed to bch_next_delay() 447 */ 448 uint32_t rate; 449 }; 450 451 static inline void bch_ratelimit_reset(struct bch_ratelimit *d) 452 { 453 d->next = local_clock(); 454 } 455 456 uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done); 457 458 #define __DIV_SAFE(n, d, zero) \ 459 ({ \ 460 typeof(n) _n = (n); \ 461 typeof(d) _d = (d); \ 462 _d ? _n / _d : zero; \ 463 }) 464 465 #define DIV_SAFE(n, d) __DIV_SAFE(n, d, 0) 466 467 #define container_of_or_null(ptr, type, member) \ 468 ({ \ 469 typeof(ptr) _ptr = ptr; \ 470 _ptr ? 
/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ~(~0 << fract_bits);

	x >>= fract_bits;
	x = 1 << x;
	x += (x * fract) >> fract_bits;

	return x;
}

void bch_bio_map(struct bio *bio, void *base);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> 9;
}

#define closure_bio_submit(bio, cl)					\
do {									\
	closure_get(cl);						\
	generic_make_request(bio);					\
} while (0)

uint64_t bch_crc64_update(uint64_t, const void *, size_t);
uint64_t bch_crc64(const void *, size_t);

#endif /* _BCACHE_UTIL_H */