// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm-core.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
	unsigned long long *histogram;
};

struct dm_stat_shared {
	atomic_t in_flight[2];
	unsigned long long stamp;
	struct dm_stat_percpu tmp;
};

struct dm_stat {
	struct list_head list_entry;
	int id;
	unsigned stat_flags;
	size_t n_entries;
	sector_t start;
	sector_t end;
	sector_t step;
	unsigned n_histogram_entries;
	unsigned long long *histogram_boundaries;
	const char *program_id;
	const char *aux_data;
	struct rcu_head rcu_head;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *stat_percpu[NR_CPUS];
	struct dm_stat_shared stat_shared[];
};

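/*
 * A statistics region is allocated in pieces: one struct dm_stat with
 * n_entries of stat_shared[] appended, plus (per possible CPU) an array
 * of n_entries struct dm_stat_percpu, plus, if a histogram is used, one
 * block of (n_histogram_entries + 1) * n_entries counters per CPU and
 * one more backing the tmp totals. See dm_stats_create() below.
 */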
#define STAT_PRECISE_TIMESTAMPS 1

struct dm_stats_last_position {
	sector_t last_sector;
	unsigned last_rw;
};

/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR		4
#define DM_STATS_VMALLOC_FACTOR		2

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;

static bool __check_shared_memory(size_t alloc_size)
{
	size_t a;

	a = shared_memory_amount + alloc_size;
	if (a < shared_memory_amount)
		return false;
	if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
		return false;
#ifdef CONFIG_MMU
	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
		return false;
#endif
	return true;
}

static bool check_shared_memory(size_t alloc_size)
{
	bool ret;

	spin_lock_irq(&shared_memory_lock);

	ret = __check_shared_memory(alloc_size);

	spin_unlock_irq(&shared_memory_lock);

	return ret;
}

static bool claim_shared_memory(size_t alloc_size)
{
	spin_lock_irq(&shared_memory_lock);

	if (!__check_shared_memory(alloc_size)) {
		spin_unlock_irq(&shared_memory_lock);
		return false;
	}

	shared_memory_amount += alloc_size;

	spin_unlock_irq(&shared_memory_lock);

	return true;
}

static void free_shared_memory(size_t alloc_size)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_memory_lock, flags);

	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
		spin_unlock_irqrestore(&shared_memory_lock, flags);
		DMCRIT("Memory usage accounting bug.");
		return;
	}

	shared_memory_amount -= alloc_size;

	spin_unlock_irqrestore(&shared_memory_lock, flags);
}

static void *dm_kvzalloc(size_t alloc_size, int node)
{
	void *p;

	if (!claim_shared_memory(alloc_size))
		return NULL;

	p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
	if (p)
		return p;

	free_shared_memory(alloc_size);

	return NULL;
}

static void dm_kvfree(void *ptr, size_t alloc_size)
{
	if (!ptr)
		return;

	free_shared_memory(alloc_size);

	kvfree(ptr);
}

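/*
 * Free everything a region owns. This runs either as the RCU callback
 * queued by dm_stats_delete(), or is called directly after
 * synchronize_rcu_expedited() when a buffer is vmalloc-backed, because
 * vfree() must not be called from RCU callback context.
 */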
static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->histogram_boundaries);
	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu) {
		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	}
	dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);
}

static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
	return atomic_read(&shared->in_flight[READ]) +
	       atomic_read(&shared->in_flight[WRITE]);
}

void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;
		last->last_rw = UINT_MAX;
	}
}

void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
		}
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
	mutex_destroy(&stats->mutex);
}

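/*
 * Create a statistics region covering [start, end) divided into areas
 * of 'step' sectors, and insert it under the lowest unused id. Returns
 * the new region id or a negative errno. The device is suspended and
 * resumed around the list insertion so that the new counters start
 * with no i/o in flight.
 */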
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, unsigned stat_flags,
			   unsigned n_histogram_entries,
			   unsigned long long *histogram_boundaries,
			   const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	shared_alloc_size = struct_size(s, stat_shared, n_entries);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
		return -EOVERFLOW;

	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->stat_flags = stat_flags;
	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;
	s->histogram_alloc_size = histogram_alloc_size;

	s->n_histogram_entries = n_histogram_entries;
	s->histogram_boundaries = kmemdup(histogram_boundaries,
					  s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
	if (!s->histogram_boundaries) {
		r = -ENOMEM;
		goto out;
	}

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
	}

	if (s->n_histogram_entries) {
		unsigned long long *hi;
		hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
		if (!hi) {
			r = -ENOMEM;
			goto out;
		}
		for (ni = 0; ni < n_entries; ni++) {
			s->stat_shared[ni].tmp.histogram = hi;
			hi += s->n_histogram_entries + 1;
		}
	}

	for_each_possible_cpu(cpu) {
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
		if (s->n_histogram_entries) {
			unsigned long long *hi;
			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
			if (!hi) {
				r = -ENOMEM;
				goto out;
			}
			for (ni = 0; ni < n_entries; ni++) {
				p[ni].histogram = hi;
				hi += s->n_histogram_entries + 1;
			}
		}
	}

	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */
	suspend_callback(md);

	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);
	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}

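/* Find a region by id; the caller must hold stats->mutex. */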
static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	list_for_each_entry(s, &stats->list, list_entry) {
		if (s->id > id)
			break;
		if (s->id == id)
			return s;
	}

	return NULL;
}

static int dm_stats_delete(struct dm_stats *stats, int id)
{
	struct dm_stat *s;
	int cpu;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	list_del_rcu(&s->list_entry);
	mutex_unlock(&stats->mutex);

	/*
	 * vfree can't be called from RCU callback
	 */
	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu[cpu]) ||
		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
			goto do_sync_free;
	if (is_vmalloc_addr(s) ||
	    is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
		call_rcu(&s->rcu_head, dm_stat_free);
	}
	return 0;
}

static int dm_stats_list(struct dm_stats *stats, const char *program,
			 char *result, unsigned maxlen)
{
	struct dm_stat *s;
	sector_t len;
	unsigned sz = 0;

	/*
	 * Output format:
	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
	 */

	mutex_lock(&stats->mutex);
	list_for_each_entry(s, &stats->list, list_entry) {
		if (!program || !strcmp(program, s->program_id)) {
			len = s->end - s->start;
			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
			       (unsigned long long)s->start,
			       (unsigned long long)len,
			       (unsigned long long)s->step,
			       s->program_id,
			       s->aux_data);
			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
				DMEMIT(" precise_timestamps");
			if (s->n_histogram_entries) {
				unsigned i;
				DMEMIT(" histogram:");
				for (i = 0; i < s->n_histogram_entries; i++) {
					if (i)
						DMEMIT(",");
					DMEMIT("%llu", s->histogram_boundaries[i]);
				}
			}
			DMEMIT("\n");
		}
	}
	mutex_unlock(&stats->mutex);

	return 1;
}

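/*
 * Fold the time elapsed since the last update into the busy-time
 * counters of one area, like the block layer's part_round_stats_single():
 * io_ticks advances while requests are in flight and time_in_queue
 * advances by in_flight * elapsed.
 */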
static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
			  struct dm_stat_percpu *p)
{
	/*
	 * This is racy, but so is part_round_stats_single.
	 */
	unsigned long long now, difference;
	unsigned in_flight_read, in_flight_write;

	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
		now = jiffies;
	else
		now = ktime_to_ns(ktime_get());

	difference = now - shared->stamp;
	if (!difference)
		return;

	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
	if (in_flight_read)
		p->io_ticks[READ] += difference;
	if (in_flight_write)
		p->io_ticks[WRITE] += difference;
	if (in_flight_read + in_flight_write) {
		p->io_ticks_total += difference;
		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
	}
	shared->stamp = now;
}

static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      int idx, sector_t len,
			      struct dm_stats_aux *stats_aux, bool end,
			      unsigned long duration_jiffies)
{
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;

	/*
	 * For strict correctness we should use local_irq_save/restore
	 * instead of preempt_disable/enable.
	 *
	 * preempt_disable/enable is racy if the driver finishes bios
	 * from non-interrupt context as well as from interrupt context
	 * or from more different interrupts.
	 *
	 * On 64-bit architectures the race only results in not counting some
	 * events, so it is acceptable. On 32-bit architectures the race could
	 * cause the counter going off by 2^32, so we need to do proper locking
	 * there.
	 *
	 * part_stat_lock()/part_stat_unlock() have this race too.
	 */
#if BITS_PER_LONG == 32
	unsigned long flags;
	local_irq_save(flags);
#else
	preempt_disable();
#endif
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		dm_stat_round(s, shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		unsigned long long duration;
		dm_stat_round(s, shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += stats_aux->merged;
		if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
			p->ticks[idx] += duration_jiffies;
			duration = jiffies_to_msecs(duration_jiffies);
		} else {
			p->ticks[idx] += stats_aux->duration_ns;
			duration = stats_aux->duration_ns;
		}
		if (s->n_histogram_entries) {
			/*
			 * Binary search for the first bucket whose upper
			 * boundary exceeds the duration; boundary i - 1
			 * separates buckets i - 1 and i.
			 */
			unsigned lo = 0, hi = s->n_histogram_entries + 1;
			while (lo + 1 < hi) {
				unsigned mid = (lo + hi) / 2;
				if (s->histogram_boundaries[mid - 1] > duration) {
					hi = mid;
				} else {
					lo = mid;
				}
			}
			p->histogram[lo]++;
		}
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}

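/*
 * Account one bio against a single region: clip it to [s->start, s->end),
 * split the remainder on area (step) boundaries and update each touched
 * area via dm_stat_for_entry().
 */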
static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
			  sector_t bi_sector, sector_t end_sector,
			  bool end, unsigned long duration_jiffies,
			  struct dm_stats_aux *stats_aux)
{
	sector_t rel_sector, offset, todo, fragment_len;
	size_t entry;

	if (end_sector <= s->start || bi_sector >= s->end)
		return;
	if (unlikely(bi_sector < s->start)) {
		rel_sector = 0;
		todo = end_sector - s->start;
	} else {
		rel_sector = bi_sector - s->start;
		todo = end_sector - bi_sector;
	}
	if (unlikely(end_sector > s->end))
		todo -= (end_sector - s->end);

	offset = dm_sector_div64(rel_sector, s->step);
	entry = rel_sector;
	do {
		if (WARN_ON_ONCE(entry >= s->n_entries)) {
			DMCRIT("Invalid area access in region id %d", s->id);
			return;
		}
		fragment_len = todo;
		if (fragment_len > s->step - offset)
			fragment_len = s->step - offset;
		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
				  stats_aux, end, duration_jiffies);
		todo -= fragment_len;
		entry++;
		offset = 0;
	} while (unlikely(todo != 0));
}

void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
			 sector_t bi_sector, unsigned bi_sectors, bool end,
			 unsigned long duration_jiffies,
			 struct dm_stats_aux *stats_aux)
{
	struct dm_stat *s;
	sector_t end_sector;
	struct dm_stats_last_position *last;
	bool got_precise_time;

	if (unlikely(!bi_sectors))
		return;

	end_sector = bi_sector + bi_sectors;

	if (!end) {
		/*
		 * A race condition can at worst result in the merged flag being
		 * misrepresented, so we don't have to disable preemption here.
		 */
		last = raw_cpu_ptr(stats->last);
		stats_aux->merged =
			(bi_sector == READ_ONCE(last->last_sector) &&
			 ((bi_rw == WRITE) ==
			  (READ_ONCE(last->last_rw) == WRITE)));
		WRITE_ONCE(last->last_sector, end_sector);
		WRITE_ONCE(last->last_rw, bi_rw);
	}

	rcu_read_lock();

	got_precise_time = false;
	list_for_each_entry_rcu(s, &stats->list, list_entry) {
		if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
			if (!end)
				stats_aux->duration_ns = ktime_to_ns(ktime_get());
			else
				stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
			got_precise_time = true;
		}
		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
	}

	rcu_read_unlock();
}

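/*
 * Sum one area's per-CPU counters into shared->tmp. The counters are
 * read with READ_ONCE while other CPUs may still update them, so the
 * result is a best-effort snapshot, consistent with the races that
 * dm_stat_for_entry() already tolerates.
 */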
static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
						   struct dm_stat *s, size_t x)
{
	int cpu;
	struct dm_stat_percpu *p;

	local_irq_disable();
	p = &s->stat_percpu[smp_processor_id()][x];
	dm_stat_round(s, shared, p);
	local_irq_enable();

	shared->tmp.sectors[READ] = 0;
	shared->tmp.sectors[WRITE] = 0;
	shared->tmp.ios[READ] = 0;
	shared->tmp.ios[WRITE] = 0;
	shared->tmp.merges[READ] = 0;
	shared->tmp.merges[WRITE] = 0;
	shared->tmp.ticks[READ] = 0;
	shared->tmp.ticks[WRITE] = 0;
	shared->tmp.io_ticks[READ] = 0;
	shared->tmp.io_ticks[WRITE] = 0;
	shared->tmp.io_ticks_total = 0;
	shared->tmp.time_in_queue = 0;

	if (s->n_histogram_entries)
		memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));

	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
		}
	}
}

static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);
		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				local_irq_disable();
				p = &s->stat_percpu[smp_processor_id()][x];
				p->histogram[i] -= shared->tmp.histogram[i];
				local_irq_enable();
			}
		}
	}
}

static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	__dm_stat_clear(s, 0, s->n_entries, true);

	mutex_unlock(&stats->mutex);

	return 1;
}

/*
 * This is like jiffies_to_msec, but works for 64-bit values.
 */
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
	unsigned long long result;
	unsigned mult;

	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
		return j;

	result = 0;
	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}

static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
			}
		}
		DMEMIT("\n");

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}

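/*
 * Replace a region's auxiliary data string. The swap happens under
 * stats->mutex, so a concurrent @stats_list sees either the old or the
 * new value, never a mix.
 */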
" " : ":", shared->tmp.histogram[i]); 862 } 863 } 864 DMEMIT("\n"); 865 866 if (unlikely(sz + 1 >= maxlen)) 867 goto buffer_overflow; 868 } 869 870 if (clear) 871 __dm_stat_clear(s, idx_start, idx_end, false); 872 873 buffer_overflow: 874 mutex_unlock(&stats->mutex); 875 876 return 1; 877 } 878 879 static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data) 880 { 881 struct dm_stat *s; 882 const char *new_aux_data; 883 884 mutex_lock(&stats->mutex); 885 886 s = __dm_stats_find(stats, id); 887 if (!s) { 888 mutex_unlock(&stats->mutex); 889 return -ENOENT; 890 } 891 892 new_aux_data = kstrdup(aux_data, GFP_KERNEL); 893 if (!new_aux_data) { 894 mutex_unlock(&stats->mutex); 895 return -ENOMEM; 896 } 897 898 kfree(s->aux_data); 899 s->aux_data = new_aux_data; 900 901 mutex_unlock(&stats->mutex); 902 903 return 0; 904 } 905 906 static int parse_histogram(const char *h, unsigned *n_histogram_entries, 907 unsigned long long **histogram_boundaries) 908 { 909 const char *q; 910 unsigned n; 911 unsigned long long last; 912 913 *n_histogram_entries = 1; 914 for (q = h; *q; q++) 915 if (*q == ',') 916 (*n_histogram_entries)++; 917 918 *histogram_boundaries = kmalloc_array(*n_histogram_entries, 919 sizeof(unsigned long long), 920 GFP_KERNEL); 921 if (!*histogram_boundaries) 922 return -ENOMEM; 923 924 n = 0; 925 last = 0; 926 while (1) { 927 unsigned long long hi; 928 int s; 929 char ch; 930 s = sscanf(h, "%llu%c", &hi, &ch); 931 if (!s || (s == 2 && ch != ',')) 932 return -EINVAL; 933 if (hi <= last) 934 return -EINVAL; 935 last = hi; 936 (*histogram_boundaries)[n] = hi; 937 if (s == 1) 938 return 0; 939 h = strchr(h, ',') + 1; 940 n++; 941 } 942 } 943 944 static int message_stats_create(struct mapped_device *md, 945 unsigned argc, char **argv, 946 char *result, unsigned maxlen) 947 { 948 int r; 949 int id; 950 char dummy; 951 unsigned long long start, end, len, step; 952 unsigned divisor; 953 const char *program_id, *aux_data; 954 unsigned stat_flags = 0; 955 956 unsigned n_histogram_entries = 0; 957 unsigned long long *histogram_boundaries = NULL; 958 959 struct dm_arg_set as, as_backup; 960 const char *a; 961 unsigned feature_args; 962 963 /* 964 * Input format: 965 * <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]] 966 */ 967 968 if (argc < 3) 969 goto ret_einval; 970 971 as.argc = argc; 972 as.argv = argv; 973 dm_consume_args(&as, 1); 974 975 a = dm_shift_arg(&as); 976 if (!strcmp(a, "-")) { 977 start = 0; 978 len = dm_get_size(md); 979 if (!len) 980 len = 1; 981 } else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 || 982 start != (sector_t)start || len != (sector_t)len) 983 goto ret_einval; 984 985 end = start + len; 986 if (start >= end) 987 goto ret_einval; 988 989 a = dm_shift_arg(&as); 990 if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) { 991 if (!divisor) 992 return -EINVAL; 993 step = end - start; 994 if (do_div(step, divisor)) 995 step++; 996 if (!step) 997 step = 1; 998 } else if (sscanf(a, "%llu%c", &step, &dummy) != 1 || 999 step != (sector_t)step || !step) 1000 goto ret_einval; 1001 1002 as_backup = as; 1003 a = dm_shift_arg(&as); 1004 if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) { 1005 while (feature_args--) { 1006 a = dm_shift_arg(&as); 1007 if (!a) 1008 goto ret_einval; 1009 if (!strcasecmp(a, "precise_timestamps")) 1010 stat_flags |= STAT_PRECISE_TIMESTAMPS; 1011 else if (!strncasecmp(a, "histogram:", 10)) { 1012 if (n_histogram_entries) 1013 goto ret_einval; 1014 if ((r = parse_histogram(a + 10, 
&n_histogram_entries, &histogram_boundaries))) 1015 goto ret; 1016 } else 1017 goto ret_einval; 1018 } 1019 } else { 1020 as = as_backup; 1021 } 1022 1023 program_id = "-"; 1024 aux_data = "-"; 1025 1026 a = dm_shift_arg(&as); 1027 if (a) 1028 program_id = a; 1029 1030 a = dm_shift_arg(&as); 1031 if (a) 1032 aux_data = a; 1033 1034 if (as.argc) 1035 goto ret_einval; 1036 1037 /* 1038 * If a buffer overflow happens after we created the region, 1039 * it's too late (the userspace would retry with a larger 1040 * buffer, but the region id that caused the overflow is already 1041 * leaked). So we must detect buffer overflow in advance. 1042 */ 1043 snprintf(result, maxlen, "%d", INT_MAX); 1044 if (dm_message_test_buffer_overflow(result, maxlen)) { 1045 r = 1; 1046 goto ret; 1047 } 1048 1049 id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags, 1050 n_histogram_entries, histogram_boundaries, program_id, aux_data, 1051 dm_internal_suspend_fast, dm_internal_resume_fast, md); 1052 if (id < 0) { 1053 r = id; 1054 goto ret; 1055 } 1056 1057 snprintf(result, maxlen, "%d", id); 1058 1059 r = 1; 1060 goto ret; 1061 1062 ret_einval: 1063 r = -EINVAL; 1064 ret: 1065 kfree(histogram_boundaries); 1066 return r; 1067 } 1068 1069 static int message_stats_delete(struct mapped_device *md, 1070 unsigned argc, char **argv) 1071 { 1072 int id; 1073 char dummy; 1074 1075 if (argc != 2) 1076 return -EINVAL; 1077 1078 if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) 1079 return -EINVAL; 1080 1081 return dm_stats_delete(dm_get_stats(md), id); 1082 } 1083 1084 static int message_stats_clear(struct mapped_device *md, 1085 unsigned argc, char **argv) 1086 { 1087 int id; 1088 char dummy; 1089 1090 if (argc != 2) 1091 return -EINVAL; 1092 1093 if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) 1094 return -EINVAL; 1095 1096 return dm_stats_clear(dm_get_stats(md), id); 1097 } 1098 1099 static int message_stats_list(struct mapped_device *md, 1100 unsigned argc, char **argv, 1101 char *result, unsigned maxlen) 1102 { 1103 int r; 1104 const char *program = NULL; 1105 1106 if (argc < 1 || argc > 2) 1107 return -EINVAL; 1108 1109 if (argc > 1) { 1110 program = kstrdup(argv[1], GFP_KERNEL); 1111 if (!program) 1112 return -ENOMEM; 1113 } 1114 1115 r = dm_stats_list(dm_get_stats(md), program, result, maxlen); 1116 1117 kfree(program); 1118 1119 return r; 1120 } 1121 1122 static int message_stats_print(struct mapped_device *md, 1123 unsigned argc, char **argv, bool clear, 1124 char *result, unsigned maxlen) 1125 { 1126 int id; 1127 char dummy; 1128 unsigned long idx_start = 0, idx_len = ULONG_MAX; 1129 1130 if (argc != 2 && argc != 4) 1131 return -EINVAL; 1132 1133 if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) 1134 return -EINVAL; 1135 1136 if (argc > 3) { 1137 if (strcmp(argv[2], "-") && 1138 sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1) 1139 return -EINVAL; 1140 if (strcmp(argv[3], "-") && 1141 sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1) 1142 return -EINVAL; 1143 } 1144 1145 return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear, 1146 result, maxlen); 1147 } 1148 1149 static int message_stats_set_aux(struct mapped_device *md, 1150 unsigned argc, char **argv) 1151 { 1152 int id; 1153 char dummy; 1154 1155 if (argc != 3) 1156 return -EINVAL; 1157 1158 if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0) 1159 return -EINVAL; 1160 1161 return dm_stats_set_aux(dm_get_stats(md), id, argv[2]); 1162 } 1163 1164 int dm_stats_message(struct mapped_device *md, unsigned 
int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
		     char *result, unsigned maxlen)
{
	int r;

	/* All messages here must start with '@' */
	if (!strcasecmp(argv[0], "@stats_create"))
		r = message_stats_create(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_delete"))
		r = message_stats_delete(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_clear"))
		r = message_stats_clear(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_list"))
		r = message_stats_list(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print"))
		r = message_stats_print(md, argc, argv, false, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print_clear"))
		r = message_stats_print(md, argc, argv, true, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_set_aux"))
		r = message_stats_set_aux(md, argc, argv);
	else
		return 2; /* this wasn't a stats message */

	if (r == -EINVAL)
		DMWARN("Invalid parameters for message %s", argv[0]);

	return r;
}

int __init dm_statistics_init(void)
{
	shared_memory_amount = 0;
	dm_stat_need_rcu_barrier = 0;
	return 0;
}

void dm_statistics_exit(void)
{
	if (dm_stat_need_rcu_barrier)
		rcu_barrier();
	if (WARN_ON(shared_memory_amount))
		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}

module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");