// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * This code builds two trees of free cluster extents.
 * The trees are sorted by start of extent and by length of extent.
 * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in the trees.
 * In the extreme case, the code reads the on-disk bitmap to find free clusters.
 *
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>

#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * Maximum number of extents in tree.
 */
#define NTFS_MAX_WND_EXTENTS (32u * 1024u)

struct rb_node_key {
	struct rb_node node;
	size_t key;
};

struct e_node {
	struct rb_node_key start; /* Tree sorted by start. */
	struct rb_node_key count; /* Tree sorted by len. */
};

static int wnd_rescan(struct wnd_bitmap *wnd);
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);

static struct kmem_cache *ntfs_enode_cachep;

int __init ntfs3_init_bitmap(void)
{
	ntfs_enode_cachep = kmem_cache_create("ntfs3_enode_cache",
					      sizeof(struct e_node), 0,
					      SLAB_RECLAIM_ACCOUNT, NULL);
	return ntfs_enode_cachep ? 0 : -ENOMEM;
}

void ntfs3_exit_bitmap(void)
{
	kmem_cache_destroy(ntfs_enode_cachep);
}

/*
 * wnd_scan
 *
 * Scan range [wpos, wend) of window @buf.
 * @b_pos + @b_len track the biggest free fragment seen so far.
 *
 * Return: -1 if not found.
 */
static size_t wnd_scan(const void *buf, size_t wbit, u32 wpos, u32 wend,
		       size_t to_alloc, size_t *prev_tail, size_t *b_pos,
		       size_t *b_len)
{
	while (wpos < wend) {
		size_t free_len;
		u32 free_bits, end;
		u32 used = find_next_zero_bit_le(buf, wend, wpos);

		if (used >= wend) {
			if (*b_len < *prev_tail) {
				*b_pos = wbit - *prev_tail;
				*b_len = *prev_tail;
			}

			*prev_tail = 0;
			return -1;
		}

		if (used > wpos) {
			wpos = used;
			if (*b_len < *prev_tail) {
				*b_pos = wbit - *prev_tail;
				*b_len = *prev_tail;
			}

			*prev_tail = 0;
		}

		/*
		 * Now we have a fragment [wpos, wend) starting with 0.
		 */
		end = wpos + to_alloc - *prev_tail;
		free_bits = find_next_bit_le(buf, min(end, wend), wpos);

		free_len = *prev_tail + free_bits - wpos;

		if (*b_len < free_len) {
			*b_pos = wbit + wpos - *prev_tail;
			*b_len = free_len;
		}

		if (free_len >= to_alloc)
			return wbit + wpos - *prev_tail;

		if (free_bits >= wend) {
			*prev_tail += free_bits - wpos;
			return -1;
		}

		wpos = free_bits + 1;

		*prev_tail = 0;
	}

	return -1;
}
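/*
 * Illustrative sketch (not part of the driver) of how a caller is expected
 * to use wnd_scan(): 'prev_tail' carries the length of a free run that ends
 * at the current window boundary, so a free run spanning two windows can
 * still be recognized. See wnd_find() below for the real usage; the loop
 * shape here is simplified and the names are hypothetical:
 *
 *	size_t prev_tail = 0, b_pos = 0, b_len = 0, fnd;
 *
 *	for (iw = 0; iw < wnd->nwnd; iw++) {
 *		bh = wnd_map(wnd, iw);
 *		fnd = wnd_scan(bh->b_data, iw * wbits, 0, wbits, to_alloc,
 *			       &prev_tail, &b_pos, &b_len);
 *		put_bh(bh);
 *		if (fnd != MINUS_ONE_T)
 *			return fnd;	// found 'to_alloc' contiguous free bits
 *	}
 */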
/*
 * wnd_close - Frees all resources.
 */
void wnd_close(struct wnd_bitmap *wnd)
{
	struct rb_node *node, *next;

	kfree(wnd->free_bits);
	run_close(&wnd->run);

	node = rb_first(&wnd->start_tree);

	while (node) {
		next = rb_next(node);
		rb_erase(node, &wnd->start_tree);
		kmem_cache_free(ntfs_enode_cachep,
				rb_entry(node, struct e_node, start.node));
		node = next;
	}
}

static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *r = NULL;

	while (*p) {
		struct rb_node_key *k;

		k = rb_entry(*p, struct rb_node_key, node);
		if (v < k->key) {
			p = &(*p)->rb_left;
		} else if (v > k->key) {
			r = &k->node;
			p = &(*p)->rb_right;
		} else {
			return &k->node;
		}
	}

	return r;
}

/*
 * rb_insert_count - Helper function to insert into the special 'count' tree.
 */
static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_ckey = e->count.key;
	size_t e_skey = e->start.key;

	while (*p) {
		struct e_node *k =
			rb_entry(parent = *p, struct e_node, count.node);

		if (e_ckey > k->count.key) {
			p = &(*p)->rb_left;
		} else if (e_ckey < k->count.key) {
			p = &(*p)->rb_right;
		} else if (e_skey < k->start.key) {
			p = &(*p)->rb_left;
		} else if (e_skey > k->start.key) {
			p = &(*p)->rb_right;
		} else {
			WARN_ON(1);
			return false;
		}
	}

	rb_link_node(&e->count.node, parent, p);
	rb_insert_color(&e->count.node, root);
	return true;
}

/*
 * rb_insert_start - Helper function to insert into the special 'start' tree.
 */
static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_skey = e->start.key;

	while (*p) {
		struct e_node *k;

		parent = *p;

		k = rb_entry(parent, struct e_node, start.node);
		if (e_skey < k->start.key) {
			p = &(*p)->rb_left;
		} else if (e_skey > k->start.key) {
			p = &(*p)->rb_right;
		} else {
			WARN_ON(1);
			return false;
		}
	}

	rb_link_node(&e->start.node, parent, p);
	rb_insert_color(&e->start.node, root);
	return true;
}
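/*
 * Note on ordering (illustrative, derived from rb_insert_count() above):
 * the 'count' tree is kept in descending order of extent length, with ties
 * broken by ascending start, so rb_first() yields the largest free extent
 * and rb_last() the smallest one. For example, extents {start,len} =
 * {10,8}, {40,8}, {100,2} are visited by the 'count' tree in the order
 * {10,8}, {40,8}, {100,2}.
 */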
/*
 * wnd_add_free_ext - Adds a new extent of free space.
 * @build: 1 when building tree.
 */
static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
			     bool build)
{
	struct e_node *e, *e0 = NULL;
	size_t ib, end_in = bit + len;
	struct rb_node *n;

	if (build) {
		/* Use extent_min to filter too short extents. */
		if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
		    len <= wnd->extent_min) {
			wnd->uptodated = -1;
			return;
		}
	} else {
		/* Try to find extent before 'bit'. */
		n = rb_lookup(&wnd->start_tree, bit);

		if (!n) {
			n = rb_first(&wnd->start_tree);
		} else {
			e = rb_entry(n, struct e_node, start.node);
			n = rb_next(n);
			if (e->start.key + e->count.key == bit) {
				/* Remove left. */
				bit = e->start.key;
				len += e->count.key;
				rb_erase(&e->start.node, &wnd->start_tree);
				rb_erase(&e->count.node, &wnd->count_tree);
				wnd->count -= 1;
				e0 = e;
			}
		}

		while (n) {
			size_t next_end;

			e = rb_entry(n, struct e_node, start.node);
			next_end = e->start.key + e->count.key;
			if (e->start.key > end_in)
				break;

			/* Remove right. */
			n = rb_next(n);
			len += next_end - end_in;
			end_in = next_end;
			rb_erase(&e->start.node, &wnd->start_tree);
			rb_erase(&e->count.node, &wnd->count_tree);
			wnd->count -= 1;

			if (!e0)
				e0 = e;
			else
				kmem_cache_free(ntfs_enode_cachep, e);
		}

		if (wnd->uptodated != 1) {
			/* Check bits before 'bit'. */
			ib = wnd->zone_bit == wnd->zone_end ||
			     bit < wnd->zone_end ?
				     0 :
				     wnd->zone_end;

			while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
				bit -= 1;
				len += 1;
			}

			/* Check bits after 'end_in'. */
			ib = wnd->zone_bit == wnd->zone_end ||
			     end_in > wnd->zone_bit ?
				     wnd->nbits :
				     wnd->zone_bit;

			while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
				end_in += 1;
				len += 1;
			}
		}
	}
	/* Insert new fragment. */
	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		if (e0)
			kmem_cache_free(ntfs_enode_cachep, e0);

		wnd->uptodated = -1;

		/* Compare with smallest fragment. */
		n = rb_last(&wnd->count_tree);
		e = rb_entry(n, struct e_node, count.node);
		if (len <= e->count.key)
			goto out; /* Do not insert small fragments. */

		if (build) {
			struct e_node *e2;

			n = rb_prev(n);
			e2 = rb_entry(n, struct e_node, count.node);
			/* Smallest fragment will be 'e2->count.key'. */
			wnd->extent_min = e2->count.key;
		}

		/* Replace smallest fragment by new one. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		wnd->count -= 1;
	} else {
		e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
		if (!e) {
			wnd->uptodated = -1;
			goto out;
		}

		if (build && len <= wnd->extent_min)
			wnd->extent_min = len;
	}
	e->start.key = bit;
	e->count.key = len;
	if (len > wnd->extent_max)
		wnd->extent_max = len;

	rb_insert_start(&wnd->start_tree, e);
	rb_insert_count(&wnd->count_tree, e);
	wnd->count += 1;

out:;
}

/*
 * wnd_remove_free_ext - Remove a run from the cached free space.
 */
static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
{
	struct rb_node *n, *n3;
	struct e_node *e, *e3;
	size_t end_in = bit + len;
	size_t end3, end, new_key, new_len, max_new_len;

	/* Try to find extent before 'bit'. */
	n = rb_lookup(&wnd->start_tree, bit);

	if (!n)
		return;

	e = rb_entry(n, struct e_node, start.node);
	end = e->start.key + e->count.key;

	new_key = new_len = 0;
	len = e->count.key;

	/* Range [bit,end_in) must be inside 'e' or outside 'e' and 'n'. */
	if (e->start.key > bit)
		;
	else if (end_in <= end) {
		/* Range [bit,end_in) inside 'e'. */
		new_key = end_in;
		new_len = end - end_in;
		len = bit - e->start.key;
	} else if (bit > end) {
		bool bmax = false;

		n3 = rb_next(n);

		while (n3) {
			e3 = rb_entry(n3, struct e_node, start.node);
			if (e3->start.key >= end_in)
				break;

			if (e3->count.key == wnd->extent_max)
				bmax = true;

			end3 = e3->start.key + e3->count.key;
			if (end3 > end_in) {
				e3->start.key = end_in;
				rb_erase(&e3->count.node, &wnd->count_tree);
				e3->count.key = end3 - end_in;
				rb_insert_count(&wnd->count_tree, e3);
				break;
			}

			n3 = rb_next(n3);
			rb_erase(&e3->start.node, &wnd->start_tree);
			rb_erase(&e3->count.node, &wnd->count_tree);
			wnd->count -= 1;
			kmem_cache_free(ntfs_enode_cachep, e3);
		}
		if (!bmax)
			return;
		n3 = rb_first(&wnd->count_tree);
		wnd->extent_max =
			n3 ? rb_entry(n3, struct e_node, count.node)->count.key :
			     0;
		return;
	}

	if (e->count.key != wnd->extent_max) {
		;
	} else if (rb_prev(&e->count.node)) {
		;
	} else {
		n3 = rb_next(&e->count.node);
		max_new_len = max(len, new_len);
		if (!n3) {
			wnd->extent_max = max_new_len;
		} else {
			e3 = rb_entry(n3, struct e_node, count.node);
			wnd->extent_max = max(e3->count.key, max_new_len);
		}
	}

	if (!len) {
		if (new_len) {
			e->start.key = new_key;
			rb_erase(&e->count.node, &wnd->count_tree);
			e->count.key = new_len;
			rb_insert_count(&wnd->count_tree, e);
		} else {
			rb_erase(&e->start.node, &wnd->start_tree);
			rb_erase(&e->count.node, &wnd->count_tree);
			wnd->count -= 1;
			kmem_cache_free(ntfs_enode_cachep, e);
		}
		goto out;
	}
	rb_erase(&e->count.node, &wnd->count_tree);
	e->count.key = len;
	rb_insert_count(&wnd->count_tree, e);

	if (!new_len)
		goto out;

	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		wnd->uptodated = -1;

		/* Get minimal extent. */
		e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
			     count.node);
		if (e->count.key > new_len)
			goto out;

		/* Replace minimum. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		wnd->count -= 1;
	} else {
		e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
		if (!e)
			wnd->uptodated = -1;
	}

	if (e) {
		e->start.key = new_key;
		e->count.key = new_len;
		rb_insert_start(&wnd->start_tree, e);
		rb_insert_count(&wnd->count_tree, e);
		wnd->count += 1;
	}

out:
	if (!wnd->count && 1 != wnd->uptodated)
		wnd_rescan(wnd);
}

/*
 * wnd_rescan - Scan the entire bitmap. Used during initialization.
 */
static int wnd_rescan(struct wnd_bitmap *wnd)
{
	int err = 0;
	size_t prev_tail = 0;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u64 lbo, len = 0;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 wbits = 8 * sb->s_blocksize;
	u32 used, frb;
	size_t wpos, wbit, iw, vbo;
	struct buffer_head *bh = NULL;
	CLST lcn, clen;

	wnd->uptodated = 0;
	wnd->extent_max = 0;
	wnd->extent_min = MINUS_ONE_T;
	wnd->total_zeroes = 0;

	vbo = 0;

	for (iw = 0; iw < wnd->nwnd; iw++) {
		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		if (wnd->inited) {
			if (!wnd->free_bits[iw]) {
				/* All ones. */
				if (prev_tail) {
					wnd_add_free_ext(wnd,
							 vbo * 8 - prev_tail,
							 prev_tail, true);
					prev_tail = 0;
				}
				goto next_wnd;
			}
			if (wbits == wnd->free_bits[iw]) {
				/* All zeroes. */
				prev_tail += wbits;
				wnd->total_zeroes += wbits;
				goto next_wnd;
			}
		}

		if (!len) {
			u32 off = vbo & sbi->cluster_mask;

			if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
					      &lcn, &clen, NULL)) {
				err = -ENOENT;
				goto out;
			}

			lbo = ((u64)lcn << cluster_bits) + off;
			len = ((u64)clen << cluster_bits) - off;
		}

		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		if (!bh) {
			err = -EIO;
			goto out;
		}

		used = ntfs_bitmap_weight_le(bh->b_data, wbits);
		if (used < wbits) {
			frb = wbits - used;
			wnd->free_bits[iw] = frb;
			wnd->total_zeroes += frb;
		}

		wpos = 0;
		wbit = vbo * 8;

		if (wbit + wbits > wnd->nbits)
			wbits = wnd->nbits - wbit;

		do {
			used = find_next_zero_bit_le(bh->b_data, wbits, wpos);

			if (used > wpos && prev_tail) {
				wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
						 prev_tail, true);
				prev_tail = 0;
			}

			wpos = used;

			if (wpos >= wbits) {
				/* No free blocks. */
				prev_tail = 0;
				break;
			}

			frb = find_next_bit_le(bh->b_data, wbits, wpos);
			if (frb >= wbits) {
				/* Keep last free block. */
				prev_tail += frb - wpos;
				break;
			}

			wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
					 frb + prev_tail - wpos, true);

			/* Skip free block and first '1'. */
			wpos = frb + 1;
			/* Reset previous tail. */
			prev_tail = 0;
		} while (wpos < wbits);

next_wnd:

		if (bh)
			put_bh(bh);
		bh = NULL;

		vbo += blocksize;
		if (len) {
			len -= blocksize;
			lbo += blocksize;
		}
	}

	/* Add last block. */
	if (prev_tail)
		wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);

	/*
	 * Before the init cycle wnd->uptodated was 0.
	 * If any errors or limits occurred during initialization,
	 * wnd->uptodated is now -1.
	 * If 'uptodated' is still 0, the tree is fully up to date.
	 */
	if (!wnd->uptodated)
		wnd->uptodated = 1;

	if (wnd->zone_bit != wnd->zone_end) {
		size_t zlen = wnd->zone_end - wnd->zone_bit;

		wnd->zone_end = wnd->zone_bit;
		wnd_zone_set(wnd, wnd->zone_bit, zlen);
	}

out:
	return err;
}

int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
{
	int err;
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;

	init_rwsem(&wnd->rw_lock);

	wnd->sb = sb;
	wnd->nbits = nbits;
	wnd->total_zeroes = nbits;
	wnd->extent_max = MINUS_ONE_T;
	wnd->zone_bit = wnd->zone_end = 0;
	wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
	wnd->bits_last = nbits & (wbits - 1);
	if (!wnd->bits_last)
		wnd->bits_last = wbits;

	wnd->free_bits =
		kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
	if (!wnd->free_bits)
		return -ENOMEM;

	err = wnd_rescan(wnd);
	if (err)
		return err;

	wnd->inited = true;

	return 0;
}
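/*
 * Worked example for wnd_init() geometry (illustrative numbers, assuming a
 * 4K block size): wbits = 4096 * 8 = 32768 bits per window. For
 * nbits = 1000000 the bitmap occupies 125000 bytes, so nwnd = 31 windows:
 * 30 full windows plus a last window covering only
 * bits_last = 1000000 & (32768 - 1) = 16960 bits.
 */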
/*
 * wnd_map - Call sb_bread for requested window.
 */
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
{
	size_t vbo;
	CLST lcn, clen;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi;
	struct buffer_head *bh;
	u64 lbo;

	sbi = sb->s_fs_info;
	vbo = (u64)iw << sb->s_blocksize_bits;

	if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
			      NULL)) {
		return ERR_PTR(-ENOENT);
	}

	lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);

	bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
	if (!bh)
		return ERR_PTR(-EIO);

	return bh;
}

/*
 * wnd_set_free - Mark the bits range from bit to bit + bits as free.
 */
int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	int err = 0;
	struct super_block *sb = wnd->sb;
	size_t bits0 = bits;
	u32 wbits = 8 * sb->s_blocksize;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		lock_buffer(bh);

		ntfs_bitmap_clear_le(bh->b_data, wbit, op);

		wnd->free_bits[iw] += op;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		put_bh(bh);

		wnd->total_zeroes += op;
		bits -= op;
		wbit = 0;
		iw += 1;
	}

	wnd_add_free_ext(wnd, bit, bits0, false);

	return err;
}

/*
 * wnd_set_used - Mark the bits range from bit to bit + bits as used.
 */
int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	int err = 0;
	struct super_block *sb = wnd->sb;
	size_t bits0 = bits;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		lock_buffer(bh);

		ntfs_bitmap_set_le(bh->b_data, wbit, op);
		wnd->free_bits[iw] -= op;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		put_bh(bh);

		wnd->total_zeroes -= op;
		bits -= op;
		wbit = 0;
		iw += 1;
	}

	if (!RB_EMPTY_ROOT(&wnd->start_tree))
		wnd_remove_free_ext(wnd, bit, bits0);

	return err;
}

/*
 * wnd_set_used_safe - Mark the bits range from bit to bit + bits as used.
 *
 * Unlike wnd_set_used/wnd_set_free, this function does not fully trust the
 * in-memory state. It scans every bit in the bitmap and marks each free bit
 * as used.
 * @done - how many bits were marked as used.
 *
 * NOTE: normally *done should be 0.
 */
int wnd_set_used_safe(struct wnd_bitmap *wnd, size_t bit, size_t bits,
		      size_t *done)
{
	size_t i, from = 0, len = 0;
	int err = 0;

	*done = 0;
	for (i = 0; i < bits; i++) {
		if (wnd_is_free(wnd, bit + i, 1)) {
			if (!len)
				from = bit + i;
			len += 1;
		} else if (len) {
			err = wnd_set_used(wnd, from, len);
			*done += len;
			len = 0;
			if (err)
				break;
		}
	}

	if (len) {
		/* last fragment. */
		err = wnd_set_used(wnd, from, len);
		*done += len;
	}
	return err;
}

/*
 * wnd_is_free_hlp
 *
 * Return: True if all clusters [bit, bit+bits) are free (bitmap only).
 */
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		if (wbits != wnd->free_bits[iw]) {
			bool ret;
			struct buffer_head *bh = wnd_map(wnd, iw);

			if (IS_ERR(bh))
				return false;

			ret = are_bits_clear(bh->b_data, wbit, op);

			put_bh(bh);
			if (!ret)
				return false;
		}

		bits -= op;
		wbit = 0;
		iw += 1;
	}

	return true;
}

/*
 * wnd_is_free
 *
 * Return: True if all clusters [bit, bit+bits) are free.
 */
bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	bool ret;
	struct rb_node *n;
	size_t end;
	struct e_node *e;

	if (RB_EMPTY_ROOT(&wnd->start_tree))
		goto use_wnd;

	n = rb_lookup(&wnd->start_tree, bit);
	if (!n)
		goto use_wnd;

	e = rb_entry(n, struct e_node, start.node);

	end = e->start.key + e->count.key;

	if (bit < end && bit + bits <= end)
		return true;

use_wnd:
	ret = wnd_is_free_hlp(wnd, bit, bits);

	return ret;
}

/*
 * wnd_is_used
 *
 * Return: True if all clusters [bit, bit+bits) are used.
 */
bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	bool ret = false;
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);
	size_t end;
	struct rb_node *n;
	struct e_node *e;

	if (RB_EMPTY_ROOT(&wnd->start_tree))
		goto use_wnd;

	end = bit + bits;
	n = rb_lookup(&wnd->start_tree, end - 1);
	if (!n)
		goto use_wnd;

	e = rb_entry(n, struct e_node, start.node);
	if (e->start.key + e->count.key > bit)
		return false;

use_wnd:
	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		if (wnd->free_bits[iw]) {
			bool ret;
			struct buffer_head *bh = wnd_map(wnd, iw);

			if (IS_ERR(bh))
				goto out;

			ret = are_bits_set(bh->b_data, wbit, op);
			put_bh(bh);
			if (!ret)
				goto out;
		}

		bits -= op;
		wbit = 0;
		iw += 1;
	}
	ret = true;

out:
	return ret;
}
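/*
 * Illustrative call sketch (hypothetical caller, simplified): find and
 * immediately reserve 'count' free clusters near 'hint' with wnd_find()
 * below.
 *
 *	size_t lcn;
 *	size_t got = wnd_find(wnd, count, hint,
 *			      BITMAP_FIND_MARK_AS_USED, &lcn);
 *	if (!got)
 *		return -ENOSPC;	// no suitable free extent
 *	// 'lcn' holds the first allocated cluster, 'got' the run length,
 *	// which may be shorter than 'count' unless BITMAP_FIND_FULL was set.
 */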
/*
 * wnd_find - Look for free space.
 *
 * - flags - BITMAP_FIND_XXX flags
 *
 * Return: 0 if not found.
 */
size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
		size_t flags, size_t *allocated)
{
	struct super_block *sb;
	u32 wbits, wpos, wzbit, wzend;
	size_t fnd, max_alloc, b_len, b_pos;
	size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
	size_t to_alloc0 = to_alloc;
	const struct e_node *e;
	const struct rb_node *pr, *cr;
	u8 log2_bits;
	bool fbits_valid;
	struct buffer_head *bh;

	/* Fast checking for available free space. */
	if (flags & BITMAP_FIND_FULL) {
		size_t zeroes = wnd_zeroes(wnd);

		zeroes -= wnd->zone_end - wnd->zone_bit;
		if (zeroes < to_alloc0)
			goto no_space;

		if (to_alloc0 > wnd->extent_max)
			goto no_space;
	} else {
		if (to_alloc > wnd->extent_max)
			to_alloc = wnd->extent_max;
	}

	if (wnd->zone_bit <= hint && hint < wnd->zone_end)
		hint = wnd->zone_end;

	max_alloc = wnd->nbits;
	b_len = b_pos = 0;

	if (hint >= max_alloc)
		hint = 0;

	if (RB_EMPTY_ROOT(&wnd->start_tree)) {
		if (wnd->uptodated == 1) {
			/* Extents tree is updated -> No free space. */
			goto no_space;
		}
		goto scan_bitmap;
	}

	e = NULL;
	if (!hint)
		goto allocate_biggest;

	/* Use hint: Enumerate extents by start >= hint. */
	pr = NULL;
	cr = wnd->start_tree.rb_node;

	for (;;) {
		e = rb_entry(cr, struct e_node, start.node);

		if (e->start.key == hint)
			break;

		if (e->start.key < hint) {
			pr = cr;
			cr = cr->rb_right;
			if (!cr)
				break;
			continue;
		}

		cr = cr->rb_left;
		if (!cr) {
			e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
			break;
		}
	}

	if (!e)
		goto allocate_biggest;

	if (e->start.key + e->count.key > hint) {
		/* We have found an extent with 'hint' inside. */
		size_t len = e->start.key + e->count.key - hint;

		if (len >= to_alloc && hint + to_alloc <= max_alloc) {
			fnd = hint;
			goto found;
		}

		if (!(flags & BITMAP_FIND_FULL)) {
			if (len > to_alloc)
				len = to_alloc;

			if (hint + len <= max_alloc) {
				fnd = hint;
				to_alloc = len;
				goto found;
			}
		}
	}

allocate_biggest:
	/* Allocate from biggest free extent. */
	e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
	if (e->count.key != wnd->extent_max)
		wnd->extent_max = e->count.key;

	if (e->count.key < max_alloc) {
		if (e->count.key >= to_alloc) {
			;
		} else if (flags & BITMAP_FIND_FULL) {
			if (e->count.key < to_alloc0) {
				/* Biggest free block is less than requested. */
				goto no_space;
			}
			to_alloc = e->count.key;
		} else if (-1 != wnd->uptodated) {
			to_alloc = e->count.key;
		} else {
			/* Check if we can use more bits. */
			size_t op, max_check;
			struct rb_root start_tree;

			memcpy(&start_tree, &wnd->start_tree,
			       sizeof(struct rb_root));
			memset(&wnd->start_tree, 0, sizeof(struct rb_root));

			max_check = e->start.key + to_alloc;
			if (max_check > max_alloc)
				max_check = max_alloc;
			for (op = e->start.key + e->count.key; op < max_check;
			     op++) {
				if (!wnd_is_free(wnd, op, 1))
					break;
			}
			memcpy(&wnd->start_tree, &start_tree,
			       sizeof(struct rb_root));
			to_alloc = op - e->start.key;
		}

		/* Prepare to return. */
		fnd = e->start.key;
		if (e->start.key + to_alloc > max_alloc)
			to_alloc = max_alloc - e->start.key;
		goto found;
	}

	if (wnd->uptodated == 1) {
		/* Extents tree is updated -> no free space. */
		goto no_space;
	}

	b_len = e->count.key;
	b_pos = e->start.key;

scan_bitmap:
	sb = wnd->sb;
	log2_bits = sb->s_blocksize_bits + 3;

	/* At most two ranges [hint, max_alloc) + [0, hint). */
Again:

	/* TODO: Optimize request for case nbits > wbits. */
	iw = hint >> log2_bits;
	wbits = sb->s_blocksize * 8;
	wpos = hint & (wbits - 1);
	prev_tail = 0;
	fbits_valid = true;

	if (max_alloc == wnd->nbits) {
		nwnd = wnd->nwnd;
	} else {
		size_t t = max_alloc + wbits - 1;

		nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
	}

	/* Enumerate all windows. */
	for (; iw < nwnd; iw++) {
		wbit = iw << log2_bits;

		if (!wnd->free_bits[iw]) {
			if (prev_tail > b_len) {
				b_pos = wbit - prev_tail;
				b_len = prev_tail;
			}

			/* Skip fully used window. */
			prev_tail = 0;
			wpos = 0;
			continue;
		}

		if (unlikely(iw + 1 == nwnd)) {
			if (max_alloc == wnd->nbits) {
				wbits = wnd->bits_last;
			} else {
				size_t t = max_alloc & (wbits - 1);

				if (t) {
					wbits = t;
					fbits_valid = false;
				}
			}
		}

		if (wnd->zone_end > wnd->zone_bit) {
			ebit = wbit + wbits;
			zbit = max(wnd->zone_bit, wbit);
			zend = min(wnd->zone_end, ebit);

			/* Here we have a window [wbit, ebit) and zone [zbit, zend). */
			if (zend <= zbit) {
				/* Zone does not overlap window. */
			} else {
				wzbit = zbit - wbit;
				wzend = zend - wbit;

				/* Zone overlaps window. */
				if (wnd->free_bits[iw] == wzend - wzbit) {
					prev_tail = 0;
					wpos = 0;
					continue;
				}

				/* Scan two ranges window: [wbit, zbit) and [zend, ebit). */
				bh = wnd_map(wnd, iw);

				if (IS_ERR(bh)) {
					/* TODO: Error */
					prev_tail = 0;
					wpos = 0;
					continue;
				}

				/* Scan range [wbit, zbit). */
				if (wpos < wzbit) {
					/* Scan range [wpos, zbit). */
					fnd = wnd_scan(bh->b_data, wbit, wpos,
						       wzbit, to_alloc,
						       &prev_tail, &b_pos,
						       &b_len);
					if (fnd != MINUS_ONE_T) {
						put_bh(bh);
						goto found;
					}
				}

				prev_tail = 0;

				/* Scan range [zend, ebit). */
				if (wzend < wbits) {
					fnd = wnd_scan(bh->b_data, wbit,
						       max(wzend, wpos), wbits,
						       to_alloc, &prev_tail,
						       &b_pos, &b_len);
					if (fnd != MINUS_ONE_T) {
						put_bh(bh);
						goto found;
					}
				}

				wpos = 0;
				put_bh(bh);
				continue;
			}
		}

		/* Current window does not overlap zone. */
		if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
			/* Window is empty. */
			if (prev_tail + wbits >= to_alloc) {
				fnd = wbit + wpos - prev_tail;
				goto found;
			}

			/* Increase 'prev_tail' and process next window. */
			prev_tail += wbits;
			wpos = 0;
			continue;
		}

		/* Read window. */
		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			// TODO: Error.
			prev_tail = 0;
			wpos = 0;
			continue;
		}

		/* Scan range [wpos, wbits). */
		fnd = wnd_scan(bh->b_data, wbit, wpos, wbits, to_alloc,
			       &prev_tail, &b_pos, &b_len);
		put_bh(bh);
		if (fnd != MINUS_ONE_T)
			goto found;
	}

	if (b_len < prev_tail) {
		/* The last fragment. */
		b_len = prev_tail;
		b_pos = max_alloc - prev_tail;
	}

	if (hint) {
		/*
		 * We have scanned range [hint, max_alloc).
		 * Prepare to scan range [0, hint + to_alloc).
		 */
		size_t nextmax = hint + to_alloc;

		if (likely(nextmax >= hint) && nextmax < max_alloc)
			max_alloc = nextmax;
		hint = 0;
		goto Again;
	}

	if (!b_len)
		goto no_space;

	wnd->extent_max = b_len;

	if (flags & BITMAP_FIND_FULL)
		goto no_space;

	fnd = b_pos;
	to_alloc = b_len;

found:
	if (flags & BITMAP_FIND_MARK_AS_USED) {
		/* TODO: Optimize remove extent (pass 'e'?). */
		if (wnd_set_used(wnd, fnd, to_alloc))
			goto no_space;
	} else if (wnd->extent_max != MINUS_ONE_T &&
		   to_alloc > wnd->extent_max) {
		wnd->extent_max = to_alloc;
	}

	*allocated = fnd;
	return to_alloc;

no_space:
	return 0;
}

/*
 * wnd_extend - Extend bitmap ($MFT bitmap).
 */
int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
{
	int err;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;
	u32 b0, new_last;
	size_t bits, iw, new_wnd;
	size_t old_bits = wnd->nbits;
	u16 *new_free;

	if (new_bits <= old_bits)
		return -EINVAL;

	/* Align to 8 byte boundary. */
	new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
	new_last = new_bits & (wbits - 1);
	if (!new_last)
		new_last = wbits;

	if (new_wnd != wnd->nwnd) {
		new_free = kmalloc_array(new_wnd, sizeof(u16), GFP_NOFS);
		if (!new_free)
			return -ENOMEM;

		memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
		memset(new_free + wnd->nwnd, 0,
		       (new_wnd - wnd->nwnd) * sizeof(short));
		kfree(wnd->free_bits);
		wnd->free_bits = new_free;
	}

	/* Zero bits [old_bits,new_bits). */
	bits = new_bits - old_bits;
	b0 = old_bits & (wbits - 1);

	for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
		u32 op;
		size_t frb;
		u64 vbo, lbo, bytes;
		struct buffer_head *bh;

		if (iw + 1 == new_wnd)
			wbits = new_last;

		op = b0 + bits > wbits ? wbits - b0 : bits;
		vbo = (u64)iw * blocksize;

		err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
		if (err)
			break;

		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		if (!bh)
			return -EIO;

		lock_buffer(bh);

		ntfs_bitmap_clear_le(bh->b_data, b0, blocksize * 8 - b0);
		frb = wbits - ntfs_bitmap_weight_le(bh->b_data, wbits);
		wnd->total_zeroes += frb - wnd->free_bits[iw];
		wnd->free_bits[iw] = frb;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		/* err = sync_dirty_buffer(bh); */

		b0 = 0;
		bits -= op;
	}

	wnd->nbits = new_bits;
	wnd->nwnd = new_wnd;
	wnd->bits_last = new_last;

	wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);

	return 0;
}

void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
{
	size_t zlen = wnd->zone_end - wnd->zone_bit;

	if (zlen)
		wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);

	if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
		wnd_remove_free_ext(wnd, lcn, len);

	wnd->zone_bit = lcn;
	wnd->zone_end = lcn + len;
}

int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	u32 wbits = 8 * sb->s_blocksize;
	CLST len = 0, lcn = 0, done = 0;
	CLST minlen = bytes_to_cluster(sbi, range->minlen);
	CLST lcn_from = bytes_to_cluster(sbi, range->start);
	size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
	u32 wbit = lcn_from & (wbits - 1);
	CLST lcn_to;

	if (!minlen)
		minlen = 1;

	if (range->len == (u64)-1)
		lcn_to = wnd->nbits;
	else
		lcn_to = bytes_to_cluster(sbi, range->start + range->len);

	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);

	for (; iw < wnd->nwnd; iw++, wbit = 0) {
		CLST lcn_wnd = iw * wbits;
		struct buffer_head *bh;

		if (lcn_wnd > lcn_to)
			break;

		if (!wnd->free_bits[iw])
			continue;

		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		if (lcn_wnd + wbits > lcn_to)
			wbits = lcn_to - lcn_wnd;

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		for (; wbit < wbits; wbit++) {
			if (!test_bit_le(wbit, bh->b_data)) {
				if (!len)
					lcn = lcn_wnd + wbit;
				len += 1;
				continue;
			}
			if (len >= minlen) {
				err = ntfs_discard(sbi, lcn, len);
				if (err)
					goto out;
				done += len;
			}
			len = 0;
		}
		put_bh(bh);
	}

	/* Process the last fragment. */
	if (len >= minlen) {
		err = ntfs_discard(sbi, lcn, len);
		if (err)
			goto out;
		done += len;
	}

out:
	range->len = (u64)done << sbi->cluster_bits;

	up_read(&wnd->rw_lock);

	return err;
}

#if BITS_PER_LONG == 64
typedef __le64 bitmap_ulong;
#define cpu_to_ul(x) cpu_to_le64(x)
#define ul_to_cpu(x) le64_to_cpu(x)
#else
typedef __le32 bitmap_ulong;
#define cpu_to_ul(x) cpu_to_le32(x)
#define ul_to_cpu(x) le32_to_cpu(x)
#endif

void ntfs_bitmap_set_le(void *map, unsigned int start, int len)
{
	bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	bitmap_ulong mask_to_set = cpu_to_ul(BITMAP_FIRST_WORD_MASK(start));

	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = cpu_to_ul(~0UL);
		p++;
	}
	if (len) {
		mask_to_set &= cpu_to_ul(BITMAP_LAST_WORD_MASK(size));
		*p |= mask_to_set;
	}
}

void ntfs_bitmap_clear_le(void *map, unsigned int start, int len)
{
	bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
	const unsigned int size = start + len;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	bitmap_ulong mask_to_clear = cpu_to_ul(BITMAP_FIRST_WORD_MASK(start));

	while (len - bits_to_clear >= 0) {
		*p &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = cpu_to_ul(~0UL);
		p++;
	}
	if (len) {
		mask_to_clear &= cpu_to_ul(BITMAP_LAST_WORD_MASK(size));
		*p &= ~mask_to_clear;
	}
}

unsigned int ntfs_bitmap_weight_le(const void *bitmap, int bits)
{
	const ulong *bmp = bitmap;
	unsigned int k, lim = bits / BITS_PER_LONG;
	unsigned int w = 0;

	for (k = 0; k < lim; k++)
		w += hweight_long(bmp[k]);

	if (bits % BITS_PER_LONG) {
		w += hweight_long(ul_to_cpu(((bitmap_ulong *)bitmap)[k]) &
				  BITMAP_LAST_WORD_MASK(bits));
	}

	return w;
}
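
/*
 * Worked example for ntfs_bitmap_set_le() (illustrative, 64-bit kernel):
 * start = 70, len = 3. BIT_WORD(70) selects word 1, bits_to_set = 58, so
 * the whole-word loop is skipped (len - bits_to_set < 0). The final mask is
 * BITMAP_FIRST_WORD_MASK(70) & BITMAP_LAST_WORD_MASK(73), i.e. bits 6..8 of
 * word 1, which are exactly the little-endian bitmap bits 70..72.
 */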