// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * This code builds two trees of free cluster extents.
 * The trees are sorted by start of extent and by length of extent.
 * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in the trees.
 * In the extreme case the code reads the on-disk bitmap to find free clusters.
 *
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>

#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * Maximum number of extents in tree.
 */
#define NTFS_MAX_WND_EXTENTS (32u * 1024u)

struct rb_node_key {
	struct rb_node node;
	size_t key;
};

struct e_node {
	struct rb_node_key start; /* Tree sorted by start. */
	struct rb_node_key count; /* Tree sorted by len. */
};

static int wnd_rescan(struct wnd_bitmap *wnd);
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);

static struct kmem_cache *ntfs_enode_cachep;

int __init ntfs3_init_bitmap(void)
{
	ntfs_enode_cachep =
		kmem_cache_create("ntfs3_enode_cache", sizeof(struct e_node), 0,
				  SLAB_RECLAIM_ACCOUNT, NULL);
	return ntfs_enode_cachep ? 0 : -ENOMEM;
}

void ntfs3_exit_bitmap(void)
{
	kmem_cache_destroy(ntfs_enode_cachep);
}

/*
 * wnd_scan
 *
 * b_pos + b_len - biggest fragment found so far.
 * Scan the range [wpos, wend) of window @buf.
 *
 * Return: -1 if not found.
 */
static size_t wnd_scan(const void *buf, size_t wbit, u32 wpos, u32 wend,
		       size_t to_alloc, size_t *prev_tail, size_t *b_pos,
		       size_t *b_len)
{
	while (wpos < wend) {
		size_t free_len;
		u32 free_bits, end;
		u32 used = find_next_zero_bit_le(buf, wend, wpos);

		if (used >= wend) {
			if (*b_len < *prev_tail) {
				*b_pos = wbit - *prev_tail;
				*b_len = *prev_tail;
			}

			*prev_tail = 0;
			return -1;
		}

		if (used > wpos) {
			wpos = used;
			if (*b_len < *prev_tail) {
				*b_pos = wbit - *prev_tail;
				*b_len = *prev_tail;
			}

			*prev_tail = 0;
		}

		/*
		 * Now we have a fragment [wpos, wend) starting with 0.
		 */
		end = wpos + to_alloc - *prev_tail;
		free_bits = find_next_bit_le(buf, min(end, wend), wpos);

		free_len = *prev_tail + free_bits - wpos;

		if (*b_len < free_len) {
			*b_pos = wbit + wpos - *prev_tail;
			*b_len = free_len;
		}

		if (free_len >= to_alloc)
			return wbit + wpos - *prev_tail;

		if (free_bits >= wend) {
			*prev_tail += free_bits - wpos;
			return -1;
		}

		wpos = free_bits + 1;

		*prev_tail = 0;
	}

	return -1;
}

/*
 * wnd_close - Frees all resources.
 */
void wnd_close(struct wnd_bitmap *wnd)
{
	struct rb_node *node, *next;

	kfree(wnd->free_bits);
	run_close(&wnd->run);

	node = rb_first(&wnd->start_tree);

	while (node) {
		next = rb_next(node);
		rb_erase(node, &wnd->start_tree);
		kmem_cache_free(ntfs_enode_cachep,
				rb_entry(node, struct e_node, start.node));
		node = next;
	}
}

static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *r = NULL;

	while (*p) {
		struct rb_node_key *k;

		k = rb_entry(*p, struct rb_node_key, node);
		if (v < k->key) {
			p = &(*p)->rb_left;
		} else if (v > k->key) {
			r = &k->node;
			p = &(*p)->rb_right;
		} else {
			return &k->node;
		}
	}

	return r;
}

/*
 * rb_insert_count - Helper function to insert into the special 'count' tree.
 */
static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_ckey = e->count.key;
	size_t e_skey = e->start.key;

	while (*p) {
		struct e_node *k =
			rb_entry(parent = *p, struct e_node, count.node);

		if (e_ckey > k->count.key) {
			p = &(*p)->rb_left;
		} else if (e_ckey < k->count.key) {
			p = &(*p)->rb_right;
		} else if (e_skey < k->start.key) {
			p = &(*p)->rb_left;
		} else if (e_skey > k->start.key) {
			p = &(*p)->rb_right;
		} else {
			WARN_ON(1);
			return false;
		}
	}

	rb_link_node(&e->count.node, parent, p);
	rb_insert_color(&e->count.node, root);
	return true;
}

/*
 * rb_insert_start - Helper function to insert into the special 'start' tree.
 */
static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	size_t e_skey = e->start.key;

	while (*p) {
		struct e_node *k;

		parent = *p;

		k = rb_entry(parent, struct e_node, start.node);
		if (e_skey < k->start.key) {
			p = &(*p)->rb_left;
		} else if (e_skey > k->start.key) {
			p = &(*p)->rb_right;
		} else {
			WARN_ON(1);
			return false;
		}
	}

	rb_link_node(&e->start.node, parent, p);
	rb_insert_color(&e->start.node, root);
	return true;
}

/*
 * wnd_add_free_ext - Adds a new extent of free space.
 * @build: 1 when building tree.
 */
static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
			     bool build)
{
	struct e_node *e, *e0 = NULL;
	size_t ib, end_in = bit + len;
	struct rb_node *n;

	if (build) {
		/* Use extent_min to filter too short extents. */
		if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
		    len <= wnd->extent_min) {
			wnd->uptodated = -1;
			return;
		}
	} else {
		/* Try to find extent before 'bit'. */
		n = rb_lookup(&wnd->start_tree, bit);

		if (!n) {
			n = rb_first(&wnd->start_tree);
		} else {
			e = rb_entry(n, struct e_node, start.node);
			n = rb_next(n);
			if (e->start.key + e->count.key == bit) {
				/* Remove left. */
				bit = e->start.key;
				len += e->count.key;
				rb_erase(&e->start.node, &wnd->start_tree);
				rb_erase(&e->count.node, &wnd->count_tree);
				wnd->count -= 1;
				e0 = e;
			}
		}

		while (n) {
			size_t next_end;

			e = rb_entry(n, struct e_node, start.node);
			next_end = e->start.key + e->count.key;
			if (e->start.key > end_in)
				break;

			/* Remove right. */
			n = rb_next(n);
			len += next_end - end_in;
			end_in = next_end;
			rb_erase(&e->start.node, &wnd->start_tree);
			rb_erase(&e->count.node, &wnd->count_tree);
			wnd->count -= 1;

			if (!e0)
				e0 = e;
			else
				kmem_cache_free(ntfs_enode_cachep, e);
		}

		if (wnd->uptodated != 1) {
			/* Check bits before 'bit'. */
			ib = wnd->zone_bit == wnd->zone_end ||
			     bit < wnd->zone_end
				     ? 0
				     : wnd->zone_end;

			while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
				bit -= 1;
				len += 1;
			}

			/* Check bits after 'end_in'. */
			ib = wnd->zone_bit == wnd->zone_end ||
			     end_in > wnd->zone_bit
				     ? wnd->nbits
				     : wnd->zone_bit;

			while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
				end_in += 1;
				len += 1;
			}
		}
	}
	/* Insert new fragment. */
	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		if (e0)
			kmem_cache_free(ntfs_enode_cachep, e0);

		wnd->uptodated = -1;

		/* Compare with smallest fragment. */
		n = rb_last(&wnd->count_tree);
		e = rb_entry(n, struct e_node, count.node);
		if (len <= e->count.key)
			goto out; /* Do not insert small fragments. */

		if (build) {
			struct e_node *e2;

			n = rb_prev(n);
			e2 = rb_entry(n, struct e_node, count.node);
			/* Smallest fragment will be 'e2->count.key'. */
			wnd->extent_min = e2->count.key;
		}

		/* Replace smallest fragment by new one. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		wnd->count -= 1;
	} else {
		e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
		if (!e) {
			wnd->uptodated = -1;
			goto out;
		}

		if (build && len <= wnd->extent_min)
			wnd->extent_min = len;
	}
	e->start.key = bit;
	e->count.key = len;
	if (len > wnd->extent_max)
		wnd->extent_max = len;

	rb_insert_start(&wnd->start_tree, e);
	rb_insert_count(&wnd->count_tree, e);
	wnd->count += 1;

out:;
}

/*
 * wnd_remove_free_ext - Remove a run from the cached free space.
 */
static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
{
	struct rb_node *n, *n3;
	struct e_node *e, *e3;
	size_t end_in = bit + len;
	size_t end3, end, new_key, new_len, max_new_len;

	/* Try to find extent before 'bit'. */
	n = rb_lookup(&wnd->start_tree, bit);

	if (!n)
		return;

	e = rb_entry(n, struct e_node, start.node);
	end = e->start.key + e->count.key;

	new_key = new_len = 0;
	len = e->count.key;

	/* Range [bit, end_in) must be inside 'e' or outside 'e' and 'n'. */
	if (e->start.key > bit)
		;
	else if (end_in <= end) {
		/* Range [bit, end_in) inside 'e'. */
		new_key = end_in;
		new_len = end - end_in;
		len = bit - e->start.key;
	} else if (bit > end) {
		bool bmax = false;

		n3 = rb_next(n);

		while (n3) {
			e3 = rb_entry(n3, struct e_node, start.node);
			if (e3->start.key >= end_in)
				break;

			if (e3->count.key == wnd->extent_max)
				bmax = true;

			end3 = e3->start.key + e3->count.key;
			if (end3 > end_in) {
				e3->start.key = end_in;
				rb_erase(&e3->count.node, &wnd->count_tree);
				e3->count.key = end3 - end_in;
				rb_insert_count(&wnd->count_tree, e3);
				break;
			}

			n3 = rb_next(n3);
			rb_erase(&e3->start.node, &wnd->start_tree);
			rb_erase(&e3->count.node, &wnd->count_tree);
			wnd->count -= 1;
			kmem_cache_free(ntfs_enode_cachep, e3);
		}
		if (!bmax)
			return;
		n3 = rb_first(&wnd->count_tree);
		wnd->extent_max =
			n3 ? rb_entry(n3, struct e_node, count.node)->count.key
			   : 0;
		return;
	}

	if (e->count.key != wnd->extent_max) {
		;
	} else if (rb_prev(&e->count.node)) {
		;
	} else {
		n3 = rb_next(&e->count.node);
		max_new_len = max(len, new_len);
		if (!n3) {
			wnd->extent_max = max_new_len;
		} else {
			e3 = rb_entry(n3, struct e_node, count.node);
			wnd->extent_max = max(e3->count.key, max_new_len);
		}
	}

	if (!len) {
		if (new_len) {
			e->start.key = new_key;
			rb_erase(&e->count.node, &wnd->count_tree);
			e->count.key = new_len;
			rb_insert_count(&wnd->count_tree, e);
		} else {
			rb_erase(&e->start.node, &wnd->start_tree);
			rb_erase(&e->count.node, &wnd->count_tree);
			wnd->count -= 1;
			kmem_cache_free(ntfs_enode_cachep, e);
		}
		goto out;
	}
	rb_erase(&e->count.node, &wnd->count_tree);
	e->count.key = len;
	rb_insert_count(&wnd->count_tree, e);

	if (!new_len)
		goto out;

	if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
		wnd->uptodated = -1;

		/* Get minimal extent. */
		e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
			     count.node);
		if (e->count.key > new_len)
			goto out;

		/* Replace minimum. */
		rb_erase(&e->start.node, &wnd->start_tree);
		rb_erase(&e->count.node, &wnd->count_tree);
		wnd->count -= 1;
	} else {
		e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
		if (!e)
			wnd->uptodated = -1;
	}

	if (e) {
		e->start.key = new_key;
		e->count.key = new_len;
		rb_insert_start(&wnd->start_tree, e);
		rb_insert_count(&wnd->count_tree, e);
		wnd->count += 1;
	}

out:
	if (!wnd->count && 1 != wnd->uptodated)
		wnd_rescan(wnd);
}

/*
 * wnd_rescan - Scan the entire bitmap. Used during initialization.
 */
static int wnd_rescan(struct wnd_bitmap *wnd)
{
	int err = 0;
	size_t prev_tail = 0;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u64 lbo, len = 0;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 wbits = 8 * sb->s_blocksize;
	u32 used, frb;
	size_t wpos, wbit, iw, vbo;
	struct buffer_head *bh = NULL;
	CLST lcn, clen;

	wnd->uptodated = 0;
	wnd->extent_max = 0;
	wnd->extent_min = MINUS_ONE_T;
	wnd->total_zeroes = 0;

	vbo = 0;

	for (iw = 0; iw < wnd->nwnd; iw++) {
		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		if (wnd->inited) {
			if (!wnd->free_bits[iw]) {
				/* All ones. */
				if (prev_tail) {
					wnd_add_free_ext(wnd,
							 vbo * 8 - prev_tail,
							 prev_tail, true);
					prev_tail = 0;
				}
				goto next_wnd;
			}
			if (wbits == wnd->free_bits[iw]) {
				/* All zeroes. */
				prev_tail += wbits;
				wnd->total_zeroes += wbits;
				goto next_wnd;
			}
		}

		if (!len) {
			u32 off = vbo & sbi->cluster_mask;

			if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
					      &lcn, &clen, NULL)) {
				err = -ENOENT;
				goto out;
			}

			lbo = ((u64)lcn << cluster_bits) + off;
			len = ((u64)clen << cluster_bits) - off;
		}

		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		if (!bh) {
			err = -EIO;
			goto out;
		}

		used = ntfs_bitmap_weight_le(bh->b_data, wbits);
		if (used < wbits) {
			frb = wbits - used;
			wnd->free_bits[iw] = frb;
			wnd->total_zeroes += frb;
		}

		wpos = 0;
		wbit = vbo * 8;

		if (wbit + wbits > wnd->nbits)
			wbits = wnd->nbits - wbit;

		do {
			used = find_next_zero_bit_le(bh->b_data, wbits, wpos);

			if (used > wpos && prev_tail) {
				wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
						 prev_tail, true);
				prev_tail = 0;
			}

			wpos = used;

			if (wpos >= wbits) {
				/* No free blocks. */
				prev_tail = 0;
				break;
			}

			frb = find_next_bit_le(bh->b_data, wbits, wpos);
			if (frb >= wbits) {
				/* Keep last free block. */
				prev_tail += frb - wpos;
				break;
			}

			wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
					 frb + prev_tail - wpos, true);

			/* Skip free block and first '1'. */
			wpos = frb + 1;
			/* Reset previous tail. */
			prev_tail = 0;
		} while (wpos < wbits);

next_wnd:

		if (bh)
			put_bh(bh);
		bh = NULL;

		vbo += blocksize;
		if (len) {
			len -= blocksize;
			lbo += blocksize;
		}
	}

	/* Add last block. */
	if (prev_tail)
		wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);

	/*
	 * Before the init cycle wnd->uptodated was 0.
	 * If any errors or limits occurred during initialization then
	 * wnd->uptodated will be -1.
	 * If 'uptodated' is still 0 then the tree is really up to date.
	 */
	if (!wnd->uptodated)
		wnd->uptodated = 1;

	if (wnd->zone_bit != wnd->zone_end) {
		size_t zlen = wnd->zone_end - wnd->zone_bit;

		wnd->zone_end = wnd->zone_bit;
		wnd_zone_set(wnd, wnd->zone_bit, zlen);
	}

out:
	return err;
}

int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
{
	int err;
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;

	init_rwsem(&wnd->rw_lock);

	wnd->sb = sb;
	wnd->nbits = nbits;
	wnd->total_zeroes = nbits;
	wnd->extent_max = MINUS_ONE_T;
	wnd->zone_bit = wnd->zone_end = 0;
	wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
	wnd->bits_last = nbits & (wbits - 1);
	if (!wnd->bits_last)
		wnd->bits_last = wbits;

	wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
	if (!wnd->free_bits)
		return -ENOMEM;

	err = wnd_rescan(wnd);
	if (err)
		return err;

	wnd->inited = true;

	return 0;
}
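
/*
 * Worked example (illustrative, not from the original sources): assuming a
 * 4K block size, one bitmap block ("window") covers wbits = 4096 * 8 = 32768
 * bits.  Bit 100000 then lives in window iw = 100000 >> 15 = 3 at offset
 * wbit = 100000 & 32767 = 1696, which is exactly the
 * "bit >> (sb->s_blocksize_bits + 3)" / "bit & (wbits - 1)" split used by the
 * functions below.  The last window may cover fewer bits; wnd->bits_last
 * holds that count.
 */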

/*
 * wnd_map - Call sb_bread for requested window.
 */
static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
{
	size_t vbo;
	CLST lcn, clen;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi;
	struct buffer_head *bh;
	u64 lbo;

	sbi = sb->s_fs_info;
	vbo = (u64)iw << sb->s_blocksize_bits;

	if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
			      NULL)) {
		return ERR_PTR(-ENOENT);
	}

	lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);

	bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
	if (!bh)
		return ERR_PTR(-EIO);

	return bh;
}

/*
 * wnd_set_free - Mark the bits range from bit to bit + bits as free.
 */
int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	int err = 0;
	struct super_block *sb = wnd->sb;
	size_t bits0 = bits;
	u32 wbits = 8 * sb->s_blocksize;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		lock_buffer(bh);

		ntfs_bitmap_clear_le(bh->b_data, wbit, op);

		wnd->free_bits[iw] += op;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		put_bh(bh);

		wnd->total_zeroes += op;
		bits -= op;
		wbit = 0;
		iw += 1;
	}

	wnd_add_free_ext(wnd, bit, bits0, false);

	return err;
}

/*
 * wnd_set_used - Mark the bits range from bit to bit + bits as used.
 */
int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	int err = 0;
	struct super_block *sb = wnd->sb;
	size_t bits0 = bits;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);
	struct buffer_head *bh;

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		lock_buffer(bh);

		ntfs_bitmap_set_le(bh->b_data, wbit, op);
		wnd->free_bits[iw] -= op;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		put_bh(bh);

		wnd->total_zeroes -= op;
		bits -= op;
		wbit = 0;
		iw += 1;
	}

	if (!RB_EMPTY_ROOT(&wnd->start_tree))
		wnd_remove_free_ext(wnd, bit, bits0);

	return err;
}

/*
 * wnd_set_used_safe - Mark the bits range from bit to bit + bits as used.
 *
 * Unlike wnd_set_used/wnd_set_free, this function is not fully trusted.
 * It scans every bit in the bitmap and marks each free bit as used.
 * @done - how many bits were marked as used.
 *
 * NOTE: normally *done should be 0.
 */
int wnd_set_used_safe(struct wnd_bitmap *wnd, size_t bit, size_t bits,
		      size_t *done)
{
	size_t i, from = 0, len = 0;
	int err = 0;

	*done = 0;
	for (i = 0; i < bits; i++) {
		if (wnd_is_free(wnd, bit + i, 1)) {
			if (!len)
				from = bit + i;
			len += 1;
		} else if (len) {
			err = wnd_set_used(wnd, from, len);
			*done += len;
			len = 0;
			if (err)
				break;
		}
	}

	if (len) {
		/* Last fragment. */
		err = wnd_set_used(wnd, from, len);
		*done += len;
	}
	return err;
}

/*
 * wnd_is_free_hlp
 *
 * Return: True if all clusters [bit, bit+bits) are free (bitmap only).
 */
static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);

	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		if (wbits != wnd->free_bits[iw]) {
			bool ret;
			struct buffer_head *bh = wnd_map(wnd, iw);

			if (IS_ERR(bh))
				return false;

			ret = are_bits_clear(bh->b_data, wbit, op);

			put_bh(bh);
			if (!ret)
				return false;
		}

		bits -= op;
		wbit = 0;
		iw += 1;
	}

	return true;
}

/*
 * wnd_is_free
 *
 * Return: True if all clusters [bit, bit+bits) are free.
 */
bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	bool ret;
	struct rb_node *n;
	size_t end;
	struct e_node *e;

	if (RB_EMPTY_ROOT(&wnd->start_tree))
		goto use_wnd;

	n = rb_lookup(&wnd->start_tree, bit);
	if (!n)
		goto use_wnd;

	e = rb_entry(n, struct e_node, start.node);

	end = e->start.key + e->count.key;

	if (bit < end && bit + bits <= end)
		return true;

use_wnd:
	ret = wnd_is_free_hlp(wnd, bit, bits);

	return ret;
}

/*
 * wnd_is_used
 *
 * Return: True if all clusters [bit, bit+bits) are used.
 */
bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
{
	bool ret = false;
	struct super_block *sb = wnd->sb;
	size_t iw = bit >> (sb->s_blocksize_bits + 3);
	u32 wbits = 8 * sb->s_blocksize;
	u32 wbit = bit & (wbits - 1);
	size_t end;
	struct rb_node *n;
	struct e_node *e;

	if (RB_EMPTY_ROOT(&wnd->start_tree))
		goto use_wnd;

	end = bit + bits;
	n = rb_lookup(&wnd->start_tree, end - 1);
	if (!n)
		goto use_wnd;

	e = rb_entry(n, struct e_node, start.node);
	if (e->start.key + e->count.key > bit)
		return false;

use_wnd:
	while (iw < wnd->nwnd && bits) {
		u32 tail, op;

		if (unlikely(iw + 1 == wnd->nwnd))
			wbits = wnd->bits_last;

		tail = wbits - wbit;
		op = min_t(u32, tail, bits);

		if (wnd->free_bits[iw]) {
			bool ret;
			struct buffer_head *bh = wnd_map(wnd, iw);

			if (IS_ERR(bh))
				goto out;

			ret = are_bits_set(bh->b_data, wbit, op);
			put_bh(bh);
			if (!ret)
				goto out;
		}

		bits -= op;
		wbit = 0;
		iw += 1;
	}
	ret = true;

out:
	return ret;
}

/*
 * wnd_find - Look for free space.
 *
 * - flags - BITMAP_FIND_XXX flags
 *
 * Return: 0 if not found.
 */
size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
		size_t flags, size_t *allocated)
{
	struct super_block *sb;
	u32 wbits, wpos, wzbit, wzend;
	size_t fnd, max_alloc, b_len, b_pos;
	size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
	size_t to_alloc0 = to_alloc;
	const struct e_node *e;
	const struct rb_node *pr, *cr;
	u8 log2_bits;
	bool fbits_valid;
	struct buffer_head *bh;

	/* Fast checking for available free space. */
	if (flags & BITMAP_FIND_FULL) {
		size_t zeroes = wnd_zeroes(wnd);

		zeroes -= wnd->zone_end - wnd->zone_bit;
		if (zeroes < to_alloc0)
			goto no_space;

		if (to_alloc0 > wnd->extent_max)
			goto no_space;
	} else {
		if (to_alloc > wnd->extent_max)
			to_alloc = wnd->extent_max;
	}

	if (wnd->zone_bit <= hint && hint < wnd->zone_end)
		hint = wnd->zone_end;

	max_alloc = wnd->nbits;
	b_len = b_pos = 0;

	if (hint >= max_alloc)
		hint = 0;

	if (RB_EMPTY_ROOT(&wnd->start_tree)) {
		if (wnd->uptodated == 1) {
			/* Extents tree is updated -> No free space. */
			goto no_space;
		}
		goto scan_bitmap;
	}

	e = NULL;
	if (!hint)
		goto allocate_biggest;

	/* Use hint: Enumerate extents by start >= hint. */
	pr = NULL;
	cr = wnd->start_tree.rb_node;

	for (;;) {
		e = rb_entry(cr, struct e_node, start.node);

		if (e->start.key == hint)
			break;

		if (e->start.key < hint) {
			pr = cr;
			cr = cr->rb_right;
			if (!cr)
				break;
			continue;
		}

		cr = cr->rb_left;
		if (!cr) {
			e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
			break;
		}
	}

	if (!e)
		goto allocate_biggest;

	if (e->start.key + e->count.key > hint) {
		/* We have found an extent containing 'hint'. */
		size_t len = e->start.key + e->count.key - hint;

		if (len >= to_alloc && hint + to_alloc <= max_alloc) {
			fnd = hint;
			goto found;
		}

		if (!(flags & BITMAP_FIND_FULL)) {
			if (len > to_alloc)
				len = to_alloc;

			if (hint + len <= max_alloc) {
				fnd = hint;
				to_alloc = len;
				goto found;
			}
		}
	}

allocate_biggest:
	/* Allocate from biggest free extent. */
	e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
	if (e->count.key != wnd->extent_max)
		wnd->extent_max = e->count.key;

	if (e->count.key < max_alloc) {
		if (e->count.key >= to_alloc) {
			;
		} else if (flags & BITMAP_FIND_FULL) {
			if (e->count.key < to_alloc0) {
				/* Biggest free block is less than requested. */
				goto no_space;
			}
			to_alloc = e->count.key;
		} else if (-1 != wnd->uptodated) {
			to_alloc = e->count.key;
		} else {
			/* Check if we can use more bits. */
			size_t op, max_check;
			struct rb_root start_tree;

			memcpy(&start_tree, &wnd->start_tree,
			       sizeof(struct rb_root));
			memset(&wnd->start_tree, 0, sizeof(struct rb_root));

			max_check = e->start.key + to_alloc;
			if (max_check > max_alloc)
				max_check = max_alloc;
			for (op = e->start.key + e->count.key; op < max_check;
			     op++) {
				if (!wnd_is_free(wnd, op, 1))
					break;
			}
			memcpy(&wnd->start_tree, &start_tree,
			       sizeof(struct rb_root));
			to_alloc = op - e->start.key;
		}

		/* Prepare to return. */
		fnd = e->start.key;
		if (e->start.key + to_alloc > max_alloc)
			to_alloc = max_alloc - e->start.key;
		goto found;
	}

	if (wnd->uptodated == 1) {
		/* Extents tree is updated -> no free space. */
		goto no_space;
	}

	b_len = e->count.key;
	b_pos = e->start.key;

scan_bitmap:
	sb = wnd->sb;
	log2_bits = sb->s_blocksize_bits + 3;

	/* At most two ranges [hint, max_alloc) + [0, hint). */
Again:

	/* TODO: Optimize request for case nbits > wbits. */
	iw = hint >> log2_bits;
	wbits = sb->s_blocksize * 8;
	wpos = hint & (wbits - 1);
	prev_tail = 0;
	fbits_valid = true;

	if (max_alloc == wnd->nbits) {
		nwnd = wnd->nwnd;
	} else {
		size_t t = max_alloc + wbits - 1;

		nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
	}

	/* Enumerate all windows. */
	for (; iw < nwnd; iw++) {
		wbit = iw << log2_bits;

		if (!wnd->free_bits[iw]) {
			if (prev_tail > b_len) {
				b_pos = wbit - prev_tail;
				b_len = prev_tail;
			}

			/* Skip fully used window. */
			prev_tail = 0;
			wpos = 0;
			continue;
		}

		if (unlikely(iw + 1 == nwnd)) {
			if (max_alloc == wnd->nbits) {
				wbits = wnd->bits_last;
			} else {
				size_t t = max_alloc & (wbits - 1);

				if (t) {
					wbits = t;
					fbits_valid = false;
				}
			}
		}

		if (wnd->zone_end > wnd->zone_bit) {
			ebit = wbit + wbits;
			zbit = max(wnd->zone_bit, wbit);
			zend = min(wnd->zone_end, ebit);

			/* Here we have a window [wbit, ebit) and zone [zbit, zend). */
			if (zend <= zbit) {
				/* Zone does not overlap window. */
			} else {
				wzbit = zbit - wbit;
				wzend = zend - wbit;

				/* Zone overlaps window. */
				if (wnd->free_bits[iw] == wzend - wzbit) {
					prev_tail = 0;
					wpos = 0;
					continue;
				}

				/* Scan two ranges window: [wbit, zbit) and [zend, ebit). */
				bh = wnd_map(wnd, iw);

				if (IS_ERR(bh)) {
					/* TODO: Error */
					prev_tail = 0;
					wpos = 0;
					continue;
				}

				/* Scan range [wbit, zbit). */
				if (wpos < wzbit) {
					/* Scan range [wpos, zbit). */
					fnd = wnd_scan(bh->b_data, wbit, wpos,
						       wzbit, to_alloc,
						       &prev_tail, &b_pos,
						       &b_len);
					if (fnd != MINUS_ONE_T) {
						put_bh(bh);
						goto found;
					}
				}

				prev_tail = 0;

				/* Scan range [zend, ebit). */
				if (wzend < wbits) {
					fnd = wnd_scan(bh->b_data, wbit,
						       max(wzend, wpos), wbits,
						       to_alloc, &prev_tail,
						       &b_pos, &b_len);
					if (fnd != MINUS_ONE_T) {
						put_bh(bh);
						goto found;
					}
				}

				wpos = 0;
				put_bh(bh);
				continue;
			}
		}

		/* Current window does not overlap zone. */
		if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
			/* Window is empty. */
			if (prev_tail + wbits >= to_alloc) {
				fnd = wbit + wpos - prev_tail;
				goto found;
			}

			/* Increase 'prev_tail' and process next window. */
			prev_tail += wbits;
			wpos = 0;
			continue;
		}

		/* Read window. */
		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			// TODO: Error.
			prev_tail = 0;
			wpos = 0;
			continue;
		}

		/* Scan range [wpos, wbits). */
		fnd = wnd_scan(bh->b_data, wbit, wpos, wbits, to_alloc,
			       &prev_tail, &b_pos, &b_len);
		put_bh(bh);
		if (fnd != MINUS_ONE_T)
			goto found;
	}

	if (b_len < prev_tail) {
		/* The last fragment. */
		b_len = prev_tail;
		b_pos = max_alloc - prev_tail;
	}

	if (hint) {
		/*
		 * We have scanned range [hint, max_alloc).
		 * Prepare to scan range [0, hint + to_alloc).
		 */
		size_t nextmax = hint + to_alloc;

		if (likely(nextmax >= hint) && nextmax < max_alloc)
			max_alloc = nextmax;
		hint = 0;
		goto Again;
	}

	if (!b_len)
		goto no_space;

	wnd->extent_max = b_len;

	if (flags & BITMAP_FIND_FULL)
		goto no_space;

	fnd = b_pos;
	to_alloc = b_len;

found:
	if (flags & BITMAP_FIND_MARK_AS_USED) {
		/* TODO: Optimize remove extent (pass 'e'?). */
		if (wnd_set_used(wnd, fnd, to_alloc))
			goto no_space;
	} else if (wnd->extent_max != MINUS_ONE_T &&
		   to_alloc > wnd->extent_max) {
		wnd->extent_max = to_alloc;
	}

	*allocated = fnd;
	return to_alloc;

no_space:
	return 0;
}
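
/*
 * Illustrative usage sketch (not part of the original file): how wnd_find()
 * is typically driven from an allocation path.  The wrapper
 * 'example_alloc_clusters' and its error handling are hypothetical; wnd_find()
 * and BITMAP_FIND_MARK_AS_USED are the API used in this file, and callers in
 * the driver serialize on wnd->rw_lock around such calls.
 *
 *	static int example_alloc_clusters(struct wnd_bitmap *wnd, size_t hint,
 *					  size_t want, size_t *lcn, size_t *len)
 *	{
 *		size_t got;
 *
 *		// Find up to 'want' free clusters near 'hint' and mark them used.
 *		got = wnd_find(wnd, want, hint, BITMAP_FIND_MARK_AS_USED, lcn);
 *		if (!got)
 *			return -ENOSPC;	// wnd_find() returns 0 when nothing fits.
 *
 *		// May be shorter than 'want' unless BITMAP_FIND_FULL was set.
 *		*len = got;
 *		return 0;
 *	}
 *
 * The matching release path is wnd_set_free(wnd, *lcn, *len).
 */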

/*
 * wnd_extend - Extend bitmap ($MFT bitmap).
 */
int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
{
	int err;
	struct super_block *sb = wnd->sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u32 blocksize = sb->s_blocksize;
	u32 wbits = blocksize * 8;
	u32 b0, new_last;
	size_t bits, iw, new_wnd;
	size_t old_bits = wnd->nbits;
	u16 *new_free;

	if (new_bits <= old_bits)
		return -EINVAL;

	/* Align to 8 byte boundary. */
	new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
	new_last = new_bits & (wbits - 1);
	if (!new_last)
		new_last = wbits;

	if (new_wnd != wnd->nwnd) {
		new_free = kmalloc_array(new_wnd, sizeof(u16), GFP_NOFS);
		if (!new_free)
			return -ENOMEM;

		memcpy(new_free, wnd->free_bits, wnd->nwnd * sizeof(short));
		memset(new_free + wnd->nwnd, 0,
		       (new_wnd - wnd->nwnd) * sizeof(short));
		kfree(wnd->free_bits);
		wnd->free_bits = new_free;
	}

	/* Zero bits [old_bits, new_bits). */
	bits = new_bits - old_bits;
	b0 = old_bits & (wbits - 1);

	for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
		u32 op;
		size_t frb;
		u64 vbo, lbo, bytes;
		struct buffer_head *bh;

		if (iw + 1 == new_wnd)
			wbits = new_last;

		op = b0 + bits > wbits ? wbits - b0 : bits;
		vbo = (u64)iw * blocksize;

		err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
		if (err)
			break;

		bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
		if (!bh)
			return -EIO;

		lock_buffer(bh);

		ntfs_bitmap_clear_le(bh->b_data, b0, blocksize * 8 - b0);
		frb = wbits - ntfs_bitmap_weight_le(bh->b_data, wbits);
		wnd->total_zeroes += frb - wnd->free_bits[iw];
		wnd->free_bits[iw] = frb;

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		/* err = sync_dirty_buffer(bh); */

		b0 = 0;
		bits -= op;
	}

	wnd->nbits = new_bits;
	wnd->nwnd = new_wnd;
	wnd->bits_last = new_last;

	wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);

	return 0;
}

void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
{
	size_t zlen = wnd->zone_end - wnd->zone_bit;

	if (zlen)
		wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);

	if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
		wnd_remove_free_ext(wnd, lcn, len);

	wnd->zone_bit = lcn;
	wnd->zone_end = lcn + len;
}

int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
{
	int err = 0;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	u32 wbits = 8 * sb->s_blocksize;
	CLST len = 0, lcn = 0, done = 0;
	CLST minlen = bytes_to_cluster(sbi, range->minlen);
	CLST lcn_from = bytes_to_cluster(sbi, range->start);
	size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
	u32 wbit = lcn_from & (wbits - 1);
	CLST lcn_to;

	if (!minlen)
		minlen = 1;

	if (range->len == (u64)-1)
		lcn_to = wnd->nbits;
	else
		lcn_to = bytes_to_cluster(sbi, range->start + range->len);

	down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);

	for (; iw < wnd->nwnd; iw++, wbit = 0) {
		CLST lcn_wnd = iw * wbits;
		struct buffer_head *bh;

		if (lcn_wnd > lcn_to)
			break;

		if (!wnd->free_bits[iw])
			continue;

		if (iw + 1 == wnd->nwnd)
			wbits = wnd->bits_last;

		if (lcn_wnd + wbits > lcn_to)
			wbits = lcn_to - lcn_wnd;

		bh = wnd_map(wnd, iw);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			break;
		}

		for (; wbit < wbits; wbit++) {
			if (!test_bit_le(wbit, bh->b_data)) {
				if (!len)
					lcn = lcn_wnd + wbit;
				len += 1;
				continue;
			}
			if (len >= minlen) {
				err = ntfs_discard(sbi, lcn, len);
				if (err)
					goto out;
				done += len;
			}
			len = 0;
		}
		put_bh(bh);
	}

	/* Process the last fragment. */
	if (len >= minlen) {
		err = ntfs_discard(sbi, lcn, len);
		if (err)
			goto out;
		done += len;
	}

out:
	range->len = (u64)done << sbi->cluster_bits;

	up_read(&wnd->rw_lock);

	return err;
}

#if BITS_PER_LONG == 64
typedef __le64 bitmap_ulong;
#define cpu_to_ul(x) cpu_to_le64(x)
#define ul_to_cpu(x) le64_to_cpu(x)
#else
typedef __le32 bitmap_ulong;
#define cpu_to_ul(x) cpu_to_le32(x)
#define ul_to_cpu(x) le32_to_cpu(x)
#endif

void ntfs_bitmap_set_le(void *map, unsigned int start, int len)
{
	bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	bitmap_ulong mask_to_set = cpu_to_ul(BITMAP_FIRST_WORD_MASK(start));

	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = cpu_to_ul(~0UL);
		p++;
	}
	if (len) {
		mask_to_set &= cpu_to_ul(BITMAP_LAST_WORD_MASK(size));
		*p |= mask_to_set;
	}
}

void ntfs_bitmap_clear_le(void *map, unsigned int start, int len)
{
	bitmap_ulong *p = (bitmap_ulong *)map + BIT_WORD(start);
	const unsigned int size = start + len;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	bitmap_ulong mask_to_clear = cpu_to_ul(BITMAP_FIRST_WORD_MASK(start));

	while (len - bits_to_clear >= 0) {
		*p &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = cpu_to_ul(~0UL);
		p++;
	}
	if (len) {
		mask_to_clear &= cpu_to_ul(BITMAP_LAST_WORD_MASK(size));
		*p &= ~mask_to_clear;
	}
}

unsigned int ntfs_bitmap_weight_le(const void *bitmap, int bits)
{
	const ulong *bmp = bitmap;
	unsigned int k, lim = bits / BITS_PER_LONG;
	unsigned int w = 0;

	for (k = 0; k < lim; k++)
		w += hweight_long(bmp[k]);

	if (bits % BITS_PER_LONG) {
		w += hweight_long(ul_to_cpu(((bitmap_ulong *)bitmap)[k]) &
				  BITMAP_LAST_WORD_MASK(bits));
	}

	return w;
}
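
/*
 * Worked example (illustrative, not from the original sources): on a 64-bit
 * kernel, ntfs_bitmap_set_le(map, 60, 8) touches two little-endian words.
 * BIT_WORD(60) selects word 0 and BITMAP_FIRST_WORD_MASK(60) covers bits
 * 60-63, so the first loop iteration sets those four bits and leaves len = 4;
 * the loop then stops and the tail masks BITMAP_LAST_WORD_MASK(68) down to
 * bits 0-3 of word 1.  The net effect is that bits 60-67 of the on-disk
 * (little-endian) bitmap become set; ntfs_bitmap_clear_le() mirrors the same
 * split when clearing.
 */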