// SPDX-License-Identifier: GPL-2.0-only
/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
			       unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5.c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked.  In either case, this function will
	 * abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
		/* We don't support hijack for cluster raid */
		if (no_hijack)
			return -ENOMEM;
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		kfree(mappage);
	} else {

		/* no page was in place and we have one, so install it */

		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		kfree(ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static int read_sb_page(struct mddev *mddev, loff_t offset,
			struct page *page,
			unsigned long index, int size)
{
	/* choose a good rdev and read the page from there */

	struct md_rdev *rdev;
	sector_t target;

	rdev_for_each(rdev, mddev) {
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags)
		    || test_bit(Bitmap_sync, &rdev->flags))
			continue;

		target = offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev, target,
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
				 page, REQ_OP_READ, 0, true)) {
			page->index = index;
			return 0;
		}
	}
	return -EIO;
}

static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_entry_continue_rcu.
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable device */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

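/*
 * Write a bitmap page to the bitmap area kept near the md superblock on
 * every active rdev, checking first that the page cannot overlap the
 * array's data or metadata.
 */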
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct md_rdev *rdev;
	struct block_device *bdev;
	struct mddev *mddev = bitmap->mddev;
	struct bitmap_storage *store = &bitmap->storage;

restart:
	rdev = NULL;
	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

		if (page->index == store->file_pages-1) {
			int last_page_size = store->bytes & (PAGE_SIZE-1);
			if (last_page_size == 0)
				last_page_size = PAGE_SIZE;
			size = roundup(last_page_size,
				       bdev_logical_block_size(bdev));
		}
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			       + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA  BITMAP METADATA  */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs in to metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs in to bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs in to data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
	}

	if (wait && md_super_wait(mddev) < 0)
		goto restart;
	return 0;

 bad_alignment:
	return -EINVAL;
}

static void md_bitmap_file_kick(struct bitmap *bitmap);
/*
 * write out a page to a file
 */
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh;

	if (bitmap->storage.file == NULL) {
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
			set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
		}
	} else {

		bh = page_buffers(page);

		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			bh = bh->b_this_page;
		}

		if (wait)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
	}
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		md_bitmap_file_kick(bitmap);
}

static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;

	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}

/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
}
static void free_buffers(struct page *page)
{
	struct buffer_head *bh;

	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	__clear_page_buffers(page);
	put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static int read_page(struct file *file, unsigned long index,
		     struct bitmap *bitmap,
		     unsigned long count,
		     struct page *page)
{
	int ret = 0;
	struct inode *inode = file_inode(file);
	struct buffer_head *bh;
	sector_t block;

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
	if (!bh) {
		ret = -ENOMEM;
		goto out;
	}
	attach_page_buffers(page, bh);
	block = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		if (count == 0)
			bh->b_blocknr = 0;
		else {
			bh->b_blocknr = bmap(inode, block);
			if (bh->b_blocknr == 0) {
				/* Cannot use this file! */
				ret = -EINVAL;
				goto out;
			}
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < (1<<inode->i_blkbits))
				count = 0;
			else
				count -= (1<<inode->i_blkbits);

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_READ, 0, bh);
		}
		block++;
		bh = bh->b_this_page;
	}
	page->index = index;

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		ret = -EIO;
out:
	if (ret)
		pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
		       (int)PAGE_SIZE,
		       (unsigned long long)index << PAGE_SHIFT,
		       ret);
	return ret;
}

/*
 * bitmap file superblock operations
 */

/*
 * md_bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * md_bitmap_daemon_work(), have completed.
 */
static void md_bitmap_wait_writes(struct bitmap *bitmap)
{
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		/* Note that we ignore the return value.  The writes
		 * might have failed, but that would just mean that
		 * some bits which should be cleared haven't been,
		 * which is safe.  The relevant bitmap blocks will
		 * probably get written again, but there is no great
		 * loss if they aren't.
		 */
		md_super_wait(bitmap->mddev);
}


/* update the event counter and sync the superblock to disk */
void md_bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	if (!bitmap->storage.sb_page) /* no superblock */
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	/*
	 * clear BITMAP_WRITE_ERROR bit to protect against the case that
	 * a bitmap write error occurred but the later writes succeeded.
	 */
	sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
	kunmap_atomic(sb);
	write_page(bitmap, bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(md_bitmap_update_sb);

/* print out the bitmap file superblock */
void md_bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->storage.sb_page)
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
	pr_debug("        events: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events));
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
519 * 520 * Returns: 0 on success, -Exxx on error 521 */ 522 static int md_bitmap_new_disk_sb(struct bitmap *bitmap) 523 { 524 bitmap_super_t *sb; 525 unsigned long chunksize, daemon_sleep, write_behind; 526 527 bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 528 if (bitmap->storage.sb_page == NULL) 529 return -ENOMEM; 530 bitmap->storage.sb_page->index = 0; 531 532 sb = kmap_atomic(bitmap->storage.sb_page); 533 534 sb->magic = cpu_to_le32(BITMAP_MAGIC); 535 sb->version = cpu_to_le32(BITMAP_MAJOR_HI); 536 537 chunksize = bitmap->mddev->bitmap_info.chunksize; 538 BUG_ON(!chunksize); 539 if (!is_power_of_2(chunksize)) { 540 kunmap_atomic(sb); 541 pr_warn("bitmap chunksize not a power of 2\n"); 542 return -EINVAL; 543 } 544 sb->chunksize = cpu_to_le32(chunksize); 545 546 daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep; 547 if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) { 548 pr_debug("Choosing daemon_sleep default (5 sec)\n"); 549 daemon_sleep = 5 * HZ; 550 } 551 sb->daemon_sleep = cpu_to_le32(daemon_sleep); 552 bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; 553 554 /* 555 * FIXME: write_behind for RAID1. If not specified, what 556 * is a good choice? We choose COUNTER_MAX / 2 arbitrarily. 557 */ 558 write_behind = bitmap->mddev->bitmap_info.max_write_behind; 559 if (write_behind > COUNTER_MAX) 560 write_behind = COUNTER_MAX / 2; 561 sb->write_behind = cpu_to_le32(write_behind); 562 bitmap->mddev->bitmap_info.max_write_behind = write_behind; 563 564 /* keep the array size field of the bitmap superblock up to date */ 565 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); 566 567 memcpy(sb->uuid, bitmap->mddev->uuid, 16); 568 569 set_bit(BITMAP_STALE, &bitmap->flags); 570 sb->state = cpu_to_le32(bitmap->flags); 571 bitmap->events_cleared = bitmap->mddev->events; 572 sb->events_cleared = cpu_to_le64(bitmap->mddev->events); 573 bitmap->mddev->bitmap_info.nodes = 0; 574 575 kunmap_atomic(sb); 576 577 return 0; 578 } 579 580 /* read the superblock from the bitmap file and initialize some bitmap fields */ 581 static int md_bitmap_read_sb(struct bitmap *bitmap) 582 { 583 char *reason = NULL; 584 bitmap_super_t *sb; 585 unsigned long chunksize, daemon_sleep, write_behind; 586 unsigned long long events; 587 int nodes = 0; 588 unsigned long sectors_reserved = 0; 589 int err = -EINVAL; 590 struct page *sb_page; 591 loff_t offset = bitmap->mddev->bitmap_info.offset; 592 593 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { 594 chunksize = 128 * 1024 * 1024; 595 daemon_sleep = 5 * HZ; 596 write_behind = 0; 597 set_bit(BITMAP_STALE, &bitmap->flags); 598 err = 0; 599 goto out_no_sb; 600 } 601 /* page 0 is the superblock, read it... 
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
	bitmap->storage.sb_page = sb_page;

re_read:
	/* If cluster_slot is set, the cluster is setup */
	if (bitmap->cluster_slot >= 0) {
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

		sector_div(bm_blocks,
			   bitmap->mddev->bitmap_info.chunksize >> 9);
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
			 bitmap->cluster_slot, offset);
	}

	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		err = read_page(bitmap->storage.file, 0,
				bitmap, bytes, sb_page);
	} else {
		err = read_sb_page(bitmap->mddev,
				   offset,
				   sb_page,
				   0, sizeof(bitmap_super_t));
	}
	if (err)
		return err;

	err = -EINVAL;
	sb = kmap_atomic(sb_page);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
	/* Setup nodes/clustername only if bitmap version is
	 * cluster-compatible
	 */
	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
		nodes = le32_to_cpu(sb->nodes);
		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
			sb->cluster_name, 64);
	}

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		pr_warn("%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			pr_warn("%s: bitmap superblock UUID mismatch\n",
				bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
		if (!nodes && (events < bitmap->mddev->events)) {
			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
				bmname(bitmap), events,
				(unsigned long long) bitmap->mddev->events);
			set_bit(BITMAP_STALE, &bitmap->flags);
		}
	}

	/* assign fields using values from superblock */
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	strlcpy(bitmap->mddev->bitmap_info.cluster_name,
		sb->cluster_name, 64);
	err = 0;

out:
	kunmap_atomic(sb);
	/* Assigning chunksize is required for "re_read" */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_warn("%s: Could not setup cluster service (%d)\n",
				bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}


out_no_sb:
	if (test_bit(BITMAP_STALE, &bitmap->flags))
		bitmap->events_cleared = bitmap->mddev->events;
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->mddev->bitmap_info.nodes = nodes;
	if (bitmap->mddev->bitmap_info.space == 0 ||
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
		bitmap->mddev->bitmap_info.space = sectors_reserved;
	if (err) {
		md_bitmap_print_sb(bitmap);
		if (bitmap->cluster_slot < 0)
			md_cluster_stop(bitmap->mddev);
	}
	return err;
}

/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
static inline struct page *filemap_get_page(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (file_page_index(store, chunk) >= store->file_pages)
		return NULL;
	return store->filemap[file_page_index(store, chunk)];
}

static int md_bitmap_storage_alloc(struct bitmap_storage *store,
				   unsigned long chunks, int with_super,
				   int slot_number)
{
	int pnum, offset = 0;
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
	offset = slot_number * num_pages;

	store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
				       GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (store->sb_page == NULL)
			return -ENOMEM;
	}

	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
		store->sb_page->index = offset;
	}

	for ( ; pnum < num_pages; pnum++) {
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
		store->filemap[pnum]->index = pnum + offset;
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}

static void md_bitmap_file_unmap(struct bitmap_storage *store)
{
	struct page **map, *sb_page;
	int pages;
	struct file *file;

	file = store->file;
	map = store->filemap;
	pages = store->file_pages;
	sb_page = store->sb_page;

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(store->filemap_attr);

	if (sb_page)
		free_buffers(sb_page);

	if (file) {
		struct inode *inode = file_inode(file);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void md_bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
		md_bitmap_update_sb(bitmap);

		if (bitmap->storage.file) {
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
				ptr = file_path(bitmap->storage.file,
						path, PAGE_SIZE);

			pr_warn("%s: kicking failed bitmap file %s from array!\n",
				bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

			kfree(path);
		} else
			pr_warn("%s: disabling internal bitmap due to errors\n",
				bmname(bitmap));
	}
}

enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};

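/*
 * Each page in the filemap owns four attribute bits, packed into
 * storage.filemap_attr and addressed as (page number << 2) + attribute.
 */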
static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
{
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);

	/* set the bit */
	kaddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set_bit(bit, kaddr);
	else
		set_bit_le(bit, kaddr);
	kunmap_atomic(kaddr);
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
}

static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		clear_bit(bit, paddr);
	else
		clear_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
		set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
		bitmap->allclean = 0;
	}
}

static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	int set = 0;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return -EINVAL;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set = test_bit(bit, paddr);
	else
		set = test_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	return set;
}


/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void md_bitmap_unplug(struct bitmap *bitmap)
{
	unsigned long i;
	int dirty, need_write;
	int writing = 0;

	if (!bitmap || !bitmap->storage.filemap ||
	    test_bit(BITMAP_STALE, &bitmap->flags))
		return;

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->storage.file_pages; i++) {
		if (!bitmap->storage.filemap)
			return;
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write) {
			if (!writing) {
				md_bitmap_wait_writes(bitmap);
				if (bitmap->mddev->queue)
					blk_add_trace_msg(bitmap->mddev->queue,
							  "md bitmap_unplug");
			}
			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
			write_page(bitmap, bitmap->storage.filemap[i], 0);
			writing = 1;
		}
	}
	if (writing)
		md_bitmap_wait_writes(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		md_bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(md_bitmap_unplug);

static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/*
 * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
 */
static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
	struct page *page = NULL;
	unsigned long bit_cnt = 0;
	struct file *file;
	unsigned long offset;
	int outofdate;
	int ret = -ENOSPC;
	void *paddr;
	struct bitmap_storage *store = &bitmap->storage;

	chunks = bitmap->counts.chunks;
	file = store->file;

	if (!file && !bitmap->mddev->bitmap_info.offset) {
		/* No permanent bitmap - fill with '1s'. */
		store->filemap = NULL;
		store->file_pages = 0;
		for (i = 0; i < chunks ; i++) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
				      >= start);
			md_bitmap_set_memory_bits(bitmap,
						  (sector_t)i << bitmap->counts.chunkshift,
						  needed);
		}
		return 0;
	}

	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
	if (outofdate)
		pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));

	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
		pr_warn("%s: bitmap file too short %lu < %lu\n",
			bmname(bitmap),
			(unsigned long) i_size_read(file->f_mapping->host),
			store->bytes);
		goto err;
	}

	oldindex = ~0L;
	offset = 0;
	if (!bitmap->mddev->bitmap_info.external)
		offset = sizeof(bitmap_super_t);

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

	for (i = 0; i < chunks; i++) {
		int b;
		index = file_page_index(&bitmap->storage, i);
		bit = file_page_offset(&bitmap->storage, i);
		if (index != oldindex) { /* this is a new page, read it in */
			int count;
			/* unmap the old page, we're done with it */
			if (index == store->file_pages-1)
				count = store->bytes - index * PAGE_SIZE;
			else
				count = PAGE_SIZE;
			page = store->filemap[index];
			if (file)
				ret = read_page(file, index, bitmap,
						count, page);
			else
				ret = read_sb_page(
					bitmap->mddev,
					bitmap->mddev->bitmap_info.offset,
					page,
					index + node_offset, count);

			if (ret)
				goto err;

			oldindex = index;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
				 * whole page and write it out
				 */
				paddr = kmap_atomic(page);
				memset(paddr + offset, 0xff,
				       PAGE_SIZE - offset);
				kunmap_atomic(paddr);
				write_page(bitmap, page, 1);

				ret = -EIO;
				if (test_bit(BITMAP_WRITE_ERROR,
					     &bitmap->flags))
					goto err;
			}
		}
		paddr = kmap_atomic(page);
		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
			b = test_bit(bit, paddr);
		else
			b = test_bit_le(bit, paddr);
		kunmap_atomic(paddr);
		if (b) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
				      >= start);
			md_bitmap_set_memory_bits(bitmap,
						  (sector_t)i << bitmap->counts.chunkshift,
						  needed);
			bit_cnt++;
		}
		offset = 0;
	}

	pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
		 bmname(bitmap), store->file_pages,
		 bit_cnt, chunks);

	return 0;

 err:
	pr_warn("%s: bitmap initialisation failed: %d\n",
		bmname(bitmap), ret);
	return ret;
}

void md_bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */
	int i;

	if (!bitmap || !bitmap->storage.filemap)
		return;
	if (bitmap->storage.file)
		/* Only one copy, so nothing needed */
		return;

	for (i = 0; i < bitmap->storage.file_pages; i++)
		set_page_attr(bitmap, i,
			      BITMAP_PAGE_NEEDWRITE);
	bitmap->allclean = 0;
}

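/* track how many counters on the counter page covering 'offset' are in use;
 * the page is released once that count drops back to zero */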
static void md_bitmap_count_page(struct bitmap_counts *bitmap,
				 sector_t offset, int inc)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	bitmap->bp[page].count += inc;
	md_bitmap_checkfree(bitmap, page);
}

static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	struct bitmap_page *bp = &bitmap->bp[page];

	if (!bp->pending)
		bp->pending = 1;
}

static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
					       sector_t offset, sector_t *blocks,
					       int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 * out to disk
 */

void md_bitmap_daemon_work(struct mddev *mddev)
{
	struct bitmap *bitmap;
	unsigned long j;
	unsigned long nextpage;
	sector_t blocks;
	struct bitmap_counts *counts;

	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
	mutex_lock(&mddev->bitmap_info.mutex);
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
		mutex_unlock(&mddev->bitmap_info.mutex);
		return;
	}
	if (time_before(jiffies, bitmap->daemon_lastrun
			+ mddev->bitmap_info.daemon_sleep))
		goto done;

	bitmap->daemon_lastrun = jiffies;
	if (bitmap->allclean) {
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
		goto done;
	}
	bitmap->allclean = 1;

	if (bitmap->mddev->queue)
		blk_add_trace_msg(bitmap->mddev->queue,
				  "md bitmap_daemon_work");

	/* Any file-page which is PENDING now needs to be written.
	 * So set NEEDWRITE now, then after we make any last-minute changes
	 * we will write it.
	 */
	for (j = 0; j < bitmap->storage.file_pages; j++)
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_PENDING))
			set_page_attr(bitmap, j,
				      BITMAP_PAGE_NEEDWRITE);

	if (bitmap->need_sync &&
	    mddev->bitmap_info.external == 0) {
		/* Arrange for superblock update as well as
		 * other changes */
		bitmap_super_t *sb;
		bitmap->need_sync = 0;
		if (bitmap->storage.filemap) {
			sb = kmap_atomic(bitmap->storage.sb_page);
			sb->events_cleared =
				cpu_to_le64(bitmap->events_cleared);
			kunmap_atomic(sb);
			set_page_attr(bitmap, 0,
				      BITMAP_PAGE_NEEDWRITE);
		}
	}
	/* Now look at the bitmap counters and if any are '2' or '1',
	 * decrement and handle accordingly.
	 */
	counts = &bitmap->counts;
	spin_lock_irq(&counts->lock);
	nextpage = 0;
	for (j = 0; j < counts->chunks; j++) {
		bitmap_counter_t *bmc;
		sector_t block = (sector_t)j << counts->chunkshift;

		if (j == nextpage) {
			nextpage += PAGE_COUNTER_RATIO;
			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
				j |= PAGE_COUNTER_MASK;
				continue;
			}
			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
		}

		bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
		if (!bmc) {
			j |= PAGE_COUNTER_MASK;
			continue;
		}
		if (*bmc == 1 && !bitmap->need_sync) {
			/* We can clear the bit */
			*bmc = 0;
			md_bitmap_count_page(counts, block, -1);
			md_bitmap_file_clear_bit(bitmap, block);
		} else if (*bmc && *bmc <= 2) {
			*bmc = 1;
			md_bitmap_set_pending(counts, block);
			bitmap->allclean = 0;
		}
	}
	spin_unlock_irq(&counts->lock);

	md_bitmap_wait_writes(bitmap);
	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
	 * DIRTY pages need to be written by bitmap_unplug so it can wait
	 * for them.
	 * If we find any DIRTY page we stop there and let bitmap_unplug
	 * handle all the rest.  This is important in the case where
	 * the first blocking holds the superblock and it has been updated.
	 * We mustn't write any other blocks before the superblock.
	 */
	for (j = 0;
	     j < bitmap->storage.file_pages
		     && !test_bit(BITMAP_STALE, &bitmap->flags);
	     j++) {
		if (test_page_attr(bitmap, j,
				   BITMAP_PAGE_DIRTY))
			/* bitmap_unplug will handle the rest */
			break;
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_NEEDWRITE)) {
			write_page(bitmap, bitmap->storage.filemap[j], 0);
		}
	}

done:
	if (bitmap->allclean == 0)
		mddev->thread->timeout =
			mddev->bitmap_info.daemon_sleep;
	mutex_unlock(&mddev->bitmap_info.mutex);
}

static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
					       sector_t offset, sector_t *blocks,
					       int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
	int err;

	err = md_bitmap_checkpage(bitmap, page, create, 0);

	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
		csize = ((sector_t)1) << (bitmap->chunkshift +
					  PAGE_COUNTER_SHIFT - 1);
	else
		csize = ((sector_t)1) << bitmap->chunkshift;
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
		return NULL;

	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return  &((bitmap_counter_t *)
			  &bitmap->bp[page].map)[hi];
	} else /* page is allocated */
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}

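/*
 * Called as a write to the array starts: bump the counter for every chunk
 * the write touches, setting the on-disk bit for a chunk that was clean,
 * and waiting if a counter is already at COUNTER_MAX.
 */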
int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
	if (!bitmap)
		return 0;

	if (behind) {
		int bw;
		atomic_inc(&bitmap->behind_writes);
		bw = atomic_read(&bitmap->behind_writes);
		if (bw > bitmap->behind_writes_used)
			bitmap->behind_writes_used = bw;

		pr_debug("inc write-behind count %d/%lu\n",
			 bw, bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		bitmap_counter_t *bmc;

		spin_lock_irq(&bitmap->counts.lock);
		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
		if (!bmc) {
			spin_unlock_irq(&bitmap->counts.lock);
			return 0;
		}

		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
			DEFINE_WAIT(__wait);
			/* note that it is safe to do the prepare_to_wait
			 * after the test as long as we do it before dropping
			 * the spinlock.
			 */
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&bitmap->counts.lock);
			schedule();
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
		}

		switch (*bmc) {
		case 0:
			md_bitmap_file_set_bit(bitmap, offset);
			md_bitmap_count_page(&bitmap->counts, offset, 1);
			/* fall through */
		case 1:
			*bmc = 2;
		}

		(*bmc)++;

		spin_unlock_irq(&bitmap->counts.lock);

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
	return 0;
}
EXPORT_SYMBOL(md_bitmap_startwrite);

void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
			unsigned long sectors, int success, int behind)
{
	if (!bitmap)
		return;
	if (behind) {
		if (atomic_dec_and_test(&bitmap->behind_writes))
			wake_up(&bitmap->behind_wait);
		pr_debug("dec write-behind count %d/%lu\n",
			 atomic_read(&bitmap->behind_writes),
			 bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		unsigned long flags;
		bitmap_counter_t *bmc;

		spin_lock_irqsave(&bitmap->counts.lock, flags);
		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
		if (!bmc) {
			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
			return;
		}

		if (success && !bitmap->mddev->degraded &&
		    bitmap->events_cleared < bitmap->mddev->events) {
			bitmap->events_cleared = bitmap->mddev->events;
			bitmap->need_sync = 1;
			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
		}

		if (!success && !NEEDED(*bmc))
			*bmc |= NEEDED_MASK;

		if (COUNTER(*bmc) == COUNTER_MAX)
			wake_up(&bitmap->overflow_wait);

		(*bmc)--;
		if (*bmc <= 2) {
			md_bitmap_set_pending(&bitmap->counts, offset);
			bitmap->allclean = 0;
		}
		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
}
EXPORT_SYMBOL(md_bitmap_endwrite);

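/* report whether the chunk at 'offset' needs to be resynced; unless the
 * array is degraded, also move it from NEEDED to RESYNC state */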
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
			       int degraded)
{
	bitmap_counter_t *bmc;
	int rv;
	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
	spin_lock_irq(&bitmap->counts.lock);
	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
		}
	}
	spin_unlock_irq(&bitmap->counts.lock);
	return rv;
}

int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
			 int degraded)
{
	/* bitmap_start_sync must always report on multiples of whole
	 * pages, otherwise resync (which is very PAGE_SIZE based) will
	 * get confused.
	 * So call __bitmap_start_sync repeatedly (if needed) until
	 * At least PAGE_SIZE>>9 blocks are covered.
	 * Return the 'or' of the result.
	 */
	int rv = 0;
	sector_t blocks1;

	*blocks = 0;
	while (*blocks < (PAGE_SIZE>>9)) {
		rv |= __bitmap_start_sync(bitmap, offset,
					  &blocks1, degraded);
		offset += blocks1;
		*blocks += blocks1;
	}
	return rv;
}
EXPORT_SYMBOL(md_bitmap_start_sync);

void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{
	bitmap_counter_t *bmc;
	unsigned long flags;

	if (bitmap == NULL) {
		*blocks = 1024;
		return;
	}
	spin_lock_irqsave(&bitmap->counts.lock, flags);
	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
	if (bmc == NULL)
		goto unlock;
	/* locked */
	if (RESYNC(*bmc)) {
		*bmc &= ~RESYNC_MASK;

		if (!NEEDED(*bmc) && aborted)
			*bmc |= NEEDED_MASK;
		else {
			if (*bmc <= 2) {
				md_bitmap_set_pending(&bitmap->counts, offset);
				bitmap->allclean = 0;
			}
		}
	}
 unlock:
	spin_unlock_irqrestore(&bitmap->counts.lock, flags);
}
EXPORT_SYMBOL(md_bitmap_end_sync);

void md_bitmap_close_sync(struct bitmap *bitmap)
{
	/* Sync has finished, and any bitmap chunks that weren't synced
	 * properly have been aborted.  It remains to us to clear the
	 * RESYNC bit wherever it is still on
	 */
	sector_t sector = 0;
	sector_t blocks;
	if (!bitmap)
		return;
	while (sector < bitmap->mddev->resync_max_sectors) {
		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
}
EXPORT_SYMBOL(md_bitmap_close_sync);

void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
{
	sector_t s = 0;
	sector_t blocks;

	if (!bitmap)
		return;
	if (sector == 0) {
		bitmap->last_end_sync = jiffies;
		return;
	}
	if (!force && time_before(jiffies, (bitmap->last_end_sync
					    + bitmap->mddev->bitmap_info.daemon_sleep)))
		return;
	wait_event(bitmap->mddev->recovery_wait,
		   atomic_read(&bitmap->mddev->recovery_active) == 0);

	bitmap->mddev->curr_resync_completed = sector;
	set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
	s = 0;
	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
		md_bitmap_end_sync(bitmap, s, &blocks, 0);
		s += blocks;
	}
	bitmap->last_end_sync = jiffies;
	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}
EXPORT_SYMBOL(md_bitmap_cond_end_sync);

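/*
 * Bring our bitmap in line with another cluster node's resync window:
 * end sync for sectors between old_lo and new_lo, and start sync for
 * sectors between old_hi and new_hi.
 */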
void md_bitmap_sync_with_cluster(struct mddev *mddev,
			      sector_t old_lo, sector_t old_hi,
			      sector_t new_lo, sector_t new_hi)
{
	struct bitmap *bitmap = mddev->bitmap;
	sector_t sector, blocks = 0;

	for (sector = old_lo; sector < new_lo; ) {
		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");

	for (sector = old_hi; sector < new_hi; ) {
		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
}
EXPORT_SYMBOL(md_bitmap_sync_with_cluster);

static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
	/* For each chunk covered by any of these sectors, set the
	 * counter to 2 and possibly set resync_needed.  They should all
	 * be 0 at this point
	 */

	sector_t secs;
	bitmap_counter_t *bmc;
	spin_lock_irq(&bitmap->counts.lock);
	bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
	if (!bmc) {
		spin_unlock_irq(&bitmap->counts.lock);
		return;
	}
	if (!*bmc) {
		*bmc = 2;
		md_bitmap_count_page(&bitmap->counts, offset, 1);
		md_bitmap_set_pending(&bitmap->counts, offset);
		bitmap->allclean = 0;
	}
	if (needed)
		*bmc |= NEEDED_MASK;
	spin_unlock_irq(&bitmap->counts.lock);
}

/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
	unsigned long chunk;

	for (chunk = s; chunk <= e; chunk++) {
		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
		md_bitmap_set_memory_bits(bitmap, sec, 1);
		md_bitmap_file_set_bit(bitmap, sec);
		if (sec < bitmap->mddev->recovery_cp)
			/* We are asserting that the array is dirty,
			 * so move the recovery_cp address back so
			 * that it is obvious that it is dirty
			 */
			bitmap->mddev->recovery_cp = sec;
	}
}

/*
 * flush out any pending updates
 */
void md_bitmap_flush(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;
	long sleep;

	if (!bitmap) /* there was no bitmap */
		return;

	/* run the daemon_work three times to ensure everything is flushed
	 * that can be
	 */
	sleep = mddev->bitmap_info.daemon_sleep * 2;
	bitmap->daemon_lastrun -= sleep;
	md_bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	md_bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	md_bitmap_daemon_work(mddev);
	md_bitmap_update_sb(bitmap);
}

/*
 * free memory that was allocated
 */
void md_bitmap_free(struct bitmap *bitmap)
{
	unsigned long k, pages;
	struct bitmap_page *bp;

	if (!bitmap) /* there was no bitmap */
		return;

	if (bitmap->sysfs_can_clear)
		sysfs_put(bitmap->sysfs_can_clear);

	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
		bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
		md_cluster_stop(bitmap->mddev);

	/* Shouldn't be needed - but just in case.... */
	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes) == 0);

	/* release the bitmap file  */
	md_bitmap_file_unmap(&bitmap->storage);

	bp = bitmap->counts.bp;
	pages = bitmap->counts.pages;

	/* free all allocated memory */

	if (bp) /* deallocate the page memory */
		for (k = 0; k < pages; k++)
			if (bp[k].map && !bp[k].hijacked)
				kfree(bp[k].map);
	kfree(bp);
	kfree(bitmap);
}
EXPORT_SYMBOL(md_bitmap_free);

void md_bitmap_wait_behind_writes(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	/* wait for behind writes to complete */
	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
		pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
			 mdname(mddev));
		/* need to kick something here to make sure I/O goes? */
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}
}

void md_bitmap_destroy(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap) /* there was no bitmap */
		return;

	md_bitmap_wait_behind_writes(mddev);
	mempool_destroy(mddev->wb_info_pool);
	mddev->wb_info_pool = NULL;

	mutex_lock(&mddev->bitmap_info.mutex);
	spin_lock(&mddev->lock);
	mddev->bitmap = NULL; /* disconnect from the md device */
	spin_unlock(&mddev->lock);
	mutex_unlock(&mddev->bitmap_info.mutex);
	if (mddev->thread)
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;

	md_bitmap_free(bitmap);
}

/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 * once mddev->bitmap is set
 */
struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
{
	struct bitmap *bitmap;
	sector_t blocks = mddev->resync_max_sectors;
	struct file *file = mddev->bitmap_info.file;
	int err;
	struct kernfs_node *bm = NULL;

	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);

	BUG_ON(file && mddev->bitmap_info.offset);

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
			  mdname(mddev));
		return ERR_PTR(-EBUSY);
	}

	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&bitmap->counts.lock);
	atomic_set(&bitmap->pending_writes, 0);
	init_waitqueue_head(&bitmap->write_wait);
	init_waitqueue_head(&bitmap->overflow_wait);
	init_waitqueue_head(&bitmap->behind_wait);

	bitmap->mddev = mddev;
	bitmap->cluster_slot = slot;

	if (mddev->kobj.sd)
		bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
	if (bm) {
		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
		sysfs_put(bm);
	} else
		bitmap->sysfs_can_clear = NULL;

	bitmap->storage.file = file;
	if (file) {
		get_file(file);
		/* As future accesses to this file will use bmap,
		 * and bypass the page cache, we must sync the file
		 * first.
		 */
		vfs_fsync(file, 1);
	}
	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
	if (!mddev->bitmap_info.external) {
		/*
		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
		 * instructing us to create a new on-disk bitmap instance.
		 */
		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
			err = md_bitmap_new_disk_sb(bitmap);
		else
			err = md_bitmap_read_sb(bitmap);
	} else {
		err = 0;
		if (mddev->bitmap_info.chunksize == 0 ||
		    mddev->bitmap_info.daemon_sleep == 0)
			/* chunksize and time_base need to be
			 * set first. */
			err = -EINVAL;
	}
	if (err)
		goto error;

	bitmap->daemon_lastrun = jiffies;
	err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
	if (err)
		goto error;

	pr_debug("created bitmap (%lu pages) for device %s\n",
		 bitmap->counts.pages, bmname(bitmap));

	err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
	if (err)
		goto error;

	return bitmap;
 error:
	md_bitmap_free(bitmap);
	return ERR_PTR(err);
}

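/*
 * Load the on-disk bitmap at array start or resume: clear any stale
 * in-memory state, read the bits from disk, and kick off recovery if
 * any were set.
 */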

int md_bitmap_load(struct mddev *mddev)
{
	int err = 0;
	sector_t start = 0;
	sector_t sector = 0;
	struct bitmap *bitmap = mddev->bitmap;
	struct md_rdev *rdev;

	if (!bitmap)
		goto out;

	rdev_for_each(rdev, mddev)
		mddev_create_wb_pool(mddev, rdev, true);

	if (mddev_is_clustered(mddev))
		md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);

	/* Clear out old bitmap info first: Either there is none, or we
	 * are resuming after someone else has possibly changed things,
	 * so we should forget old cached info.
	 * All chunks should be clean, but some might need_sync.
	 */
	while (sector < mddev->resync_max_sectors) {
		sector_t blocks;
		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	md_bitmap_close_sync(bitmap);

	if (mddev->degraded == 0
	    || bitmap->events_cleared == mddev->events)
		/* no need to keep dirty bits to optimise a
		 * re-add of a missing device */
		start = mddev->recovery_cp;

	mutex_lock(&mddev->bitmap_info.mutex);
	err = md_bitmap_init_from_disk(bitmap, start);
	mutex_unlock(&mddev->bitmap_info.mutex);

	if (err)
		goto out;
	clear_bit(BITMAP_STALE, &bitmap->flags);

	/* Kick recovery in case any bits were set */
	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);

	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
	md_wakeup_thread(mddev->thread);

	md_bitmap_update_sb(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		err = -EIO;
out:
	return err;
}
EXPORT_SYMBOL_GPL(md_bitmap_load);

struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
{
	int rv = 0;
	struct bitmap *bitmap;

	bitmap = md_bitmap_create(mddev, slot);
	if (IS_ERR(bitmap)) {
		rv = PTR_ERR(bitmap);
		return ERR_PTR(rv);
	}

	rv = md_bitmap_init_from_disk(bitmap, 0);
	if (rv) {
		md_bitmap_free(bitmap);
		return ERR_PTR(rv);
	}

	return bitmap;
}
EXPORT_SYMBOL(get_bitmap_from_slot);
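/*
 * The bitmap returned by get_bitmap_from_slot() is a stand-alone copy of
 * another node's bitmap; it is not attached to mddev->bitmap, so the
 * caller is expected to release it with md_bitmap_free() when done
 * (md_bitmap_copy_from_slot() below and md-cluster are the users).
 * Sketch:
 *
 *	struct bitmap *b = get_bitmap_from_slot(mddev, slot);
 *	if (!IS_ERR(b)) {
 *		... inspect b->counts ...
 *		md_bitmap_free(b);
 *	}
 */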

/* Loads the bitmap associated with slot and copies the resync information
 * to our bitmap
 */
int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
			     sector_t *low, sector_t *high, bool clear_bits)
{
	int rv = 0, i, j;
	sector_t block, lo = 0, hi = 0;
	struct bitmap_counts *counts;
	struct bitmap *bitmap;

	bitmap = get_bitmap_from_slot(mddev, slot);
	if (IS_ERR(bitmap)) {
		pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
		return -1;
	}

	counts = &bitmap->counts;
	for (j = 0; j < counts->chunks; j++) {
		block = (sector_t)j << counts->chunkshift;
		if (md_bitmap_file_test_bit(bitmap, block)) {
			if (!lo)
				lo = block;
			hi = block;
			md_bitmap_file_clear_bit(bitmap, block);
			md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
			md_bitmap_file_set_bit(mddev->bitmap, block);
		}
	}

	if (clear_bits) {
		md_bitmap_update_sb(bitmap);
		/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
		 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
		for (i = 0; i < bitmap->storage.file_pages; i++)
			if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
				set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
		md_bitmap_unplug(bitmap);
	}
	md_bitmap_unplug(mddev->bitmap);
	*low = lo;
	*high = hi;

	return rv;
}
EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);


void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
	unsigned long chunk_kb;
	struct bitmap_counts *counts;

	if (!bitmap)
		return;

	counts = &bitmap->counts;

	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
		   "%lu%s chunk",
		   counts->pages - counts->missing_pages,
		   counts->pages,
		   (counts->pages - counts->missing_pages)
		   << (PAGE_SHIFT - 10),
		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
		   chunk_kb ? "KB" : "B");
	if (bitmap->storage.file) {
		seq_printf(seq, ", file: ");
		seq_file_path(seq, bitmap->storage.file, " \t\n");
	}

	seq_printf(seq, "\n");
}
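/*
 * md_bitmap_status() produces the "bitmap:" line of /proc/mdstat.  With
 * 4K pages the first figure is allocated counter pages in units of 4KB,
 * so an array with 5 of 10 counter pages allocated and a 64MB chunk would
 * show something like (illustrative values only):
 *
 *	bitmap: 5/10 pages [20KB], 65536KB chunk, file: /bitmap.md0
 */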

int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
		     int chunksize, int init)
{
	/* If chunk_size is 0, choose an appropriate chunk size.
	 * Then possibly allocate new storage space.
	 * Then quiesce, copy bits, replace bitmap, and re-start
	 *
	 * This function is called both to set up the initial bitmap
	 * and to resize the bitmap while the array is active.
	 * If this happens as a result of the array being resized,
	 * chunksize will be zero, and we need to choose a suitable
	 * chunksize, otherwise we use what we are given.
	 */
	struct bitmap_storage store;
	struct bitmap_counts old_counts;
	unsigned long chunks;
	sector_t block;
	sector_t old_blocks, new_blocks;
	int chunkshift;
	int ret = 0;
	long pages;
	struct bitmap_page *new_bp;

	if (bitmap->storage.file && !init) {
		pr_info("md: cannot resize file-based bitmap\n");
		return -EINVAL;
	}

	if (chunksize == 0) {
		/* If there is enough space, leave the chunk size unchanged,
		 * else increase by factor of two until there is enough space.
		 */
		long bytes;
		long space = bitmap->mddev->bitmap_info.space;

		if (space == 0) {
			/* We don't know how much space there is, so limit
			 * to current size - in sectors.
			 */
			bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
			space = DIV_ROUND_UP(bytes, 512);
			bitmap->mddev->bitmap_info.space = space;
		}
		chunkshift = bitmap->counts.chunkshift;
		chunkshift--;
		do {
			/* 'chunkshift' is shift from block size to chunk size */
			chunkshift++;
			chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
			bytes = DIV_ROUND_UP(chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
		} while (bytes > (space << 9));
	} else
		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;

	chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
	memset(&store, 0, sizeof(store));
	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
		ret = md_bitmap_storage_alloc(&store, chunks,
					      !bitmap->mddev->bitmap_info.external,
					      mddev_is_clustered(bitmap->mddev)
					      ? bitmap->cluster_slot : 0);
	if (ret) {
		md_bitmap_file_unmap(&store);
		goto err;
	}

	pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);

	new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
	ret = -ENOMEM;
	if (!new_bp) {
		md_bitmap_file_unmap(&store);
		goto err;
	}

	if (!init)
		bitmap->mddev->pers->quiesce(bitmap->mddev, 1);

	store.file = bitmap->storage.file;
	bitmap->storage.file = NULL;

	if (store.sb_page && bitmap->storage.sb_page)
		memcpy(page_address(store.sb_page),
		       page_address(bitmap->storage.sb_page),
		       sizeof(bitmap_super_t));
	md_bitmap_file_unmap(&bitmap->storage);
	bitmap->storage = store;

	old_counts = bitmap->counts;
	bitmap->counts.bp = new_bp;
	bitmap->counts.pages = pages;
	bitmap->counts.missing_pages = pages;
	bitmap->counts.chunkshift = chunkshift;
	bitmap->counts.chunks = chunks;
	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
						     BITMAP_BLOCK_SHIFT);

	blocks = min(old_counts.chunks << old_counts.chunkshift,
		     chunks << chunkshift);

	spin_lock_irq(&bitmap->counts.lock);
	/* For cluster raid, need to pre-allocate bitmap */
	if (mddev_is_clustered(bitmap->mddev)) {
		unsigned long page;
		for (page = 0; page < pages; page++) {
			ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
			if (ret) {
				unsigned long k;

				/* deallocate the page memory */
				for (k = 0; k < page; k++) {
					kfree(new_bp[k].map);
				}
				kfree(new_bp);

				/* restore some fields from old_counts */
				bitmap->counts.bp = old_counts.bp;
				bitmap->counts.pages = old_counts.pages;
				bitmap->counts.missing_pages = old_counts.pages;
				bitmap->counts.chunkshift = old_counts.chunkshift;
				bitmap->counts.chunks = old_counts.chunks;
				bitmap->mddev->bitmap_info.chunksize =
					1 << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
				blocks = old_counts.chunks << old_counts.chunkshift;
				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
				break;
			} else
				bitmap->counts.bp[page].count += 1;
		}
	}

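	/*
	 * Carry the old counters over to the new layout.  Old and new chunk
	 * sizes may differ, so md_bitmap_get_counter() reports how many
	 * blocks each counter covers (old_blocks/new_blocks); the walk below
	 * advances in steps of the smaller range and marks every new chunk
	 * that overlaps an old chunk which still needs a sync.
	 */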
	for (block = 0; block < blocks; ) {
		bitmap_counter_t *bmc_old, *bmc_new;
		int set;

		bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
		set = bmc_old && NEEDED(*bmc_old);

		if (set) {
			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
			if (*bmc_new == 0) {
				/* need to set on-disk bits too. */
				sector_t end = block + new_blocks;
				sector_t start = block >> chunkshift;
				start <<= chunkshift;
				while (start < end) {
					md_bitmap_file_set_bit(bitmap, block);
					start += 1 << chunkshift;
				}
				*bmc_new = 2;
				md_bitmap_count_page(&bitmap->counts, block, 1);
				md_bitmap_set_pending(&bitmap->counts, block);
			}
			*bmc_new |= NEEDED_MASK;
			if (new_blocks < old_blocks)
				old_blocks = new_blocks;
		}
		block += old_blocks;
	}

	if (bitmap->counts.bp != old_counts.bp) {
		unsigned long k;
		for (k = 0; k < old_counts.pages; k++)
			if (!old_counts.bp[k].hijacked)
				kfree(old_counts.bp[k].map);
		kfree(old_counts.bp);
	}

	if (!init) {
		int i;
		while (block < (chunks << chunkshift)) {
			bitmap_counter_t *bmc;
			bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
			if (bmc) {
				/* new space.  It needs to be resynced, so
				 * we set NEEDED_MASK.
				 */
				if (*bmc == 0) {
					*bmc = NEEDED_MASK | 2;
					md_bitmap_count_page(&bitmap->counts, block, 1);
					md_bitmap_set_pending(&bitmap->counts, block);
				}
			}
			block += new_blocks;
		}
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
	}
	spin_unlock_irq(&bitmap->counts.lock);

	if (!init) {
		md_bitmap_unplug(bitmap);
		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
	}
	ret = 0;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(md_bitmap_resize);

static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{
	int rv;

	rv = mddev_lock(mddev);
	if (rv)
		return rv;
	if (mddev->pers) {
		if (!mddev->pers->quiesce) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->recovery || mddev->sync_thread) {
			rv = -EBUSY;
			goto out;
		}
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured. Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->pers) {
			mddev_suspend(mddev);
			md_bitmap_destroy(mddev);
			mddev_resume(mddev);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			rv = -EINVAL;
			goto out;
		} else {
			if (buf[0] == '+')
				rv = kstrtoll(buf+1, 10, &offset);
			else
				rv = kstrtoll(buf, 10, &offset);
			if (rv)
				goto out;
			if (offset == 0) {
				rv = -EINVAL;
				goto out;
			}
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset) {
				rv = -EINVAL;
				goto out;
			}
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				struct bitmap *bitmap;
				bitmap = md_bitmap_create(mddev, -1);
				mddev_suspend(mddev);
				if (IS_ERR(bitmap))
					rv = PTR_ERR(bitmap);
				else {
					mddev->bitmap = bitmap;
					rv = md_bitmap_load(mddev);
					if (rv)
						mddev->bitmap_info.offset = 0;
				}
				if (rv) {
					md_bitmap_destroy(mddev);
					mddev_resume(mddev);
					goto out;
				}
				mddev_resume(mddev);
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
	}
	rv = 0;
out:
	mddev_unlock(mddev);
	if (rv)
		return rv;
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);

/* 'bitmap/space' is the space available at 'location' for the
 * bitmap. This allows the kernel to know when it is safe to
 * resize the bitmap to match a resized array.
 */
static ssize_t
space_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
}

static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long sectors;
	int rv;

	rv = kstrtoul(buf, 10, &sectors);
	if (rv)
		return rv;

	if (sectors == 0)
		return -EINVAL;

	if (mddev->bitmap &&
	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
		return -EFBIG; /* Bitmap is too big for this small space */

	/* could make sure it isn't too big, but that isn't really
	 * needed - user-space should be careful.
	 */
	mddev->bitmap_info.space = sectors;
	return len;
}

static struct md_sysfs_entry bitmap_space =
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);

static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;
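	/*
	 * strict_strtoul_scaled(..., 4) is assumed to hand back the value in
	 * units of 10^-4 seconds ("5" -> 50000, "0.1" -> 1000); the
	 * "timeout * HZ / 10000" below converts that to jiffies.  Worked
	 * example with HZ == 1000: writing "5" gives 50000 * 1000 / 10000 =
	 * 5000 jiffies, i.e. a five second daemon sleep.
	 */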

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);

static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
	int rv = kstrtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
	mddev->bitmap_info.max_write_behind = backlog;
	if (!backlog && mddev->wb_info_pool) {
		/* wb_info_pool is not needed if backlog is zero */
		mempool_destroy(mddev->wb_info_pool);
		mddev->wb_info_pool = NULL;
	} else if (backlog && !mddev->wb_info_pool) {
		/* wb_info_pool is needed since backlog is not zero */
		struct md_rdev *rdev;

		rdev_for_each(rdev, mddev)
			mddev_create_wb_pool(mddev, rdev, false);
	}
	if (old_mwb != backlog)
		md_bitmap_update_sb(mddev->bitmap);
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);

static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;
	if (mddev->bitmap)
		return -EBUSY;
	rv = kstrtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);

static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	if (mddev_is_clustered(mddev))
		return sprintf(page, "clustered\n");
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if ((strncmp(buf, "internal", 8) == 0) ||
		 (strncmp(buf, "clustered", 9) == 0))
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;
	spin_lock(&mddev->lock);
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	spin_unlock(&mddev->lock);
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);

static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	ssize_t ret;
	spin_lock(&mddev->lock);
	if (mddev->bitmap == NULL)
		ret = sprintf(page, "0\n");
	else
		ret = sprintf(page, "%lu\n",
			      mddev->bitmap->behind_writes_used);
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);

static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_space.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};