/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int bitmap_checkpage(struct bitmap_counts *bitmap,
			    unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5.c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked.  In either case, this function will
	 * abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
		/* We don't support hijack for cluster raid */
		if (no_hijack)
			return -ENOMEM;
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		kfree(mappage);
	} else {

		/* no page was in place and we have one, so install it */

		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}
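/*
 * A note on "hijacked" pages (added summary, not from the original source):
 * when the kzalloc() above fails, the struct bitmap_page cannot point at a
 * full page of counters.  Instead the pointer field itself is re-used as
 * storage for two counters (see the hijacked case in bitmap_get_counter()
 * below), so each of those two counters then covers half of the chunks the
 * missing page would have tracked.  Resolution degrades, but writes can
 * still be tracked without allocating memory on the I/O path.
 */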
/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		kfree(ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static int read_sb_page(struct mddev *mddev, loff_t offset,
			struct page *page,
			unsigned long index, int size)
{
	/* choose a good rdev and read the page from there */

	struct md_rdev *rdev;
	sector_t target;

	rdev_for_each(rdev, mddev) {
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags)
		    || test_bit(Bitmap_sync, &rdev->flags))
			continue;

		target = offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev, target,
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
				 page, REQ_OP_READ, 0, true)) {
			page->index = index;
			return 0;
		}
	}
	return -EIO;
}

static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_entry_continue_rcu.
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable device */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct md_rdev *rdev;
	struct block_device *bdev;
	struct mddev *mddev = bitmap->mddev;
	struct bitmap_storage *store = &bitmap->storage;

restart:
	rdev = NULL;
	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

		if (page->index == store->file_pages-1) {
			int last_page_size = store->bytes & (PAGE_SIZE-1);
			if (last_page_size == 0)
				last_page_size = PAGE_SIZE;
			size = roundup(last_page_size,
				       bdev_logical_block_size(bdev));
		}
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			       + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA  BITMAP METADATA  */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs in to metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs in to bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs in to data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
	}

	if (wait && md_super_wait(mddev) < 0)
		goto restart;
	return 0;

 bad_alignment:
	return -EINVAL;
}

static void bitmap_file_kick(struct bitmap *bitmap);
/*
 * write out a page to a file
 */
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh;

	if (bitmap->storage.file == NULL) {
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
			set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
		}
	} else {

		bh = page_buffers(page);

		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			bh = bh->b_this_page;
		}

		if (wait)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
	}
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		bitmap_file_kick(bitmap);
}

static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;

	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}
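/*
 * Illustrative summary of the alignment checks above (added comment, not
 * from the original source).  Depending on the superblock version, the
 * bitmap can sit before or after the metadata relative to the data:
 *
 *   offset < 0:              | DATA ... BITMAP | METADATA |
 *   sb_start < data_offset:  | METADATA | BITMAP | DATA ... |
 *   otherwise:               | DATA ... | METADATA | BITMAP |
 *
 * write_sb_page() rejects ("bad_alignment") any page whose sectors would
 * overlap the data or metadata regions of a member device.
 */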
/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
}
static void free_buffers(struct page *page)
{
	struct buffer_head *bh;

	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	__clear_page_buffers(page);
	put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static int read_page(struct file *file, unsigned long index,
		     struct bitmap *bitmap,
		     unsigned long count,
		     struct page *page)
{
	int ret = 0;
	struct inode *inode = file_inode(file);
	struct buffer_head *bh;
	sector_t block;

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
	if (!bh) {
		ret = -ENOMEM;
		goto out;
	}
	attach_page_buffers(page, bh);
	block = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		if (count == 0)
			bh->b_blocknr = 0;
		else {
			bh->b_blocknr = bmap(inode, block);
			if (bh->b_blocknr == 0) {
				/* Cannot use this file! */
				ret = -EINVAL;
				goto out;
			}
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < (1<<inode->i_blkbits))
				count = 0;
			else
				count -= (1<<inode->i_blkbits);

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_READ, 0, bh);
		}
		block++;
		bh = bh->b_this_page;
	}
	page->index = index;

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		ret = -EIO;
out:
	if (ret)
		pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
		       (int)PAGE_SIZE,
		       (unsigned long long)index << PAGE_SHIFT,
		       ret);
	return ret;
}
/*
 * bitmap file superblock operations
 */

/*
 * bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * bitmap_daemon_work(), have completed.
 */
static void bitmap_wait_writes(struct bitmap *bitmap)
{
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		/* Note that we ignore the return value.  The writes
		 * might have failed, but that would just mean that
		 * some bits which should be cleared haven't been,
		 * which is safe.  The relevant bitmap blocks will
		 * probably get written again, but there is no great
		 * loss if they aren't.
		 */
		md_super_wait(bitmap->mddev);
}


/* update the event counter and sync the superblock to disk */
void bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	if (!bitmap->storage.sb_page) /* no superblock */
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	/*
	 * clear BITMAP_WRITE_ERROR bit to protect against the case that
	 * a bitmap write error occurred but the later writes succeeded.
	 */
	sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
	kunmap_atomic(sb);
	write_page(bitmap, bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(bitmap_update_sb);
/* print out the bitmap file superblock */
void bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->storage.sb_page)
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
		 le32_to_cpu(*(__u32 *)(sb->uuid+0)),
		 le32_to_cpu(*(__u32 *)(sb->uuid+4)),
		 le32_to_cpu(*(__u32 *)(sb->uuid+8)),
		 le32_to_cpu(*(__u32 *)(sb->uuid+12)));
	pr_debug("        events: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events));
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
	bitmap->storage.sb_page->index = 0;

	sb = kmap_atomic(bitmap->storage.sb_page);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb);
		pr_warn("bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		pr_debug("Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	set_bit(BITMAP_STALE, &bitmap->flags);
	sb->state = cpu_to_le32(bitmap->flags);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
	bitmap->mddev->bitmap_info.nodes = 0;

	kunmap_atomic(sb);

	return 0;
}
/* read the superblock from the bitmap file and initialize some bitmap fields */
static int bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int nodes = 0;
	unsigned long sectors_reserved = 0;
	int err = -EINVAL;
	struct page *sb_page;
	loff_t offset = bitmap->mddev->bitmap_info.offset;

	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
		set_bit(BITMAP_STALE, &bitmap->flags);
		err = 0;
		goto out_no_sb;
	}
	/* page 0 is the superblock, read it... */
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
	bitmap->storage.sb_page = sb_page;

re_read:
	/* If cluster_slot is set, the cluster is setup */
	if (bitmap->cluster_slot >= 0) {
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

		sector_div(bm_blocks,
			   bitmap->mddev->bitmap_info.chunksize >> 9);
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
			 bitmap->cluster_slot, offset);
	}

	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		err = read_page(bitmap->storage.file, 0,
				bitmap, bytes, sb_page);
	} else {
		err = read_sb_page(bitmap->mddev,
				   offset,
				   sb_page,
				   0, sizeof(bitmap_super_t));
	}
	if (err)
		return err;

	err = -EINVAL;
	sb = kmap_atomic(sb_page);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
	/* Setup nodes/clustername only if bitmap version is
	 * cluster-compatible
	 */
	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
		nodes = le32_to_cpu(sb->nodes);
		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
			sb->cluster_name, 64);
	}

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		pr_warn("%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			pr_warn("%s: bitmap superblock UUID mismatch\n",
				bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
		if (!nodes && (events < bitmap->mddev->events)) {
			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
				bmname(bitmap), events,
				(unsigned long long) bitmap->mddev->events);
			set_bit(BITMAP_STALE, &bitmap->flags);
		}
	}

	/* assign fields using values from superblock */
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	strlcpy(bitmap->mddev->bitmap_info.cluster_name,
		sb->cluster_name, 64);
	err = 0;

out:
	kunmap_atomic(sb);
	/* Assigning chunksize is required for "re_read" */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_warn("%s: Could not setup cluster service (%d)\n",
				bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}


out_no_sb:
	if (test_bit(BITMAP_STALE, &bitmap->flags))
		bitmap->events_cleared = bitmap->mddev->events;
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->mddev->bitmap_info.nodes = nodes;
	if (bitmap->mddev->bitmap_info.space == 0 ||
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
		bitmap->mddev->bitmap_info.space = sectors_reserved;
	if (err) {
		bitmap_print_sb(bitmap);
		if (bitmap->cluster_slot < 0)
			md_cluster_stop(bitmap->mddev);
	}
	return err;
}
/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
static inline struct page *filemap_get_page(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (file_page_index(store, chunk) >= store->file_pages)
		return NULL;
	return store->filemap[file_page_index(store, chunk)];
}

static int bitmap_storage_alloc(struct bitmap_storage *store,
				unsigned long chunks, int with_super,
				int slot_number)
{
	int pnum, offset = 0;
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
	offset = slot_number * num_pages;

	store->filemap = kmalloc(sizeof(struct page *)
				 * num_pages, GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (store->sb_page == NULL)
			return -ENOMEM;
	}

	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
		store->sb_page->index = offset;
	}

	for ( ; pnum < num_pages; pnum++) {
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
		store->filemap[pnum]->index = pnum + offset;
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}
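/*
 * Worked example for bitmap_storage_alloc() (illustrative numbers, not
 * from the original source): a 1 TiB array (2^31 sectors) with a 64 MiB
 * chunk (2^17 sectors) gives 2^31 / 2^17 = 16384 chunks, i.e. 16384/8 =
 * 2048 bytes of bits.  With an internal superblock that becomes
 * 2048 + 256 = 2304 bytes, so on a 4K-page machine the whole bitmap fits
 * in one filemap page.  Note also that file_page_index()/file_page_offset()
 * shift every chunk by sizeof(bitmap_super_t) << 3 = 2048 bits when page 0
 * holds the superblock.
 */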
static void bitmap_file_unmap(struct bitmap_storage *store)
{
	struct page **map, *sb_page;
	int pages;
	struct file *file;

	file = store->file;
	map = store->filemap;
	pages = store->file_pages;
	sb_page = store->sb_page;

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(store->filemap_attr);

	if (sb_page)
		free_buffers(sb_page);

	if (file) {
		struct inode *inode = file_inode(file);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
		bitmap_update_sb(bitmap);

		if (bitmap->storage.file) {
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
				ptr = file_path(bitmap->storage.file,
						path, PAGE_SIZE);

			pr_warn("%s: kicking failed bitmap file %s from array!\n",
				bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

			kfree(path);
		} else
			pr_warn("%s: disabling internal bitmap due to errors\n",
				bmname(bitmap));
	}
}
enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};

static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
{
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);

	/* set the bit */
	kaddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set_bit(bit, kaddr);
	else
		set_bit_le(bit, kaddr);
	kunmap_atomic(kaddr);
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
}

static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		clear_bit(bit, paddr);
	else
		clear_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
		set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
		bitmap->allclean = 0;
	}
}

static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	int set = 0;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return -EINVAL;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set = test_bit(bit, paddr);
	else
		set = test_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	return set;
}
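/*
 * Lifecycle of the per-page attributes above (summary added for clarity,
 * not from the original source):
 *
 *   DIRTY     - a bit was just set for an incoming write; bitmap_unplug()
 *               must flush the page before the data write may proceed.
 *   PENDING   - a bit was cleared in memory; write-out can be lazy and is
 *               promoted to NEEDWRITE by the next bitmap_daemon_work().
 *   NEEDWRITE - the page should be written at a convenient time, either
 *               by the daemon or by bitmap_unplug().
 */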
/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void bitmap_unplug(struct bitmap *bitmap)
{
	unsigned long i;
	int dirty, need_write;
	int writing = 0;

	if (!bitmap || !bitmap->storage.filemap ||
	    test_bit(BITMAP_STALE, &bitmap->flags))
		return;

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->storage.file_pages; i++) {
		if (!bitmap->storage.filemap)
			return;
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write) {
			if (!writing) {
				bitmap_wait_writes(bitmap);
				if (bitmap->mddev->queue)
					blk_add_trace_msg(bitmap->mddev->queue,
							  "md bitmap_unplug");
			}
			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
			write_page(bitmap, bitmap->storage.filemap[i], 0);
			writing = 1;
		}
	}
	if (writing)
		bitmap_wait_writes(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(bitmap_unplug);
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/*
 * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
 */
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
	struct page *page = NULL;
	unsigned long bit_cnt = 0;
	struct file *file;
	unsigned long offset;
	int outofdate;
	int ret = -ENOSPC;
	void *paddr;
	struct bitmap_storage *store = &bitmap->storage;

	chunks = bitmap->counts.chunks;
	file = store->file;

	if (!file && !bitmap->mddev->bitmap_info.offset) {
		/* No permanent bitmap - fill with '1s'. */
		store->filemap = NULL;
		store->file_pages = 0;
		for (i = 0; i < chunks ; i++) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
				      >= start);
			bitmap_set_memory_bits(bitmap,
					       (sector_t)i << bitmap->counts.chunkshift,
					       needed);
		}
		return 0;
	}

	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
	if (outofdate)
		pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));

	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
		pr_warn("%s: bitmap file too short %lu < %lu\n",
			bmname(bitmap),
			(unsigned long) i_size_read(file->f_mapping->host),
			store->bytes);
		goto err;
	}

	oldindex = ~0L;
	offset = 0;
	if (!bitmap->mddev->bitmap_info.external)
		offset = sizeof(bitmap_super_t);

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

	for (i = 0; i < chunks; i++) {
		int b;
		index = file_page_index(&bitmap->storage, i);
		bit = file_page_offset(&bitmap->storage, i);
		if (index != oldindex) { /* this is a new page, read it in */
			int count;
			/* unmap the old page, we're done with it */
			if (index == store->file_pages-1)
				count = store->bytes - index * PAGE_SIZE;
			else
				count = PAGE_SIZE;
			page = store->filemap[index];
			if (file)
				ret = read_page(file, index, bitmap,
						count, page);
			else
				ret = read_sb_page(
					bitmap->mddev,
					bitmap->mddev->bitmap_info.offset,
					page,
					index + node_offset, count);

			if (ret)
				goto err;

			oldindex = index;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
				 * whole page and write it out
				 */
				paddr = kmap_atomic(page);
				memset(paddr + offset, 0xff,
				       PAGE_SIZE - offset);
				kunmap_atomic(paddr);
				write_page(bitmap, page, 1);

				ret = -EIO;
				if (test_bit(BITMAP_WRITE_ERROR,
					     &bitmap->flags))
					goto err;
			}
		}
		paddr = kmap_atomic(page);
		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
			b = test_bit(bit, paddr);
		else
			b = test_bit_le(bit, paddr);
		kunmap_atomic(paddr);
		if (b) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
				      >= start);
			bitmap_set_memory_bits(bitmap,
					       (sector_t)i << bitmap->counts.chunkshift,
					       needed);
			bit_cnt++;
		}
		offset = 0;
	}

	pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
		 bmname(bitmap), store->file_pages,
		 bit_cnt, chunks);

	return 0;

 err:
	pr_warn("%s: bitmap initialisation failed: %d\n",
		bmname(bitmap), ret);
	return ret;
}

void bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */
	int i;

	if (!bitmap || !bitmap->storage.filemap)
		return;
	if (bitmap->storage.file)
		/* Only one copy, so nothing needed */
		return;

	for (i = 0; i < bitmap->storage.file_pages; i++)
		set_page_attr(bitmap, i,
			      BITMAP_PAGE_NEEDWRITE);
	bitmap->allclean = 0;
}
static void bitmap_count_page(struct bitmap_counts *bitmap,
			      sector_t offset, int inc)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	bitmap->bp[page].count += inc;
	bitmap_checkfree(bitmap, page);
}

static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	struct bitmap_page *bp = &bitmap->bp[page];

	if (!bp->pending)
		bp->pending = 1;
}

static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
					    sector_t offset, sector_t *blocks,
					    int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 * out to disk
 */

void bitmap_daemon_work(struct mddev *mddev)
{
	struct bitmap *bitmap;
	unsigned long j;
	unsigned long nextpage;
	sector_t blocks;
	struct bitmap_counts *counts;

	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
	mutex_lock(&mddev->bitmap_info.mutex);
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
		mutex_unlock(&mddev->bitmap_info.mutex);
		return;
	}
	if (time_before(jiffies, bitmap->daemon_lastrun
			+ mddev->bitmap_info.daemon_sleep))
		goto done;

	bitmap->daemon_lastrun = jiffies;
	if (bitmap->allclean) {
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
		goto done;
	}
	bitmap->allclean = 1;

	if (bitmap->mddev->queue)
		blk_add_trace_msg(bitmap->mddev->queue,
				  "md bitmap_daemon_work");

	/* Any file-page which is PENDING now needs to be written.
	 * So set NEEDWRITE now, then after we make any last-minute changes
	 * we will write it.
	 */
	for (j = 0; j < bitmap->storage.file_pages; j++)
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_PENDING))
			set_page_attr(bitmap, j,
				      BITMAP_PAGE_NEEDWRITE);

	if (bitmap->need_sync &&
	    mddev->bitmap_info.external == 0) {
		/* Arrange for superblock update as well as
		 * other changes */
		bitmap_super_t *sb;
		bitmap->need_sync = 0;
		if (bitmap->storage.filemap) {
			sb = kmap_atomic(bitmap->storage.sb_page);
			sb->events_cleared =
				cpu_to_le64(bitmap->events_cleared);
			kunmap_atomic(sb);
			set_page_attr(bitmap, 0,
				      BITMAP_PAGE_NEEDWRITE);
		}
	}
	/* Now look at the bitmap counters and if any are '2' or '1',
	 * decrement and handle accordingly.
	 */
	counts = &bitmap->counts;
	spin_lock_irq(&counts->lock);
	nextpage = 0;
	for (j = 0; j < counts->chunks; j++) {
		bitmap_counter_t *bmc;
		sector_t block = (sector_t)j << counts->chunkshift;

		if (j == nextpage) {
			nextpage += PAGE_COUNTER_RATIO;
			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
				j |= PAGE_COUNTER_MASK;
				continue;
			}
			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
		}
		bmc = bitmap_get_counter(counts,
					 block,
					 &blocks, 0);

		if (!bmc) {
			j |= PAGE_COUNTER_MASK;
			continue;
		}
		if (*bmc == 1 && !bitmap->need_sync) {
			/* We can clear the bit */
			*bmc = 0;
			bitmap_count_page(counts, block, -1);
			bitmap_file_clear_bit(bitmap, block);
		} else if (*bmc && *bmc <= 2) {
			*bmc = 1;
			bitmap_set_pending(counts, block);
			bitmap->allclean = 0;
		}
	}
	spin_unlock_irq(&counts->lock);

	bitmap_wait_writes(bitmap);
	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
	 * DIRTY pages need to be written by bitmap_unplug so it can wait
	 * for them.
	 * If we find any DIRTY page we stop there and let bitmap_unplug
	 * handle all the rest.  This is important in the case where
	 * the first blocking holds the superblock and it has been updated.
	 * We mustn't write any other blocks before the superblock.
	 */
	for (j = 0;
	     j < bitmap->storage.file_pages
		     && !test_bit(BITMAP_STALE, &bitmap->flags);
	     j++) {
		if (test_page_attr(bitmap, j,
				   BITMAP_PAGE_DIRTY))
			/* bitmap_unplug will handle the rest */
			break;
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_NEEDWRITE)) {
			write_page(bitmap, bitmap->storage.filemap[j], 0);
		}
	}

done:
	if (bitmap->allclean == 0)
		mddev->thread->timeout =
			mddev->bitmap_info.daemon_sleep;
	mutex_unlock(&mddev->bitmap_info.mutex);
}
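/*
 * Added summary (not from the original source): clearing a chunk's on-disk
 * bit is deliberately lazy.  One daemon pass steps an idle counter from 2
 * to 1, and only a later pass that still finds it at 1 resets it to 0 and
 * clears the file bit, so a chunk must stay idle for roughly two
 * daemon_sleep periods before it stops being treated as dirty.  This
 * avoids bouncing bits for areas that are written repeatedly.
 */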
static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap,
					    sector_t offset, sector_t *blocks,
					    int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
	int err;

	err = bitmap_checkpage(bitmap, page, create, 0);

	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
		csize = ((sector_t)1) << (bitmap->chunkshift +
					  PAGE_COUNTER_SHIFT - 1);
	else
		csize = ((sector_t)1) << bitmap->chunkshift;
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
		return NULL;

	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return  &((bitmap_counter_t *)
			  &bitmap->bp[page].map)[hi];
	} else /* page is allocated */
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}
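/*
 * Counter layout reminder (added for clarity; the definitions live in
 * md-bitmap.h, not here): each bitmap_counter_t is 16 bits wide, with
 * bit 15 = NEEDED (chunk needs resync), bit 14 = RESYNC (resync in
 * progress), and bits 0-13 counting in-flight writes to the chunk, so
 * COUNTER_MAX is 16383.  The "write-behind limit out of range (0 - 16383)"
 * check in bitmap_read_sb() reflects the same bound.
 */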
int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
	if (!bitmap)
		return 0;

	if (behind) {
		int bw;
		atomic_inc(&bitmap->behind_writes);
		bw = atomic_read(&bitmap->behind_writes);
		if (bw > bitmap->behind_writes_used)
			bitmap->behind_writes_used = bw;

		pr_debug("inc write-behind count %d/%lu\n",
			 bw, bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		bitmap_counter_t *bmc;

		spin_lock_irq(&bitmap->counts.lock);
		bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
		if (!bmc) {
			spin_unlock_irq(&bitmap->counts.lock);
			return 0;
		}

		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
			DEFINE_WAIT(__wait);
			/* note that it is safe to do the prepare_to_wait
			 * after the test as long as we do it before dropping
			 * the spinlock.
			 */
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&bitmap->counts.lock);
			schedule();
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
		}

		switch (*bmc) {
		case 0:
			bitmap_file_set_bit(bitmap, offset);
			bitmap_count_page(&bitmap->counts, offset, 1);
			/* fall through */
		case 1:
			*bmc = 2;
		}

		(*bmc)++;

		spin_unlock_irq(&bitmap->counts.lock);

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
	return 0;
}
EXPORT_SYMBOL(bitmap_startwrite);

void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
		     int success, int behind)
{
	if (!bitmap)
		return;
	if (behind) {
		if (atomic_dec_and_test(&bitmap->behind_writes))
			wake_up(&bitmap->behind_wait);
		pr_debug("dec write-behind count %d/%lu\n",
			 atomic_read(&bitmap->behind_writes),
			 bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		unsigned long flags;
		bitmap_counter_t *bmc;

		spin_lock_irqsave(&bitmap->counts.lock, flags);
		bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
		if (!bmc) {
			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
			return;
		}

		if (success && !bitmap->mddev->degraded &&
		    bitmap->events_cleared < bitmap->mddev->events) {
			bitmap->events_cleared = bitmap->mddev->events;
			bitmap->need_sync = 1;
			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
		}

		if (!success && !NEEDED(*bmc))
			*bmc |= NEEDED_MASK;

		if (COUNTER(*bmc) == COUNTER_MAX)
			wake_up(&bitmap->overflow_wait);

		(*bmc)--;
		if (*bmc <= 2) {
			bitmap_set_pending(&bitmap->counts, offset);
			bitmap->allclean = 0;
		}
		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
}
EXPORT_SYMBOL(bitmap_endwrite);
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
			       int degraded)
{
	bitmap_counter_t *bmc;
	int rv;
	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
	spin_lock_irq(&bitmap->counts.lock);
	bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
		}
	}
	spin_unlock_irq(&bitmap->counts.lock);
	return rv;
}

int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
		      int degraded)
{
	/* bitmap_start_sync must always report on multiples of whole
	 * pages, otherwise resync (which is very PAGE_SIZE based) will
	 * get confused.
	 * So call __bitmap_start_sync repeatedly (if needed) until
	 * at least PAGE_SIZE>>9 blocks are covered.
	 * Return the 'or' of the result.
	 */
	int rv = 0;
	sector_t blocks1;

	*blocks = 0;
	while (*blocks < (PAGE_SIZE>>9)) {
		rv |= __bitmap_start_sync(bitmap, offset,
					  &blocks1, degraded);
		offset += blocks1;
		*blocks += blocks1;
	}
	return rv;
}
EXPORT_SYMBOL(bitmap_start_sync);

void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{
	bitmap_counter_t *bmc;
	unsigned long flags;

	if (bitmap == NULL) {
		*blocks = 1024;
		return;
	}
	spin_lock_irqsave(&bitmap->counts.lock, flags);
	bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
	if (bmc == NULL)
		goto unlock;
	/* locked */
	if (RESYNC(*bmc)) {
		*bmc &= ~RESYNC_MASK;

		if (!NEEDED(*bmc) && aborted)
			*bmc |= NEEDED_MASK;
		else {
			if (*bmc <= 2) {
				bitmap_set_pending(&bitmap->counts, offset);
				bitmap->allclean = 0;
			}
		}
	}
 unlock:
	spin_unlock_irqrestore(&bitmap->counts.lock, flags);
}
EXPORT_SYMBOL(bitmap_end_sync);

void bitmap_close_sync(struct bitmap *bitmap)
{
	/* Sync has finished, and any bitmap chunks that weren't synced
	 * properly have been aborted.  It remains to us to clear the
	 * RESYNC bit wherever it is still on
	 */
	sector_t sector = 0;
	sector_t blocks;
	if (!bitmap)
		return;
	while (sector < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
}
EXPORT_SYMBOL(bitmap_close_sync);

void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
{
	sector_t s = 0;
	sector_t blocks;

	if (!bitmap)
		return;
	if (sector == 0) {
		bitmap->last_end_sync = jiffies;
		return;
	}
	if (!force && time_before(jiffies, (bitmap->last_end_sync
					    + bitmap->mddev->bitmap_info.daemon_sleep)))
		return;
	wait_event(bitmap->mddev->recovery_wait,
		   atomic_read(&bitmap->mddev->recovery_active) == 0);

	bitmap->mddev->curr_resync_completed = sector;
	set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
	s = 0;
	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
		bitmap_end_sync(bitmap, s, &blocks, 0);
		s += blocks;
	}
	bitmap->last_end_sync = jiffies;
	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}
EXPORT_SYMBOL(bitmap_cond_end_sync);

void bitmap_sync_with_cluster(struct mddev *mddev,
			      sector_t old_lo, sector_t old_hi,
			      sector_t new_lo, sector_t new_hi)
{
	struct bitmap *bitmap = mddev->bitmap;
	sector_t sector, blocks = 0;

	for (sector = old_lo; sector < new_lo; ) {
		bitmap_end_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");

	for (sector = old_hi; sector < new_hi; ) {
		bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
}
EXPORT_SYMBOL(bitmap_sync_with_cluster);
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
	/* For each chunk covered by any of these sectors, set the
	 * counter to 2 and possibly set resync_needed.  They should all
	 * be 0 at this point
	 */

	sector_t secs;
	bitmap_counter_t *bmc;
	spin_lock_irq(&bitmap->counts.lock);
	bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
	if (!bmc) {
		spin_unlock_irq(&bitmap->counts.lock);
		return;
	}
	if (!*bmc) {
		*bmc = 2;
		bitmap_count_page(&bitmap->counts, offset, 1);
		bitmap_set_pending(&bitmap->counts, offset);
		bitmap->allclean = 0;
	}
	if (needed)
		*bmc |= NEEDED_MASK;
	spin_unlock_irq(&bitmap->counts.lock);
}

/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
	unsigned long chunk;

	for (chunk = s; chunk <= e; chunk++) {
		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
		bitmap_set_memory_bits(bitmap, sec, 1);
		bitmap_file_set_bit(bitmap, sec);
		if (sec < bitmap->mddev->recovery_cp)
			/* We are asserting that the array is dirty,
			 * so move the recovery_cp address back so
			 * that it is obvious that it is dirty
			 */
			bitmap->mddev->recovery_cp = sec;
	}
}

/*
 * flush out any pending updates
 */
void bitmap_flush(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;
	long sleep;

	if (!bitmap) /* there was no bitmap */
		return;

	/* run the daemon_work three times to ensure everything is flushed
	 * that can be
	 */
	sleep = mddev->bitmap_info.daemon_sleep * 2;
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	bitmap_daemon_work(mddev);
	bitmap_update_sb(bitmap);
}
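/*
 * Why three daemon runs above (added explanation, not from the original
 * source): a fully flushed chunk takes several passes through
 * bitmap_daemon_work() -- one to step its counter from 2 to 1, a second to
 * drop it from 1 to 0 and clear the file bit (leaving the page PENDING),
 * and a third to promote PENDING to NEEDWRITE and issue the write.  So
 * bitmap_flush() rewinds daemon_lastrun and calls the worker three times
 * before the final superblock update.
 */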
/*
 * free memory that was allocated
 */
void bitmap_free(struct bitmap *bitmap)
{
	unsigned long k, pages;
	struct bitmap_page *bp;

	if (!bitmap) /* there was no bitmap */
		return;

	if (bitmap->sysfs_can_clear)
		sysfs_put(bitmap->sysfs_can_clear);

	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
		bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
		md_cluster_stop(bitmap->mddev);

	/* Shouldn't be needed - but just in case.... */
	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes) == 0);

	/* release the bitmap file  */
	bitmap_file_unmap(&bitmap->storage);

	bp = bitmap->counts.bp;
	pages = bitmap->counts.pages;

	/* free all allocated memory */

	if (bp) /* deallocate the page memory */
		for (k = 0; k < pages; k++)
			if (bp[k].map && !bp[k].hijacked)
				kfree(bp[k].map);
	kfree(bp);
	kfree(bitmap);
}
EXPORT_SYMBOL(bitmap_free);

void bitmap_wait_behind_writes(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	/* wait for behind writes to complete */
	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
		pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
			 mdname(mddev));
		/* need to kick something here to make sure I/O goes? */
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}
}

void bitmap_destroy(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap) /* there was no bitmap */
		return;

	bitmap_wait_behind_writes(mddev);

	mutex_lock(&mddev->bitmap_info.mutex);
	spin_lock(&mddev->lock);
	mddev->bitmap = NULL; /* disconnect from the md device */
	spin_unlock(&mddev->lock);
	mutex_unlock(&mddev->bitmap_info.mutex);
	if (mddev->thread)
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;

	bitmap_free(bitmap);
}
int bitmap_load(struct mddev *mddev)
{
	int err = 0;
	sector_t start = 0;
	sector_t sector = 0;
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap)
		goto out;

	if (mddev_is_clustered(mddev))
		md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);

	/* Clear out old bitmap info first: Either there is none, or we
	 * are resuming after someone else has possibly changed things,
	 * so we should forget old cached info.
	 * All chunks should be clean, but some might need_sync.
	 */
	while (sector < mddev->resync_max_sectors) {
		sector_t blocks;
		bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	bitmap_close_sync(bitmap);

	if (mddev->degraded == 0
	    || bitmap->events_cleared == mddev->events)
		/* no need to keep dirty bits to optimise a
		 * re-add of a missing device */
		start = mddev->recovery_cp;

	mutex_lock(&mddev->bitmap_info.mutex);
	err = bitmap_init_from_disk(bitmap, start);
	mutex_unlock(&mddev->bitmap_info.mutex);

	if (err)
		goto out;
	clear_bit(BITMAP_STALE, &bitmap->flags);

	/* Kick recovery in case any bits were set */
	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);

	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
	md_wakeup_thread(mddev->thread);

	bitmap_update_sb(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		err = -EIO;
out:
	return err;
}
EXPORT_SYMBOL_GPL(bitmap_load);

struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
{
	int rv = 0;
	struct bitmap *bitmap;

	bitmap = bitmap_create(mddev, slot);
	if (IS_ERR(bitmap)) {
		rv = PTR_ERR(bitmap);
		return ERR_PTR(rv);
	}

	rv = bitmap_init_from_disk(bitmap, 0);
	if (rv) {
		bitmap_free(bitmap);
		return ERR_PTR(rv);
	}

	return bitmap;
}
EXPORT_SYMBOL(get_bitmap_from_slot);

/* Loads the bitmap associated with slot and copies the resync information
 * to our bitmap
 */
int bitmap_copy_from_slot(struct mddev *mddev, int slot,
			  sector_t *low, sector_t *high, bool clear_bits)
{
	int rv = 0, i, j;
	sector_t block, lo = 0, hi = 0;
	struct bitmap_counts *counts;
	struct bitmap *bitmap;

	bitmap = get_bitmap_from_slot(mddev, slot);
	if (IS_ERR(bitmap)) {
		pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
		return -1;
	}

	counts = &bitmap->counts;
	for (j = 0; j < counts->chunks; j++) {
		block = (sector_t)j << counts->chunkshift;
		if (bitmap_file_test_bit(bitmap, block)) {
			if (!lo)
				lo = block;
			hi = block;
			bitmap_file_clear_bit(bitmap, block);
			bitmap_set_memory_bits(mddev->bitmap, block, 1);
			bitmap_file_set_bit(mddev->bitmap, block);
		}
	}

	if (clear_bits) {
		bitmap_update_sb(bitmap);
		/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
		 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
		for (i = 0; i < bitmap->storage.file_pages; i++)
			if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
				set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
		bitmap_unplug(bitmap);
	}
	bitmap_unplug(mddev->bitmap);
	*low = lo;
	*high = hi;

	return rv;
}
EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);
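/*
 * Illustrative sketch, not part of the original driver: how a cluster
 * caller might consume the [*low, *high] range reported by
 * bitmap_copy_from_slot().  In md-cluster-style recovery the range
 * bounds the resync that must be replayed for a departed node (the
 * helper name below is hypothetical):
 *
 *	sector_t lo = 0, hi = 0;
 *	if (bitmap_copy_from_slot(mddev, dead_slot, &lo, &hi, true) == 0 &&
 *	    hi > 0) {
 *		// resync only the window the departed node had dirty
 *		example_request_resync(mddev, lo, hi);
 *	}
 */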
void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
	unsigned long chunk_kb;
	struct bitmap_counts *counts;

	if (!bitmap)
		return;

	counts = &bitmap->counts;

	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
		   "%lu%s chunk",
		   counts->pages - counts->missing_pages,
		   counts->pages,
		   (counts->pages - counts->missing_pages)
		   << (PAGE_SHIFT - 10),
		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
		   chunk_kb ? "KB" : "B");
	if (bitmap->storage.file) {
		seq_printf(seq, ", file: ");
		seq_file_path(seq, bitmap->storage.file, " \t\n");
	}

	seq_printf(seq, "\n");
}
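/*
 * Example of the /proc/mdstat line bitmap_status() produces (values
 * illustrative): with 5 of 10 counter pages allocated, a 64KB chunk and
 * a file-backed bitmap it prints
 *
 *	bitmap: 5/10 pages [20KB], 64KB chunk, file: /var/md0.bitmap
 *
 * The "[20KB]" figure is the allocated page count times PAGE_SIZE (4KB
 * here), and the chunk size is shown in KB only when it is at least 1KB,
 * otherwise in bytes.
 */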
int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
		  int chunksize, int init)
{
	/* If chunk_size is 0, choose an appropriate chunk size.
	 * Then possibly allocate new storage space.
	 * Then quiesce, copy bits, replace bitmap, and re-start
	 *
	 * This function is called both to set up the initial bitmap
	 * and to resize the bitmap while the array is active.
	 * If this happens as a result of the array being resized,
	 * chunksize will be zero, and we need to choose a suitable
	 * chunksize, otherwise we use what we are given.
	 */
	struct bitmap_storage store;
	struct bitmap_counts old_counts;
	unsigned long chunks;
	sector_t block;
	sector_t old_blocks, new_blocks;
	int chunkshift;
	int ret = 0;
	long pages;
	struct bitmap_page *new_bp;

	if (bitmap->storage.file && !init) {
		pr_info("md: cannot resize file-based bitmap\n");
		return -EINVAL;
	}

	if (chunksize == 0) {
		/* If there is enough space, leave the chunk size unchanged,
		 * else increase by factor of two until there is enough space.
		 */
		long bytes;
		long space = bitmap->mddev->bitmap_info.space;

		if (space == 0) {
			/* We don't know how much space there is, so limit
			 * to current size - in sectors.
			 */
			bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
			space = DIV_ROUND_UP(bytes, 512);
			bitmap->mddev->bitmap_info.space = space;
		}
		chunkshift = bitmap->counts.chunkshift;
		chunkshift--;
		do {
			/* 'chunkshift' is shift from block size to chunk size */
			chunkshift++;
			chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
			bytes = DIV_ROUND_UP(chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
		} while (bytes > (space << 9));
	} else
		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;

	chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
	memset(&store, 0, sizeof(store));
	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
		ret = bitmap_storage_alloc(&store, chunks,
					   !bitmap->mddev->bitmap_info.external,
					   mddev_is_clustered(bitmap->mddev)
					   ? bitmap->cluster_slot : 0);
	if (ret) {
		bitmap_file_unmap(&store);
		goto err;
	}

	pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);

	new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL);
	ret = -ENOMEM;
	if (!new_bp) {
		bitmap_file_unmap(&store);
		goto err;
	}

	if (!init)
		bitmap->mddev->pers->quiesce(bitmap->mddev, 1);

	store.file = bitmap->storage.file;
	bitmap->storage.file = NULL;

	if (store.sb_page && bitmap->storage.sb_page)
		memcpy(page_address(store.sb_page),
		       page_address(bitmap->storage.sb_page),
		       sizeof(bitmap_super_t));
	bitmap_file_unmap(&bitmap->storage);
	bitmap->storage = store;

	old_counts = bitmap->counts;
	bitmap->counts.bp = new_bp;
	bitmap->counts.pages = pages;
	bitmap->counts.missing_pages = pages;
	bitmap->counts.chunkshift = chunkshift;
	bitmap->counts.chunks = chunks;
	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
						     BITMAP_BLOCK_SHIFT);

	blocks = min(old_counts.chunks << old_counts.chunkshift,
		     chunks << chunkshift);

	spin_lock_irq(&bitmap->counts.lock);
	/* For cluster raid, need to pre-allocate bitmap */
	if (mddev_is_clustered(bitmap->mddev)) {
		unsigned long page;
		for (page = 0; page < pages; page++) {
			ret = bitmap_checkpage(&bitmap->counts, page, 1, 1);
			if (ret) {
				unsigned long k;

				/* deallocate the page memory */
				for (k = 0; k < page; k++) {
					kfree(new_bp[k].map);
				}
				kfree(new_bp);

				/* restore some fields from old_counts */
				bitmap->counts.bp = old_counts.bp;
				bitmap->counts.pages = old_counts.pages;
				bitmap->counts.missing_pages = old_counts.pages;
				bitmap->counts.chunkshift = old_counts.chunkshift;
				bitmap->counts.chunks = old_counts.chunks;
				bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
									     BITMAP_BLOCK_SHIFT);
				blocks = old_counts.chunks << old_counts.chunkshift;
				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
				break;
			} else
				bitmap->counts.bp[page].count += 1;
		}
	}

	for (block = 0; block < blocks; ) {
		bitmap_counter_t *bmc_old, *bmc_new;
		int set;

		bmc_old = bitmap_get_counter(&old_counts, block,
					     &old_blocks, 0);
		set = bmc_old && NEEDED(*bmc_old);

		if (set) {
			bmc_new = bitmap_get_counter(&bitmap->counts, block,
						     &new_blocks, 1);
			if (*bmc_new == 0) {
				/* need to set on-disk bits too. */
				sector_t end = block + new_blocks;
				sector_t start = block >> chunkshift;
				start <<= chunkshift;
				while (start < end) {
					bitmap_file_set_bit(bitmap, block);
					start += 1 << chunkshift;
				}
				*bmc_new = 2;
				bitmap_count_page(&bitmap->counts,
						  block, 1);
				bitmap_set_pending(&bitmap->counts,
						   block);
			}
			*bmc_new |= NEEDED_MASK;
			if (new_blocks < old_blocks)
				old_blocks = new_blocks;
		}
		block += old_blocks;
	}

	if (bitmap->counts.bp != old_counts.bp) {
		unsigned long k;
		for (k = 0; k < old_counts.pages; k++)
			if (!old_counts.bp[k].hijacked)
				kfree(old_counts.bp[k].map);
		kfree(old_counts.bp);
	}

	if (!init) {
		int i;
		while (block < (chunks << chunkshift)) {
			bitmap_counter_t *bmc;
			bmc = bitmap_get_counter(&bitmap->counts, block,
						 &new_blocks, 1);
			if (bmc) {
				/* new space.  It needs to be resynced, so
				 * we set NEEDED_MASK.
				 */
				if (*bmc == 0) {
					*bmc = NEEDED_MASK | 2;
					bitmap_count_page(&bitmap->counts,
							  block, 1);
					bitmap_set_pending(&bitmap->counts,
							   block);
				}
			}
			block += new_blocks;
		}
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
	}
	spin_unlock_irq(&bitmap->counts.lock);

	if (!init) {
		bitmap_unplug(bitmap);
		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
	}
	ret = 0;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(bitmap_resize);
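/*
 * Worked example for the chunkshift arithmetic above (illustrative
 * numbers): chunksize is in bytes and BITMAP_BLOCK_SHIFT is 9, so a
 * 64MiB chunk (1 << 26) gives
 *
 *	chunkshift = ffz(~(1 << 26)) - 9 = 26 - 9 = 17
 *
 * i.e. one chunk covers 1 << 17 = 131072 sectors.  A 1TiB array
 * (1ULL << 31 sectors) then needs
 *
 *	chunks = DIV_ROUND_UP_SECTOR_T(1ULL << 31, 1 << 17) = 16384
 *
 * chunks, or 2048 bytes of on-disk bitmap (one bit per chunk) plus
 * sizeof(bitmap_super_t) = 256 bytes for an internal bitmap - five
 * 512-byte sectors of 'space'.
 */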
static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{
	int rv;

	rv = mddev_lock(mddev);
	if (rv)
		return rv;
	if (mddev->pers) {
		if (!mddev->pers->quiesce) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->recovery || mddev->sync_thread) {
			rv = -EBUSY;
			goto out;
		}
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured.  Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->pers) {
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			rv = -EINVAL;
			goto out;
		} else {
			if (buf[0] == '+')
				rv = kstrtoll(buf+1, 10, &offset);
			else
				rv = kstrtoll(buf, 10, &offset);
			if (rv)
				goto out;
			if (offset == 0) {
				rv = -EINVAL;
				goto out;
			}
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset) {
				rv = -EINVAL;
				goto out;
			}
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				struct bitmap *bitmap;
				mddev->pers->quiesce(mddev, 1);
				bitmap = bitmap_create(mddev, -1);
				if (IS_ERR(bitmap))
					rv = PTR_ERR(bitmap);
				else {
					mddev->bitmap = bitmap;
					rv = bitmap_load(mddev);
					if (rv)
						mddev->bitmap_info.offset = 0;
				}
				mddev->pers->quiesce(mddev, 0);
				if (rv) {
					bitmap_destroy(mddev);
					goto out;
				}
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
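/* (An illustrative user-space view of the 'location' attribute follows
 * the attribute definition below.)
 */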
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
	}
	rv = 0;
out:
	mddev_unlock(mddev);
	if (rv)
		return rv;
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);

/*
 * Illustrative usage from user space (hypothetical device name): the
 * 'location' attribute accepts "none" to remove an existing bitmap, or a
 * signed sector offset relative to the superblock to create an internal
 * one; "file:..." is recognised but rejected as unsupported:
 *
 *	echo +8 > /sys/block/md0/md/bitmap/location	// create at +8 sectors
 *	echo none > /sys/block/md0/md/bitmap/location	// tear it down
 */

/* 'bitmap/space' is the space available at 'location' for the
 * bitmap.  This allows the kernel to know when it is safe to
 * resize the bitmap to match a resized array.
 */
static ssize_t
space_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
}

static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long sectors;
	int rv;

	rv = kstrtoul(buf, 10, &sectors);
	if (rv)
		return rv;

	if (sectors == 0)
		return -EINVAL;

	if (mddev->bitmap &&
	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
		return -EFBIG; /* Bitmap is too big for this small space */

	/* could make sure it isn't too big, but that isn't really
	 * needed - user-space should be careful.
	 */
	mddev->bitmap_info.space = sectors;
	return len;
}

static struct md_sysfs_entry bitmap_space =
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);

static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
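/*
 * Worked example for timeout_store() above (illustrative): the attribute
 * takes seconds with up to four decimal places, so strict_strtoul_scaled()
 * turns "5.7" into 57000 (units of 100us).  The conversion to jiffies is
 * then
 *
 *	timeout = 57000 * HZ / 10000;	// 5.7 * HZ
 *
 * which timeout_show() prints back as "5.700".
 */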
static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	int rv = kstrtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
	mddev->bitmap_info.max_write_behind = backlog;
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);

static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;
	if (mddev->bitmap)
		return -EBUSY;
	rv = kstrtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);

static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	if (mddev_is_clustered(mddev))
		return sprintf(page, "clustered\n");
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if ((strncmp(buf, "internal", 8) == 0) ||
		 (strncmp(buf, "clustered", 9) == 0))
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
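/* 'bitmap/can_clear' reports whether bits may be cleared as regions come
 * back in sync.  Writing "false" sets need_sync, so bits are only ever
 * set, never cleared - letting user space that manages bitmap state hold
 * off clearing until it has caught up - and writing "true" re-enables
 * clearing, which is refused while the array is degraded.
 */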
static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;
	spin_lock(&mddev->lock);
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	spin_unlock(&mddev->lock);
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);

static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	ssize_t ret;
	spin_lock(&mddev->lock);
	if (mddev->bitmap == NULL)
		ret = sprintf(page, "0\n");
	else
		ret = sprintf(page, "%lu\n",
			      mddev->bitmap->behind_writes_used);
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);

static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_space.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};