1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 2935fe098SMike Snitzer /* 3935fe098SMike Snitzer * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003 4935fe098SMike Snitzer * 5935fe098SMike Snitzer * bitmap_create - sets up the bitmap structure 6935fe098SMike Snitzer * bitmap_destroy - destroys the bitmap structure 7935fe098SMike Snitzer * 8935fe098SMike Snitzer * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.: 9935fe098SMike Snitzer * - added disk storage for bitmap 10935fe098SMike Snitzer * - changes to allow various bitmap chunk sizes 11935fe098SMike Snitzer */ 12935fe098SMike Snitzer 13935fe098SMike Snitzer /* 14935fe098SMike Snitzer * Still to do: 15935fe098SMike Snitzer * 16935fe098SMike Snitzer * flush after percent set rather than just time based. (maybe both). 17935fe098SMike Snitzer */ 18935fe098SMike Snitzer 19935fe098SMike Snitzer #include <linux/blkdev.h> 20935fe098SMike Snitzer #include <linux/module.h> 21935fe098SMike Snitzer #include <linux/errno.h> 22935fe098SMike Snitzer #include <linux/slab.h> 23935fe098SMike Snitzer #include <linux/init.h> 24935fe098SMike Snitzer #include <linux/timer.h> 25935fe098SMike Snitzer #include <linux/sched.h> 26935fe098SMike Snitzer #include <linux/list.h> 27935fe098SMike Snitzer #include <linux/file.h> 28935fe098SMike Snitzer #include <linux/mount.h> 29935fe098SMike Snitzer #include <linux/buffer_head.h> 30935fe098SMike Snitzer #include <linux/seq_file.h> 31935fe098SMike Snitzer #include <trace/events/block.h> 32935fe098SMike Snitzer #include "md.h" 33935fe098SMike Snitzer #include "md-bitmap.h" 34935fe098SMike Snitzer 35935fe098SMike Snitzer static inline char *bmname(struct bitmap *bitmap) 36935fe098SMike Snitzer { 37935fe098SMike Snitzer return bitmap->mddev ? 
mdname(bitmap->mddev) : "mdX"; 38935fe098SMike Snitzer } 39935fe098SMike Snitzer 40935fe098SMike Snitzer /* 41935fe098SMike Snitzer * check a page and, if necessary, allocate it (or hijack it if the alloc fails) 42935fe098SMike Snitzer * 43935fe098SMike Snitzer * 1) check to see if this page is allocated, if it's not then try to alloc 44935fe098SMike Snitzer * 2) if the alloc fails, set the page's hijacked flag so we'll use the 45935fe098SMike Snitzer * page pointer directly as a counter 46935fe098SMike Snitzer * 47935fe098SMike Snitzer * if we find our page, we increment the page's refcount so that it stays 48935fe098SMike Snitzer * allocated while we're using it 49935fe098SMike Snitzer */ 50e64e4018SAndy Shevchenko static int md_bitmap_checkpage(struct bitmap_counts *bitmap, 51935fe098SMike Snitzer unsigned long page, int create, int no_hijack) 52935fe098SMike Snitzer __releases(bitmap->lock) 53935fe098SMike Snitzer __acquires(bitmap->lock) 54935fe098SMike Snitzer { 55935fe098SMike Snitzer unsigned char *mappage; 56935fe098SMike Snitzer 57301867b1SLi Nan WARN_ON_ONCE(page >= bitmap->pages); 58935fe098SMike Snitzer if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */ 59935fe098SMike Snitzer return 0; 60935fe098SMike Snitzer 61935fe098SMike Snitzer if (bitmap->bp[page].map) /* page is already allocated, just return */ 62935fe098SMike Snitzer return 0; 63935fe098SMike Snitzer 64935fe098SMike Snitzer if (!create) 65935fe098SMike Snitzer return -ENOENT; 66935fe098SMike Snitzer 67935fe098SMike Snitzer /* this page has not been allocated yet */ 68935fe098SMike Snitzer 69935fe098SMike Snitzer spin_unlock_irq(&bitmap->lock); 70935fe098SMike Snitzer /* It is possible that this is being called inside a 71935fe098SMike Snitzer * prepare_to_wait/finish_wait loop from raid5c:make_request(). 72935fe098SMike Snitzer * In general it is not permitted to sleep in that context as it 73935fe098SMike Snitzer * can cause the loop to spin freely. 74935fe098SMike Snitzer * That doesn't apply here as we can only reach this point 75935fe098SMike Snitzer * once with any loop. 76935fe098SMike Snitzer * When this function completes, either bp[page].map or 77935fe098SMike Snitzer * bp[page].hijacked. In either case, this function will 78935fe098SMike Snitzer * abort before getting to this point again. So there is 79935fe098SMike Snitzer * no risk of a free-spin, and so it is safe to assert 80935fe098SMike Snitzer * that sleeping here is allowed. 
81935fe098SMike Snitzer */ 82935fe098SMike Snitzer sched_annotate_sleep(); 83935fe098SMike Snitzer mappage = kzalloc(PAGE_SIZE, GFP_NOIO); 84935fe098SMike Snitzer spin_lock_irq(&bitmap->lock); 85935fe098SMike Snitzer 86935fe098SMike Snitzer if (mappage == NULL) { 87935fe098SMike Snitzer pr_debug("md/bitmap: map page allocation failed, hijacking\n"); 88935fe098SMike Snitzer /* We don't support hijack for cluster raid */ 89935fe098SMike Snitzer if (no_hijack) 90935fe098SMike Snitzer return -ENOMEM; 91935fe098SMike Snitzer /* failed - set the hijacked flag so that we can use the 92935fe098SMike Snitzer * pointer as a counter */ 93935fe098SMike Snitzer if (!bitmap->bp[page].map) 94935fe098SMike Snitzer bitmap->bp[page].hijacked = 1; 95935fe098SMike Snitzer } else if (bitmap->bp[page].map || 96935fe098SMike Snitzer bitmap->bp[page].hijacked) { 97935fe098SMike Snitzer /* somebody beat us to getting the page */ 98935fe098SMike Snitzer kfree(mappage); 99935fe098SMike Snitzer } else { 100935fe098SMike Snitzer 101935fe098SMike Snitzer /* no page was in place and we have one, so install it */ 102935fe098SMike Snitzer 103935fe098SMike Snitzer bitmap->bp[page].map = mappage; 104935fe098SMike Snitzer bitmap->missing_pages--; 105935fe098SMike Snitzer } 106935fe098SMike Snitzer return 0; 107935fe098SMike Snitzer } 108935fe098SMike Snitzer 109935fe098SMike Snitzer /* if page is completely empty, put it back on the free list, or dealloc it */ 110935fe098SMike Snitzer /* if page was hijacked, unmark the flag so it might get alloced next time */ 111935fe098SMike Snitzer /* Note: lock should be held when calling this */ 112e64e4018SAndy Shevchenko static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page) 113935fe098SMike Snitzer { 114935fe098SMike Snitzer char *ptr; 115935fe098SMike Snitzer 116935fe098SMike Snitzer if (bitmap->bp[page].count) /* page is still busy */ 117935fe098SMike Snitzer return; 118935fe098SMike Snitzer 119935fe098SMike Snitzer /* page is no longer in use, it can be released */ 120935fe098SMike Snitzer 121935fe098SMike Snitzer if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */ 122935fe098SMike Snitzer bitmap->bp[page].hijacked = 0; 123935fe098SMike Snitzer bitmap->bp[page].map = NULL; 124935fe098SMike Snitzer } else { 125935fe098SMike Snitzer /* normal case, free the page */ 126935fe098SMike Snitzer ptr = bitmap->bp[page].map; 127935fe098SMike Snitzer bitmap->bp[page].map = NULL; 128935fe098SMike Snitzer bitmap->missing_pages++; 129935fe098SMike Snitzer kfree(ptr); 130935fe098SMike Snitzer } 131935fe098SMike Snitzer } 132935fe098SMike Snitzer 133935fe098SMike Snitzer /* 134935fe098SMike Snitzer * bitmap file handling - read and write the bitmap file and its superblock 135935fe098SMike Snitzer */ 136935fe098SMike Snitzer 137935fe098SMike Snitzer /* 138935fe098SMike Snitzer * basic page I/O operations 139935fe098SMike Snitzer */ 140935fe098SMike Snitzer 141935fe098SMike Snitzer /* IO operations when bitmap is stored near all superblocks */ 142935fe098SMike Snitzer 143*0c3ea5ccSChristoph Hellwig /* choose a good rdev and read the page from there */ 144*0c3ea5ccSChristoph Hellwig static int read_sb_page(struct mddev *mddev, loff_t offset, 145*0c3ea5ccSChristoph Hellwig struct page *page, unsigned long index, int size) 146*0c3ea5ccSChristoph Hellwig { 147*0c3ea5ccSChristoph Hellwig 148*0c3ea5ccSChristoph Hellwig sector_t sector = offset + index * (PAGE_SIZE / SECTOR_SIZE); 149935fe098SMike Snitzer struct md_rdev *rdev; 150935fe098SMike Snitzer 
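/*
 * The loop below tries each member device in turn: devices that are not
 * In_sync, are Faulty, or still have Bitmap_sync pending are skipped.
 * The read size is rounded up to the device's logical block size and the
 * first successful sync_page_io() satisfies the request; if no device can
 * serve the page, -EIO is returned.
 */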
151935fe098SMike Snitzer rdev_for_each(rdev, mddev) { 152*0c3ea5ccSChristoph Hellwig u32 iosize = roundup(size, bdev_logical_block_size(rdev->bdev)); 153*0c3ea5ccSChristoph Hellwig 154*0c3ea5ccSChristoph Hellwig if (!test_bit(In_sync, &rdev->flags) || 155*0c3ea5ccSChristoph Hellwig test_bit(Faulty, &rdev->flags) || 156*0c3ea5ccSChristoph Hellwig test_bit(Bitmap_sync, &rdev->flags)) 157935fe098SMike Snitzer continue; 158935fe098SMike Snitzer 159*0c3ea5ccSChristoph Hellwig if (sync_page_io(rdev, sector, iosize, page, REQ_OP_READ, 160*0c3ea5ccSChristoph Hellwig true)) { 161935fe098SMike Snitzer page->index = index; 162935fe098SMike Snitzer return 0; 163935fe098SMike Snitzer } 164935fe098SMike Snitzer } 165935fe098SMike Snitzer return -EIO; 166935fe098SMike Snitzer } 167935fe098SMike Snitzer 168935fe098SMike Snitzer static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev) 169935fe098SMike Snitzer { 170935fe098SMike Snitzer /* Iterate the disks of an mddev, using rcu to protect access to the 171935fe098SMike Snitzer * linked list, and raising the refcount of devices we return to ensure 172935fe098SMike Snitzer * they don't disappear while in use. 173935fe098SMike Snitzer * As devices are only added or removed when raid_disk is < 0 and 174935fe098SMike Snitzer * nr_pending is 0 and In_sync is clear, the entries we return will 175935fe098SMike Snitzer * still be in the same position on the list when we re-enter 176935fe098SMike Snitzer * list_for_each_entry_continue_rcu. 177935fe098SMike Snitzer * 178935fe098SMike Snitzer * Note that if entered with 'rdev == NULL' to start at the 179935fe098SMike Snitzer * beginning, we temporarily assign 'rdev' to an address which 180935fe098SMike Snitzer * isn't really an rdev, but which can be used by 181935fe098SMike Snitzer * list_for_each_entry_continue_rcu() to find the first entry. 182935fe098SMike Snitzer */ 183935fe098SMike Snitzer rcu_read_lock(); 184935fe098SMike Snitzer if (rdev == NULL) 185935fe098SMike Snitzer /* start at the beginning */ 186935fe098SMike Snitzer rdev = list_entry(&mddev->disks, struct md_rdev, same_set); 187935fe098SMike Snitzer else { 188935fe098SMike Snitzer /* release the previous rdev and start from there. 
*/ 189935fe098SMike Snitzer rdev_dec_pending(rdev, mddev); 190935fe098SMike Snitzer } 191935fe098SMike Snitzer list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) { 192935fe098SMike Snitzer if (rdev->raid_disk >= 0 && 193935fe098SMike Snitzer !test_bit(Faulty, &rdev->flags)) { 194935fe098SMike Snitzer /* this is a usable devices */ 195935fe098SMike Snitzer atomic_inc(&rdev->nr_pending); 196935fe098SMike Snitzer rcu_read_unlock(); 197935fe098SMike Snitzer return rdev; 198935fe098SMike Snitzer } 199935fe098SMike Snitzer } 200935fe098SMike Snitzer rcu_read_unlock(); 201935fe098SMike Snitzer return NULL; 202935fe098SMike Snitzer } 203935fe098SMike Snitzer 2048745faa9SJon Derrick static unsigned int optimal_io_size(struct block_device *bdev, 2058745faa9SJon Derrick unsigned int last_page_size, 2068745faa9SJon Derrick unsigned int io_size) 2078745faa9SJon Derrick { 2088745faa9SJon Derrick if (bdev_io_opt(bdev) > bdev_logical_block_size(bdev)) 2098745faa9SJon Derrick return roundup(last_page_size, bdev_io_opt(bdev)); 2108745faa9SJon Derrick return io_size; 2118745faa9SJon Derrick } 2128745faa9SJon Derrick 2138745faa9SJon Derrick static unsigned int bitmap_io_size(unsigned int io_size, unsigned int opt_size, 214b1211978SJonathan Derrick loff_t start, loff_t boundary) 2158745faa9SJon Derrick { 2168745faa9SJon Derrick if (io_size != opt_size && 2178745faa9SJon Derrick start + opt_size / SECTOR_SIZE <= boundary) 2188745faa9SJon Derrick return opt_size; 2198745faa9SJon Derrick if (start + io_size / SECTOR_SIZE <= boundary) 2208745faa9SJon Derrick return io_size; 2218745faa9SJon Derrick 2228745faa9SJon Derrick /* Overflows boundary */ 2238745faa9SJon Derrick return 0; 2248745faa9SJon Derrick } 2258745faa9SJon Derrick 226328e17d8SJon Derrick static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap, 227328e17d8SJon Derrick struct page *page) 228935fe098SMike Snitzer { 229935fe098SMike Snitzer struct block_device *bdev; 230935fe098SMike Snitzer struct mddev *mddev = bitmap->mddev; 231935fe098SMike Snitzer struct bitmap_storage *store = &bitmap->storage; 232b1211978SJonathan Derrick loff_t sboff, offset = mddev->bitmap_info.offset; 233b1211978SJonathan Derrick sector_t ps, doff; 23410172f20SJon Derrick unsigned int size = PAGE_SIZE; 2358745faa9SJon Derrick unsigned int opt_size = PAGE_SIZE; 236935fe098SMike Snitzer 237935fe098SMike Snitzer bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev; 238935fe098SMike Snitzer if (page->index == store->file_pages - 1) { 23910172f20SJon Derrick unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1); 240328e17d8SJon Derrick 241935fe098SMike Snitzer if (last_page_size == 0) 242935fe098SMike Snitzer last_page_size = PAGE_SIZE; 2438745faa9SJon Derrick size = roundup(last_page_size, bdev_logical_block_size(bdev)); 2448745faa9SJon Derrick opt_size = optimal_io_size(bdev, last_page_size, size); 245935fe098SMike Snitzer } 246328e17d8SJon Derrick 24710172f20SJon Derrick ps = page->index * PAGE_SIZE / SECTOR_SIZE; 24810172f20SJon Derrick sboff = rdev->sb_start + offset; 24910172f20SJon Derrick doff = rdev->data_offset; 25010172f20SJon Derrick 251328e17d8SJon Derrick /* Just make sure we aren't corrupting data or metadata */ 252935fe098SMike Snitzer if (mddev->external) { 253935fe098SMike Snitzer /* Bitmap could be anywhere. 
*/ 25410172f20SJon Derrick if (sboff + ps > doff && 25510172f20SJon Derrick sboff < (doff + mddev->dev_sectors + PAGE_SIZE / SECTOR_SIZE)) 256328e17d8SJon Derrick return -EINVAL; 257935fe098SMike Snitzer } else if (offset < 0) { 258935fe098SMike Snitzer /* DATA BITMAP METADATA */ 2598745faa9SJon Derrick size = bitmap_io_size(size, opt_size, offset + ps, 0); 2608745faa9SJon Derrick if (size == 0) 261935fe098SMike Snitzer /* bitmap runs in to metadata */ 262328e17d8SJon Derrick return -EINVAL; 263328e17d8SJon Derrick 26410172f20SJon Derrick if (doff + mddev->dev_sectors > sboff) 265935fe098SMike Snitzer /* data runs in to bitmap */ 266328e17d8SJon Derrick return -EINVAL; 267935fe098SMike Snitzer } else if (rdev->sb_start < rdev->data_offset) { 268935fe098SMike Snitzer /* METADATA BITMAP DATA */ 2698745faa9SJon Derrick size = bitmap_io_size(size, opt_size, sboff + ps, doff); 2708745faa9SJon Derrick if (size == 0) 271935fe098SMike Snitzer /* bitmap runs in to data */ 272328e17d8SJon Derrick return -EINVAL; 273935fe098SMike Snitzer } else { 274935fe098SMike Snitzer /* DATA METADATA BITMAP - no problems */ 275935fe098SMike Snitzer } 276328e17d8SJon Derrick 27710172f20SJon Derrick md_super_write(mddev, rdev, sboff + ps, (int) size, page); 278328e17d8SJon Derrick return 0; 279935fe098SMike Snitzer } 280935fe098SMike Snitzer 28159cefee7SChristoph Hellwig static void write_sb_page(struct bitmap *bitmap, struct page *page, int wait) 282328e17d8SJon Derrick { 283328e17d8SJon Derrick struct mddev *mddev = bitmap->mddev; 284935fe098SMike Snitzer 285328e17d8SJon Derrick do { 28659cefee7SChristoph Hellwig struct md_rdev *rdev = NULL; 28759cefee7SChristoph Hellwig 288328e17d8SJon Derrick while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { 28959cefee7SChristoph Hellwig if (__write_sb_page(rdev, bitmap, page) < 0) { 29059cefee7SChristoph Hellwig set_bit(BITMAP_WRITE_ERROR, &bitmap->flags); 29159cefee7SChristoph Hellwig return; 29259cefee7SChristoph Hellwig } 293328e17d8SJon Derrick } 294328e17d8SJon Derrick } while (wait && md_super_wait(mddev) < 0); 295935fe098SMike Snitzer } 296935fe098SMike Snitzer 297e64e4018SAndy Shevchenko static void md_bitmap_file_kick(struct bitmap *bitmap); 2985339178eSChristoph Hellwig 2995339178eSChristoph Hellwig static void write_file_page(struct bitmap *bitmap, struct page *page, int wait) 300935fe098SMike Snitzer { 3015339178eSChristoph Hellwig struct buffer_head *bh = page_buffers(page); 302935fe098SMike Snitzer 303935fe098SMike Snitzer while (bh && bh->b_blocknr) { 304935fe098SMike Snitzer atomic_inc(&bitmap->pending_writes); 305935fe098SMike Snitzer set_buffer_locked(bh); 306935fe098SMike Snitzer set_buffer_mapped(bh); 3071420c4a5SBart Van Assche submit_bh(REQ_OP_WRITE | REQ_SYNC, bh); 308935fe098SMike Snitzer bh = bh->b_this_page; 309935fe098SMike Snitzer } 310935fe098SMike Snitzer 311935fe098SMike Snitzer if (wait) 312935fe098SMike Snitzer wait_event(bitmap->write_wait, 313935fe098SMike Snitzer atomic_read(&bitmap->pending_writes) == 0); 314935fe098SMike Snitzer } 315935fe098SMike Snitzer 316935fe098SMike Snitzer static void end_bitmap_write(struct buffer_head *bh, int uptodate) 317935fe098SMike Snitzer { 318935fe098SMike Snitzer struct bitmap *bitmap = bh->b_private; 319935fe098SMike Snitzer 320935fe098SMike Snitzer if (!uptodate) 321935fe098SMike Snitzer set_bit(BITMAP_WRITE_ERROR, &bitmap->flags); 322935fe098SMike Snitzer if (atomic_dec_and_test(&bitmap->pending_writes)) 323935fe098SMike Snitzer wake_up(&bitmap->write_wait); 324935fe098SMike Snitzer } 
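/*
 * free_buffers() undoes the setup done by read_file_page(): it walks the
 * buffer_head list that was attached with attach_page_private(), frees each
 * buffer_head, detaches the page's private data and drops the page
 * reference.  Pages that never had buffers attached (!PagePrivate) are
 * returned untouched.
 */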
325935fe098SMike Snitzer 326935fe098SMike Snitzer static void free_buffers(struct page *page) 327935fe098SMike Snitzer { 328935fe098SMike Snitzer struct buffer_head *bh; 329935fe098SMike Snitzer 330935fe098SMike Snitzer if (!PagePrivate(page)) 331935fe098SMike Snitzer return; 332935fe098SMike Snitzer 333935fe098SMike Snitzer bh = page_buffers(page); 334935fe098SMike Snitzer while (bh) { 335935fe098SMike Snitzer struct buffer_head *next = bh->b_this_page; 336935fe098SMike Snitzer free_buffer_head(bh); 337935fe098SMike Snitzer bh = next; 338935fe098SMike Snitzer } 339db2c1d86SGuoqing Jiang detach_page_private(page); 340935fe098SMike Snitzer put_page(page); 341935fe098SMike Snitzer } 342935fe098SMike Snitzer 343935fe098SMike Snitzer /* read a page from a file. 344935fe098SMike Snitzer * We both read the page, and attach buffers to the page to record the 345935fe098SMike Snitzer * address of each block (using bmap). These addresses will be used 346935fe098SMike Snitzer * to write the block later, completely bypassing the filesystem. 347935fe098SMike Snitzer * This usage is similar to how swap files are handled, and allows us 348935fe098SMike Snitzer * to write to a file with no concerns of memory allocation failing. 349935fe098SMike Snitzer */ 350d681054cSChristoph Hellwig static int read_file_page(struct file *file, unsigned long index, 351d681054cSChristoph Hellwig struct bitmap *bitmap, unsigned long count, struct page *page) 352935fe098SMike Snitzer { 353935fe098SMike Snitzer int ret = 0; 354935fe098SMike Snitzer struct inode *inode = file_inode(file); 355935fe098SMike Snitzer struct buffer_head *bh; 35630460e1eSCarlos Maiolino sector_t block, blk_cur; 357313b825fSXianting Tian unsigned long blocksize = i_blocksize(inode); 358935fe098SMike Snitzer 359935fe098SMike Snitzer pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE, 360935fe098SMike Snitzer (unsigned long long)index << PAGE_SHIFT); 361935fe098SMike Snitzer 362313b825fSXianting Tian bh = alloc_page_buffers(page, blocksize, false); 363935fe098SMike Snitzer if (!bh) { 364935fe098SMike Snitzer ret = -ENOMEM; 365935fe098SMike Snitzer goto out; 366935fe098SMike Snitzer } 367db2c1d86SGuoqing Jiang attach_page_private(page, bh); 36830460e1eSCarlos Maiolino blk_cur = index << (PAGE_SHIFT - inode->i_blkbits); 369935fe098SMike Snitzer while (bh) { 37030460e1eSCarlos Maiolino block = blk_cur; 37130460e1eSCarlos Maiolino 372935fe098SMike Snitzer if (count == 0) 373935fe098SMike Snitzer bh->b_blocknr = 0; 374935fe098SMike Snitzer else { 37530460e1eSCarlos Maiolino ret = bmap(inode, &block); 37630460e1eSCarlos Maiolino if (ret || !block) { 377935fe098SMike Snitzer ret = -EINVAL; 37830460e1eSCarlos Maiolino bh->b_blocknr = 0; 379935fe098SMike Snitzer goto out; 380935fe098SMike Snitzer } 38130460e1eSCarlos Maiolino 38230460e1eSCarlos Maiolino bh->b_blocknr = block; 383935fe098SMike Snitzer bh->b_bdev = inode->i_sb->s_bdev; 384313b825fSXianting Tian if (count < blocksize) 385935fe098SMike Snitzer count = 0; 386935fe098SMike Snitzer else 387313b825fSXianting Tian count -= blocksize; 388935fe098SMike Snitzer 389935fe098SMike Snitzer bh->b_end_io = end_bitmap_write; 390935fe098SMike Snitzer bh->b_private = bitmap; 391935fe098SMike Snitzer atomic_inc(&bitmap->pending_writes); 392935fe098SMike Snitzer set_buffer_locked(bh); 393935fe098SMike Snitzer set_buffer_mapped(bh); 3941420c4a5SBart Van Assche submit_bh(REQ_OP_READ, bh); 395935fe098SMike Snitzer } 39630460e1eSCarlos Maiolino blk_cur++; 397935fe098SMike Snitzer bh = bh->b_this_page; 
398935fe098SMike Snitzer } 399935fe098SMike Snitzer page->index = index; 400935fe098SMike Snitzer 401935fe098SMike Snitzer wait_event(bitmap->write_wait, 402935fe098SMike Snitzer atomic_read(&bitmap->pending_writes)==0); 403935fe098SMike Snitzer if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) 404935fe098SMike Snitzer ret = -EIO; 405935fe098SMike Snitzer out: 406935fe098SMike Snitzer if (ret) 407935fe098SMike Snitzer pr_err("md: bitmap read error: (%dB @ %llu): %d\n", 408935fe098SMike Snitzer (int)PAGE_SIZE, 409935fe098SMike Snitzer (unsigned long long)index << PAGE_SHIFT, 410935fe098SMike Snitzer ret); 411935fe098SMike Snitzer return ret; 412935fe098SMike Snitzer } 413935fe098SMike Snitzer 414935fe098SMike Snitzer /* 415935fe098SMike Snitzer * bitmap file superblock operations 416935fe098SMike Snitzer */ 417935fe098SMike Snitzer 418935fe098SMike Snitzer /* 4195339178eSChristoph Hellwig * write out a page to a file 4205339178eSChristoph Hellwig */ 4215339178eSChristoph Hellwig static void write_page(struct bitmap *bitmap, struct page *page, int wait) 4225339178eSChristoph Hellwig { 4235339178eSChristoph Hellwig if (bitmap->storage.file) 4245339178eSChristoph Hellwig write_file_page(bitmap, page, wait); 4255339178eSChristoph Hellwig else 4265339178eSChristoph Hellwig write_sb_page(bitmap, page, wait); 4275339178eSChristoph Hellwig } 4285339178eSChristoph Hellwig 4295339178eSChristoph Hellwig /* 430e64e4018SAndy Shevchenko * md_bitmap_wait_writes() should be called before writing any bitmap 431935fe098SMike Snitzer * blocks, to ensure previous writes, particularly from 432e64e4018SAndy Shevchenko * md_bitmap_daemon_work(), have completed. 433935fe098SMike Snitzer */ 434e64e4018SAndy Shevchenko static void md_bitmap_wait_writes(struct bitmap *bitmap) 435935fe098SMike Snitzer { 436935fe098SMike Snitzer if (bitmap->storage.file) 437935fe098SMike Snitzer wait_event(bitmap->write_wait, 438935fe098SMike Snitzer atomic_read(&bitmap->pending_writes)==0); 439935fe098SMike Snitzer else 440935fe098SMike Snitzer /* Note that we ignore the return value. The writes 441935fe098SMike Snitzer * might have failed, but that would just mean that 442935fe098SMike Snitzer * some bits which should be cleared haven't been, 443935fe098SMike Snitzer * which is safe. The relevant bitmap blocks will 444935fe098SMike Snitzer * probably get written again, but there is no great 445935fe098SMike Snitzer * loss if they aren't. 
446935fe098SMike Snitzer */ 447935fe098SMike Snitzer md_super_wait(bitmap->mddev); 448935fe098SMike Snitzer } 449935fe098SMike Snitzer 450935fe098SMike Snitzer 451935fe098SMike Snitzer /* update the event counter and sync the superblock to disk */ 452e64e4018SAndy Shevchenko void md_bitmap_update_sb(struct bitmap *bitmap) 453935fe098SMike Snitzer { 454935fe098SMike Snitzer bitmap_super_t *sb; 455935fe098SMike Snitzer 456935fe098SMike Snitzer if (!bitmap || !bitmap->mddev) /* no bitmap for this array */ 457935fe098SMike Snitzer return; 458935fe098SMike Snitzer if (bitmap->mddev->bitmap_info.external) 459935fe098SMike Snitzer return; 460935fe098SMike Snitzer if (!bitmap->storage.sb_page) /* no superblock */ 461935fe098SMike Snitzer return; 462935fe098SMike Snitzer sb = kmap_atomic(bitmap->storage.sb_page); 463935fe098SMike Snitzer sb->events = cpu_to_le64(bitmap->mddev->events); 464935fe098SMike Snitzer if (bitmap->mddev->events < bitmap->events_cleared) 465935fe098SMike Snitzer /* rocking back to read-only */ 466935fe098SMike Snitzer bitmap->events_cleared = bitmap->mddev->events; 467935fe098SMike Snitzer sb->events_cleared = cpu_to_le64(bitmap->events_cleared); 46897f0eb9fSHou Tao /* 46997f0eb9fSHou Tao * clear BITMAP_WRITE_ERROR bit to protect against the case that 47097f0eb9fSHou Tao * a bitmap write error occurred but the later writes succeeded. 47197f0eb9fSHou Tao */ 47297f0eb9fSHou Tao sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR)); 473935fe098SMike Snitzer /* Just in case these have been changed via sysfs: */ 474935fe098SMike Snitzer sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ); 475935fe098SMike Snitzer sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind); 476935fe098SMike Snitzer /* This might have been changed by a reshape */ 477935fe098SMike Snitzer sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); 478935fe098SMike Snitzer sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize); 479935fe098SMike Snitzer sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes); 480935fe098SMike Snitzer sb->sectors_reserved = cpu_to_le32(bitmap->mddev-> 481935fe098SMike Snitzer bitmap_info.space); 482935fe098SMike Snitzer kunmap_atomic(sb); 483935fe098SMike Snitzer write_page(bitmap, bitmap->storage.sb_page, 1); 484935fe098SMike Snitzer } 485e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_update_sb); 486935fe098SMike Snitzer 487935fe098SMike Snitzer /* print out the bitmap file superblock */ 488e64e4018SAndy Shevchenko void md_bitmap_print_sb(struct bitmap *bitmap) 489935fe098SMike Snitzer { 490935fe098SMike Snitzer bitmap_super_t *sb; 491935fe098SMike Snitzer 492935fe098SMike Snitzer if (!bitmap || !bitmap->storage.sb_page) 493935fe098SMike Snitzer return; 494935fe098SMike Snitzer sb = kmap_atomic(bitmap->storage.sb_page); 495935fe098SMike Snitzer pr_debug("%s: bitmap file superblock:\n", bmname(bitmap)); 496935fe098SMike Snitzer pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); 49745552111SFlorian-Ewald Mueller pr_debug(" version: %u\n", le32_to_cpu(sb->version)); 498935fe098SMike Snitzer pr_debug(" uuid: %08x.%08x.%08x.%08x\n", 499c35403f8SChristoph Hellwig le32_to_cpu(*(__le32 *)(sb->uuid+0)), 500c35403f8SChristoph Hellwig le32_to_cpu(*(__le32 *)(sb->uuid+4)), 501c35403f8SChristoph Hellwig le32_to_cpu(*(__le32 *)(sb->uuid+8)), 502c35403f8SChristoph Hellwig le32_to_cpu(*(__le32 *)(sb->uuid+12))); 503935fe098SMike Snitzer pr_debug(" events: %llu\n", 504935fe098SMike Snitzer (unsigned long long) 
le64_to_cpu(sb->events)); 505935fe098SMike Snitzer pr_debug("events cleared: %llu\n", 506935fe098SMike Snitzer (unsigned long long) le64_to_cpu(sb->events_cleared)); 507935fe098SMike Snitzer pr_debug(" state: %08x\n", le32_to_cpu(sb->state)); 50845552111SFlorian-Ewald Mueller pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize)); 50945552111SFlorian-Ewald Mueller pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep)); 510935fe098SMike Snitzer pr_debug(" sync size: %llu KB\n", 511935fe098SMike Snitzer (unsigned long long)le64_to_cpu(sb->sync_size)/2); 51245552111SFlorian-Ewald Mueller pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind)); 513935fe098SMike Snitzer kunmap_atomic(sb); 514935fe098SMike Snitzer } 515935fe098SMike Snitzer 516935fe098SMike Snitzer /* 517935fe098SMike Snitzer * bitmap_new_disk_sb 518935fe098SMike Snitzer * @bitmap 519935fe098SMike Snitzer * 520935fe098SMike Snitzer * This function is somewhat the reverse of bitmap_read_sb. bitmap_read_sb 521935fe098SMike Snitzer * reads and verifies the on-disk bitmap superblock and populates bitmap_info. 522935fe098SMike Snitzer * This function verifies 'bitmap_info' and populates the on-disk bitmap 523935fe098SMike Snitzer * structure, which is to be written to disk. 524935fe098SMike Snitzer * 525935fe098SMike Snitzer * Returns: 0 on success, -Exxx on error 526935fe098SMike Snitzer */ 527e64e4018SAndy Shevchenko static int md_bitmap_new_disk_sb(struct bitmap *bitmap) 528935fe098SMike Snitzer { 529935fe098SMike Snitzer bitmap_super_t *sb; 530935fe098SMike Snitzer unsigned long chunksize, daemon_sleep, write_behind; 531935fe098SMike Snitzer 532935fe098SMike Snitzer bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 533935fe098SMike Snitzer if (bitmap->storage.sb_page == NULL) 534935fe098SMike Snitzer return -ENOMEM; 535935fe098SMike Snitzer bitmap->storage.sb_page->index = 0; 536935fe098SMike Snitzer 537935fe098SMike Snitzer sb = kmap_atomic(bitmap->storage.sb_page); 538935fe098SMike Snitzer 539935fe098SMike Snitzer sb->magic = cpu_to_le32(BITMAP_MAGIC); 540935fe098SMike Snitzer sb->version = cpu_to_le32(BITMAP_MAJOR_HI); 541935fe098SMike Snitzer 542935fe098SMike Snitzer chunksize = bitmap->mddev->bitmap_info.chunksize; 543935fe098SMike Snitzer BUG_ON(!chunksize); 544935fe098SMike Snitzer if (!is_power_of_2(chunksize)) { 545935fe098SMike Snitzer kunmap_atomic(sb); 546935fe098SMike Snitzer pr_warn("bitmap chunksize not a power of 2\n"); 547935fe098SMike Snitzer return -EINVAL; 548935fe098SMike Snitzer } 549935fe098SMike Snitzer sb->chunksize = cpu_to_le32(chunksize); 550935fe098SMike Snitzer 551935fe098SMike Snitzer daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep; 552935fe098SMike Snitzer if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) { 553935fe098SMike Snitzer pr_debug("Choosing daemon_sleep default (5 sec)\n"); 554935fe098SMike Snitzer daemon_sleep = 5 * HZ; 555935fe098SMike Snitzer } 556935fe098SMike Snitzer sb->daemon_sleep = cpu_to_le32(daemon_sleep); 557935fe098SMike Snitzer bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; 558935fe098SMike Snitzer 559935fe098SMike Snitzer /* 560935fe098SMike Snitzer * FIXME: write_behind for RAID1. If not specified, what 561935fe098SMike Snitzer * is a good choice? We choose COUNTER_MAX / 2 arbitrarily. 
562935fe098SMike Snitzer */ 563935fe098SMike Snitzer write_behind = bitmap->mddev->bitmap_info.max_write_behind; 564935fe098SMike Snitzer if (write_behind > COUNTER_MAX) 565935fe098SMike Snitzer write_behind = COUNTER_MAX / 2; 566935fe098SMike Snitzer sb->write_behind = cpu_to_le32(write_behind); 567935fe098SMike Snitzer bitmap->mddev->bitmap_info.max_write_behind = write_behind; 568935fe098SMike Snitzer 569935fe098SMike Snitzer /* keep the array size field of the bitmap superblock up to date */ 570935fe098SMike Snitzer sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); 571935fe098SMike Snitzer 572935fe098SMike Snitzer memcpy(sb->uuid, bitmap->mddev->uuid, 16); 573935fe098SMike Snitzer 574935fe098SMike Snitzer set_bit(BITMAP_STALE, &bitmap->flags); 575935fe098SMike Snitzer sb->state = cpu_to_le32(bitmap->flags); 576935fe098SMike Snitzer bitmap->events_cleared = bitmap->mddev->events; 577935fe098SMike Snitzer sb->events_cleared = cpu_to_le64(bitmap->mddev->events); 578935fe098SMike Snitzer bitmap->mddev->bitmap_info.nodes = 0; 579935fe098SMike Snitzer 580935fe098SMike Snitzer kunmap_atomic(sb); 581935fe098SMike Snitzer 582935fe098SMike Snitzer return 0; 583935fe098SMike Snitzer } 584935fe098SMike Snitzer 585935fe098SMike Snitzer /* read the superblock from the bitmap file and initialize some bitmap fields */ 586e64e4018SAndy Shevchenko static int md_bitmap_read_sb(struct bitmap *bitmap) 587935fe098SMike Snitzer { 588935fe098SMike Snitzer char *reason = NULL; 589935fe098SMike Snitzer bitmap_super_t *sb; 590935fe098SMike Snitzer unsigned long chunksize, daemon_sleep, write_behind; 591935fe098SMike Snitzer unsigned long long events; 592935fe098SMike Snitzer int nodes = 0; 593935fe098SMike Snitzer unsigned long sectors_reserved = 0; 594935fe098SMike Snitzer int err = -EINVAL; 595935fe098SMike Snitzer struct page *sb_page; 596935fe098SMike Snitzer loff_t offset = bitmap->mddev->bitmap_info.offset; 597935fe098SMike Snitzer 598935fe098SMike Snitzer if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { 599935fe098SMike Snitzer chunksize = 128 * 1024 * 1024; 600935fe098SMike Snitzer daemon_sleep = 5 * HZ; 601935fe098SMike Snitzer write_behind = 0; 602935fe098SMike Snitzer set_bit(BITMAP_STALE, &bitmap->flags); 603935fe098SMike Snitzer err = 0; 604935fe098SMike Snitzer goto out_no_sb; 605935fe098SMike Snitzer } 606935fe098SMike Snitzer /* page 0 is the superblock, read it... 
*/ 607935fe098SMike Snitzer sb_page = alloc_page(GFP_KERNEL); 608935fe098SMike Snitzer if (!sb_page) 609935fe098SMike Snitzer return -ENOMEM; 610935fe098SMike Snitzer bitmap->storage.sb_page = sb_page; 611935fe098SMike Snitzer 612935fe098SMike Snitzer re_read: 613935fe098SMike Snitzer /* If cluster_slot is set, the cluster is setup */ 614935fe098SMike Snitzer if (bitmap->cluster_slot >= 0) { 615935fe098SMike Snitzer sector_t bm_blocks = bitmap->mddev->resync_max_sectors; 616935fe098SMike Snitzer 617a913096dSZhao Heming bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 618a913096dSZhao Heming (bitmap->mddev->bitmap_info.chunksize >> 9)); 619935fe098SMike Snitzer /* bits to bytes */ 620935fe098SMike Snitzer bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); 621935fe098SMike Snitzer /* to 4k blocks */ 622935fe098SMike Snitzer bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); 623935fe098SMike Snitzer offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3)); 624935fe098SMike Snitzer pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, 625935fe098SMike Snitzer bitmap->cluster_slot, offset); 626935fe098SMike Snitzer } 627935fe098SMike Snitzer 628935fe098SMike Snitzer if (bitmap->storage.file) { 629935fe098SMike Snitzer loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host); 630935fe098SMike Snitzer int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize; 631935fe098SMike Snitzer 632d681054cSChristoph Hellwig err = read_file_page(bitmap->storage.file, 0, 633935fe098SMike Snitzer bitmap, bytes, sb_page); 634935fe098SMike Snitzer } else { 635935fe098SMike Snitzer err = read_sb_page(bitmap->mddev, 636935fe098SMike Snitzer offset, 637935fe098SMike Snitzer sb_page, 638935fe098SMike Snitzer 0, sizeof(bitmap_super_t)); 639935fe098SMike Snitzer } 640935fe098SMike Snitzer if (err) 641935fe098SMike Snitzer return err; 642935fe098SMike Snitzer 643935fe098SMike Snitzer err = -EINVAL; 644935fe098SMike Snitzer sb = kmap_atomic(sb_page); 645935fe098SMike Snitzer 646935fe098SMike Snitzer chunksize = le32_to_cpu(sb->chunksize); 647935fe098SMike Snitzer daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; 648935fe098SMike Snitzer write_behind = le32_to_cpu(sb->write_behind); 649935fe098SMike Snitzer sectors_reserved = le32_to_cpu(sb->sectors_reserved); 650935fe098SMike Snitzer 651935fe098SMike Snitzer /* verify that the bitmap-specific fields are valid */ 652935fe098SMike Snitzer if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) 653935fe098SMike Snitzer reason = "bad magic"; 654935fe098SMike Snitzer else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || 655935fe098SMike Snitzer le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED) 656935fe098SMike Snitzer reason = "unrecognized superblock version"; 657935fe098SMike Snitzer else if (chunksize < 512) 658935fe098SMike Snitzer reason = "bitmap chunksize too small"; 659935fe098SMike Snitzer else if (!is_power_of_2(chunksize)) 660935fe098SMike Snitzer reason = "bitmap chunksize not a power of 2"; 661935fe098SMike Snitzer else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT) 662935fe098SMike Snitzer reason = "daemon sleep period out of range"; 663935fe098SMike Snitzer else if (write_behind > COUNTER_MAX) 664935fe098SMike Snitzer reason = "write-behind limit out of range (0 - 16383)"; 665935fe098SMike Snitzer if (reason) { 666935fe098SMike Snitzer pr_warn("%s: invalid bitmap file superblock: %s\n", 667935fe098SMike Snitzer bmname(bitmap), reason); 668935fe098SMike Snitzer goto out; 669935fe098SMike Snitzer } 
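/*
 * Summary of the sanity checks above: the on-disk superblock must carry
 * BITMAP_MAGIC, a version between BITMAP_MAJOR_LO and BITMAP_MAJOR_CLUSTERED,
 * a chunksize that is a power of two and at least 512 bytes, a daemon_sleep
 * that (after conversion to jiffies) lies in 1..MAX_SCHEDULE_TIMEOUT, and a
 * write_behind limit no larger than COUNTER_MAX (16383).  Any violation
 * aborts the load with -EINVAL.
 */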
670935fe098SMike Snitzer 671e68cb83aSHeming Zhao /* 672e68cb83aSHeming Zhao * Setup nodes/clustername only if bitmap version is 673e68cb83aSHeming Zhao * cluster-compatible 674e68cb83aSHeming Zhao */ 675e68cb83aSHeming Zhao if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) { 676e68cb83aSHeming Zhao nodes = le32_to_cpu(sb->nodes); 67792d9aac9SHeming Zhao strscpy(bitmap->mddev->bitmap_info.cluster_name, 678e68cb83aSHeming Zhao sb->cluster_name, 64); 679e68cb83aSHeming Zhao } 680e68cb83aSHeming Zhao 681935fe098SMike Snitzer /* keep the array size field of the bitmap superblock up to date */ 682935fe098SMike Snitzer sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); 683935fe098SMike Snitzer 684935fe098SMike Snitzer if (bitmap->mddev->persistent) { 685935fe098SMike Snitzer /* 686935fe098SMike Snitzer * We have a persistent array superblock, so compare the 687935fe098SMike Snitzer * bitmap's UUID and event counter to the mddev's 688935fe098SMike Snitzer */ 689935fe098SMike Snitzer if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) { 690935fe098SMike Snitzer pr_warn("%s: bitmap superblock UUID mismatch\n", 691935fe098SMike Snitzer bmname(bitmap)); 692935fe098SMike Snitzer goto out; 693935fe098SMike Snitzer } 694935fe098SMike Snitzer events = le64_to_cpu(sb->events); 695935fe098SMike Snitzer if (!nodes && (events < bitmap->mddev->events)) { 696935fe098SMike Snitzer pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n", 697935fe098SMike Snitzer bmname(bitmap), events, 698935fe098SMike Snitzer (unsigned long long) bitmap->mddev->events); 699935fe098SMike Snitzer set_bit(BITMAP_STALE, &bitmap->flags); 700935fe098SMike Snitzer } 701935fe098SMike Snitzer } 702935fe098SMike Snitzer 703935fe098SMike Snitzer /* assign fields using values from superblock */ 704935fe098SMike Snitzer bitmap->flags |= le32_to_cpu(sb->state); 705935fe098SMike Snitzer if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN) 706935fe098SMike Snitzer set_bit(BITMAP_HOSTENDIAN, &bitmap->flags); 707935fe098SMike Snitzer bitmap->events_cleared = le64_to_cpu(sb->events_cleared); 708935fe098SMike Snitzer err = 0; 709935fe098SMike Snitzer 710935fe098SMike Snitzer out: 711935fe098SMike Snitzer kunmap_atomic(sb); 712e68cb83aSHeming Zhao if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { 713935fe098SMike Snitzer /* Assigning chunksize is required for "re_read" */ 714935fe098SMike Snitzer bitmap->mddev->bitmap_info.chunksize = chunksize; 715935fe098SMike Snitzer err = md_setup_cluster(bitmap->mddev, nodes); 716935fe098SMike Snitzer if (err) { 717935fe098SMike Snitzer pr_warn("%s: Could not setup cluster service (%d)\n", 718935fe098SMike Snitzer bmname(bitmap), err); 719935fe098SMike Snitzer goto out_no_sb; 720935fe098SMike Snitzer } 721935fe098SMike Snitzer bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev); 722935fe098SMike Snitzer goto re_read; 723935fe098SMike Snitzer } 724935fe098SMike Snitzer 725935fe098SMike Snitzer out_no_sb: 726e68cb83aSHeming Zhao if (err == 0) { 727935fe098SMike Snitzer if (test_bit(BITMAP_STALE, &bitmap->flags)) 728935fe098SMike Snitzer bitmap->events_cleared = bitmap->mddev->events; 729935fe098SMike Snitzer bitmap->mddev->bitmap_info.chunksize = chunksize; 730935fe098SMike Snitzer bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; 731935fe098SMike Snitzer bitmap->mddev->bitmap_info.max_write_behind = write_behind; 732935fe098SMike Snitzer bitmap->mddev->bitmap_info.nodes = nodes; 733935fe098SMike Snitzer if (bitmap->mddev->bitmap_info.space == 0 
|| 734935fe098SMike Snitzer bitmap->mddev->bitmap_info.space > sectors_reserved) 735935fe098SMike Snitzer bitmap->mddev->bitmap_info.space = sectors_reserved; 736e68cb83aSHeming Zhao } else { 737e64e4018SAndy Shevchenko md_bitmap_print_sb(bitmap); 738935fe098SMike Snitzer if (bitmap->cluster_slot < 0) 739935fe098SMike Snitzer md_cluster_stop(bitmap->mddev); 740935fe098SMike Snitzer } 741935fe098SMike Snitzer return err; 742935fe098SMike Snitzer } 743935fe098SMike Snitzer 744935fe098SMike Snitzer /* 745935fe098SMike Snitzer * general bitmap file operations 746935fe098SMike Snitzer */ 747935fe098SMike Snitzer 748935fe098SMike Snitzer /* 749935fe098SMike Snitzer * on-disk bitmap: 750935fe098SMike Snitzer * 751935fe098SMike Snitzer * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap 752935fe098SMike Snitzer * file a page at a time. There's a superblock at the start of the file. 753935fe098SMike Snitzer */ 754935fe098SMike Snitzer /* calculate the index of the page that contains this bit */ 755935fe098SMike Snitzer static inline unsigned long file_page_index(struct bitmap_storage *store, 756935fe098SMike Snitzer unsigned long chunk) 757935fe098SMike Snitzer { 758935fe098SMike Snitzer if (store->sb_page) 759935fe098SMike Snitzer chunk += sizeof(bitmap_super_t) << 3; 760935fe098SMike Snitzer return chunk >> PAGE_BIT_SHIFT; 761935fe098SMike Snitzer } 762935fe098SMike Snitzer 763935fe098SMike Snitzer /* calculate the (bit) offset of this bit within a page */ 764935fe098SMike Snitzer static inline unsigned long file_page_offset(struct bitmap_storage *store, 765935fe098SMike Snitzer unsigned long chunk) 766935fe098SMike Snitzer { 767935fe098SMike Snitzer if (store->sb_page) 768935fe098SMike Snitzer chunk += sizeof(bitmap_super_t) << 3; 769935fe098SMike Snitzer return chunk & (PAGE_BITS - 1); 770935fe098SMike Snitzer } 771935fe098SMike Snitzer 772935fe098SMike Snitzer /* 773935fe098SMike Snitzer * return a pointer to the page in the filemap that contains the given bit 774935fe098SMike Snitzer * 775935fe098SMike Snitzer */ 776935fe098SMike Snitzer static inline struct page *filemap_get_page(struct bitmap_storage *store, 777935fe098SMike Snitzer unsigned long chunk) 778935fe098SMike Snitzer { 779935fe098SMike Snitzer if (file_page_index(store, chunk) >= store->file_pages) 780935fe098SMike Snitzer return NULL; 781935fe098SMike Snitzer return store->filemap[file_page_index(store, chunk)]; 782935fe098SMike Snitzer } 783935fe098SMike Snitzer 784e64e4018SAndy Shevchenko static int md_bitmap_storage_alloc(struct bitmap_storage *store, 785935fe098SMike Snitzer unsigned long chunks, int with_super, 786935fe098SMike Snitzer int slot_number) 787935fe098SMike Snitzer { 788935fe098SMike Snitzer int pnum, offset = 0; 789935fe098SMike Snitzer unsigned long num_pages; 790935fe098SMike Snitzer unsigned long bytes; 791935fe098SMike Snitzer 792935fe098SMike Snitzer bytes = DIV_ROUND_UP(chunks, 8); 793935fe098SMike Snitzer if (with_super) 794935fe098SMike Snitzer bytes += sizeof(bitmap_super_t); 795935fe098SMike Snitzer 796935fe098SMike Snitzer num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE); 797935fe098SMike Snitzer offset = slot_number * num_pages; 798935fe098SMike Snitzer 7996da2ec56SKees Cook store->filemap = kmalloc_array(num_pages, sizeof(struct page *), 8006da2ec56SKees Cook GFP_KERNEL); 801935fe098SMike Snitzer if (!store->filemap) 802935fe098SMike Snitzer return -ENOMEM; 803935fe098SMike Snitzer 804935fe098SMike Snitzer if (with_super && !store->sb_page) { 805935fe098SMike Snitzer store->sb_page = 
alloc_page(GFP_KERNEL|__GFP_ZERO); 806935fe098SMike Snitzer if (store->sb_page == NULL) 807935fe098SMike Snitzer return -ENOMEM; 808935fe098SMike Snitzer } 809935fe098SMike Snitzer 810935fe098SMike Snitzer pnum = 0; 811935fe098SMike Snitzer if (store->sb_page) { 812935fe098SMike Snitzer store->filemap[0] = store->sb_page; 813935fe098SMike Snitzer pnum = 1; 814935fe098SMike Snitzer store->sb_page->index = offset; 815935fe098SMike Snitzer } 816935fe098SMike Snitzer 817935fe098SMike Snitzer for ( ; pnum < num_pages; pnum++) { 818935fe098SMike Snitzer store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO); 819935fe098SMike Snitzer if (!store->filemap[pnum]) { 820935fe098SMike Snitzer store->file_pages = pnum; 821935fe098SMike Snitzer return -ENOMEM; 822935fe098SMike Snitzer } 823935fe098SMike Snitzer store->filemap[pnum]->index = pnum + offset; 824935fe098SMike Snitzer } 825935fe098SMike Snitzer store->file_pages = pnum; 826935fe098SMike Snitzer 827935fe098SMike Snitzer /* We need 4 bits per page, rounded up to a multiple 828935fe098SMike Snitzer * of sizeof(unsigned long) */ 829935fe098SMike Snitzer store->filemap_attr = kzalloc( 830935fe098SMike Snitzer roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), 831935fe098SMike Snitzer GFP_KERNEL); 832935fe098SMike Snitzer if (!store->filemap_attr) 833935fe098SMike Snitzer return -ENOMEM; 834935fe098SMike Snitzer 835935fe098SMike Snitzer store->bytes = bytes; 836935fe098SMike Snitzer 837935fe098SMike Snitzer return 0; 838935fe098SMike Snitzer } 839935fe098SMike Snitzer 840e64e4018SAndy Shevchenko static void md_bitmap_file_unmap(struct bitmap_storage *store) 841935fe098SMike Snitzer { 842546ac0b2SChristoph Hellwig struct file *file = store->file; 843546ac0b2SChristoph Hellwig struct page *sb_page = store->sb_page; 844546ac0b2SChristoph Hellwig struct page **map = store->filemap; 845546ac0b2SChristoph Hellwig int pages = store->file_pages; 846935fe098SMike Snitzer 847935fe098SMike Snitzer while (pages--) 848935fe098SMike Snitzer if (map[pages] != sb_page) /* 0 is sb_page, release it below */ 849935fe098SMike Snitzer free_buffers(map[pages]); 850935fe098SMike Snitzer kfree(map); 851935fe098SMike Snitzer kfree(store->filemap_attr); 852935fe098SMike Snitzer 853935fe098SMike Snitzer if (sb_page) 854935fe098SMike Snitzer free_buffers(sb_page); 855935fe098SMike Snitzer 856935fe098SMike Snitzer if (file) { 857935fe098SMike Snitzer struct inode *inode = file_inode(file); 858935fe098SMike Snitzer invalidate_mapping_pages(inode->i_mapping, 0, -1); 859935fe098SMike Snitzer fput(file); 860935fe098SMike Snitzer } 861935fe098SMike Snitzer } 862935fe098SMike Snitzer 863935fe098SMike Snitzer /* 864935fe098SMike Snitzer * bitmap_file_kick - if an error occurs while manipulating the bitmap file 865935fe098SMike Snitzer * then it is no longer reliable, so we stop using it and we mark the file 866935fe098SMike Snitzer * as failed in the superblock 867935fe098SMike Snitzer */ 868e64e4018SAndy Shevchenko static void md_bitmap_file_kick(struct bitmap *bitmap) 869935fe098SMike Snitzer { 870935fe098SMike Snitzer if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) { 871e64e4018SAndy Shevchenko md_bitmap_update_sb(bitmap); 872935fe098SMike Snitzer 873935fe098SMike Snitzer if (bitmap->storage.file) { 87492348518SChristoph Hellwig pr_warn("%s: kicking failed bitmap file %pD4 from array!\n", 87592348518SChristoph Hellwig bmname(bitmap), bitmap->storage.file); 876935fe098SMike Snitzer 877935fe098SMike Snitzer } else 878935fe098SMike Snitzer pr_warn("%s: disabling 
internal bitmap due to errors\n", 879935fe098SMike Snitzer bmname(bitmap)); 880935fe098SMike Snitzer } 881935fe098SMike Snitzer } 882935fe098SMike Snitzer 883935fe098SMike Snitzer enum bitmap_page_attr { 884935fe098SMike Snitzer BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */ 885935fe098SMike Snitzer BITMAP_PAGE_PENDING = 1, /* there are bits that are being cleaned. 886935fe098SMike Snitzer * i.e. counter is 1 or 2. */ 887935fe098SMike Snitzer BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */ 888935fe098SMike Snitzer }; 889935fe098SMike Snitzer 890935fe098SMike Snitzer static inline void set_page_attr(struct bitmap *bitmap, int pnum, 891935fe098SMike Snitzer enum bitmap_page_attr attr) 892935fe098SMike Snitzer { 893935fe098SMike Snitzer set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); 894935fe098SMike Snitzer } 895935fe098SMike Snitzer 896935fe098SMike Snitzer static inline void clear_page_attr(struct bitmap *bitmap, int pnum, 897935fe098SMike Snitzer enum bitmap_page_attr attr) 898935fe098SMike Snitzer { 899935fe098SMike Snitzer clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); 900935fe098SMike Snitzer } 901935fe098SMike Snitzer 902935fe098SMike Snitzer static inline int test_page_attr(struct bitmap *bitmap, int pnum, 903935fe098SMike Snitzer enum bitmap_page_attr attr) 904935fe098SMike Snitzer { 905935fe098SMike Snitzer return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); 906935fe098SMike Snitzer } 907935fe098SMike Snitzer 908935fe098SMike Snitzer static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum, 909935fe098SMike Snitzer enum bitmap_page_attr attr) 910935fe098SMike Snitzer { 911935fe098SMike Snitzer return test_and_clear_bit((pnum<<2) + attr, 912935fe098SMike Snitzer bitmap->storage.filemap_attr); 913935fe098SMike Snitzer } 914935fe098SMike Snitzer /* 915935fe098SMike Snitzer * bitmap_file_set_bit -- called before performing a write to the md device 916935fe098SMike Snitzer * to set (and eventually sync) a particular bit in the bitmap file 917935fe098SMike Snitzer * 918935fe098SMike Snitzer * we set the bit immediately, then we record the page number so that 919935fe098SMike Snitzer * when an unplug occurs, we can flush the dirty pages out to disk 920935fe098SMike Snitzer */ 921e64e4018SAndy Shevchenko static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) 922935fe098SMike Snitzer { 923935fe098SMike Snitzer unsigned long bit; 924935fe098SMike Snitzer struct page *page; 925935fe098SMike Snitzer void *kaddr; 926935fe098SMike Snitzer unsigned long chunk = block >> bitmap->counts.chunkshift; 927935fe098SMike Snitzer struct bitmap_storage *store = &bitmap->storage; 928935fe098SMike Snitzer unsigned long node_offset = 0; 929935fe098SMike Snitzer 930935fe098SMike Snitzer if (mddev_is_clustered(bitmap->mddev)) 931935fe098SMike Snitzer node_offset = bitmap->cluster_slot * store->file_pages; 932935fe098SMike Snitzer 933935fe098SMike Snitzer page = filemap_get_page(&bitmap->storage, chunk); 934935fe098SMike Snitzer if (!page) 935935fe098SMike Snitzer return; 936935fe098SMike Snitzer bit = file_page_offset(&bitmap->storage, chunk); 937935fe098SMike Snitzer 938935fe098SMike Snitzer /* set the bit */ 939935fe098SMike Snitzer kaddr = kmap_atomic(page); 940935fe098SMike Snitzer if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) 941935fe098SMike Snitzer set_bit(bit, kaddr); 942935fe098SMike Snitzer else 943935fe098SMike Snitzer set_bit_le(bit, kaddr); 944935fe098SMike Snitzer 
kunmap_atomic(kaddr); 945935fe098SMike Snitzer pr_debug("set file bit %lu page %lu\n", bit, page->index); 946935fe098SMike Snitzer /* record page number so it gets flushed to disk when unplug occurs */ 947935fe098SMike Snitzer set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY); 948935fe098SMike Snitzer } 949935fe098SMike Snitzer 950e64e4018SAndy Shevchenko static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block) 951935fe098SMike Snitzer { 952935fe098SMike Snitzer unsigned long bit; 953935fe098SMike Snitzer struct page *page; 954935fe098SMike Snitzer void *paddr; 955935fe098SMike Snitzer unsigned long chunk = block >> bitmap->counts.chunkshift; 956935fe098SMike Snitzer struct bitmap_storage *store = &bitmap->storage; 957935fe098SMike Snitzer unsigned long node_offset = 0; 958935fe098SMike Snitzer 959935fe098SMike Snitzer if (mddev_is_clustered(bitmap->mddev)) 960935fe098SMike Snitzer node_offset = bitmap->cluster_slot * store->file_pages; 961935fe098SMike Snitzer 962935fe098SMike Snitzer page = filemap_get_page(&bitmap->storage, chunk); 963935fe098SMike Snitzer if (!page) 964935fe098SMike Snitzer return; 965935fe098SMike Snitzer bit = file_page_offset(&bitmap->storage, chunk); 966935fe098SMike Snitzer paddr = kmap_atomic(page); 967935fe098SMike Snitzer if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) 968935fe098SMike Snitzer clear_bit(bit, paddr); 969935fe098SMike Snitzer else 970935fe098SMike Snitzer clear_bit_le(bit, paddr); 971935fe098SMike Snitzer kunmap_atomic(paddr); 972935fe098SMike Snitzer if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) { 973935fe098SMike Snitzer set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING); 974935fe098SMike Snitzer bitmap->allclean = 0; 975935fe098SMike Snitzer } 976935fe098SMike Snitzer } 977935fe098SMike Snitzer 978e64e4018SAndy Shevchenko static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block) 979935fe098SMike Snitzer { 980935fe098SMike Snitzer unsigned long bit; 981935fe098SMike Snitzer struct page *page; 982935fe098SMike Snitzer void *paddr; 983935fe098SMike Snitzer unsigned long chunk = block >> bitmap->counts.chunkshift; 984935fe098SMike Snitzer int set = 0; 985935fe098SMike Snitzer 986935fe098SMike Snitzer page = filemap_get_page(&bitmap->storage, chunk); 987935fe098SMike Snitzer if (!page) 988935fe098SMike Snitzer return -EINVAL; 989935fe098SMike Snitzer bit = file_page_offset(&bitmap->storage, chunk); 990935fe098SMike Snitzer paddr = kmap_atomic(page); 991935fe098SMike Snitzer if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) 992935fe098SMike Snitzer set = test_bit(bit, paddr); 993935fe098SMike Snitzer else 994935fe098SMike Snitzer set = test_bit_le(bit, paddr); 995935fe098SMike Snitzer kunmap_atomic(paddr); 996935fe098SMike Snitzer return set; 997935fe098SMike Snitzer } 998935fe098SMike Snitzer 999935fe098SMike Snitzer /* this gets called when the md device is ready to unplug its underlying 1000935fe098SMike Snitzer * (slave) device queues -- before we let any writes go down, we need to 1001935fe098SMike Snitzer * sync the dirty pages of the bitmap file to disk */ 1002e64e4018SAndy Shevchenko void md_bitmap_unplug(struct bitmap *bitmap) 1003935fe098SMike Snitzer { 1004935fe098SMike Snitzer unsigned long i; 1005935fe098SMike Snitzer int dirty, need_write; 1006935fe098SMike Snitzer int writing = 0; 1007935fe098SMike Snitzer 10087db922baSYu Kuai if (!md_bitmap_enabled(bitmap)) 1009935fe098SMike Snitzer return; 1010935fe098SMike Snitzer 
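/*
 * Flush sequence: pages flagged BITMAP_PAGE_DIRTY or BITMAP_PAGE_NEEDWRITE
 * are submitted asynchronously below.  Before the first submission,
 * md_bitmap_wait_writes() drains any writes still in flight from
 * md_bitmap_daemon_work(); after the loop we wait once more so that no data
 * write proceeds until its bitmap bits are safely on disk.  A write error
 * marks the bitmap stale via md_bitmap_file_kick().
 */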
1011935fe098SMike Snitzer /* look at each page to see if there are any set bits that need to be 1012935fe098SMike Snitzer * flushed out to disk */ 1013935fe098SMike Snitzer for (i = 0; i < bitmap->storage.file_pages; i++) { 1014935fe098SMike Snitzer dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY); 1015935fe098SMike Snitzer need_write = test_and_clear_page_attr(bitmap, i, 1016935fe098SMike Snitzer BITMAP_PAGE_NEEDWRITE); 1017935fe098SMike Snitzer if (dirty || need_write) { 1018935fe098SMike Snitzer if (!writing) { 1019e64e4018SAndy Shevchenko md_bitmap_wait_writes(bitmap); 1020935fe098SMike Snitzer if (bitmap->mddev->queue) 1021935fe098SMike Snitzer blk_add_trace_msg(bitmap->mddev->queue, 1022935fe098SMike Snitzer "md bitmap_unplug"); 1023935fe098SMike Snitzer } 1024935fe098SMike Snitzer clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING); 1025935fe098SMike Snitzer write_page(bitmap, bitmap->storage.filemap[i], 0); 1026935fe098SMike Snitzer writing = 1; 1027935fe098SMike Snitzer } 1028935fe098SMike Snitzer } 1029935fe098SMike Snitzer if (writing) 1030e64e4018SAndy Shevchenko md_bitmap_wait_writes(bitmap); 1031935fe098SMike Snitzer 1032935fe098SMike Snitzer if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) 1033e64e4018SAndy Shevchenko md_bitmap_file_kick(bitmap); 1034935fe098SMike Snitzer } 1035e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_unplug); 1036935fe098SMike Snitzer 1037a022325aSYu Kuai struct bitmap_unplug_work { 1038a022325aSYu Kuai struct work_struct work; 1039a022325aSYu Kuai struct bitmap *bitmap; 1040a022325aSYu Kuai struct completion *done; 1041a022325aSYu Kuai }; 1042a022325aSYu Kuai 1043a022325aSYu Kuai static void md_bitmap_unplug_fn(struct work_struct *work) 1044a022325aSYu Kuai { 1045a022325aSYu Kuai struct bitmap_unplug_work *unplug_work = 1046a022325aSYu Kuai container_of(work, struct bitmap_unplug_work, work); 1047a022325aSYu Kuai 1048a022325aSYu Kuai md_bitmap_unplug(unplug_work->bitmap); 1049a022325aSYu Kuai complete(unplug_work->done); 1050a022325aSYu Kuai } 1051a022325aSYu Kuai 1052a022325aSYu Kuai void md_bitmap_unplug_async(struct bitmap *bitmap) 1053a022325aSYu Kuai { 1054a022325aSYu Kuai DECLARE_COMPLETION_ONSTACK(done); 1055a022325aSYu Kuai struct bitmap_unplug_work unplug_work; 1056a022325aSYu Kuai 1057a022325aSYu Kuai INIT_WORK_ONSTACK(&unplug_work.work, md_bitmap_unplug_fn); 1058a022325aSYu Kuai unplug_work.bitmap = bitmap; 1059a022325aSYu Kuai unplug_work.done = &done; 1060a022325aSYu Kuai 1061a022325aSYu Kuai queue_work(md_bitmap_wq, &unplug_work.work); 1062a022325aSYu Kuai wait_for_completion(&done); 1063a022325aSYu Kuai } 1064a022325aSYu Kuai EXPORT_SYMBOL(md_bitmap_unplug_async); 1065a022325aSYu Kuai 1066e64e4018SAndy Shevchenko static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); 1067844dc669SChristoph Hellwig 1068844dc669SChristoph Hellwig /* 1069844dc669SChristoph Hellwig * Initialize the in-memory bitmap from the on-disk bitmap and set up the memory 1070844dc669SChristoph Hellwig * mapping of the bitmap file. 1071844dc669SChristoph Hellwig * 1072844dc669SChristoph Hellwig * Special case: If there's no bitmap file, or if the bitmap file had been 1073844dc669SChristoph Hellwig * previously kicked from the array, we mark all the bits as 1's in order to 1074844dc669SChristoph Hellwig * cause a full resync. 1075935fe098SMike Snitzer * 1076935fe098SMike Snitzer * We ignore all bits for sectors that end earlier than 'start'. 
1077844dc669SChristoph Hellwig * This is used when reading an out-of-date bitmap. 1078935fe098SMike Snitzer */ 1079e64e4018SAndy Shevchenko static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) 1080935fe098SMike Snitzer { 1081844dc669SChristoph Hellwig bool outofdate = test_bit(BITMAP_STALE, &bitmap->flags); 1082844dc669SChristoph Hellwig struct mddev *mddev = bitmap->mddev; 1083844dc669SChristoph Hellwig unsigned long chunks = bitmap->counts.chunks; 1084935fe098SMike Snitzer struct bitmap_storage *store = &bitmap->storage; 1085844dc669SChristoph Hellwig struct file *file = store->file; 1086844dc669SChristoph Hellwig unsigned long node_offset = 0; 1087844dc669SChristoph Hellwig unsigned long bit_cnt = 0; 1088844dc669SChristoph Hellwig unsigned long i; 1089844dc669SChristoph Hellwig int ret; 1090935fe098SMike Snitzer 1091844dc669SChristoph Hellwig if (!file && !mddev->bitmap_info.offset) { 1092935fe098SMike Snitzer /* No permanent bitmap - fill with '1s'. */ 1093935fe098SMike Snitzer store->filemap = NULL; 1094935fe098SMike Snitzer store->file_pages = 0; 1095935fe098SMike Snitzer for (i = 0; i < chunks ; i++) { 1096935fe098SMike Snitzer /* if the disk bit is set, set the memory bit */ 1097935fe098SMike Snitzer int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift) 1098935fe098SMike Snitzer >= start); 1099e64e4018SAndy Shevchenko md_bitmap_set_memory_bits(bitmap, 1100935fe098SMike Snitzer (sector_t)i << bitmap->counts.chunkshift, 1101935fe098SMike Snitzer needed); 1102935fe098SMike Snitzer } 1103935fe098SMike Snitzer return 0; 1104935fe098SMike Snitzer } 1105935fe098SMike Snitzer 1106935fe098SMike Snitzer if (file && i_size_read(file->f_mapping->host) < store->bytes) { 1107935fe098SMike Snitzer pr_warn("%s: bitmap file too short %lu < %lu\n", 1108935fe098SMike Snitzer bmname(bitmap), 1109935fe098SMike Snitzer (unsigned long) i_size_read(file->f_mapping->host), 1110935fe098SMike Snitzer store->bytes); 1111844dc669SChristoph Hellwig ret = -ENOSPC; 1112935fe098SMike Snitzer goto err; 1113935fe098SMike Snitzer } 1114935fe098SMike Snitzer 1115844dc669SChristoph Hellwig if (mddev_is_clustered(mddev)) 1116935fe098SMike Snitzer node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE)); 1117935fe098SMike Snitzer 1118844dc669SChristoph Hellwig for (i = 0; i < store->file_pages; i++) { 1119844dc669SChristoph Hellwig struct page *page = store->filemap[i]; 1120935fe098SMike Snitzer int count; 1121844dc669SChristoph Hellwig 1122935fe098SMike Snitzer /* unmap the old page, we're done with it */ 1123844dc669SChristoph Hellwig if (i == store->file_pages - 1) 1124844dc669SChristoph Hellwig count = store->bytes - i * PAGE_SIZE; 1125935fe098SMike Snitzer else 1126935fe098SMike Snitzer count = PAGE_SIZE; 1127935fe098SMike Snitzer 1128844dc669SChristoph Hellwig if (file) 1129844dc669SChristoph Hellwig ret = read_file_page(file, i, bitmap, count, page); 1130844dc669SChristoph Hellwig else 1131844dc669SChristoph Hellwig ret = read_sb_page(mddev, mddev->bitmap_info.offset, 1132844dc669SChristoph Hellwig page, i + node_offset, count); 1133935fe098SMike Snitzer if (ret) 1134935fe098SMike Snitzer goto err; 1135844dc669SChristoph Hellwig } 1136935fe098SMike Snitzer 1137935fe098SMike Snitzer if (outofdate) { 1138844dc669SChristoph Hellwig pr_warn("%s: bitmap file is out of date, doing full recovery\n", 1139844dc669SChristoph Hellwig bmname(bitmap)); 1140844dc669SChristoph Hellwig 1141844dc669SChristoph Hellwig for (i = 0; i < store->file_pages; i++) { 1142844dc669SChristoph 
Hellwig struct page *page = store->filemap[i]; 1143844dc669SChristoph Hellwig unsigned long offset = 0; 1144844dc669SChristoph Hellwig void *paddr; 1145844dc669SChristoph Hellwig 1146844dc669SChristoph Hellwig if (i == 0 && !mddev->bitmap_info.external) 1147844dc669SChristoph Hellwig offset = sizeof(bitmap_super_t); 1148844dc669SChristoph Hellwig 1149935fe098SMike Snitzer /* 1150844dc669SChristoph Hellwig * If the bitmap is out of date, dirty the whole page 1151844dc669SChristoph Hellwig * and write it out 1152935fe098SMike Snitzer */ 1153935fe098SMike Snitzer paddr = kmap_atomic(page); 1154844dc669SChristoph Hellwig memset(paddr + offset, 0xff, PAGE_SIZE - offset); 1155935fe098SMike Snitzer kunmap_atomic(paddr); 1156935fe098SMike Snitzer 1157844dc669SChristoph Hellwig write_page(bitmap, page, 1); 1158844dc669SChristoph Hellwig if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) { 1159935fe098SMike Snitzer ret = -EIO; 1160935fe098SMike Snitzer goto err; 1161935fe098SMike Snitzer } 1162935fe098SMike Snitzer } 1163844dc669SChristoph Hellwig } 1164844dc669SChristoph Hellwig 1165844dc669SChristoph Hellwig for (i = 0; i < chunks; i++) { 1166844dc669SChristoph Hellwig struct page *page = filemap_get_page(&bitmap->storage, i); 1167844dc669SChristoph Hellwig unsigned long bit = file_page_offset(&bitmap->storage, i); 1168844dc669SChristoph Hellwig void *paddr; 1169844dc669SChristoph Hellwig bool was_set; 1170844dc669SChristoph Hellwig 1171935fe098SMike Snitzer paddr = kmap_atomic(page); 1172935fe098SMike Snitzer if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) 1173844dc669SChristoph Hellwig was_set = test_bit(bit, paddr); 1174935fe098SMike Snitzer else 1175844dc669SChristoph Hellwig was_set = test_bit_le(bit, paddr); 1176935fe098SMike Snitzer kunmap_atomic(paddr); 1177844dc669SChristoph Hellwig 1178844dc669SChristoph Hellwig if (was_set) { 1179935fe098SMike Snitzer /* if the disk bit is set, set the memory bit */ 1180935fe098SMike Snitzer int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift 1181935fe098SMike Snitzer >= start); 1182e64e4018SAndy Shevchenko md_bitmap_set_memory_bits(bitmap, 1183935fe098SMike Snitzer (sector_t)i << bitmap->counts.chunkshift, 1184935fe098SMike Snitzer needed); 1185935fe098SMike Snitzer bit_cnt++; 1186935fe098SMike Snitzer } 1187935fe098SMike Snitzer } 1188935fe098SMike Snitzer 1189935fe098SMike Snitzer pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n", 1190935fe098SMike Snitzer bmname(bitmap), store->file_pages, 1191935fe098SMike Snitzer bit_cnt, chunks); 1192935fe098SMike Snitzer 1193935fe098SMike Snitzer return 0; 1194935fe098SMike Snitzer 1195935fe098SMike Snitzer err: 1196935fe098SMike Snitzer pr_warn("%s: bitmap initialisation failed: %d\n", 1197935fe098SMike Snitzer bmname(bitmap), ret); 1198935fe098SMike Snitzer return ret; 1199935fe098SMike Snitzer } 1200935fe098SMike Snitzer 1201e64e4018SAndy Shevchenko void md_bitmap_write_all(struct bitmap *bitmap) 1202935fe098SMike Snitzer { 1203935fe098SMike Snitzer /* We don't actually write all bitmap blocks here, 1204935fe098SMike Snitzer * just flag them as needing to be written 1205935fe098SMike Snitzer */ 1206935fe098SMike Snitzer int i; 1207935fe098SMike Snitzer 1208935fe098SMike Snitzer if (!bitmap || !bitmap->storage.filemap) 1209935fe098SMike Snitzer return; 1210935fe098SMike Snitzer if (bitmap->storage.file) 1211935fe098SMike Snitzer /* Only one copy, so nothing needed */ 1212935fe098SMike Snitzer return; 1213935fe098SMike Snitzer 1214935fe098SMike Snitzer for (i = 0; i < 
bitmap->storage.file_pages; i++) 1215935fe098SMike Snitzer set_page_attr(bitmap, i, 1216935fe098SMike Snitzer BITMAP_PAGE_NEEDWRITE); 1217935fe098SMike Snitzer bitmap->allclean = 0; 1218935fe098SMike Snitzer } 1219935fe098SMike Snitzer 1220e64e4018SAndy Shevchenko static void md_bitmap_count_page(struct bitmap_counts *bitmap, 1221935fe098SMike Snitzer sector_t offset, int inc) 1222935fe098SMike Snitzer { 1223935fe098SMike Snitzer sector_t chunk = offset >> bitmap->chunkshift; 1224935fe098SMike Snitzer unsigned long page = chunk >> PAGE_COUNTER_SHIFT; 1225935fe098SMike Snitzer bitmap->bp[page].count += inc; 1226e64e4018SAndy Shevchenko md_bitmap_checkfree(bitmap, page); 1227935fe098SMike Snitzer } 1228935fe098SMike Snitzer 1229e64e4018SAndy Shevchenko static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset) 1230935fe098SMike Snitzer { 1231935fe098SMike Snitzer sector_t chunk = offset >> bitmap->chunkshift; 1232935fe098SMike Snitzer unsigned long page = chunk >> PAGE_COUNTER_SHIFT; 1233935fe098SMike Snitzer struct bitmap_page *bp = &bitmap->bp[page]; 1234935fe098SMike Snitzer 1235935fe098SMike Snitzer if (!bp->pending) 1236935fe098SMike Snitzer bp->pending = 1; 1237935fe098SMike Snitzer } 1238935fe098SMike Snitzer 1239e64e4018SAndy Shevchenko static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap, 1240935fe098SMike Snitzer sector_t offset, sector_t *blocks, 1241935fe098SMike Snitzer int create); 1242935fe098SMike Snitzer 12434eeb6535SYu Kuai static void mddev_set_timeout(struct mddev *mddev, unsigned long timeout, 12444eeb6535SYu Kuai bool force) 12454eeb6535SYu Kuai { 124644693154SYu Kuai struct md_thread *thread; 124744693154SYu Kuai 124844693154SYu Kuai rcu_read_lock(); 124944693154SYu Kuai thread = rcu_dereference(mddev->thread); 12504eeb6535SYu Kuai 12514eeb6535SYu Kuai if (!thread) 125244693154SYu Kuai goto out; 12534eeb6535SYu Kuai 12544eeb6535SYu Kuai if (force || thread->timeout < MAX_SCHEDULE_TIMEOUT) 12554eeb6535SYu Kuai thread->timeout = timeout; 125644693154SYu Kuai 125744693154SYu Kuai out: 125844693154SYu Kuai rcu_read_unlock(); 12594eeb6535SYu Kuai } 12604eeb6535SYu Kuai 1261935fe098SMike Snitzer /* 1262935fe098SMike Snitzer * bitmap daemon -- periodically wakes up to clean bits and flush pages 1263935fe098SMike Snitzer * out to disk 1264935fe098SMike Snitzer */ 1265e64e4018SAndy Shevchenko void md_bitmap_daemon_work(struct mddev *mddev) 1266935fe098SMike Snitzer { 1267935fe098SMike Snitzer struct bitmap *bitmap; 1268935fe098SMike Snitzer unsigned long j; 1269935fe098SMike Snitzer unsigned long nextpage; 1270935fe098SMike Snitzer sector_t blocks; 1271935fe098SMike Snitzer struct bitmap_counts *counts; 1272935fe098SMike Snitzer 1273935fe098SMike Snitzer /* Use a mutex to guard daemon_work against 1274935fe098SMike Snitzer * bitmap_destroy. 
1275935fe098SMike Snitzer */ 1276935fe098SMike Snitzer mutex_lock(&mddev->bitmap_info.mutex); 1277935fe098SMike Snitzer bitmap = mddev->bitmap; 1278935fe098SMike Snitzer if (bitmap == NULL) { 1279935fe098SMike Snitzer mutex_unlock(&mddev->bitmap_info.mutex); 1280935fe098SMike Snitzer return; 1281935fe098SMike Snitzer } 1282935fe098SMike Snitzer if (time_before(jiffies, bitmap->daemon_lastrun 1283935fe098SMike Snitzer + mddev->bitmap_info.daemon_sleep)) 1284935fe098SMike Snitzer goto done; 1285935fe098SMike Snitzer 1286935fe098SMike Snitzer bitmap->daemon_lastrun = jiffies; 1287935fe098SMike Snitzer if (bitmap->allclean) { 12884eeb6535SYu Kuai mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true); 1289935fe098SMike Snitzer goto done; 1290935fe098SMike Snitzer } 1291935fe098SMike Snitzer bitmap->allclean = 1; 1292935fe098SMike Snitzer 1293935fe098SMike Snitzer if (bitmap->mddev->queue) 1294935fe098SMike Snitzer blk_add_trace_msg(bitmap->mddev->queue, 1295935fe098SMike Snitzer "md bitmap_daemon_work"); 1296935fe098SMike Snitzer 1297935fe098SMike Snitzer /* Any file-page which is PENDING now needs to be written. 1298935fe098SMike Snitzer * So set NEEDWRITE now, then after we make any last-minute changes 1299935fe098SMike Snitzer * we will write it. 1300935fe098SMike Snitzer */ 1301935fe098SMike Snitzer for (j = 0; j < bitmap->storage.file_pages; j++) 1302935fe098SMike Snitzer if (test_and_clear_page_attr(bitmap, j, 1303935fe098SMike Snitzer BITMAP_PAGE_PENDING)) 1304935fe098SMike Snitzer set_page_attr(bitmap, j, 1305935fe098SMike Snitzer BITMAP_PAGE_NEEDWRITE); 1306935fe098SMike Snitzer 1307935fe098SMike Snitzer if (bitmap->need_sync && 1308935fe098SMike Snitzer mddev->bitmap_info.external == 0) { 1309935fe098SMike Snitzer /* Arrange for superblock update as well as 1310935fe098SMike Snitzer * other changes */ 1311935fe098SMike Snitzer bitmap_super_t *sb; 1312935fe098SMike Snitzer bitmap->need_sync = 0; 1313935fe098SMike Snitzer if (bitmap->storage.filemap) { 1314935fe098SMike Snitzer sb = kmap_atomic(bitmap->storage.sb_page); 1315935fe098SMike Snitzer sb->events_cleared = 1316935fe098SMike Snitzer cpu_to_le64(bitmap->events_cleared); 1317935fe098SMike Snitzer kunmap_atomic(sb); 1318935fe098SMike Snitzer set_page_attr(bitmap, 0, 1319935fe098SMike Snitzer BITMAP_PAGE_NEEDWRITE); 1320935fe098SMike Snitzer } 1321935fe098SMike Snitzer } 1322935fe098SMike Snitzer /* Now look at the bitmap counters and if any are '2' or '1', 1323935fe098SMike Snitzer * decrement and handle accordingly. 
1324935fe098SMike Snitzer 	 */
1325935fe098SMike Snitzer 	counts = &bitmap->counts;
1326935fe098SMike Snitzer 	spin_lock_irq(&counts->lock);
1327935fe098SMike Snitzer 	nextpage = 0;
1328935fe098SMike Snitzer 	for (j = 0; j < counts->chunks; j++) {
1329935fe098SMike Snitzer 		bitmap_counter_t *bmc;
1330935fe098SMike Snitzer 		sector_t block = (sector_t)j << counts->chunkshift;
1331935fe098SMike Snitzer 
1332935fe098SMike Snitzer 		if (j == nextpage) {
1333935fe098SMike Snitzer 			nextpage += PAGE_COUNTER_RATIO;
1334935fe098SMike Snitzer 			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
1335935fe098SMike Snitzer 				j |= PAGE_COUNTER_MASK;
1336935fe098SMike Snitzer 				continue;
1337935fe098SMike Snitzer 			}
1338935fe098SMike Snitzer 			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
1339935fe098SMike Snitzer 		}
1340935fe098SMike Snitzer 
1341e64e4018SAndy Shevchenko 		bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
1342935fe098SMike Snitzer 		if (!bmc) {
1343935fe098SMike Snitzer 			j |= PAGE_COUNTER_MASK;
1344935fe098SMike Snitzer 			continue;
1345935fe098SMike Snitzer 		}
1346935fe098SMike Snitzer 		if (*bmc == 1 && !bitmap->need_sync) {
1347935fe098SMike Snitzer 			/* We can clear the bit */
1348935fe098SMike Snitzer 			*bmc = 0;
1349e64e4018SAndy Shevchenko 			md_bitmap_count_page(counts, block, -1);
1350e64e4018SAndy Shevchenko 			md_bitmap_file_clear_bit(bitmap, block);
1351935fe098SMike Snitzer 		} else if (*bmc && *bmc <= 2) {
1352935fe098SMike Snitzer 			*bmc = 1;
1353e64e4018SAndy Shevchenko 			md_bitmap_set_pending(counts, block);
1354935fe098SMike Snitzer 			bitmap->allclean = 0;
1355935fe098SMike Snitzer 		}
1356935fe098SMike Snitzer 	}
1357935fe098SMike Snitzer 	spin_unlock_irq(&counts->lock);
1358935fe098SMike Snitzer 
1359e64e4018SAndy Shevchenko 	md_bitmap_wait_writes(bitmap);
1360935fe098SMike Snitzer 	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
1361935fe098SMike Snitzer 	 * DIRTY pages need to be written by bitmap_unplug so it can wait
1362935fe098SMike Snitzer 	 * for them.
1363935fe098SMike Snitzer 	 * If we find any DIRTY page we stop there and let bitmap_unplug
1364935fe098SMike Snitzer 	 * handle all the rest.  This is important in the case where
1365935fe098SMike Snitzer 	 * the first DIRTY page holds the superblock and it has been updated:
1366935fe098SMike Snitzer 	 * we must not write any other pages before the superblock.
1367935fe098SMike Snitzer */ 1368935fe098SMike Snitzer for (j = 0; 1369935fe098SMike Snitzer j < bitmap->storage.file_pages 1370935fe098SMike Snitzer && !test_bit(BITMAP_STALE, &bitmap->flags); 1371935fe098SMike Snitzer j++) { 1372935fe098SMike Snitzer if (test_page_attr(bitmap, j, 1373935fe098SMike Snitzer BITMAP_PAGE_DIRTY)) 1374935fe098SMike Snitzer /* bitmap_unplug will handle the rest */ 1375935fe098SMike Snitzer break; 137655180498SZhiqiang Liu if (bitmap->storage.filemap && 137755180498SZhiqiang Liu test_and_clear_page_attr(bitmap, j, 1378935fe098SMike Snitzer BITMAP_PAGE_NEEDWRITE)) { 1379935fe098SMike Snitzer write_page(bitmap, bitmap->storage.filemap[j], 0); 1380935fe098SMike Snitzer } 1381935fe098SMike Snitzer } 1382935fe098SMike Snitzer 1383935fe098SMike Snitzer done: 1384935fe098SMike Snitzer if (bitmap->allclean == 0) 13854eeb6535SYu Kuai mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true); 1386935fe098SMike Snitzer mutex_unlock(&mddev->bitmap_info.mutex); 1387935fe098SMike Snitzer } 1388935fe098SMike Snitzer 1389e64e4018SAndy Shevchenko static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap, 1390935fe098SMike Snitzer sector_t offset, sector_t *blocks, 1391935fe098SMike Snitzer int create) 1392935fe098SMike Snitzer __releases(bitmap->lock) 1393935fe098SMike Snitzer __acquires(bitmap->lock) 1394935fe098SMike Snitzer { 1395935fe098SMike Snitzer /* If 'create', we might release the lock and reclaim it. 1396935fe098SMike Snitzer * The lock must have been taken with interrupts enabled. 1397935fe098SMike Snitzer * If !create, we don't release the lock. 1398935fe098SMike Snitzer */ 1399935fe098SMike Snitzer sector_t chunk = offset >> bitmap->chunkshift; 1400935fe098SMike Snitzer unsigned long page = chunk >> PAGE_COUNTER_SHIFT; 1401935fe098SMike Snitzer unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT; 1402935fe098SMike Snitzer sector_t csize; 1403935fe098SMike Snitzer int err; 1404935fe098SMike Snitzer 1405301867b1SLi Nan if (page >= bitmap->pages) { 1406301867b1SLi Nan /* 1407301867b1SLi Nan * This can happen if bitmap_start_sync goes beyond 1408301867b1SLi Nan * End-of-device while looking for a whole page or 1409301867b1SLi Nan * user set a huge number to sysfs bitmap_set_bits. 1410301867b1SLi Nan */ 1411301867b1SLi Nan return NULL; 1412301867b1SLi Nan } 1413e64e4018SAndy Shevchenko err = md_bitmap_checkpage(bitmap, page, create, 0); 1414935fe098SMike Snitzer 1415935fe098SMike Snitzer if (bitmap->bp[page].hijacked || 1416935fe098SMike Snitzer bitmap->bp[page].map == NULL) 1417935fe098SMike Snitzer csize = ((sector_t)1) << (bitmap->chunkshift + 1418d837f727SZhao Heming PAGE_COUNTER_SHIFT); 1419935fe098SMike Snitzer else 1420935fe098SMike Snitzer csize = ((sector_t)1) << bitmap->chunkshift; 1421935fe098SMike Snitzer *blocks = csize - (offset & (csize - 1)); 1422935fe098SMike Snitzer 1423935fe098SMike Snitzer if (err < 0) 1424935fe098SMike Snitzer return NULL; 1425935fe098SMike Snitzer 1426935fe098SMike Snitzer /* now locked ... */ 1427935fe098SMike Snitzer 1428935fe098SMike Snitzer if (bitmap->bp[page].hijacked) { /* hijacked pointer */ 1429935fe098SMike Snitzer /* should we use the first or second counter field 1430935fe098SMike Snitzer * of the hijacked pointer? 
*/ 1431935fe098SMike Snitzer int hi = (pageoff > PAGE_COUNTER_MASK); 1432935fe098SMike Snitzer return &((bitmap_counter_t *) 1433935fe098SMike Snitzer &bitmap->bp[page].map)[hi]; 1434935fe098SMike Snitzer } else /* page is allocated */ 1435935fe098SMike Snitzer return (bitmap_counter_t *) 1436935fe098SMike Snitzer &(bitmap->bp[page].map[pageoff]); 1437935fe098SMike Snitzer } 1438935fe098SMike Snitzer 1439e64e4018SAndy Shevchenko int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) 1440935fe098SMike Snitzer { 1441935fe098SMike Snitzer if (!bitmap) 1442935fe098SMike Snitzer return 0; 1443935fe098SMike Snitzer 1444935fe098SMike Snitzer if (behind) { 1445935fe098SMike Snitzer int bw; 1446935fe098SMike Snitzer atomic_inc(&bitmap->behind_writes); 1447935fe098SMike Snitzer bw = atomic_read(&bitmap->behind_writes); 1448935fe098SMike Snitzer if (bw > bitmap->behind_writes_used) 1449935fe098SMike Snitzer bitmap->behind_writes_used = bw; 1450935fe098SMike Snitzer 1451935fe098SMike Snitzer pr_debug("inc write-behind count %d/%lu\n", 1452935fe098SMike Snitzer bw, bitmap->mddev->bitmap_info.max_write_behind); 1453935fe098SMike Snitzer } 1454935fe098SMike Snitzer 1455935fe098SMike Snitzer while (sectors) { 1456935fe098SMike Snitzer sector_t blocks; 1457935fe098SMike Snitzer bitmap_counter_t *bmc; 1458935fe098SMike Snitzer 1459935fe098SMike Snitzer spin_lock_irq(&bitmap->counts.lock); 1460e64e4018SAndy Shevchenko bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1); 1461935fe098SMike Snitzer if (!bmc) { 1462935fe098SMike Snitzer spin_unlock_irq(&bitmap->counts.lock); 1463935fe098SMike Snitzer return 0; 1464935fe098SMike Snitzer } 1465935fe098SMike Snitzer 1466935fe098SMike Snitzer if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) { 1467935fe098SMike Snitzer DEFINE_WAIT(__wait); 1468935fe098SMike Snitzer /* note that it is safe to do the prepare_to_wait 1469935fe098SMike Snitzer * after the test as long as we do it before dropping 1470935fe098SMike Snitzer * the spinlock. 1471935fe098SMike Snitzer */ 1472935fe098SMike Snitzer prepare_to_wait(&bitmap->overflow_wait, &__wait, 1473935fe098SMike Snitzer TASK_UNINTERRUPTIBLE); 1474935fe098SMike Snitzer spin_unlock_irq(&bitmap->counts.lock); 1475935fe098SMike Snitzer schedule(); 1476935fe098SMike Snitzer finish_wait(&bitmap->overflow_wait, &__wait); 1477935fe098SMike Snitzer continue; 1478935fe098SMike Snitzer } 1479935fe098SMike Snitzer 1480935fe098SMike Snitzer switch (*bmc) { 1481935fe098SMike Snitzer case 0: 1482e64e4018SAndy Shevchenko md_bitmap_file_set_bit(bitmap, offset); 1483e64e4018SAndy Shevchenko md_bitmap_count_page(&bitmap->counts, offset, 1); 1484df561f66SGustavo A. R. 
Silva fallthrough; 1485935fe098SMike Snitzer case 1: 1486935fe098SMike Snitzer *bmc = 2; 1487935fe098SMike Snitzer } 1488935fe098SMike Snitzer 1489935fe098SMike Snitzer (*bmc)++; 1490935fe098SMike Snitzer 1491935fe098SMike Snitzer spin_unlock_irq(&bitmap->counts.lock); 1492935fe098SMike Snitzer 1493935fe098SMike Snitzer offset += blocks; 1494935fe098SMike Snitzer if (sectors > blocks) 1495935fe098SMike Snitzer sectors -= blocks; 1496935fe098SMike Snitzer else 1497935fe098SMike Snitzer sectors = 0; 1498935fe098SMike Snitzer } 1499935fe098SMike Snitzer return 0; 1500935fe098SMike Snitzer } 1501e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_startwrite); 1502935fe098SMike Snitzer 1503e64e4018SAndy Shevchenko void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset, 1504e64e4018SAndy Shevchenko unsigned long sectors, int success, int behind) 1505935fe098SMike Snitzer { 1506935fe098SMike Snitzer if (!bitmap) 1507935fe098SMike Snitzer return; 1508935fe098SMike Snitzer if (behind) { 1509935fe098SMike Snitzer if (atomic_dec_and_test(&bitmap->behind_writes)) 1510935fe098SMike Snitzer wake_up(&bitmap->behind_wait); 1511935fe098SMike Snitzer pr_debug("dec write-behind count %d/%lu\n", 1512935fe098SMike Snitzer atomic_read(&bitmap->behind_writes), 1513935fe098SMike Snitzer bitmap->mddev->bitmap_info.max_write_behind); 1514935fe098SMike Snitzer } 1515935fe098SMike Snitzer 1516935fe098SMike Snitzer while (sectors) { 1517935fe098SMike Snitzer sector_t blocks; 1518935fe098SMike Snitzer unsigned long flags; 1519935fe098SMike Snitzer bitmap_counter_t *bmc; 1520935fe098SMike Snitzer 1521935fe098SMike Snitzer spin_lock_irqsave(&bitmap->counts.lock, flags); 1522e64e4018SAndy Shevchenko bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0); 1523935fe098SMike Snitzer if (!bmc) { 1524935fe098SMike Snitzer spin_unlock_irqrestore(&bitmap->counts.lock, flags); 1525935fe098SMike Snitzer return; 1526935fe098SMike Snitzer } 1527935fe098SMike Snitzer 1528935fe098SMike Snitzer if (success && !bitmap->mddev->degraded && 1529935fe098SMike Snitzer bitmap->events_cleared < bitmap->mddev->events) { 1530935fe098SMike Snitzer bitmap->events_cleared = bitmap->mddev->events; 1531935fe098SMike Snitzer bitmap->need_sync = 1; 1532935fe098SMike Snitzer sysfs_notify_dirent_safe(bitmap->sysfs_can_clear); 1533935fe098SMike Snitzer } 1534935fe098SMike Snitzer 1535935fe098SMike Snitzer if (!success && !NEEDED(*bmc)) 1536935fe098SMike Snitzer *bmc |= NEEDED_MASK; 1537935fe098SMike Snitzer 1538935fe098SMike Snitzer if (COUNTER(*bmc) == COUNTER_MAX) 1539935fe098SMike Snitzer wake_up(&bitmap->overflow_wait); 1540935fe098SMike Snitzer 1541935fe098SMike Snitzer (*bmc)--; 1542935fe098SMike Snitzer if (*bmc <= 2) { 1543e64e4018SAndy Shevchenko md_bitmap_set_pending(&bitmap->counts, offset); 1544935fe098SMike Snitzer bitmap->allclean = 0; 1545935fe098SMike Snitzer } 1546935fe098SMike Snitzer spin_unlock_irqrestore(&bitmap->counts.lock, flags); 1547935fe098SMike Snitzer offset += blocks; 1548935fe098SMike Snitzer if (sectors > blocks) 1549935fe098SMike Snitzer sectors -= blocks; 1550935fe098SMike Snitzer else 1551935fe098SMike Snitzer sectors = 0; 1552935fe098SMike Snitzer } 1553935fe098SMike Snitzer } 1554e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_endwrite); 1555935fe098SMike Snitzer 1556935fe098SMike Snitzer static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, 1557935fe098SMike Snitzer int degraded) 1558935fe098SMike Snitzer { 1559935fe098SMike Snitzer bitmap_counter_t *bmc; 
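	/*
	 * Each counter handled by md_bitmap_startwrite()/md_bitmap_endwrite()
	 * above and by the sync helpers below is a single 16-bit word
	 * (bitmap_counter_t): bit 15 is NEEDED ("chunk must be resynced"),
	 * bit 14 is RESYNC ("resync of this chunk is in progress") and the
	 * low 14 bits track writes outstanding against the chunk, passing
	 * through the transitional values 2 and 1 that md_bitmap_daemon_work()
	 * uses while lazily clearing a chunk.  A small decoding sketch, using
	 * mask values that mirror NEEDED_MASK/RESYNC_MASK/COUNTER_MAX from
	 * md-bitmap.h; the names below are illustrative only:
	 *
	 *	#include <stdint.h>
	 *	#include <stdio.h>
	 *
	 *	#define SK_NEEDED	0x8000u		// chunk still needs resync
	 *	#define SK_RESYNC	0x4000u		// resync in progress
	 *	#define SK_COUNT	0x3fffu		// low 14 bits: write count
	 *
	 *	static void sk_decode(uint16_t c)
	 *	{
	 *		printf("needed=%d resync=%d count=%u\n",
	 *		       !!(c & SK_NEEDED), !!(c & SK_RESYNC), c & SK_COUNT);
	 *	}
	 *
	 *	int main(void)
	 *	{
	 *		sk_decode(0x8000);	// must be resynced, no writes in flight
	 *		sk_decode(0x0002);	// recently written, daemon may clear it soon
	 *		sk_decode(0x4001);	// resync of this chunk is under way
	 *		return 0;
	 *	}
	 */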
1560935fe098SMike Snitzer int rv; 1561935fe098SMike Snitzer if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */ 1562935fe098SMike Snitzer *blocks = 1024; 1563935fe098SMike Snitzer return 1; /* always resync if no bitmap */ 1564935fe098SMike Snitzer } 1565935fe098SMike Snitzer spin_lock_irq(&bitmap->counts.lock); 1566e64e4018SAndy Shevchenko bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0); 1567935fe098SMike Snitzer rv = 0; 1568935fe098SMike Snitzer if (bmc) { 1569935fe098SMike Snitzer /* locked */ 1570935fe098SMike Snitzer if (RESYNC(*bmc)) 1571935fe098SMike Snitzer rv = 1; 1572935fe098SMike Snitzer else if (NEEDED(*bmc)) { 1573935fe098SMike Snitzer rv = 1; 1574935fe098SMike Snitzer if (!degraded) { /* don't set/clear bits if degraded */ 1575935fe098SMike Snitzer *bmc |= RESYNC_MASK; 1576935fe098SMike Snitzer *bmc &= ~NEEDED_MASK; 1577935fe098SMike Snitzer } 1578935fe098SMike Snitzer } 1579935fe098SMike Snitzer } 1580935fe098SMike Snitzer spin_unlock_irq(&bitmap->counts.lock); 1581935fe098SMike Snitzer return rv; 1582935fe098SMike Snitzer } 1583935fe098SMike Snitzer 1584e64e4018SAndy Shevchenko int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, 1585935fe098SMike Snitzer int degraded) 1586935fe098SMike Snitzer { 1587935fe098SMike Snitzer /* bitmap_start_sync must always report on multiples of whole 1588935fe098SMike Snitzer * pages, otherwise resync (which is very PAGE_SIZE based) will 1589935fe098SMike Snitzer * get confused. 1590935fe098SMike Snitzer * So call __bitmap_start_sync repeatedly (if needed) until 1591935fe098SMike Snitzer * At least PAGE_SIZE>>9 blocks are covered. 1592935fe098SMike Snitzer * Return the 'or' of the result. 1593935fe098SMike Snitzer */ 1594935fe098SMike Snitzer int rv = 0; 1595935fe098SMike Snitzer sector_t blocks1; 1596935fe098SMike Snitzer 1597935fe098SMike Snitzer *blocks = 0; 1598935fe098SMike Snitzer while (*blocks < (PAGE_SIZE>>9)) { 1599935fe098SMike Snitzer rv |= __bitmap_start_sync(bitmap, offset, 1600935fe098SMike Snitzer &blocks1, degraded); 1601935fe098SMike Snitzer offset += blocks1; 1602935fe098SMike Snitzer *blocks += blocks1; 1603935fe098SMike Snitzer } 1604935fe098SMike Snitzer return rv; 1605935fe098SMike Snitzer } 1606e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_start_sync); 1607935fe098SMike Snitzer 1608e64e4018SAndy Shevchenko void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted) 1609935fe098SMike Snitzer { 1610935fe098SMike Snitzer bitmap_counter_t *bmc; 1611935fe098SMike Snitzer unsigned long flags; 1612935fe098SMike Snitzer 1613935fe098SMike Snitzer if (bitmap == NULL) { 1614935fe098SMike Snitzer *blocks = 1024; 1615935fe098SMike Snitzer return; 1616935fe098SMike Snitzer } 1617935fe098SMike Snitzer spin_lock_irqsave(&bitmap->counts.lock, flags); 1618e64e4018SAndy Shevchenko bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0); 1619935fe098SMike Snitzer if (bmc == NULL) 1620935fe098SMike Snitzer goto unlock; 1621935fe098SMike Snitzer /* locked */ 1622935fe098SMike Snitzer if (RESYNC(*bmc)) { 1623935fe098SMike Snitzer *bmc &= ~RESYNC_MASK; 1624935fe098SMike Snitzer 1625935fe098SMike Snitzer if (!NEEDED(*bmc) && aborted) 1626935fe098SMike Snitzer *bmc |= NEEDED_MASK; 1627935fe098SMike Snitzer else { 1628935fe098SMike Snitzer if (*bmc <= 2) { 1629e64e4018SAndy Shevchenko md_bitmap_set_pending(&bitmap->counts, offset); 1630935fe098SMike Snitzer bitmap->allclean = 0; 1631935fe098SMike Snitzer } 1632935fe098SMike Snitzer } 
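	/*
	 * Taken together, __bitmap_start_sync() above and md_bitmap_end_sync()
	 * here move a chunk's counter through the resync states roughly as
	 * follows (a summary of the surrounding code, not an addition to it):
	 *
	 *	NEEDED set, start_sync, array not degraded -> RESYNC set, NEEDED cleared
	 *	RESYNC set, end_sync, aborted              -> NEEDED set again (retry later)
	 *	RESYNC set, end_sync, clean finish         -> counter left at <= 2 and the
	 *	                                              chunk marked pending, so the
	 *	                                              daemon can clear the disk bit
	 */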
1633935fe098SMike Snitzer } 1634935fe098SMike Snitzer unlock: 1635935fe098SMike Snitzer spin_unlock_irqrestore(&bitmap->counts.lock, flags); 1636935fe098SMike Snitzer } 1637e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_end_sync); 1638935fe098SMike Snitzer 1639e64e4018SAndy Shevchenko void md_bitmap_close_sync(struct bitmap *bitmap) 1640935fe098SMike Snitzer { 1641935fe098SMike Snitzer /* Sync has finished, and any bitmap chunks that weren't synced 1642935fe098SMike Snitzer * properly have been aborted. It remains to us to clear the 1643935fe098SMike Snitzer * RESYNC bit wherever it is still on 1644935fe098SMike Snitzer */ 1645935fe098SMike Snitzer sector_t sector = 0; 1646935fe098SMike Snitzer sector_t blocks; 1647935fe098SMike Snitzer if (!bitmap) 1648935fe098SMike Snitzer return; 1649935fe098SMike Snitzer while (sector < bitmap->mddev->resync_max_sectors) { 1650e64e4018SAndy Shevchenko md_bitmap_end_sync(bitmap, sector, &blocks, 0); 1651935fe098SMike Snitzer sector += blocks; 1652935fe098SMike Snitzer } 1653935fe098SMike Snitzer } 1654e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_close_sync); 1655935fe098SMike Snitzer 1656e64e4018SAndy Shevchenko void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force) 1657935fe098SMike Snitzer { 1658935fe098SMike Snitzer sector_t s = 0; 1659935fe098SMike Snitzer sector_t blocks; 1660935fe098SMike Snitzer 1661935fe098SMike Snitzer if (!bitmap) 1662935fe098SMike Snitzer return; 1663935fe098SMike Snitzer if (sector == 0) { 1664935fe098SMike Snitzer bitmap->last_end_sync = jiffies; 1665935fe098SMike Snitzer return; 1666935fe098SMike Snitzer } 1667935fe098SMike Snitzer if (!force && time_before(jiffies, (bitmap->last_end_sync 1668935fe098SMike Snitzer + bitmap->mddev->bitmap_info.daemon_sleep))) 1669935fe098SMike Snitzer return; 1670935fe098SMike Snitzer wait_event(bitmap->mddev->recovery_wait, 1671935fe098SMike Snitzer atomic_read(&bitmap->mddev->recovery_active) == 0); 1672935fe098SMike Snitzer 1673935fe098SMike Snitzer bitmap->mddev->curr_resync_completed = sector; 1674935fe098SMike Snitzer set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags); 1675935fe098SMike Snitzer sector &= ~((1ULL << bitmap->counts.chunkshift) - 1); 1676935fe098SMike Snitzer s = 0; 1677935fe098SMike Snitzer while (s < sector && s < bitmap->mddev->resync_max_sectors) { 1678e64e4018SAndy Shevchenko md_bitmap_end_sync(bitmap, s, &blocks, 0); 1679935fe098SMike Snitzer s += blocks; 1680935fe098SMike Snitzer } 1681935fe098SMike Snitzer bitmap->last_end_sync = jiffies; 1682e1a86dbbSJunxiao Bi sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed); 1683935fe098SMike Snitzer } 1684e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_cond_end_sync); 1685935fe098SMike Snitzer 1686e64e4018SAndy Shevchenko void md_bitmap_sync_with_cluster(struct mddev *mddev, 1687935fe098SMike Snitzer sector_t old_lo, sector_t old_hi, 1688935fe098SMike Snitzer sector_t new_lo, sector_t new_hi) 1689935fe098SMike Snitzer { 1690935fe098SMike Snitzer struct bitmap *bitmap = mddev->bitmap; 1691935fe098SMike Snitzer sector_t sector, blocks = 0; 1692935fe098SMike Snitzer 1693935fe098SMike Snitzer for (sector = old_lo; sector < new_lo; ) { 1694e64e4018SAndy Shevchenko md_bitmap_end_sync(bitmap, sector, &blocks, 0); 1695935fe098SMike Snitzer sector += blocks; 1696935fe098SMike Snitzer } 1697935fe098SMike Snitzer WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n"); 1698935fe098SMike Snitzer 1699935fe098SMike Snitzer for (sector = old_hi; sector < new_hi; ) { 
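		/* Mark chunks in [old_hi, new_hi) that still need syncing as
		 * resync-in-progress on this node; md_bitmap_start_sync()
		 * reports how many blocks each call covered back through
		 * 'blocks', so the loop advances a whole counter range at a
		 * time rather than sector by sector. */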
1700e64e4018SAndy Shevchenko 		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1701935fe098SMike Snitzer 		sector += blocks;
1702935fe098SMike Snitzer 	}
1703935fe098SMike Snitzer 	WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
1704935fe098SMike Snitzer }
1705e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
1706935fe098SMike Snitzer 
1707e64e4018SAndy Shevchenko static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1708935fe098SMike Snitzer {
1709935fe098SMike Snitzer 	/* For each chunk covered by any of these sectors, set the
1710935fe098SMike Snitzer 	 * counter to 2 and possibly set resync_needed.  They should all
1711935fe098SMike Snitzer 	 * be 0 at this point
1712935fe098SMike Snitzer 	 */
1713935fe098SMike Snitzer 
1714935fe098SMike Snitzer 	sector_t secs;
1715935fe098SMike Snitzer 	bitmap_counter_t *bmc;
1716935fe098SMike Snitzer 	spin_lock_irq(&bitmap->counts.lock);
1717e64e4018SAndy Shevchenko 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
1718935fe098SMike Snitzer 	if (!bmc) {
1719935fe098SMike Snitzer 		spin_unlock_irq(&bitmap->counts.lock);
1720935fe098SMike Snitzer 		return;
1721935fe098SMike Snitzer 	}
1722935fe098SMike Snitzer 	if (!*bmc) {
1723935fe098SMike Snitzer 		*bmc = 2;
1724e64e4018SAndy Shevchenko 		md_bitmap_count_page(&bitmap->counts, offset, 1);
1725e64e4018SAndy Shevchenko 		md_bitmap_set_pending(&bitmap->counts, offset);
1726935fe098SMike Snitzer 		bitmap->allclean = 0;
1727935fe098SMike Snitzer 	}
1728935fe098SMike Snitzer 	if (needed)
1729935fe098SMike Snitzer 		*bmc |= NEEDED_MASK;
1730935fe098SMike Snitzer 	spin_unlock_irq(&bitmap->counts.lock);
1731935fe098SMike Snitzer }
1732935fe098SMike Snitzer 
1733935fe098SMike Snitzer /* dirty the memory and file bits for bitmap chunks "s" to "e" */
1734e64e4018SAndy Shevchenko void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
1735935fe098SMike Snitzer {
1736935fe098SMike Snitzer 	unsigned long chunk;
1737935fe098SMike Snitzer 
1738935fe098SMike Snitzer 	for (chunk = s; chunk <= e; chunk++) {
1739935fe098SMike Snitzer 		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
1740e64e4018SAndy Shevchenko 		md_bitmap_set_memory_bits(bitmap, sec, 1);
1741e64e4018SAndy Shevchenko 		md_bitmap_file_set_bit(bitmap, sec);
1742935fe098SMike Snitzer 		if (sec < bitmap->mddev->recovery_cp)
1743935fe098SMike Snitzer 			/* We are asserting that the array is dirty,
1744935fe098SMike Snitzer 			 * so move the recovery_cp address back so
1745935fe098SMike Snitzer 			 * that it is obvious that it is dirty
1746935fe098SMike Snitzer 			 */
1747935fe098SMike Snitzer 			bitmap->mddev->recovery_cp = sec;
1748935fe098SMike Snitzer 	}
1749935fe098SMike Snitzer }
1750935fe098SMike Snitzer 
1751935fe098SMike Snitzer /*
1752935fe098SMike Snitzer  * flush out any pending updates
1753935fe098SMike Snitzer  */
1754e64e4018SAndy Shevchenko void md_bitmap_flush(struct mddev *mddev)
1755935fe098SMike Snitzer {
1756935fe098SMike Snitzer 	struct bitmap *bitmap = mddev->bitmap;
1757935fe098SMike Snitzer 	long sleep;
1758935fe098SMike Snitzer 
1759935fe098SMike Snitzer 	if (!bitmap) /* there was no bitmap */
1760935fe098SMike Snitzer 		return;
1761935fe098SMike Snitzer 
1762935fe098SMike Snitzer 	/* run the daemon_work three times to ensure that everything
1763935fe098SMike Snitzer 	 * that can be flushed has been flushed
1764935fe098SMike Snitzer 	 */
1765935fe098SMike Snitzer 	sleep = mddev->bitmap_info.daemon_sleep * 2;
1766935fe098SMike Snitzer 	bitmap->daemon_lastrun -= sleep;
1767e64e4018SAndy Shevchenko 
md_bitmap_daemon_work(mddev); 1768935fe098SMike Snitzer bitmap->daemon_lastrun -= sleep; 1769e64e4018SAndy Shevchenko md_bitmap_daemon_work(mddev); 1770935fe098SMike Snitzer bitmap->daemon_lastrun -= sleep; 1771e64e4018SAndy Shevchenko md_bitmap_daemon_work(mddev); 1772404a8ef5SSudhakar Panneerselvam if (mddev->bitmap_info.external) 1773404a8ef5SSudhakar Panneerselvam md_super_wait(mddev); 1774e64e4018SAndy Shevchenko md_bitmap_update_sb(bitmap); 1775935fe098SMike Snitzer } 1776935fe098SMike Snitzer 1777935fe098SMike Snitzer /* 1778935fe098SMike Snitzer * free memory that was allocated 1779935fe098SMike Snitzer */ 1780e64e4018SAndy Shevchenko void md_bitmap_free(struct bitmap *bitmap) 1781935fe098SMike Snitzer { 1782935fe098SMike Snitzer unsigned long k, pages; 1783935fe098SMike Snitzer struct bitmap_page *bp; 1784935fe098SMike Snitzer 1785935fe098SMike Snitzer if (!bitmap) /* there was no bitmap */ 1786935fe098SMike Snitzer return; 1787935fe098SMike Snitzer 1788935fe098SMike Snitzer if (bitmap->sysfs_can_clear) 1789935fe098SMike Snitzer sysfs_put(bitmap->sysfs_can_clear); 1790935fe098SMike Snitzer 1791935fe098SMike Snitzer if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info && 1792935fe098SMike Snitzer bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev)) 1793935fe098SMike Snitzer md_cluster_stop(bitmap->mddev); 1794935fe098SMike Snitzer 1795935fe098SMike Snitzer /* Shouldn't be needed - but just in case.... */ 1796935fe098SMike Snitzer wait_event(bitmap->write_wait, 1797935fe098SMike Snitzer atomic_read(&bitmap->pending_writes) == 0); 1798935fe098SMike Snitzer 1799935fe098SMike Snitzer /* release the bitmap file */ 1800e64e4018SAndy Shevchenko md_bitmap_file_unmap(&bitmap->storage); 1801935fe098SMike Snitzer 1802935fe098SMike Snitzer bp = bitmap->counts.bp; 1803935fe098SMike Snitzer pages = bitmap->counts.pages; 1804935fe098SMike Snitzer 1805935fe098SMike Snitzer /* free all allocated memory */ 1806935fe098SMike Snitzer 1807935fe098SMike Snitzer if (bp) /* deallocate the page memory */ 1808935fe098SMike Snitzer for (k = 0; k < pages; k++) 1809935fe098SMike Snitzer if (bp[k].map && !bp[k].hijacked) 1810935fe098SMike Snitzer kfree(bp[k].map); 1811935fe098SMike Snitzer kfree(bp); 1812935fe098SMike Snitzer kfree(bitmap); 1813935fe098SMike Snitzer } 1814e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_free); 1815935fe098SMike Snitzer 1816e64e4018SAndy Shevchenko void md_bitmap_wait_behind_writes(struct mddev *mddev) 1817935fe098SMike Snitzer { 1818935fe098SMike Snitzer struct bitmap *bitmap = mddev->bitmap; 1819935fe098SMike Snitzer 1820935fe098SMike Snitzer /* wait for behind writes to complete */ 1821935fe098SMike Snitzer if (bitmap && atomic_read(&bitmap->behind_writes) > 0) { 1822935fe098SMike Snitzer pr_debug("md:%s: behind writes in progress - waiting to stop.\n", 1823935fe098SMike Snitzer mdname(mddev)); 1824935fe098SMike Snitzer /* need to kick something here to make sure I/O goes? 
*/ 1825935fe098SMike Snitzer wait_event(bitmap->behind_wait, 1826935fe098SMike Snitzer atomic_read(&bitmap->behind_writes) == 0); 1827935fe098SMike Snitzer } 1828935fe098SMike Snitzer } 1829935fe098SMike Snitzer 1830e64e4018SAndy Shevchenko void md_bitmap_destroy(struct mddev *mddev) 1831935fe098SMike Snitzer { 1832935fe098SMike Snitzer struct bitmap *bitmap = mddev->bitmap; 1833935fe098SMike Snitzer 1834935fe098SMike Snitzer if (!bitmap) /* there was no bitmap */ 1835935fe098SMike Snitzer return; 1836935fe098SMike Snitzer 1837e64e4018SAndy Shevchenko md_bitmap_wait_behind_writes(mddev); 183869b00b5bSGuoqing Jiang if (!mddev->serialize_policy) 183969b00b5bSGuoqing Jiang mddev_destroy_serial_pool(mddev, NULL, true); 1840935fe098SMike Snitzer 1841935fe098SMike Snitzer mutex_lock(&mddev->bitmap_info.mutex); 1842935fe098SMike Snitzer spin_lock(&mddev->lock); 1843935fe098SMike Snitzer mddev->bitmap = NULL; /* disconnect from the md device */ 1844935fe098SMike Snitzer spin_unlock(&mddev->lock); 1845935fe098SMike Snitzer mutex_unlock(&mddev->bitmap_info.mutex); 18464eeb6535SYu Kuai mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true); 1847935fe098SMike Snitzer 1848e64e4018SAndy Shevchenko md_bitmap_free(bitmap); 1849935fe098SMike Snitzer } 1850935fe098SMike Snitzer 1851935fe098SMike Snitzer /* 1852935fe098SMike Snitzer * initialize the bitmap structure 1853935fe098SMike Snitzer * if this returns an error, bitmap_destroy must be called to do clean up 1854935fe098SMike Snitzer * once mddev->bitmap is set 1855935fe098SMike Snitzer */ 1856e64e4018SAndy Shevchenko struct bitmap *md_bitmap_create(struct mddev *mddev, int slot) 1857935fe098SMike Snitzer { 1858935fe098SMike Snitzer struct bitmap *bitmap; 1859935fe098SMike Snitzer sector_t blocks = mddev->resync_max_sectors; 1860935fe098SMike Snitzer struct file *file = mddev->bitmap_info.file; 1861935fe098SMike Snitzer int err; 1862935fe098SMike Snitzer struct kernfs_node *bm = NULL; 1863935fe098SMike Snitzer 1864935fe098SMike Snitzer BUILD_BUG_ON(sizeof(bitmap_super_t) != 256); 1865935fe098SMike Snitzer 1866935fe098SMike Snitzer BUG_ON(file && mddev->bitmap_info.offset); 1867935fe098SMike Snitzer 1868230b55faSNeilBrown if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 1869230b55faSNeilBrown pr_notice("md/raid:%s: array with journal cannot have bitmap\n", 1870230b55faSNeilBrown mdname(mddev)); 1871230b55faSNeilBrown return ERR_PTR(-EBUSY); 1872230b55faSNeilBrown } 1873230b55faSNeilBrown 1874935fe098SMike Snitzer bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); 1875935fe098SMike Snitzer if (!bitmap) 1876935fe098SMike Snitzer return ERR_PTR(-ENOMEM); 1877935fe098SMike Snitzer 1878935fe098SMike Snitzer spin_lock_init(&bitmap->counts.lock); 1879935fe098SMike Snitzer atomic_set(&bitmap->pending_writes, 0); 1880935fe098SMike Snitzer init_waitqueue_head(&bitmap->write_wait); 1881935fe098SMike Snitzer init_waitqueue_head(&bitmap->overflow_wait); 1882935fe098SMike Snitzer init_waitqueue_head(&bitmap->behind_wait); 1883935fe098SMike Snitzer 1884935fe098SMike Snitzer bitmap->mddev = mddev; 1885935fe098SMike Snitzer bitmap->cluster_slot = slot; 1886935fe098SMike Snitzer 1887935fe098SMike Snitzer if (mddev->kobj.sd) 1888935fe098SMike Snitzer bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap"); 1889935fe098SMike Snitzer if (bm) { 1890935fe098SMike Snitzer bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear"); 1891935fe098SMike Snitzer sysfs_put(bm); 1892935fe098SMike Snitzer } else 1893935fe098SMike Snitzer bitmap->sysfs_can_clear = NULL; 1894935fe098SMike Snitzer 
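	/*
	 * Everything from here on sizes and populates the bitmap: the backing
	 * file (or in-band offset) is pinned, the superblock is created or read
	 * to obtain the chunk size, and md_bitmap_resize() further down turns
	 * that into the counter table and storage pages.  A rough user-space
	 * sketch of the space calculation md_bitmap_resize() performs when it
	 * must pick a chunk size itself (one bit per chunk, 256-byte superblock
	 * for an internal bitmap, sizes in 512-byte sectors); the names are
	 * illustrative, not kernel API:
	 *
	 *	#include <stdint.h>
	 *	#include <stdio.h>
	 *
	 *	// smallest chunk size (as a shift from 512-byte sectors) whose
	 *	// bitmap fits in 'space' sectors for an array of 'blocks' sectors;
	 *	// the kernel additionally caps the shift, omitted in this sketch
	 *	static unsigned int sk_pick_chunkshift(uint64_t blocks, uint64_t space,
	 *					       int internal_sb)
	 *	{
	 *		unsigned int chunkshift = 4;	// 1 << (4 + 9) = 8 KiB chunks
	 *		uint64_t chunks, bytes;
	 *
	 *		for (;;) {
	 *			chunks = (blocks + (1ULL << chunkshift) - 1) >> chunkshift;
	 *			bytes = (chunks + 7) / 8;
	 *			if (internal_sb)
	 *				bytes += 256;	// sizeof(bitmap_super_t)
	 *			if (bytes <= space * 512)
	 *				return chunkshift;
	 *			chunkshift++;		// double the chunk size and retry
	 *		}
	 *	}
	 *
	 *	int main(void)
	 *	{
	 *		// ~1 TiB array (2^31 sectors), 8 sectors (4 KiB) of bitmap space
	 *		printf("chunkshift %u\n",
	 *		       sk_pick_chunkshift(2147483648ULL, 8, 1));
	 *		return 0;
	 *	}
	 */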
1895935fe098SMike Snitzer bitmap->storage.file = file; 1896935fe098SMike Snitzer if (file) { 1897935fe098SMike Snitzer get_file(file); 1898935fe098SMike Snitzer /* As future accesses to this file will use bmap, 1899935fe098SMike Snitzer * and bypass the page cache, we must sync the file 1900935fe098SMike Snitzer * first. 1901935fe098SMike Snitzer */ 1902935fe098SMike Snitzer vfs_fsync(file, 1); 1903935fe098SMike Snitzer } 1904935fe098SMike Snitzer /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */ 1905935fe098SMike Snitzer if (!mddev->bitmap_info.external) { 1906935fe098SMike Snitzer /* 1907935fe098SMike Snitzer * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is 1908935fe098SMike Snitzer * instructing us to create a new on-disk bitmap instance. 1909935fe098SMike Snitzer */ 1910935fe098SMike Snitzer if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags)) 1911e64e4018SAndy Shevchenko err = md_bitmap_new_disk_sb(bitmap); 1912935fe098SMike Snitzer else 1913e64e4018SAndy Shevchenko err = md_bitmap_read_sb(bitmap); 1914935fe098SMike Snitzer } else { 1915935fe098SMike Snitzer err = 0; 1916935fe098SMike Snitzer if (mddev->bitmap_info.chunksize == 0 || 1917935fe098SMike Snitzer mddev->bitmap_info.daemon_sleep == 0) 1918935fe098SMike Snitzer /* chunksize and time_base need to be 1919935fe098SMike Snitzer * set first. */ 1920935fe098SMike Snitzer err = -EINVAL; 1921935fe098SMike Snitzer } 1922935fe098SMike Snitzer if (err) 1923935fe098SMike Snitzer goto error; 1924935fe098SMike Snitzer 1925935fe098SMike Snitzer bitmap->daemon_lastrun = jiffies; 1926e64e4018SAndy Shevchenko err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1); 1927935fe098SMike Snitzer if (err) 1928935fe098SMike Snitzer goto error; 1929935fe098SMike Snitzer 1930935fe098SMike Snitzer pr_debug("created bitmap (%lu pages) for device %s\n", 1931935fe098SMike Snitzer bitmap->counts.pages, bmname(bitmap)); 1932935fe098SMike Snitzer 1933935fe098SMike Snitzer err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0; 1934935fe098SMike Snitzer if (err) 1935935fe098SMike Snitzer goto error; 1936935fe098SMike Snitzer 1937935fe098SMike Snitzer return bitmap; 1938935fe098SMike Snitzer error: 1939e64e4018SAndy Shevchenko md_bitmap_free(bitmap); 1940935fe098SMike Snitzer return ERR_PTR(err); 1941935fe098SMike Snitzer } 1942935fe098SMike Snitzer 1943e64e4018SAndy Shevchenko int md_bitmap_load(struct mddev *mddev) 1944935fe098SMike Snitzer { 1945935fe098SMike Snitzer int err = 0; 1946935fe098SMike Snitzer sector_t start = 0; 1947935fe098SMike Snitzer sector_t sector = 0; 1948935fe098SMike Snitzer struct bitmap *bitmap = mddev->bitmap; 1949617b194aSGuoqing Jiang struct md_rdev *rdev; 1950935fe098SMike Snitzer 1951935fe098SMike Snitzer if (!bitmap) 1952935fe098SMike Snitzer goto out; 1953935fe098SMike Snitzer 1954617b194aSGuoqing Jiang rdev_for_each(rdev, mddev) 1955404659cfSGuoqing Jiang mddev_create_serial_pool(mddev, rdev, true); 1956617b194aSGuoqing Jiang 1957935fe098SMike Snitzer if (mddev_is_clustered(mddev)) 1958935fe098SMike Snitzer md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes); 1959935fe098SMike Snitzer 1960935fe098SMike Snitzer /* Clear out old bitmap info first: Either there is none, or we 1961935fe098SMike Snitzer * are resuming after someone else has possibly changed things, 1962935fe098SMike Snitzer * so we should forget old cached info. 1963935fe098SMike Snitzer * All chunks should be clean, but some might need_sync. 
1964935fe098SMike Snitzer */ 1965935fe098SMike Snitzer while (sector < mddev->resync_max_sectors) { 1966935fe098SMike Snitzer sector_t blocks; 1967e64e4018SAndy Shevchenko md_bitmap_start_sync(bitmap, sector, &blocks, 0); 1968935fe098SMike Snitzer sector += blocks; 1969935fe098SMike Snitzer } 1970e64e4018SAndy Shevchenko md_bitmap_close_sync(bitmap); 1971935fe098SMike Snitzer 1972935fe098SMike Snitzer if (mddev->degraded == 0 1973935fe098SMike Snitzer || bitmap->events_cleared == mddev->events) 1974935fe098SMike Snitzer /* no need to keep dirty bits to optimise a 1975935fe098SMike Snitzer * re-add of a missing device */ 1976935fe098SMike Snitzer start = mddev->recovery_cp; 1977935fe098SMike Snitzer 1978935fe098SMike Snitzer mutex_lock(&mddev->bitmap_info.mutex); 1979e64e4018SAndy Shevchenko err = md_bitmap_init_from_disk(bitmap, start); 1980935fe098SMike Snitzer mutex_unlock(&mddev->bitmap_info.mutex); 1981935fe098SMike Snitzer 1982935fe098SMike Snitzer if (err) 1983935fe098SMike Snitzer goto out; 1984935fe098SMike Snitzer clear_bit(BITMAP_STALE, &bitmap->flags); 1985935fe098SMike Snitzer 1986935fe098SMike Snitzer /* Kick recovery in case any bits were set */ 1987935fe098SMike Snitzer set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); 1988935fe098SMike Snitzer 19894eeb6535SYu Kuai mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true); 1990935fe098SMike Snitzer md_wakeup_thread(mddev->thread); 1991935fe098SMike Snitzer 1992e64e4018SAndy Shevchenko md_bitmap_update_sb(bitmap); 1993935fe098SMike Snitzer 1994935fe098SMike Snitzer if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) 1995935fe098SMike Snitzer err = -EIO; 1996935fe098SMike Snitzer out: 1997935fe098SMike Snitzer return err; 1998935fe098SMike Snitzer } 1999e64e4018SAndy Shevchenko EXPORT_SYMBOL_GPL(md_bitmap_load); 2000935fe098SMike Snitzer 20011383b347SZhao Heming /* caller need to free returned bitmap with md_bitmap_free() */ 2002935fe098SMike Snitzer struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot) 2003935fe098SMike Snitzer { 2004935fe098SMike Snitzer int rv = 0; 2005935fe098SMike Snitzer struct bitmap *bitmap; 2006935fe098SMike Snitzer 2007e64e4018SAndy Shevchenko bitmap = md_bitmap_create(mddev, slot); 2008935fe098SMike Snitzer if (IS_ERR(bitmap)) { 2009935fe098SMike Snitzer rv = PTR_ERR(bitmap); 2010935fe098SMike Snitzer return ERR_PTR(rv); 2011935fe098SMike Snitzer } 2012935fe098SMike Snitzer 2013e64e4018SAndy Shevchenko rv = md_bitmap_init_from_disk(bitmap, 0); 2014935fe098SMike Snitzer if (rv) { 2015e64e4018SAndy Shevchenko md_bitmap_free(bitmap); 2016935fe098SMike Snitzer return ERR_PTR(rv); 2017935fe098SMike Snitzer } 2018935fe098SMike Snitzer 2019935fe098SMike Snitzer return bitmap; 2020935fe098SMike Snitzer } 2021935fe098SMike Snitzer EXPORT_SYMBOL(get_bitmap_from_slot); 2022935fe098SMike Snitzer 2023935fe098SMike Snitzer /* Loads the bitmap associated with slot and copies the resync information 2024935fe098SMike Snitzer * to our bitmap 2025935fe098SMike Snitzer */ 2026e64e4018SAndy Shevchenko int md_bitmap_copy_from_slot(struct mddev *mddev, int slot, 2027935fe098SMike Snitzer sector_t *low, sector_t *high, bool clear_bits) 2028935fe098SMike Snitzer { 2029935fe098SMike Snitzer int rv = 0, i, j; 2030935fe098SMike Snitzer sector_t block, lo = 0, hi = 0; 2031935fe098SMike Snitzer struct bitmap_counts *counts; 2032935fe098SMike Snitzer struct bitmap *bitmap; 2033935fe098SMike Snitzer 2034935fe098SMike Snitzer bitmap = get_bitmap_from_slot(mddev, slot); 2035935fe098SMike Snitzer if 
(IS_ERR(bitmap)) { 2036935fe098SMike Snitzer pr_err("%s can't get bitmap from slot %d\n", __func__, slot); 2037935fe098SMike Snitzer return -1; 2038935fe098SMike Snitzer } 2039935fe098SMike Snitzer 2040935fe098SMike Snitzer counts = &bitmap->counts; 2041935fe098SMike Snitzer for (j = 0; j < counts->chunks; j++) { 2042935fe098SMike Snitzer block = (sector_t)j << counts->chunkshift; 2043e64e4018SAndy Shevchenko if (md_bitmap_file_test_bit(bitmap, block)) { 2044935fe098SMike Snitzer if (!lo) 2045935fe098SMike Snitzer lo = block; 2046935fe098SMike Snitzer hi = block; 2047e64e4018SAndy Shevchenko md_bitmap_file_clear_bit(bitmap, block); 2048e64e4018SAndy Shevchenko md_bitmap_set_memory_bits(mddev->bitmap, block, 1); 2049e64e4018SAndy Shevchenko md_bitmap_file_set_bit(mddev->bitmap, block); 2050935fe098SMike Snitzer } 2051935fe098SMike Snitzer } 2052935fe098SMike Snitzer 2053935fe098SMike Snitzer if (clear_bits) { 2054e64e4018SAndy Shevchenko md_bitmap_update_sb(bitmap); 2055935fe098SMike Snitzer /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs 2056935fe098SMike Snitzer * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */ 2057935fe098SMike Snitzer for (i = 0; i < bitmap->storage.file_pages; i++) 2058935fe098SMike Snitzer if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING)) 2059935fe098SMike Snitzer set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); 2060e64e4018SAndy Shevchenko md_bitmap_unplug(bitmap); 2061935fe098SMike Snitzer } 2062e64e4018SAndy Shevchenko md_bitmap_unplug(mddev->bitmap); 2063935fe098SMike Snitzer *low = lo; 2064935fe098SMike Snitzer *high = hi; 20651383b347SZhao Heming md_bitmap_free(bitmap); 2066935fe098SMike Snitzer 2067935fe098SMike Snitzer return rv; 2068935fe098SMike Snitzer } 2069e64e4018SAndy Shevchenko EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot); 2070935fe098SMike Snitzer 2071935fe098SMike Snitzer 2072e64e4018SAndy Shevchenko void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap) 2073935fe098SMike Snitzer { 2074935fe098SMike Snitzer unsigned long chunk_kb; 2075935fe098SMike Snitzer struct bitmap_counts *counts; 2076935fe098SMike Snitzer 2077935fe098SMike Snitzer if (!bitmap) 2078935fe098SMike Snitzer return; 2079935fe098SMike Snitzer 2080935fe098SMike Snitzer counts = &bitmap->counts; 2081935fe098SMike Snitzer 2082935fe098SMike Snitzer chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10; 2083935fe098SMike Snitzer seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 2084935fe098SMike Snitzer "%lu%s chunk", 2085935fe098SMike Snitzer counts->pages - counts->missing_pages, 2086935fe098SMike Snitzer counts->pages, 2087935fe098SMike Snitzer (counts->pages - counts->missing_pages) 2088935fe098SMike Snitzer << (PAGE_SHIFT - 10), 2089935fe098SMike Snitzer chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize, 2090935fe098SMike Snitzer chunk_kb ? "KB" : "B"); 2091935fe098SMike Snitzer if (bitmap->storage.file) { 2092935fe098SMike Snitzer seq_printf(seq, ", file: "); 2093935fe098SMike Snitzer seq_file_path(seq, bitmap->storage.file, " \t\n"); 2094935fe098SMike Snitzer } 2095935fe098SMike Snitzer 2096935fe098SMike Snitzer seq_printf(seq, "\n"); 2097935fe098SMike Snitzer } 2098935fe098SMike Snitzer 2099e64e4018SAndy Shevchenko int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, 2100935fe098SMike Snitzer int chunksize, int init) 2101935fe098SMike Snitzer { 2102935fe098SMike Snitzer /* If chunk_size is 0, choose an appropriate chunk size. 2103935fe098SMike Snitzer * Then possibly allocate new storage space. 
2104935fe098SMike Snitzer * Then quiesce, copy bits, replace bitmap, and re-start 2105935fe098SMike Snitzer * 2106935fe098SMike Snitzer * This function is called both to set up the initial bitmap 2107935fe098SMike Snitzer * and to resize the bitmap while the array is active. 2108935fe098SMike Snitzer * If this happens as a result of the array being resized, 2109935fe098SMike Snitzer * chunksize will be zero, and we need to choose a suitable 2110935fe098SMike Snitzer * chunksize, otherwise we use what we are given. 2111935fe098SMike Snitzer */ 2112935fe098SMike Snitzer struct bitmap_storage store; 2113935fe098SMike Snitzer struct bitmap_counts old_counts; 2114935fe098SMike Snitzer unsigned long chunks; 2115935fe098SMike Snitzer sector_t block; 2116935fe098SMike Snitzer sector_t old_blocks, new_blocks; 2117935fe098SMike Snitzer int chunkshift; 2118935fe098SMike Snitzer int ret = 0; 2119935fe098SMike Snitzer long pages; 2120935fe098SMike Snitzer struct bitmap_page *new_bp; 2121935fe098SMike Snitzer 2122935fe098SMike Snitzer if (bitmap->storage.file && !init) { 2123935fe098SMike Snitzer pr_info("md: cannot resize file-based bitmap\n"); 2124935fe098SMike Snitzer return -EINVAL; 2125935fe098SMike Snitzer } 2126935fe098SMike Snitzer 2127935fe098SMike Snitzer if (chunksize == 0) { 2128935fe098SMike Snitzer /* If there is enough space, leave the chunk size unchanged, 2129935fe098SMike Snitzer * else increase by factor of two until there is enough space. 2130935fe098SMike Snitzer */ 2131935fe098SMike Snitzer long bytes; 2132935fe098SMike Snitzer long space = bitmap->mddev->bitmap_info.space; 2133935fe098SMike Snitzer 2134935fe098SMike Snitzer if (space == 0) { 2135935fe098SMike Snitzer /* We don't know how much space there is, so limit 2136935fe098SMike Snitzer * to current size - in sectors. 2137935fe098SMike Snitzer */ 2138935fe098SMike Snitzer bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8); 2139935fe098SMike Snitzer if (!bitmap->mddev->bitmap_info.external) 2140935fe098SMike Snitzer bytes += sizeof(bitmap_super_t); 2141935fe098SMike Snitzer space = DIV_ROUND_UP(bytes, 512); 2142935fe098SMike Snitzer bitmap->mddev->bitmap_info.space = space; 2143935fe098SMike Snitzer } 2144935fe098SMike Snitzer chunkshift = bitmap->counts.chunkshift; 2145935fe098SMike Snitzer chunkshift--; 2146935fe098SMike Snitzer do { 2147935fe098SMike Snitzer /* 'chunkshift' is shift from block size to chunk size */ 2148935fe098SMike Snitzer chunkshift++; 2149935fe098SMike Snitzer chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift); 2150935fe098SMike Snitzer bytes = DIV_ROUND_UP(chunks, 8); 2151935fe098SMike Snitzer if (!bitmap->mddev->bitmap_info.external) 2152935fe098SMike Snitzer bytes += sizeof(bitmap_super_t); 215345552111SFlorian-Ewald Mueller } while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) < 215445552111SFlorian-Ewald Mueller (BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1)); 2155935fe098SMike Snitzer } else 2156935fe098SMike Snitzer chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT; 2157935fe098SMike Snitzer 2158935fe098SMike Snitzer chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift); 2159935fe098SMike Snitzer memset(&store, 0, sizeof(store)); 2160935fe098SMike Snitzer if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) 2161e64e4018SAndy Shevchenko ret = md_bitmap_storage_alloc(&store, chunks, 2162935fe098SMike Snitzer !bitmap->mddev->bitmap_info.external, 2163935fe098SMike Snitzer mddev_is_clustered(bitmap->mddev) 2164935fe098SMike Snitzer ? 
bitmap->cluster_slot : 0); 2165935fe098SMike Snitzer if (ret) { 2166e64e4018SAndy Shevchenko md_bitmap_file_unmap(&store); 2167935fe098SMike Snitzer goto err; 2168935fe098SMike Snitzer } 2169935fe098SMike Snitzer 2170935fe098SMike Snitzer pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO); 2171935fe098SMike Snitzer 21726396bb22SKees Cook new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL); 2173935fe098SMike Snitzer ret = -ENOMEM; 2174935fe098SMike Snitzer if (!new_bp) { 2175e64e4018SAndy Shevchenko md_bitmap_file_unmap(&store); 2176935fe098SMike Snitzer goto err; 2177935fe098SMike Snitzer } 2178935fe098SMike Snitzer 2179935fe098SMike Snitzer if (!init) 2180935fe098SMike Snitzer bitmap->mddev->pers->quiesce(bitmap->mddev, 1); 2181935fe098SMike Snitzer 2182935fe098SMike Snitzer store.file = bitmap->storage.file; 2183935fe098SMike Snitzer bitmap->storage.file = NULL; 2184935fe098SMike Snitzer 2185935fe098SMike Snitzer if (store.sb_page && bitmap->storage.sb_page) 2186935fe098SMike Snitzer memcpy(page_address(store.sb_page), 2187935fe098SMike Snitzer page_address(bitmap->storage.sb_page), 2188935fe098SMike Snitzer sizeof(bitmap_super_t)); 2189fadcbd29SGuoqing Jiang spin_lock_irq(&bitmap->counts.lock); 2190e64e4018SAndy Shevchenko md_bitmap_file_unmap(&bitmap->storage); 2191935fe098SMike Snitzer bitmap->storage = store; 2192935fe098SMike Snitzer 2193935fe098SMike Snitzer old_counts = bitmap->counts; 2194935fe098SMike Snitzer bitmap->counts.bp = new_bp; 2195935fe098SMike Snitzer bitmap->counts.pages = pages; 2196935fe098SMike Snitzer bitmap->counts.missing_pages = pages; 2197935fe098SMike Snitzer bitmap->counts.chunkshift = chunkshift; 2198935fe098SMike Snitzer bitmap->counts.chunks = chunks; 219945552111SFlorian-Ewald Mueller bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift + 2200935fe098SMike Snitzer BITMAP_BLOCK_SHIFT); 2201935fe098SMike Snitzer 2202935fe098SMike Snitzer blocks = min(old_counts.chunks << old_counts.chunkshift, 2203935fe098SMike Snitzer chunks << chunkshift); 2204935fe098SMike Snitzer 2205935fe098SMike Snitzer /* For cluster raid, need to pre-allocate bitmap */ 2206935fe098SMike Snitzer if (mddev_is_clustered(bitmap->mddev)) { 2207935fe098SMike Snitzer unsigned long page; 2208935fe098SMike Snitzer for (page = 0; page < pages; page++) { 2209e64e4018SAndy Shevchenko ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1); 2210935fe098SMike Snitzer if (ret) { 2211935fe098SMike Snitzer unsigned long k; 2212935fe098SMike Snitzer 2213935fe098SMike Snitzer /* deallocate the page memory */ 2214935fe098SMike Snitzer for (k = 0; k < page; k++) { 2215935fe098SMike Snitzer kfree(new_bp[k].map); 2216935fe098SMike Snitzer } 22170868b99cSZdenek Kabelac kfree(new_bp); 2218935fe098SMike Snitzer 2219935fe098SMike Snitzer /* restore some fields from old_counts */ 2220935fe098SMike Snitzer bitmap->counts.bp = old_counts.bp; 2221935fe098SMike Snitzer bitmap->counts.pages = old_counts.pages; 2222935fe098SMike Snitzer bitmap->counts.missing_pages = old_counts.pages; 2223935fe098SMike Snitzer bitmap->counts.chunkshift = old_counts.chunkshift; 2224935fe098SMike Snitzer bitmap->counts.chunks = old_counts.chunks; 222545552111SFlorian-Ewald Mueller bitmap->mddev->bitmap_info.chunksize = 222645552111SFlorian-Ewald Mueller 1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT); 2227935fe098SMike Snitzer blocks = old_counts.chunks << old_counts.chunkshift; 2228935fe098SMike Snitzer pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n"); 2229935fe098SMike Snitzer break; 2230935fe098SMike 
Snitzer } else 2231935fe098SMike Snitzer bitmap->counts.bp[page].count += 1; 2232935fe098SMike Snitzer } 2233935fe098SMike Snitzer } 2234935fe098SMike Snitzer 2235935fe098SMike Snitzer for (block = 0; block < blocks; ) { 2236935fe098SMike Snitzer bitmap_counter_t *bmc_old, *bmc_new; 2237935fe098SMike Snitzer int set; 2238935fe098SMike Snitzer 2239e64e4018SAndy Shevchenko bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0); 2240935fe098SMike Snitzer set = bmc_old && NEEDED(*bmc_old); 2241935fe098SMike Snitzer 2242935fe098SMike Snitzer if (set) { 2243e64e4018SAndy Shevchenko bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1); 22443bd548e5SLi Zhong if (bmc_new) { 2245935fe098SMike Snitzer if (*bmc_new == 0) { 2246935fe098SMike Snitzer /* need to set on-disk bits too. */ 2247935fe098SMike Snitzer sector_t end = block + new_blocks; 2248935fe098SMike Snitzer sector_t start = block >> chunkshift; 22493bd548e5SLi Zhong 2250935fe098SMike Snitzer start <<= chunkshift; 2251935fe098SMike Snitzer while (start < end) { 2252e64e4018SAndy Shevchenko md_bitmap_file_set_bit(bitmap, block); 2253935fe098SMike Snitzer start += 1 << chunkshift; 2254935fe098SMike Snitzer } 2255935fe098SMike Snitzer *bmc_new = 2; 2256e64e4018SAndy Shevchenko md_bitmap_count_page(&bitmap->counts, block, 1); 2257e64e4018SAndy Shevchenko md_bitmap_set_pending(&bitmap->counts, block); 2258935fe098SMike Snitzer } 2259935fe098SMike Snitzer *bmc_new |= NEEDED_MASK; 22603bd548e5SLi Zhong } 2261935fe098SMike Snitzer if (new_blocks < old_blocks) 2262935fe098SMike Snitzer old_blocks = new_blocks; 2263935fe098SMike Snitzer } 2264935fe098SMike Snitzer block += old_blocks; 2265935fe098SMike Snitzer } 2266935fe098SMike Snitzer 22670868b99cSZdenek Kabelac if (bitmap->counts.bp != old_counts.bp) { 22680868b99cSZdenek Kabelac unsigned long k; 22690868b99cSZdenek Kabelac for (k = 0; k < old_counts.pages; k++) 22700868b99cSZdenek Kabelac if (!old_counts.bp[k].hijacked) 22710868b99cSZdenek Kabelac kfree(old_counts.bp[k].map); 22720868b99cSZdenek Kabelac kfree(old_counts.bp); 22730868b99cSZdenek Kabelac } 22740868b99cSZdenek Kabelac 2275935fe098SMike Snitzer if (!init) { 2276935fe098SMike Snitzer int i; 2277935fe098SMike Snitzer while (block < (chunks << chunkshift)) { 2278935fe098SMike Snitzer bitmap_counter_t *bmc; 2279e64e4018SAndy Shevchenko bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1); 2280935fe098SMike Snitzer if (bmc) { 2281935fe098SMike Snitzer /* new space. It needs to be resynced, so 2282935fe098SMike Snitzer * we set NEEDED_MASK. 
2283935fe098SMike Snitzer */ 2284935fe098SMike Snitzer if (*bmc == 0) { 2285935fe098SMike Snitzer *bmc = NEEDED_MASK | 2; 2286e64e4018SAndy Shevchenko md_bitmap_count_page(&bitmap->counts, block, 1); 2287e64e4018SAndy Shevchenko md_bitmap_set_pending(&bitmap->counts, block); 2288935fe098SMike Snitzer } 2289935fe098SMike Snitzer } 2290935fe098SMike Snitzer block += new_blocks; 2291935fe098SMike Snitzer } 2292935fe098SMike Snitzer for (i = 0; i < bitmap->storage.file_pages; i++) 2293935fe098SMike Snitzer set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY); 2294935fe098SMike Snitzer } 2295935fe098SMike Snitzer spin_unlock_irq(&bitmap->counts.lock); 2296935fe098SMike Snitzer 2297935fe098SMike Snitzer if (!init) { 2298e64e4018SAndy Shevchenko md_bitmap_unplug(bitmap); 2299935fe098SMike Snitzer bitmap->mddev->pers->quiesce(bitmap->mddev, 0); 2300935fe098SMike Snitzer } 2301935fe098SMike Snitzer ret = 0; 2302935fe098SMike Snitzer err: 2303935fe098SMike Snitzer return ret; 2304935fe098SMike Snitzer } 2305e64e4018SAndy Shevchenko EXPORT_SYMBOL_GPL(md_bitmap_resize); 2306935fe098SMike Snitzer 2307935fe098SMike Snitzer static ssize_t 2308935fe098SMike Snitzer location_show(struct mddev *mddev, char *page) 2309935fe098SMike Snitzer { 2310935fe098SMike Snitzer ssize_t len; 2311935fe098SMike Snitzer if (mddev->bitmap_info.file) 2312935fe098SMike Snitzer len = sprintf(page, "file"); 2313935fe098SMike Snitzer else if (mddev->bitmap_info.offset) 2314935fe098SMike Snitzer len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset); 2315935fe098SMike Snitzer else 2316935fe098SMike Snitzer len = sprintf(page, "none"); 2317935fe098SMike Snitzer len += sprintf(page+len, "\n"); 2318935fe098SMike Snitzer return len; 2319935fe098SMike Snitzer } 2320935fe098SMike Snitzer 2321935fe098SMike Snitzer static ssize_t 2322935fe098SMike Snitzer location_store(struct mddev *mddev, const char *buf, size_t len) 2323935fe098SMike Snitzer { 2324935fe098SMike Snitzer int rv; 2325935fe098SMike Snitzer 2326935fe098SMike Snitzer rv = mddev_lock(mddev); 2327935fe098SMike Snitzer if (rv) 2328935fe098SMike Snitzer return rv; 2329935fe098SMike Snitzer if (mddev->pers) { 2330935fe098SMike Snitzer if (!mddev->pers->quiesce) { 2331935fe098SMike Snitzer rv = -EBUSY; 2332935fe098SMike Snitzer goto out; 2333935fe098SMike Snitzer } 2334935fe098SMike Snitzer if (mddev->recovery || mddev->sync_thread) { 2335935fe098SMike Snitzer rv = -EBUSY; 2336935fe098SMike Snitzer goto out; 2337935fe098SMike Snitzer } 2338935fe098SMike Snitzer } 2339935fe098SMike Snitzer 2340935fe098SMike Snitzer if (mddev->bitmap || mddev->bitmap_info.file || 2341935fe098SMike Snitzer mddev->bitmap_info.offset) { 2342935fe098SMike Snitzer /* bitmap already configured. 
Only option is to clear it */ 2343935fe098SMike Snitzer if (strncmp(buf, "none", 4) != 0) { 2344935fe098SMike Snitzer rv = -EBUSY; 2345935fe098SMike Snitzer goto out; 2346935fe098SMike Snitzer } 2347935fe098SMike Snitzer if (mddev->pers) { 2348f8f83d8fSJack Wang mddev_suspend(mddev); 2349e64e4018SAndy Shevchenko md_bitmap_destroy(mddev); 2350f8f83d8fSJack Wang mddev_resume(mddev); 2351935fe098SMike Snitzer } 2352935fe098SMike Snitzer mddev->bitmap_info.offset = 0; 2353935fe098SMike Snitzer if (mddev->bitmap_info.file) { 2354935fe098SMike Snitzer struct file *f = mddev->bitmap_info.file; 2355935fe098SMike Snitzer mddev->bitmap_info.file = NULL; 2356935fe098SMike Snitzer fput(f); 2357935fe098SMike Snitzer } 2358935fe098SMike Snitzer } else { 2359935fe098SMike Snitzer /* No bitmap, OK to set a location */ 2360935fe098SMike Snitzer long long offset; 2361935fe098SMike Snitzer if (strncmp(buf, "none", 4) == 0) 2362935fe098SMike Snitzer /* nothing to be done */; 2363935fe098SMike Snitzer else if (strncmp(buf, "file:", 5) == 0) { 2364935fe098SMike Snitzer /* Not supported yet */ 2365935fe098SMike Snitzer rv = -EINVAL; 2366935fe098SMike Snitzer goto out; 2367935fe098SMike Snitzer } else { 2368935fe098SMike Snitzer if (buf[0] == '+') 2369935fe098SMike Snitzer rv = kstrtoll(buf+1, 10, &offset); 2370935fe098SMike Snitzer else 2371935fe098SMike Snitzer rv = kstrtoll(buf, 10, &offset); 2372935fe098SMike Snitzer if (rv) 2373935fe098SMike Snitzer goto out; 2374935fe098SMike Snitzer if (offset == 0) { 2375935fe098SMike Snitzer rv = -EINVAL; 2376935fe098SMike Snitzer goto out; 2377935fe098SMike Snitzer } 2378935fe098SMike Snitzer if (mddev->bitmap_info.external == 0 && 2379935fe098SMike Snitzer mddev->major_version == 0 && 2380935fe098SMike Snitzer offset != mddev->bitmap_info.default_offset) { 2381935fe098SMike Snitzer rv = -EINVAL; 2382935fe098SMike Snitzer goto out; 2383935fe098SMike Snitzer } 2384935fe098SMike Snitzer mddev->bitmap_info.offset = offset; 2385935fe098SMike Snitzer if (mddev->pers) { 2386935fe098SMike Snitzer struct bitmap *bitmap; 2387e64e4018SAndy Shevchenko bitmap = md_bitmap_create(mddev, -1); 2388f8f83d8fSJack Wang mddev_suspend(mddev); 2389935fe098SMike Snitzer if (IS_ERR(bitmap)) 2390935fe098SMike Snitzer rv = PTR_ERR(bitmap); 2391935fe098SMike Snitzer else { 2392935fe098SMike Snitzer mddev->bitmap = bitmap; 2393e64e4018SAndy Shevchenko rv = md_bitmap_load(mddev); 2394935fe098SMike Snitzer if (rv) 2395935fe098SMike Snitzer mddev->bitmap_info.offset = 0; 2396935fe098SMike Snitzer } 2397935fe098SMike Snitzer if (rv) { 2398e64e4018SAndy Shevchenko md_bitmap_destroy(mddev); 2399f8f83d8fSJack Wang mddev_resume(mddev); 2400935fe098SMike Snitzer goto out; 2401935fe098SMike Snitzer } 2402f8f83d8fSJack Wang mddev_resume(mddev); 2403935fe098SMike Snitzer } 2404935fe098SMike Snitzer } 2405935fe098SMike Snitzer } 2406935fe098SMike Snitzer if (!mddev->external) { 2407935fe098SMike Snitzer /* Ensure new bitmap info is stored in 2408935fe098SMike Snitzer * metadata promptly. 
2409935fe098SMike Snitzer */ 2410935fe098SMike Snitzer set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2411935fe098SMike Snitzer md_wakeup_thread(mddev->thread); 2412935fe098SMike Snitzer } 2413935fe098SMike Snitzer rv = 0; 2414935fe098SMike Snitzer out: 2415935fe098SMike Snitzer mddev_unlock(mddev); 2416935fe098SMike Snitzer if (rv) 2417935fe098SMike Snitzer return rv; 2418935fe098SMike Snitzer return len; 2419935fe098SMike Snitzer } 2420935fe098SMike Snitzer 2421935fe098SMike Snitzer static struct md_sysfs_entry bitmap_location = 2422935fe098SMike Snitzer __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store); 2423935fe098SMike Snitzer 2424935fe098SMike Snitzer /* 'bitmap/space' is the space available at 'location' for the 2425935fe098SMike Snitzer * bitmap. This allows the kernel to know when it is safe to 2426935fe098SMike Snitzer * resize the bitmap to match a resized array. 2427935fe098SMike Snitzer */ 2428935fe098SMike Snitzer static ssize_t 2429935fe098SMike Snitzer space_show(struct mddev *mddev, char *page) 2430935fe098SMike Snitzer { 2431935fe098SMike Snitzer return sprintf(page, "%lu\n", mddev->bitmap_info.space); 2432935fe098SMike Snitzer } 2433935fe098SMike Snitzer 2434935fe098SMike Snitzer static ssize_t 2435935fe098SMike Snitzer space_store(struct mddev *mddev, const char *buf, size_t len) 2436935fe098SMike Snitzer { 2437935fe098SMike Snitzer unsigned long sectors; 2438935fe098SMike Snitzer int rv; 2439935fe098SMike Snitzer 2440935fe098SMike Snitzer rv = kstrtoul(buf, 10, &sectors); 2441935fe098SMike Snitzer if (rv) 2442935fe098SMike Snitzer return rv; 2443935fe098SMike Snitzer 2444935fe098SMike Snitzer if (sectors == 0) 2445935fe098SMike Snitzer return -EINVAL; 2446935fe098SMike Snitzer 2447935fe098SMike Snitzer if (mddev->bitmap && 2448935fe098SMike Snitzer sectors < (mddev->bitmap->storage.bytes + 511) >> 9) 2449935fe098SMike Snitzer return -EFBIG; /* Bitmap is too big for this small space */ 2450935fe098SMike Snitzer 2451935fe098SMike Snitzer /* could make sure it isn't too big, but that isn't really 2452935fe098SMike Snitzer * needed - user-space should be careful. 
2453935fe098SMike Snitzer */ 2454935fe098SMike Snitzer mddev->bitmap_info.space = sectors; 2455935fe098SMike Snitzer return len; 2456935fe098SMike Snitzer } 2457935fe098SMike Snitzer 2458935fe098SMike Snitzer static struct md_sysfs_entry bitmap_space = 2459935fe098SMike Snitzer __ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store); 2460935fe098SMike Snitzer 2461935fe098SMike Snitzer static ssize_t 2462935fe098SMike Snitzer timeout_show(struct mddev *mddev, char *page) 2463935fe098SMike Snitzer { 2464935fe098SMike Snitzer ssize_t len; 2465935fe098SMike Snitzer unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ; 2466935fe098SMike Snitzer unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ; 2467935fe098SMike Snitzer 2468935fe098SMike Snitzer len = sprintf(page, "%lu", secs); 2469935fe098SMike Snitzer if (jifs) 2470935fe098SMike Snitzer len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs)); 2471935fe098SMike Snitzer len += sprintf(page+len, "\n"); 2472935fe098SMike Snitzer return len; 2473935fe098SMike Snitzer } 2474935fe098SMike Snitzer 2475935fe098SMike Snitzer static ssize_t 2476935fe098SMike Snitzer timeout_store(struct mddev *mddev, const char *buf, size_t len) 2477935fe098SMike Snitzer { 2478935fe098SMike Snitzer /* timeout can be set at any time */ 2479935fe098SMike Snitzer unsigned long timeout; 2480935fe098SMike Snitzer int rv = strict_strtoul_scaled(buf, &timeout, 4); 2481935fe098SMike Snitzer if (rv) 2482935fe098SMike Snitzer return rv; 2483935fe098SMike Snitzer 2484935fe098SMike Snitzer /* just to make sure we don't overflow... */ 2485935fe098SMike Snitzer if (timeout >= LONG_MAX / HZ) 2486935fe098SMike Snitzer return -EINVAL; 2487935fe098SMike Snitzer 2488935fe098SMike Snitzer timeout = timeout * HZ / 10000; 2489935fe098SMike Snitzer 2490935fe098SMike Snitzer if (timeout >= MAX_SCHEDULE_TIMEOUT) 2491935fe098SMike Snitzer timeout = MAX_SCHEDULE_TIMEOUT-1; 2492935fe098SMike Snitzer if (timeout < 1) 2493935fe098SMike Snitzer timeout = 1; 2494c333673aSYu Kuai 24954eeb6535SYu Kuai mddev->bitmap_info.daemon_sleep = timeout; 24964eeb6535SYu Kuai mddev_set_timeout(mddev, timeout, false); 2497935fe098SMike Snitzer md_wakeup_thread(mddev->thread); 24984eeb6535SYu Kuai 2499935fe098SMike Snitzer return len; 2500935fe098SMike Snitzer } 2501935fe098SMike Snitzer 2502935fe098SMike Snitzer static struct md_sysfs_entry bitmap_timeout = 2503935fe098SMike Snitzer __ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store); 2504935fe098SMike Snitzer 2505935fe098SMike Snitzer static ssize_t 2506935fe098SMike Snitzer backlog_show(struct mddev *mddev, char *page) 2507935fe098SMike Snitzer { 2508935fe098SMike Snitzer return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind); 2509935fe098SMike Snitzer } 2510935fe098SMike Snitzer 2511935fe098SMike Snitzer static ssize_t 2512935fe098SMike Snitzer backlog_store(struct mddev *mddev, const char *buf, size_t len) 2513935fe098SMike Snitzer { 2514935fe098SMike Snitzer unsigned long backlog; 251510c92fcaSGuoqing Jiang unsigned long old_mwb = mddev->bitmap_info.max_write_behind; 25168c13ab11SGuoqing Jiang struct md_rdev *rdev; 25178c13ab11SGuoqing Jiang bool has_write_mostly = false; 2518935fe098SMike Snitzer int rv = kstrtoul(buf, 10, &backlog); 2519935fe098SMike Snitzer if (rv) 2520935fe098SMike Snitzer return rv; 2521935fe098SMike Snitzer if (backlog > COUNTER_MAX) 2522935fe098SMike Snitzer return -EINVAL; 25238c13ab11SGuoqing Jiang 25248c13ab11SGuoqing Jiang /* 25258c13ab11SGuoqing Jiang * Without write mostly device, it doesn't 
make sense to set 25268c13ab11SGuoqing Jiang * backlog for max_write_behind. 25278c13ab11SGuoqing Jiang */ 25288c13ab11SGuoqing Jiang rdev_for_each(rdev, mddev) { 25298c13ab11SGuoqing Jiang if (test_bit(WriteMostly, &rdev->flags)) { 25308c13ab11SGuoqing Jiang has_write_mostly = true; 25318c13ab11SGuoqing Jiang break; 25328c13ab11SGuoqing Jiang } 25338c13ab11SGuoqing Jiang } 25348c13ab11SGuoqing Jiang if (!has_write_mostly) { 25358c13ab11SGuoqing Jiang pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n", 25368c13ab11SGuoqing Jiang mdname(mddev)); 25378c13ab11SGuoqing Jiang return -EINVAL; 25388c13ab11SGuoqing Jiang } 25398c13ab11SGuoqing Jiang 2540935fe098SMike Snitzer mddev->bitmap_info.max_write_behind = backlog; 2541404659cfSGuoqing Jiang if (!backlog && mddev->serial_info_pool) { 2542404659cfSGuoqing Jiang /* serial_info_pool is not needed if backlog is zero */ 254369b00b5bSGuoqing Jiang if (!mddev->serialize_policy) 254469b00b5bSGuoqing Jiang mddev_destroy_serial_pool(mddev, NULL, false); 2545404659cfSGuoqing Jiang } else if (backlog && !mddev->serial_info_pool) { 2546404659cfSGuoqing Jiang /* serial_info_pool is needed since backlog is not zero */ 254710c92fcaSGuoqing Jiang struct md_rdev *rdev; 254810c92fcaSGuoqing Jiang 254910c92fcaSGuoqing Jiang rdev_for_each(rdev, mddev) 2550404659cfSGuoqing Jiang mddev_create_serial_pool(mddev, rdev, false); 255110c92fcaSGuoqing Jiang } 255210c92fcaSGuoqing Jiang if (old_mwb != backlog) 255310c92fcaSGuoqing Jiang md_bitmap_update_sb(mddev->bitmap); 2554935fe098SMike Snitzer return len; 2555935fe098SMike Snitzer } 2556935fe098SMike Snitzer 2557935fe098SMike Snitzer static struct md_sysfs_entry bitmap_backlog = 2558935fe098SMike Snitzer __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store); 2559935fe098SMike Snitzer 2560935fe098SMike Snitzer static ssize_t 2561935fe098SMike Snitzer chunksize_show(struct mddev *mddev, char *page) 2562935fe098SMike Snitzer { 2563935fe098SMike Snitzer return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize); 2564935fe098SMike Snitzer } 2565935fe098SMike Snitzer 2566935fe098SMike Snitzer static ssize_t 2567935fe098SMike Snitzer chunksize_store(struct mddev *mddev, const char *buf, size_t len) 2568935fe098SMike Snitzer { 2569935fe098SMike Snitzer /* Can only be changed when no bitmap is active */ 2570935fe098SMike Snitzer int rv; 2571935fe098SMike Snitzer unsigned long csize; 2572935fe098SMike Snitzer if (mddev->bitmap) 2573935fe098SMike Snitzer return -EBUSY; 2574935fe098SMike Snitzer rv = kstrtoul(buf, 10, &csize); 2575935fe098SMike Snitzer if (rv) 2576935fe098SMike Snitzer return rv; 2577935fe098SMike Snitzer if (csize < 512 || 2578935fe098SMike Snitzer !is_power_of_2(csize)) 2579935fe098SMike Snitzer return -EINVAL; 258045552111SFlorian-Ewald Mueller if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE * 258145552111SFlorian-Ewald Mueller sizeof(((bitmap_super_t *)0)->chunksize)))) 258245552111SFlorian-Ewald Mueller return -EOVERFLOW; 2583935fe098SMike Snitzer mddev->bitmap_info.chunksize = csize; 2584935fe098SMike Snitzer return len; 2585935fe098SMike Snitzer } 2586935fe098SMike Snitzer 2587935fe098SMike Snitzer static struct md_sysfs_entry bitmap_chunksize = 2588935fe098SMike Snitzer __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store); 2589935fe098SMike Snitzer 2590935fe098SMike Snitzer static ssize_t metadata_show(struct mddev *mddev, char *page) 2591935fe098SMike Snitzer { 2592935fe098SMike Snitzer if (mddev_is_clustered(mddev)) 2593935fe098SMike 
Snitzer return sprintf(page, "clustered\n"); 2594935fe098SMike Snitzer return sprintf(page, "%s\n", (mddev->bitmap_info.external 2595935fe098SMike Snitzer ? "external" : "internal")); 2596935fe098SMike Snitzer } 2597935fe098SMike Snitzer 2598935fe098SMike Snitzer static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len) 2599935fe098SMike Snitzer { 2600935fe098SMike Snitzer if (mddev->bitmap || 2601935fe098SMike Snitzer mddev->bitmap_info.file || 2602935fe098SMike Snitzer mddev->bitmap_info.offset) 2603935fe098SMike Snitzer return -EBUSY; 2604935fe098SMike Snitzer if (strncmp(buf, "external", 8) == 0) 2605935fe098SMike Snitzer mddev->bitmap_info.external = 1; 2606935fe098SMike Snitzer else if ((strncmp(buf, "internal", 8) == 0) || 2607935fe098SMike Snitzer (strncmp(buf, "clustered", 9) == 0)) 2608935fe098SMike Snitzer mddev->bitmap_info.external = 0; 2609935fe098SMike Snitzer else 2610935fe098SMike Snitzer return -EINVAL; 2611935fe098SMike Snitzer return len; 2612935fe098SMike Snitzer } 2613935fe098SMike Snitzer 2614935fe098SMike Snitzer static struct md_sysfs_entry bitmap_metadata = 2615935fe098SMike Snitzer __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 2616935fe098SMike Snitzer 2617935fe098SMike Snitzer static ssize_t can_clear_show(struct mddev *mddev, char *page) 2618935fe098SMike Snitzer { 2619935fe098SMike Snitzer int len; 2620935fe098SMike Snitzer spin_lock(&mddev->lock); 2621935fe098SMike Snitzer if (mddev->bitmap) 2622935fe098SMike Snitzer len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ? 2623935fe098SMike Snitzer "false" : "true")); 2624935fe098SMike Snitzer else 2625935fe098SMike Snitzer len = sprintf(page, "\n"); 2626935fe098SMike Snitzer spin_unlock(&mddev->lock); 2627935fe098SMike Snitzer return len; 2628935fe098SMike Snitzer } 2629935fe098SMike Snitzer 2630935fe098SMike Snitzer static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len) 2631935fe098SMike Snitzer { 2632935fe098SMike Snitzer if (mddev->bitmap == NULL) 2633935fe098SMike Snitzer return -ENOENT; 2634935fe098SMike Snitzer if (strncmp(buf, "false", 5) == 0) 2635935fe098SMike Snitzer mddev->bitmap->need_sync = 1; 2636935fe098SMike Snitzer else if (strncmp(buf, "true", 4) == 0) { 2637935fe098SMike Snitzer if (mddev->degraded) 2638935fe098SMike Snitzer return -EBUSY; 2639935fe098SMike Snitzer mddev->bitmap->need_sync = 0; 2640935fe098SMike Snitzer } else 2641935fe098SMike Snitzer return -EINVAL; 2642935fe098SMike Snitzer return len; 2643935fe098SMike Snitzer } 2644935fe098SMike Snitzer 2645935fe098SMike Snitzer static struct md_sysfs_entry bitmap_can_clear = 2646935fe098SMike Snitzer __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store); 2647935fe098SMike Snitzer 2648935fe098SMike Snitzer static ssize_t 2649935fe098SMike Snitzer behind_writes_used_show(struct mddev *mddev, char *page) 2650935fe098SMike Snitzer { 2651935fe098SMike Snitzer ssize_t ret; 2652935fe098SMike Snitzer spin_lock(&mddev->lock); 2653935fe098SMike Snitzer if (mddev->bitmap == NULL) 2654935fe098SMike Snitzer ret = sprintf(page, "0\n"); 2655935fe098SMike Snitzer else 2656935fe098SMike Snitzer ret = sprintf(page, "%lu\n", 2657935fe098SMike Snitzer mddev->bitmap->behind_writes_used); 2658935fe098SMike Snitzer spin_unlock(&mddev->lock); 2659935fe098SMike Snitzer return ret; 2660935fe098SMike Snitzer } 2661935fe098SMike Snitzer 2662935fe098SMike Snitzer static ssize_t 2663935fe098SMike Snitzer behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len) 
2664935fe098SMike Snitzer { 2665935fe098SMike Snitzer if (mddev->bitmap) 2666935fe098SMike Snitzer mddev->bitmap->behind_writes_used = 0; 2667935fe098SMike Snitzer return len; 2668935fe098SMike Snitzer } 2669935fe098SMike Snitzer 2670935fe098SMike Snitzer static struct md_sysfs_entry max_backlog_used = 2671935fe098SMike Snitzer __ATTR(max_backlog_used, S_IRUGO | S_IWUSR, 2672935fe098SMike Snitzer behind_writes_used_show, behind_writes_used_reset); 2673935fe098SMike Snitzer 2674935fe098SMike Snitzer static struct attribute *md_bitmap_attrs[] = { 2675935fe098SMike Snitzer &bitmap_location.attr, 2676935fe098SMike Snitzer &bitmap_space.attr, 2677935fe098SMike Snitzer &bitmap_timeout.attr, 2678935fe098SMike Snitzer &bitmap_backlog.attr, 2679935fe098SMike Snitzer &bitmap_chunksize.attr, 2680935fe098SMike Snitzer &bitmap_metadata.attr, 2681935fe098SMike Snitzer &bitmap_can_clear.attr, 2682935fe098SMike Snitzer &max_backlog_used.attr, 2683935fe098SMike Snitzer NULL 2684935fe098SMike Snitzer }; 2685c32dc040SRikard Falkeborn const struct attribute_group md_bitmap_group = { 2686935fe098SMike Snitzer .name = "bitmap", 2687935fe098SMike Snitzer .attrs = md_bitmap_attrs, 2688935fe098SMike Snitzer }; 2689