1b411b363SPhilipp Reisner /* 2b411b363SPhilipp Reisner drbd_bitmap.c 3b411b363SPhilipp Reisner 4b411b363SPhilipp Reisner This file is part of DRBD by Philipp Reisner and Lars Ellenberg. 5b411b363SPhilipp Reisner 6b411b363SPhilipp Reisner Copyright (C) 2004-2008, LINBIT Information Technologies GmbH. 7b411b363SPhilipp Reisner Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>. 8b411b363SPhilipp Reisner Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. 9b411b363SPhilipp Reisner 10b411b363SPhilipp Reisner drbd is free software; you can redistribute it and/or modify 11b411b363SPhilipp Reisner it under the terms of the GNU General Public License as published by 12b411b363SPhilipp Reisner the Free Software Foundation; either version 2, or (at your option) 13b411b363SPhilipp Reisner any later version. 14b411b363SPhilipp Reisner 15b411b363SPhilipp Reisner drbd is distributed in the hope that it will be useful, 16b411b363SPhilipp Reisner but WITHOUT ANY WARRANTY; without even the implied warranty of 17b411b363SPhilipp Reisner MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18b411b363SPhilipp Reisner GNU General Public License for more details. 19b411b363SPhilipp Reisner 20b411b363SPhilipp Reisner You should have received a copy of the GNU General Public License 21b411b363SPhilipp Reisner along with drbd; see the file COPYING. If not, write to 22b411b363SPhilipp Reisner the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
23b411b363SPhilipp Reisner */ 24b411b363SPhilipp Reisner 25b411b363SPhilipp Reisner #include <linux/bitops.h> 26b411b363SPhilipp Reisner #include <linux/vmalloc.h> 27b411b363SPhilipp Reisner #include <linux/string.h> 28b411b363SPhilipp Reisner #include <linux/drbd.h> 295a0e3ad6STejun Heo #include <linux/slab.h> 30b411b363SPhilipp Reisner #include <asm/kmap_types.h> 31f0ff1357SStephen Rothwell 32b411b363SPhilipp Reisner #include "drbd_int.h" 33b411b363SPhilipp Reisner 3495a0f10cSLars Ellenberg 35b411b363SPhilipp Reisner /* OPAQUE outside this file! 36b411b363SPhilipp Reisner * interface defined in drbd_int.h 37b411b363SPhilipp Reisner 38b411b363SPhilipp Reisner * convention: 39b411b363SPhilipp Reisner * function name drbd_bm_... => used elsewhere, "public". 40b411b363SPhilipp Reisner * function name bm_... => internal to implementation, "private". 414b0715f0SLars Ellenberg */ 42b411b363SPhilipp Reisner 434b0715f0SLars Ellenberg 444b0715f0SLars Ellenberg /* 454b0715f0SLars Ellenberg * LIMITATIONS: 464b0715f0SLars Ellenberg * We want to support >= peta byte of backend storage, while for now still using 474b0715f0SLars Ellenberg * a granularity of one bit per 4KiB of storage. 484b0715f0SLars Ellenberg * 1 << 50 bytes backend storage (1 PiB) 494b0715f0SLars Ellenberg * 1 << (50 - 12) bits needed 504b0715f0SLars Ellenberg * 38 --> we need u64 to index and count bits 514b0715f0SLars Ellenberg * 1 << (38 - 3) bitmap bytes needed 524b0715f0SLars Ellenberg * 35 --> we still need u64 to index and count bytes 534b0715f0SLars Ellenberg * (that's 32 GiB of bitmap for 1 PiB storage) 544b0715f0SLars Ellenberg * 1 << (35 - 2) 32bit longs needed 554b0715f0SLars Ellenberg * 33 --> we'd even need u64 to index and count 32bit long words. 564b0715f0SLars Ellenberg * 1 << (35 - 3) 64bit longs needed 574b0715f0SLars Ellenberg * 32 --> we could get away with a 32bit unsigned int to index and count 584b0715f0SLars Ellenberg * 64bit long words, but I rather stay with unsigned long for now. 
594b0715f0SLars Ellenberg * We probably should neither count nor point to bytes or long words 604b0715f0SLars Ellenberg * directly, but either by bitnumber, or by page index and offset. 614b0715f0SLars Ellenberg * 1 << (35 - 12) 624b0715f0SLars Ellenberg * 22 --> we need that much 4KiB pages of bitmap. 634b0715f0SLars Ellenberg * 1 << (22 + 3) --> on a 64bit arch, 644b0715f0SLars Ellenberg * we need 32 MiB to store the array of page pointers. 654b0715f0SLars Ellenberg * 664b0715f0SLars Ellenberg * Because I'm lazy, and because the resulting patch was too large, too ugly 674b0715f0SLars Ellenberg * and still incomplete, on 32bit we still "only" support 16 TiB (minus some), 684b0715f0SLars Ellenberg * (1 << 32) bits * 4k storage. 694b0715f0SLars Ellenberg * 704b0715f0SLars Ellenberg 714b0715f0SLars Ellenberg * bitmap storage and IO: 724b0715f0SLars Ellenberg * Bitmap is stored little endian on disk, and is kept little endian in 734b0715f0SLars Ellenberg * core memory. Currently we still hold the full bitmap in core as long 744b0715f0SLars Ellenberg * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage 754b0715f0SLars Ellenberg * seems excessive. 764b0715f0SLars Ellenberg * 7724c4830cSBart Van Assche * We plan to reduce the amount of in-core bitmap pages by paging them in 784b0715f0SLars Ellenberg * and out against their on-disk location as necessary, but need to make 794b0715f0SLars Ellenberg * sure we don't cause too much meta data IO, and must not deadlock in 804b0715f0SLars Ellenberg * tight memory situations. This needs some more work. 81b411b363SPhilipp Reisner */ 82b411b363SPhilipp Reisner 83b411b363SPhilipp Reisner /* 84b411b363SPhilipp Reisner * NOTE 85b411b363SPhilipp Reisner * Access to the *bm_pages is protected by bm_lock. 86b411b363SPhilipp Reisner * It is safe to read the other members within the lock. 
87b411b363SPhilipp Reisner * 88b411b363SPhilipp Reisner * drbd_bm_set_bits is called from bio_endio callbacks, 89b411b363SPhilipp Reisner * We may be called with irq already disabled, 90b411b363SPhilipp Reisner * so we need spin_lock_irqsave(). 91b411b363SPhilipp Reisner * And we need the kmap_atomic. 92b411b363SPhilipp Reisner */ 93b411b363SPhilipp Reisner struct drbd_bitmap { 94b411b363SPhilipp Reisner struct page **bm_pages; 95b411b363SPhilipp Reisner spinlock_t bm_lock; 964b0715f0SLars Ellenberg 974b0715f0SLars Ellenberg /* see LIMITATIONS: above */ 984b0715f0SLars Ellenberg 99b411b363SPhilipp Reisner unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */ 100b411b363SPhilipp Reisner unsigned long bm_bits; 101b411b363SPhilipp Reisner size_t bm_words; 102b411b363SPhilipp Reisner size_t bm_number_of_pages; 103b411b363SPhilipp Reisner sector_t bm_dev_capacity; 1048a03ae2aSThomas Gleixner struct mutex bm_change; /* serializes resize operations */ 105b411b363SPhilipp Reisner 10619f843aaSLars Ellenberg wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */ 107b411b363SPhilipp Reisner 10820ceb2b2SLars Ellenberg enum bm_flag bm_flags; 109b411b363SPhilipp Reisner 110b411b363SPhilipp Reisner /* debugging aid, in case we are still racy somewhere */ 111b411b363SPhilipp Reisner char *bm_why; 112b411b363SPhilipp Reisner struct task_struct *bm_task; 113b411b363SPhilipp Reisner }; 114b411b363SPhilipp Reisner 115b411b363SPhilipp Reisner #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) 116b411b363SPhilipp Reisner static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) 117b411b363SPhilipp Reisner { 118b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 119b411b363SPhilipp Reisner if (!__ratelimit(&drbd_ratelimit_state)) 120b411b363SPhilipp Reisner return; 121b411b363SPhilipp Reisner dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n", 122392c8801SPhilipp Reisner 
drbd_task_to_thread_name(mdev->tconn, current), 123b411b363SPhilipp Reisner func, b->bm_why ?: "?", 124392c8801SPhilipp Reisner drbd_task_to_thread_name(mdev->tconn, b->bm_task)); 125b411b363SPhilipp Reisner } 126b411b363SPhilipp Reisner 12720ceb2b2SLars Ellenberg void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags) 128b411b363SPhilipp Reisner { 129b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 130b411b363SPhilipp Reisner int trylock_failed; 131b411b363SPhilipp Reisner 132b411b363SPhilipp Reisner if (!b) { 133b411b363SPhilipp Reisner dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n"); 134b411b363SPhilipp Reisner return; 135b411b363SPhilipp Reisner } 136b411b363SPhilipp Reisner 1378a03ae2aSThomas Gleixner trylock_failed = !mutex_trylock(&b->bm_change); 138b411b363SPhilipp Reisner 139b411b363SPhilipp Reisner if (trylock_failed) { 140b411b363SPhilipp Reisner dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n", 141392c8801SPhilipp Reisner drbd_task_to_thread_name(mdev->tconn, current), 142b411b363SPhilipp Reisner why, b->bm_why ?: "?", 143392c8801SPhilipp Reisner drbd_task_to_thread_name(mdev->tconn, b->bm_task)); 1448a03ae2aSThomas Gleixner mutex_lock(&b->bm_change); 145b411b363SPhilipp Reisner } 14620ceb2b2SLars Ellenberg if (BM_LOCKED_MASK & b->bm_flags) 147b411b363SPhilipp Reisner dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); 14820ceb2b2SLars Ellenberg b->bm_flags |= flags & BM_LOCKED_MASK; 149b411b363SPhilipp Reisner 150b411b363SPhilipp Reisner b->bm_why = why; 151b411b363SPhilipp Reisner b->bm_task = current; 152b411b363SPhilipp Reisner } 153b411b363SPhilipp Reisner 154b411b363SPhilipp Reisner void drbd_bm_unlock(struct drbd_conf *mdev) 155b411b363SPhilipp Reisner { 156b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 157b411b363SPhilipp Reisner if (!b) { 158b411b363SPhilipp Reisner dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n"); 159b411b363SPhilipp Reisner return; 
160b411b363SPhilipp Reisner } 161b411b363SPhilipp Reisner 16220ceb2b2SLars Ellenberg if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags)) 163b411b363SPhilipp Reisner dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n"); 164b411b363SPhilipp Reisner 16520ceb2b2SLars Ellenberg b->bm_flags &= ~BM_LOCKED_MASK; 166b411b363SPhilipp Reisner b->bm_why = NULL; 167b411b363SPhilipp Reisner b->bm_task = NULL; 1688a03ae2aSThomas Gleixner mutex_unlock(&b->bm_change); 169b411b363SPhilipp Reisner } 170b411b363SPhilipp Reisner 17119f843aaSLars Ellenberg /* we store some "meta" info about our pages in page->private */ 17219f843aaSLars Ellenberg /* at a granularity of 4k storage per bitmap bit: 17319f843aaSLars Ellenberg * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks 17419f843aaSLars Ellenberg * 1<<38 bits, 17519f843aaSLars Ellenberg * 1<<23 4k bitmap pages. 17619f843aaSLars Ellenberg * Use 24 bits as page index, covers 2 peta byte storage 17719f843aaSLars Ellenberg * at a granularity of 4k per bit. 17819f843aaSLars Ellenberg * Used to report the failed page idx on io error from the endio handlers. 17919f843aaSLars Ellenberg */ 18019f843aaSLars Ellenberg #define BM_PAGE_IDX_MASK ((1UL<<24)-1) 18119f843aaSLars Ellenberg /* this page is currently read in, or written back */ 18219f843aaSLars Ellenberg #define BM_PAGE_IO_LOCK 31 18319f843aaSLars Ellenberg /* if there has been an IO error for this page */ 18419f843aaSLars Ellenberg #define BM_PAGE_IO_ERROR 30 18519f843aaSLars Ellenberg /* this is to be able to intelligently skip disk IO, 18619f843aaSLars Ellenberg * set if bits have been set since last IO. */ 18719f843aaSLars Ellenberg #define BM_PAGE_NEED_WRITEOUT 29 18819f843aaSLars Ellenberg /* to mark for lazy writeout once syncer cleared all clearable bits, 18919f843aaSLars Ellenberg * we if bits have been cleared since last IO. 
*/ 19019f843aaSLars Ellenberg #define BM_PAGE_LAZY_WRITEOUT 28 19145dfffebSLars Ellenberg /* pages marked with this "HINT" will be considered for writeout 19245dfffebSLars Ellenberg * on activity log transactions */ 19345dfffebSLars Ellenberg #define BM_PAGE_HINT_WRITEOUT 27 19419f843aaSLars Ellenberg 19524c4830cSBart Van Assche /* store_page_idx uses non-atomic assignment. It is only used directly after 19619f843aaSLars Ellenberg * allocating the page. All other bm_set_page_* and bm_clear_page_* need to 19719f843aaSLars Ellenberg * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap 19819f843aaSLars Ellenberg * changes) may happen from various contexts, and wait_on_bit/wake_up_bit 19919f843aaSLars Ellenberg * requires it all to be atomic as well. */ 20019f843aaSLars Ellenberg static void bm_store_page_idx(struct page *page, unsigned long idx) 20119f843aaSLars Ellenberg { 20219f843aaSLars Ellenberg BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK)); 203f66ee697SLars Ellenberg set_page_private(page, idx); 20419f843aaSLars Ellenberg } 20519f843aaSLars Ellenberg 20619f843aaSLars Ellenberg static unsigned long bm_page_to_idx(struct page *page) 20719f843aaSLars Ellenberg { 20819f843aaSLars Ellenberg return page_private(page) & BM_PAGE_IDX_MASK; 20919f843aaSLars Ellenberg } 21019f843aaSLars Ellenberg 21119f843aaSLars Ellenberg /* As is very unlikely that the same page is under IO from more than one 21219f843aaSLars Ellenberg * context, we can get away with a bit per page and one wait queue per bitmap. 
21319f843aaSLars Ellenberg */ 21419f843aaSLars Ellenberg static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr) 21519f843aaSLars Ellenberg { 21619f843aaSLars Ellenberg struct drbd_bitmap *b = mdev->bitmap; 21719f843aaSLars Ellenberg void *addr = &page_private(b->bm_pages[page_nr]); 21819f843aaSLars Ellenberg wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); 21919f843aaSLars Ellenberg } 22019f843aaSLars Ellenberg 22119f843aaSLars Ellenberg static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr) 22219f843aaSLars Ellenberg { 22319f843aaSLars Ellenberg struct drbd_bitmap *b = mdev->bitmap; 22419f843aaSLars Ellenberg void *addr = &page_private(b->bm_pages[page_nr]); 2254738fa16SLars Ellenberg clear_bit_unlock(BM_PAGE_IO_LOCK, addr); 22619f843aaSLars Ellenberg wake_up(&mdev->bitmap->bm_io_wait); 22719f843aaSLars Ellenberg } 22819f843aaSLars Ellenberg 22919f843aaSLars Ellenberg /* set _before_ submit_io, so it may be reset due to being changed 23019f843aaSLars Ellenberg * while this page is in flight... will get submitted later again */ 23119f843aaSLars Ellenberg static void bm_set_page_unchanged(struct page *page) 23219f843aaSLars Ellenberg { 23319f843aaSLars Ellenberg /* use cmpxchg? */ 23419f843aaSLars Ellenberg clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); 23519f843aaSLars Ellenberg clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 23619f843aaSLars Ellenberg } 23719f843aaSLars Ellenberg 23819f843aaSLars Ellenberg static void bm_set_page_need_writeout(struct page *page) 23919f843aaSLars Ellenberg { 24019f843aaSLars Ellenberg set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); 24119f843aaSLars Ellenberg } 24219f843aaSLars Ellenberg 24345dfffebSLars Ellenberg /** 24445dfffebSLars Ellenberg * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout 24545dfffebSLars Ellenberg * @mdev: DRBD device. 
24645dfffebSLars Ellenberg * @page_nr: the bitmap page to mark with the "hint" flag 24745dfffebSLars Ellenberg * 24845dfffebSLars Ellenberg * From within an activity log transaction, we mark a few pages with these 24945dfffebSLars Ellenberg * hints, then call drbd_bm_write_hinted(), which will only write out changed 25045dfffebSLars Ellenberg * pages which are flagged with this mark. 25145dfffebSLars Ellenberg */ 25245dfffebSLars Ellenberg void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr) 25345dfffebSLars Ellenberg { 25445dfffebSLars Ellenberg struct page *page; 25545dfffebSLars Ellenberg if (page_nr >= mdev->bitmap->bm_number_of_pages) { 25645dfffebSLars Ellenberg dev_warn(DEV, "BAD: page_nr: %u, number_of_pages: %u\n", 25745dfffebSLars Ellenberg page_nr, (int)mdev->bitmap->bm_number_of_pages); 25845dfffebSLars Ellenberg return; 25945dfffebSLars Ellenberg } 26045dfffebSLars Ellenberg page = mdev->bitmap->bm_pages[page_nr]; 26145dfffebSLars Ellenberg set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)); 26245dfffebSLars Ellenberg } 26345dfffebSLars Ellenberg 26419f843aaSLars Ellenberg static int bm_test_page_unchanged(struct page *page) 26519f843aaSLars Ellenberg { 26619f843aaSLars Ellenberg volatile const unsigned long *addr = &page_private(page); 26719f843aaSLars Ellenberg return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0; 26819f843aaSLars Ellenberg } 26919f843aaSLars Ellenberg 27019f843aaSLars Ellenberg static void bm_set_page_io_err(struct page *page) 27119f843aaSLars Ellenberg { 27219f843aaSLars Ellenberg set_bit(BM_PAGE_IO_ERROR, &page_private(page)); 27319f843aaSLars Ellenberg } 27419f843aaSLars Ellenberg 27519f843aaSLars Ellenberg static void bm_clear_page_io_err(struct page *page) 27619f843aaSLars Ellenberg { 27719f843aaSLars Ellenberg clear_bit(BM_PAGE_IO_ERROR, &page_private(page)); 27819f843aaSLars Ellenberg } 27919f843aaSLars Ellenberg 28019f843aaSLars Ellenberg static void 
bm_set_page_lazy_writeout(struct page *page) 28119f843aaSLars Ellenberg { 28219f843aaSLars Ellenberg set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 28319f843aaSLars Ellenberg } 28419f843aaSLars Ellenberg 28519f843aaSLars Ellenberg static int bm_test_page_lazy_writeout(struct page *page) 28619f843aaSLars Ellenberg { 28719f843aaSLars Ellenberg return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 28819f843aaSLars Ellenberg } 28919f843aaSLars Ellenberg 29019f843aaSLars Ellenberg /* on a 32bit box, this would allow for exactly (2<<38) bits. */ 29119f843aaSLars Ellenberg static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr) 29219f843aaSLars Ellenberg { 29319f843aaSLars Ellenberg /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */ 29419f843aaSLars Ellenberg unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3); 29519f843aaSLars Ellenberg BUG_ON(page_nr >= b->bm_number_of_pages); 29619f843aaSLars Ellenberg return page_nr; 29719f843aaSLars Ellenberg } 29819f843aaSLars Ellenberg 29995a0f10cSLars Ellenberg static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) 30095a0f10cSLars Ellenberg { 30195a0f10cSLars Ellenberg /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ 30295a0f10cSLars Ellenberg unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); 30395a0f10cSLars Ellenberg BUG_ON(page_nr >= b->bm_number_of_pages); 30495a0f10cSLars Ellenberg return page_nr; 30595a0f10cSLars Ellenberg } 30695a0f10cSLars Ellenberg 30795a0f10cSLars Ellenberg static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km) 30895a0f10cSLars Ellenberg { 30995a0f10cSLars Ellenberg struct page *page = b->bm_pages[idx]; 31095a0f10cSLars Ellenberg return (unsigned long *) kmap_atomic(page, km); 31195a0f10cSLars Ellenberg } 31295a0f10cSLars Ellenberg 31395a0f10cSLars Ellenberg static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) 31495a0f10cSLars Ellenberg { 31595a0f10cSLars Ellenberg return 
__bm_map_pidx(b, idx, KM_IRQ1); 31695a0f10cSLars Ellenberg } 31795a0f10cSLars Ellenberg 318b411b363SPhilipp Reisner static void __bm_unmap(unsigned long *p_addr, const enum km_type km) 319b411b363SPhilipp Reisner { 320b411b363SPhilipp Reisner kunmap_atomic(p_addr, km); 321b411b363SPhilipp Reisner }; 322b411b363SPhilipp Reisner 323b411b363SPhilipp Reisner static void bm_unmap(unsigned long *p_addr) 324b411b363SPhilipp Reisner { 325b411b363SPhilipp Reisner return __bm_unmap(p_addr, KM_IRQ1); 326b411b363SPhilipp Reisner } 327b411b363SPhilipp Reisner 328b411b363SPhilipp Reisner /* long word offset of _bitmap_ sector */ 329b411b363SPhilipp Reisner #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) 330b411b363SPhilipp Reisner /* word offset from start of bitmap to word number _in_page_ 331b411b363SPhilipp Reisner * modulo longs per page 332b411b363SPhilipp Reisner #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)) 33324c4830cSBart Van Assche hm, well, Philipp thinks gcc might not optimize the % into & (... - 1) 334b411b363SPhilipp Reisner so do it explicitly: 335b411b363SPhilipp Reisner */ 336b411b363SPhilipp Reisner #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1)) 337b411b363SPhilipp Reisner 338b411b363SPhilipp Reisner /* Long words per page */ 339b411b363SPhilipp Reisner #define LWPP (PAGE_SIZE/sizeof(long)) 340b411b363SPhilipp Reisner 341b411b363SPhilipp Reisner /* 342b411b363SPhilipp Reisner * actually most functions herein should take a struct drbd_bitmap*, not a 343b411b363SPhilipp Reisner * struct drbd_conf*, but for the debug macros I like to have the mdev around 344b411b363SPhilipp Reisner * to be able to report device specific. 
345b411b363SPhilipp Reisner */ 346b411b363SPhilipp Reisner 34719f843aaSLars Ellenberg 348b411b363SPhilipp Reisner static void bm_free_pages(struct page **pages, unsigned long number) 349b411b363SPhilipp Reisner { 350b411b363SPhilipp Reisner unsigned long i; 351b411b363SPhilipp Reisner if (!pages) 352b411b363SPhilipp Reisner return; 353b411b363SPhilipp Reisner 354b411b363SPhilipp Reisner for (i = 0; i < number; i++) { 355b411b363SPhilipp Reisner if (!pages[i]) { 356b411b363SPhilipp Reisner printk(KERN_ALERT "drbd: bm_free_pages tried to free " 357b411b363SPhilipp Reisner "a NULL pointer; i=%lu n=%lu\n", 358b411b363SPhilipp Reisner i, number); 359b411b363SPhilipp Reisner continue; 360b411b363SPhilipp Reisner } 361b411b363SPhilipp Reisner __free_page(pages[i]); 362b411b363SPhilipp Reisner pages[i] = NULL; 363b411b363SPhilipp Reisner } 364b411b363SPhilipp Reisner } 365b411b363SPhilipp Reisner 366b411b363SPhilipp Reisner static void bm_vk_free(void *ptr, int v) 367b411b363SPhilipp Reisner { 368b411b363SPhilipp Reisner if (v) 369b411b363SPhilipp Reisner vfree(ptr); 370b411b363SPhilipp Reisner else 371b411b363SPhilipp Reisner kfree(ptr); 372b411b363SPhilipp Reisner } 373b411b363SPhilipp Reisner 374b411b363SPhilipp Reisner /* 375b411b363SPhilipp Reisner * "have" and "want" are NUMBER OF PAGES. 
376b411b363SPhilipp Reisner */ 377b411b363SPhilipp Reisner static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) 378b411b363SPhilipp Reisner { 379b411b363SPhilipp Reisner struct page **old_pages = b->bm_pages; 380b411b363SPhilipp Reisner struct page **new_pages, *page; 381b411b363SPhilipp Reisner unsigned int i, bytes, vmalloced = 0; 382b411b363SPhilipp Reisner unsigned long have = b->bm_number_of_pages; 383b411b363SPhilipp Reisner 384b411b363SPhilipp Reisner BUG_ON(have == 0 && old_pages != NULL); 385b411b363SPhilipp Reisner BUG_ON(have != 0 && old_pages == NULL); 386b411b363SPhilipp Reisner 387b411b363SPhilipp Reisner if (have == want) 388b411b363SPhilipp Reisner return old_pages; 389b411b363SPhilipp Reisner 390b411b363SPhilipp Reisner /* Trying kmalloc first, falling back to vmalloc. 391*bc891c9aSLars Ellenberg * GFP_NOIO, as this is called while drbd IO is "suspended", 392*bc891c9aSLars Ellenberg * and during resize or attach on diskless Primary, 393*bc891c9aSLars Ellenberg * we must not block on IO to ourselves. 394*bc891c9aSLars Ellenberg * Context is receiver thread or dmsetup. 
*/ 395b411b363SPhilipp Reisner bytes = sizeof(struct page *)*want; 396*bc891c9aSLars Ellenberg new_pages = kmalloc(bytes, GFP_NOIO); 397b411b363SPhilipp Reisner if (!new_pages) { 398*bc891c9aSLars Ellenberg new_pages = __vmalloc(bytes, 399*bc891c9aSLars Ellenberg GFP_NOIO | __GFP_HIGHMEM, 400*bc891c9aSLars Ellenberg PAGE_KERNEL); 401b411b363SPhilipp Reisner if (!new_pages) 402b411b363SPhilipp Reisner return NULL; 403b411b363SPhilipp Reisner vmalloced = 1; 404b411b363SPhilipp Reisner } 405b411b363SPhilipp Reisner 406b411b363SPhilipp Reisner memset(new_pages, 0, bytes); 407b411b363SPhilipp Reisner if (want >= have) { 408b411b363SPhilipp Reisner for (i = 0; i < have; i++) 409b411b363SPhilipp Reisner new_pages[i] = old_pages[i]; 410b411b363SPhilipp Reisner for (; i < want; i++) { 411*bc891c9aSLars Ellenberg page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); 412b411b363SPhilipp Reisner if (!page) { 413b411b363SPhilipp Reisner bm_free_pages(new_pages + have, i - have); 414b411b363SPhilipp Reisner bm_vk_free(new_pages, vmalloced); 415b411b363SPhilipp Reisner return NULL; 416b411b363SPhilipp Reisner } 41719f843aaSLars Ellenberg /* we want to know which page it is 41819f843aaSLars Ellenberg * from the endio handlers */ 41919f843aaSLars Ellenberg bm_store_page_idx(page, i); 420b411b363SPhilipp Reisner new_pages[i] = page; 421b411b363SPhilipp Reisner } 422b411b363SPhilipp Reisner } else { 423b411b363SPhilipp Reisner for (i = 0; i < want; i++) 424b411b363SPhilipp Reisner new_pages[i] = old_pages[i]; 425b411b363SPhilipp Reisner /* NOT HERE, we are outside the spinlock! 
426b411b363SPhilipp Reisner bm_free_pages(old_pages + want, have - want); 427b411b363SPhilipp Reisner */ 428b411b363SPhilipp Reisner } 429b411b363SPhilipp Reisner 430b411b363SPhilipp Reisner if (vmalloced) 43120ceb2b2SLars Ellenberg b->bm_flags |= BM_P_VMALLOCED; 432b411b363SPhilipp Reisner else 43320ceb2b2SLars Ellenberg b->bm_flags &= ~BM_P_VMALLOCED; 434b411b363SPhilipp Reisner 435b411b363SPhilipp Reisner return new_pages; 436b411b363SPhilipp Reisner } 437b411b363SPhilipp Reisner 438b411b363SPhilipp Reisner /* 439b411b363SPhilipp Reisner * called on driver init only. TODO call when a device is created. 440b411b363SPhilipp Reisner * allocates the drbd_bitmap, and stores it in mdev->bitmap. 441b411b363SPhilipp Reisner */ 442b411b363SPhilipp Reisner int drbd_bm_init(struct drbd_conf *mdev) 443b411b363SPhilipp Reisner { 444b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 445b411b363SPhilipp Reisner WARN_ON(b != NULL); 446b411b363SPhilipp Reisner b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL); 447b411b363SPhilipp Reisner if (!b) 448b411b363SPhilipp Reisner return -ENOMEM; 449b411b363SPhilipp Reisner spin_lock_init(&b->bm_lock); 4508a03ae2aSThomas Gleixner mutex_init(&b->bm_change); 451b411b363SPhilipp Reisner init_waitqueue_head(&b->bm_io_wait); 452b411b363SPhilipp Reisner 453b411b363SPhilipp Reisner mdev->bitmap = b; 454b411b363SPhilipp Reisner 455b411b363SPhilipp Reisner return 0; 456b411b363SPhilipp Reisner } 457b411b363SPhilipp Reisner 458b411b363SPhilipp Reisner sector_t drbd_bm_capacity(struct drbd_conf *mdev) 459b411b363SPhilipp Reisner { 460841ce241SAndreas Gruenbacher if (!expect(mdev->bitmap)) 461841ce241SAndreas Gruenbacher return 0; 462b411b363SPhilipp Reisner return mdev->bitmap->bm_dev_capacity; 463b411b363SPhilipp Reisner } 464b411b363SPhilipp Reisner 465b411b363SPhilipp Reisner /* called on driver unload. TODO: call when a device is destroyed. 
466b411b363SPhilipp Reisner */ 467b411b363SPhilipp Reisner void drbd_bm_cleanup(struct drbd_conf *mdev) 468b411b363SPhilipp Reisner { 469841ce241SAndreas Gruenbacher if (!expect(mdev->bitmap)) 470841ce241SAndreas Gruenbacher return; 471b411b363SPhilipp Reisner bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages); 47220ceb2b2SLars Ellenberg bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags)); 473b411b363SPhilipp Reisner kfree(mdev->bitmap); 474b411b363SPhilipp Reisner mdev->bitmap = NULL; 475b411b363SPhilipp Reisner } 476b411b363SPhilipp Reisner 477b411b363SPhilipp Reisner /* 478b411b363SPhilipp Reisner * since (b->bm_bits % BITS_PER_LONG) != 0, 479b411b363SPhilipp Reisner * this masks out the remaining bits. 480b411b363SPhilipp Reisner * Returns the number of bits cleared. 481b411b363SPhilipp Reisner */ 48295a0f10cSLars Ellenberg #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) 48395a0f10cSLars Ellenberg #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1) 48495a0f10cSLars Ellenberg #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1) 485b411b363SPhilipp Reisner static int bm_clear_surplus(struct drbd_bitmap *b) 486b411b363SPhilipp Reisner { 48795a0f10cSLars Ellenberg unsigned long mask; 488b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 48995a0f10cSLars Ellenberg int tmp; 49095a0f10cSLars Ellenberg int cleared = 0; 491b411b363SPhilipp Reisner 49295a0f10cSLars Ellenberg /* number of bits modulo bits per page */ 49395a0f10cSLars Ellenberg tmp = (b->bm_bits & BITS_PER_PAGE_MASK); 49495a0f10cSLars Ellenberg /* mask the used bits of the word containing the last bit */ 49595a0f10cSLars Ellenberg mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; 49695a0f10cSLars Ellenberg /* bitmap is always stored little endian, 49795a0f10cSLars Ellenberg * on disk and in core memory alike */ 49895a0f10cSLars Ellenberg mask = cpu_to_lel(mask); 49995a0f10cSLars Ellenberg 5006850c442SLars Ellenberg p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); 
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}

/* Set all "surplus" bits in the last bitmap word (and, on 32bit, the
 * padding long) of the last page, i.e. the bits beyond bm_bits.
 * Counterpart of bm_clear_surplus() above: used before a grow-resize so
 * the formerly-unused tail reads as "out of sync".
 * Caller must hold the bitmap lock (bm_set is not adjusted here). */
static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}

/* you better not modify the bitmap while this is running,
 * or its results will be stale */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	/* mask of the used bits in the last word; also clears surplus bits
	 * of that word below, as a side effect.
	 * NOTE(review): if bm_bits is an exact multiple of BITS_PER_LONG,
	 * mask is 0 here, and the &= below would wipe a fully used word.
	 * bm_clear_surplus()/bm_set_surplus() guard that case with
	 * "if (mask)" -- confirm whether bm_bits can be word-aligned. */
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
	int idx, i, last_word;

	/* all but last page */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx, KM_USER0);
		for (i = 0; i < LWPP; i++)
			bits += hweight_long(p_addr[i]);
		__bm_unmap(p_addr, KM_USER0);
		cond_resched();
	}
	/* last (or only) page; word index of the word holding bit bm_bits-1 */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx, KM_USER0);
	for (i = 0; i < last_word; i++)
		bits += hweight_long(p_addr[i]);
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr, KM_USER0);
	return bits;
}

/* Fill @len long words of the bitmap, starting at word @offset, with
 * byte value @c, page by page.  Marks each touched page as needing
 * writeout.  offset and len in long words. */
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		/* clamp this chunk to the end of the current page (LWPP
		 * longs per page) or to @end, whichever comes first */
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}

/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	if (!expect(b))
		return -ENOMEM;

	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);

	if (capacity == 0) {
		/* shrink to nothing: detach the page array under the lock,
		 * then free it outside the spinlock */
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set =
		b->bm_bits =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		/* each 512-byte sector of on-disk bitmap area holds
		 * 512*8 = 1<<12 bits */
		u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
		put_ldev(mdev);
		if (bits > bits_on_disk) {
			dev_info(DEV, "bits = %lu\n", bits);
			dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits = b->bm_bits;

	growing = bits > obits;
	/* pre-set the old surplus bits, so the grown region starts out
	 * consistently "set" before bm_memset below fills whole words */
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	/* on shrink, recount instead of trying to adjust bm_set */
	if (!growing)
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

out:
	drbd_bm_unlock(mdev);
	return err;
}

/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 *
 * maybe bm_set should be atomic_t ?
 */
unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long s;
	unsigned long flags;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

/* Like _drbd_bm_total_weight(), but only valid while we have a local
 * disk; returns 0 if no ldev reference can be taken. */
unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync status */
	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(mdev);
	put_ldev(mdev);
	return s;
}

/* Number of (unsigned long) words in the bitmap; 0 if not allocated. */
size_t drbd_bm_words(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	return b->bm_words;
}
/* Number of bits tracked by the bitmap; 0 if not allocated. */
unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!expect(b))
		return 0;

	return b->bm_bits;
}

/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end > b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		/* process at most up to the next page boundary per mapping */
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			/* OR in the peer's words, and keep bm_set in sync
			 * by accounting the weight delta per word */
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}

/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end > b->bm_words) ||
	    (number <= 0))
		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	/* 0xff also set the surplus bits; clear them again */
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}

/* Shared context for one bitmap read/write pass (bm_rw() and the per-page
 * async completions).  Freed via kref when both the submitter and the
 * "all IO completed" event have dropped their reference. */
struct bm_aio_ctx {
	struct drbd_conf *mdev;
	atomic_t in_flight;	/* bios submitted but not yet completed, +1 while submitting */
	unsigned int done;	/* set to 1 by the last completion; waited on in bm_rw() */
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
#define BM_WRITE_ALL_PAGES	4
	int error;		/* last non-zero completion error, if any */
	struct kref kref;
};

/* kref release: drop the ldev reference taken in bm_rw() and free ctx */
static void bm_aio_ctx_destroy(struct kref *kref)
{
	struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);

	put_ldev(ctx->mdev);
	kfree(ctx);
}

/* bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
	struct bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
	int uptodate = bio_flagged(bio, BIO_UPTODATE);


	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	/* when writing the page itself (not a copy), it must not have been
	 * redirtied while the IO was in flight */
	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);

	if (error) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
					error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(mdev, idx);

	/* with BM_AIO_COPY_PAGES the bio carried a mempool copy, not the
	 * bitmap page itself; return it to the pool */
	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);

	bio_put(bio);

	/* last completion: signal the waiter in bm_rw() and drop the
	 * "all done" kref */
	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&mdev->misc_wait);
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	}
}

/* Submit async IO for one bitmap page to the on-disk bitmap area.
 * With BM_AIO_COPY_PAGES, a mempool copy of the page is written instead,
 * so the live page may be redirtied while IO is in flight. */
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	struct page *page;
	unsigned int len;

	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(mdev, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		void *src, *dest;
		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
		dest = kmap_atomic(page, KM_USER0);
		src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
		memcpy(dest, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER1);
		kunmap_atomic(dest, KM_USER0);
		/* remember which bitmap page this copy stands for,
		 * for bm_page_to_idx() in the completion handler */
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];

	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api.  Do we want to assert that? */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &mdev->rs_sect_ev);
	}
}

/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */
static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct bm_aio_ctx *ctx;
	struct drbd_bitmap *b = mdev->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */

	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	/* kref starts at 2: one reference for this (submitting) function,
	 * one for the "in_flight reached zero" event in the completion path */
	*ctx = (struct bm_aio_ctx) {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = flags,
		.error = 0,
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
		kfree(ctx);
		return -ENODEV;
	}

	if (!ctx->flags)
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++) {
		/* ignore completely unchanged pages */
		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
			break;
		if (rw & WRITE) {
			/* hinted writeout: only pages explicitly marked */
			if ((flags & BM_AIO_WRITE_HINTED) &&
			    !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
				    &page_private(b->bm_pages[i])))
				continue;

			if (!(flags & BM_WRITE_ALL_PAGES) &&
			    bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
		}
		atomic_inc(&ctx->in_flight);
		bm_page_io_async(ctx, i, rw);
		++count;
		cond_resched();
	}

	/*
	 * We initialize ctx->in_flight to one to make sure bm_async_io_complete
	 * will not set ctx->done early, and decrement / test it here.  If there
	 * are still some bios in flight, we need to wait for them here.
	 * If all IO is done already (or nothing had been submitted), there is
	 * no need to wait.  Still, we need to put the kref associated with the
	 * "in_flight reached zero, all done" event.
	 */
	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
	else
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);

	/* summary for global bitmap IO */
	if (flags == 0)
		dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
			 rw == WRITE ? "WRITE" : "READ",
			 count, jiffies - now);

	if (ctx->error) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
		err = -EIO; /* ctx->error ? */
	}

	/* non-zero here means we returned from the wait because the disk
	 * failed, not because all bios completed */
	if (atomic_read(&ctx->in_flight))
		err = -EIO; /* Disk failed during IO... */

	now = jiffies;
	if (rw == WRITE) {
		drbd_md_flush(mdev);
	} else /* rw == READ */ {
		b->bm_set = bm_count_bits(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	if (flags == 0)
		dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
		     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	/* drop the submitter's reference; frees ctx if the completion
	 * path already dropped its own */
	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	return err;
}

/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, READ, 0, 0);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 */
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, 0, 0);
}

/**
 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will write all pages.
 */
int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
}

/**
 * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
 * @mdev:	DRBD device.
 * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
 */
int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
}

/**
 * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 * In contrast to drbd_bm_write(), this will copy the bitmap pages
 * to temporary writeout pages. It is intended to trigger a full write-out
 * while still allowing the bitmap to change, for example if a resync or online
 * verify is aborted due to a failed peer disk, while local IO continues, or
 * pending resync acks are still being processed.
 */
int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
}

/**
 * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
 * @mdev:	DRBD device.
 */
int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}

/**
 * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
 * @mdev:	DRBD device.
 * @idx:	bitmap page index
 *
 * We don't want to special case on logical_block_size of the backend device,
 * so we submit PAGE_SIZE aligned pieces.
 * Note that on "most" systems, PAGE_SIZE is 4k.
 *
 * In case this becomes an issue on systems with larger PAGE_SIZE,
 * we may want to change this again to write 4k aligned 4k pieces.
124519f843aaSLars Ellenberg */ 124619f843aaSLars Ellenberg int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local) 124719f843aaSLars Ellenberg { 1248cdfda633SPhilipp Reisner struct bm_aio_ctx *ctx; 1249cdfda633SPhilipp Reisner int err; 125019f843aaSLars Ellenberg 125119f843aaSLars Ellenberg if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) { 12527648cdfeSLars Ellenberg dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx); 125319f843aaSLars Ellenberg return 0; 125419f843aaSLars Ellenberg } 125519f843aaSLars Ellenberg 1256cdfda633SPhilipp Reisner ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO); 1257cdfda633SPhilipp Reisner if (!ctx) 1258cdfda633SPhilipp Reisner return -ENOMEM; 125919f843aaSLars Ellenberg 1260cdfda633SPhilipp Reisner *ctx = (struct bm_aio_ctx) { 1261cdfda633SPhilipp Reisner .mdev = mdev, 1262cdfda633SPhilipp Reisner .in_flight = ATOMIC_INIT(1), 1263cdfda633SPhilipp Reisner .done = 0, 1264cdfda633SPhilipp Reisner .flags = BM_AIO_COPY_PAGES, 1265cdfda633SPhilipp Reisner .error = 0, 1266cdfda633SPhilipp Reisner .kref = { ATOMIC_INIT(2) }, 1267cdfda633SPhilipp Reisner }; 1268cdfda633SPhilipp Reisner 1269cdfda633SPhilipp Reisner if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */ 1270cdfda633SPhilipp Reisner dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n"); 12719dab3842SLars Ellenberg kfree(ctx); 12729dab3842SLars Ellenberg return -ENODEV; 1273cdfda633SPhilipp Reisner } 1274cdfda633SPhilipp Reisner 1275cdfda633SPhilipp Reisner bm_page_io_async(ctx, idx, WRITE_SYNC); 127632db80f6SPhilipp Reisner wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done); 1277cdfda633SPhilipp Reisner 1278cdfda633SPhilipp Reisner if (ctx->error) 12790c849666SLars Ellenberg drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR); 128019f843aaSLars Ellenberg /* that should force detach, so the in memory bitmap will be 128119f843aaSLars Ellenberg * gone in a moment as well. 
*/ 128219f843aaSLars Ellenberg 1283b411b363SPhilipp Reisner mdev->bm_writ_cnt++; 1284cdfda633SPhilipp Reisner err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error; 1285cdfda633SPhilipp Reisner kref_put(&ctx->kref, &bm_aio_ctx_destroy); 1286cdfda633SPhilipp Reisner return err; 1287b411b363SPhilipp Reisner } 1288b411b363SPhilipp Reisner 1289b411b363SPhilipp Reisner /* NOTE 1290b411b363SPhilipp Reisner * find_first_bit returns int, we return unsigned long. 12914b0715f0SLars Ellenberg * For this to work on 32bit arch with bitnumbers > (1<<32), 12924b0715f0SLars Ellenberg * we'd need to return u64, and get a whole lot of other places 12934b0715f0SLars Ellenberg * fixed where we still use unsigned long. 1294b411b363SPhilipp Reisner * 1295b411b363SPhilipp Reisner * this returns a bit number, NOT a sector! 1296b411b363SPhilipp Reisner */ 1297b411b363SPhilipp Reisner static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo, 1298b411b363SPhilipp Reisner const int find_zero_bit, const enum km_type km) 1299b411b363SPhilipp Reisner { 1300b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 1301b411b363SPhilipp Reisner unsigned long *p_addr; 13024b0715f0SLars Ellenberg unsigned long bit_offset; 13034b0715f0SLars Ellenberg unsigned i; 13044b0715f0SLars Ellenberg 1305b411b363SPhilipp Reisner 1306b411b363SPhilipp Reisner if (bm_fo > b->bm_bits) { 1307b411b363SPhilipp Reisner dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); 13084b0715f0SLars Ellenberg bm_fo = DRBD_END_OF_BITMAP; 1309b411b363SPhilipp Reisner } else { 1310b411b363SPhilipp Reisner while (bm_fo < b->bm_bits) { 131119f843aaSLars Ellenberg /* bit offset of the first bit in the page */ 13124b0715f0SLars Ellenberg bit_offset = bm_fo & ~BITS_PER_PAGE_MASK; 131319f843aaSLars Ellenberg p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km); 1314b411b363SPhilipp Reisner 1315b411b363SPhilipp Reisner if (find_zero_bit) 13167e599e6eSLinus Torvalds i = find_next_zero_bit_le(p_addr, 
13174b0715f0SLars Ellenberg PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); 1318b411b363SPhilipp Reisner else 13197e599e6eSLinus Torvalds i = find_next_bit_le(p_addr, 13204b0715f0SLars Ellenberg PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); 1321b411b363SPhilipp Reisner 1322b411b363SPhilipp Reisner __bm_unmap(p_addr, km); 1323b411b363SPhilipp Reisner if (i < PAGE_SIZE*8) { 13244b0715f0SLars Ellenberg bm_fo = bit_offset + i; 13254b0715f0SLars Ellenberg if (bm_fo >= b->bm_bits) 1326b411b363SPhilipp Reisner break; 1327b411b363SPhilipp Reisner goto found; 1328b411b363SPhilipp Reisner } 1329b411b363SPhilipp Reisner bm_fo = bit_offset + PAGE_SIZE*8; 1330b411b363SPhilipp Reisner } 13314b0715f0SLars Ellenberg bm_fo = DRBD_END_OF_BITMAP; 1332b411b363SPhilipp Reisner } 1333b411b363SPhilipp Reisner found: 13344b0715f0SLars Ellenberg return bm_fo; 1335b411b363SPhilipp Reisner } 1336b411b363SPhilipp Reisner 1337b411b363SPhilipp Reisner static unsigned long bm_find_next(struct drbd_conf *mdev, 1338b411b363SPhilipp Reisner unsigned long bm_fo, const int find_zero_bit) 1339b411b363SPhilipp Reisner { 1340b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 13414b0715f0SLars Ellenberg unsigned long i = DRBD_END_OF_BITMAP; 1342b411b363SPhilipp Reisner 1343841ce241SAndreas Gruenbacher if (!expect(b)) 1344841ce241SAndreas Gruenbacher return i; 1345841ce241SAndreas Gruenbacher if (!expect(b->bm_pages)) 1346841ce241SAndreas Gruenbacher return i; 1347b411b363SPhilipp Reisner 1348b411b363SPhilipp Reisner spin_lock_irq(&b->bm_lock); 134920ceb2b2SLars Ellenberg if (BM_DONT_TEST & b->bm_flags) 1350b411b363SPhilipp Reisner bm_print_lock_info(mdev); 1351b411b363SPhilipp Reisner 1352b411b363SPhilipp Reisner i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1); 1353b411b363SPhilipp Reisner 1354b411b363SPhilipp Reisner spin_unlock_irq(&b->bm_lock); 1355b411b363SPhilipp Reisner return i; 1356b411b363SPhilipp Reisner } 1357b411b363SPhilipp Reisner 1358b411b363SPhilipp Reisner unsigned long 
drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo) 1359b411b363SPhilipp Reisner { 1360b411b363SPhilipp Reisner return bm_find_next(mdev, bm_fo, 0); 1361b411b363SPhilipp Reisner } 1362b411b363SPhilipp Reisner 1363b411b363SPhilipp Reisner #if 0 1364b411b363SPhilipp Reisner /* not yet needed for anything. */ 1365b411b363SPhilipp Reisner unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo) 1366b411b363SPhilipp Reisner { 1367b411b363SPhilipp Reisner return bm_find_next(mdev, bm_fo, 1); 1368b411b363SPhilipp Reisner } 1369b411b363SPhilipp Reisner #endif 1370b411b363SPhilipp Reisner 1371b411b363SPhilipp Reisner /* does not spin_lock_irqsave. 1372b411b363SPhilipp Reisner * you must take drbd_bm_lock() first */ 1373b411b363SPhilipp Reisner unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo) 1374b411b363SPhilipp Reisner { 137520ceb2b2SLars Ellenberg /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ 1376b411b363SPhilipp Reisner return __bm_find_next(mdev, bm_fo, 0, KM_USER1); 1377b411b363SPhilipp Reisner } 1378b411b363SPhilipp Reisner 1379b411b363SPhilipp Reisner unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo) 1380b411b363SPhilipp Reisner { 138120ceb2b2SLars Ellenberg /* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */ 1382b411b363SPhilipp Reisner return __bm_find_next(mdev, bm_fo, 1, KM_USER1); 1383b411b363SPhilipp Reisner } 1384b411b363SPhilipp Reisner 1385b411b363SPhilipp Reisner /* returns number of bits actually changed. 1386b411b363SPhilipp Reisner * for val != 0, we change 0 -> 1, return code positive 1387b411b363SPhilipp Reisner * for val == 0, we change 1 -> 0, return code negative 1388b411b363SPhilipp Reisner * wants bitnr, not sector. 1389b411b363SPhilipp Reisner * expected to be called for only a few bits (e - s about BITS_PER_LONG). 1390b411b363SPhilipp Reisner * Must hold bitmap lock already. 
*/ 1391b4ee79daSPhilipp Reisner static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, 1392829c6087SLars Ellenberg unsigned long e, int val) 1393b411b363SPhilipp Reisner { 1394b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 1395b411b363SPhilipp Reisner unsigned long *p_addr = NULL; 1396b411b363SPhilipp Reisner unsigned long bitnr; 139719f843aaSLars Ellenberg unsigned int last_page_nr = -1U; 1398b411b363SPhilipp Reisner int c = 0; 139919f843aaSLars Ellenberg int changed_total = 0; 1400b411b363SPhilipp Reisner 1401b411b363SPhilipp Reisner if (e >= b->bm_bits) { 1402b411b363SPhilipp Reisner dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n", 1403b411b363SPhilipp Reisner s, e, b->bm_bits); 1404b411b363SPhilipp Reisner e = b->bm_bits ? b->bm_bits -1 : 0; 1405b411b363SPhilipp Reisner } 1406b411b363SPhilipp Reisner for (bitnr = s; bitnr <= e; bitnr++) { 140719f843aaSLars Ellenberg unsigned int page_nr = bm_bit_to_page_idx(b, bitnr); 1408b411b363SPhilipp Reisner if (page_nr != last_page_nr) { 1409b411b363SPhilipp Reisner if (p_addr) 1410829c6087SLars Ellenberg __bm_unmap(p_addr, KM_IRQ1); 141119f843aaSLars Ellenberg if (c < 0) 141219f843aaSLars Ellenberg bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); 141319f843aaSLars Ellenberg else if (c > 0) 141419f843aaSLars Ellenberg bm_set_page_need_writeout(b->bm_pages[last_page_nr]); 141519f843aaSLars Ellenberg changed_total += c; 141619f843aaSLars Ellenberg c = 0; 1417829c6087SLars Ellenberg p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1); 1418b411b363SPhilipp Reisner last_page_nr = page_nr; 1419b411b363SPhilipp Reisner } 1420b411b363SPhilipp Reisner if (val) 14217e599e6eSLinus Torvalds c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); 1422b411b363SPhilipp Reisner else 14237e599e6eSLinus Torvalds c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr)); 1424b411b363SPhilipp Reisner } 1425b411b363SPhilipp Reisner if (p_addr) 1426829c6087SLars 
Ellenberg __bm_unmap(p_addr, KM_IRQ1); 142719f843aaSLars Ellenberg if (c < 0) 142819f843aaSLars Ellenberg bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); 142919f843aaSLars Ellenberg else if (c > 0) 143019f843aaSLars Ellenberg bm_set_page_need_writeout(b->bm_pages[last_page_nr]); 143119f843aaSLars Ellenberg changed_total += c; 143219f843aaSLars Ellenberg b->bm_set += changed_total; 143319f843aaSLars Ellenberg return changed_total; 1434b411b363SPhilipp Reisner } 1435b411b363SPhilipp Reisner 1436b411b363SPhilipp Reisner /* returns number of bits actually changed. 1437b411b363SPhilipp Reisner * for val != 0, we change 0 -> 1, return code positive 1438b411b363SPhilipp Reisner * for val == 0, we change 1 -> 0, return code negative 1439b411b363SPhilipp Reisner * wants bitnr, not sector */ 1440b4ee79daSPhilipp Reisner static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, 1441b411b363SPhilipp Reisner const unsigned long e, int val) 1442b411b363SPhilipp Reisner { 1443b411b363SPhilipp Reisner unsigned long flags; 1444b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 1445b411b363SPhilipp Reisner int c = 0; 1446b411b363SPhilipp Reisner 1447841ce241SAndreas Gruenbacher if (!expect(b)) 1448841ce241SAndreas Gruenbacher return 1; 1449841ce241SAndreas Gruenbacher if (!expect(b->bm_pages)) 1450841ce241SAndreas Gruenbacher return 0; 1451b411b363SPhilipp Reisner 1452b411b363SPhilipp Reisner spin_lock_irqsave(&b->bm_lock, flags); 145320ceb2b2SLars Ellenberg if ((val ? 
BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags) 1454b411b363SPhilipp Reisner bm_print_lock_info(mdev); 1455b411b363SPhilipp Reisner 1456829c6087SLars Ellenberg c = __bm_change_bits_to(mdev, s, e, val); 1457b411b363SPhilipp Reisner 1458b411b363SPhilipp Reisner spin_unlock_irqrestore(&b->bm_lock, flags); 1459b411b363SPhilipp Reisner return c; 1460b411b363SPhilipp Reisner } 1461b411b363SPhilipp Reisner 1462b411b363SPhilipp Reisner /* returns number of bits changed 0 -> 1 */ 1463b411b363SPhilipp Reisner int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e) 1464b411b363SPhilipp Reisner { 1465b411b363SPhilipp Reisner return bm_change_bits_to(mdev, s, e, 1); 1466b411b363SPhilipp Reisner } 1467b411b363SPhilipp Reisner 1468b411b363SPhilipp Reisner /* returns number of bits changed 1 -> 0 */ 1469b411b363SPhilipp Reisner int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e) 1470b411b363SPhilipp Reisner { 1471b411b363SPhilipp Reisner return -bm_change_bits_to(mdev, s, e, 0); 1472b411b363SPhilipp Reisner } 1473b411b363SPhilipp Reisner 1474b411b363SPhilipp Reisner /* sets all bits in full words, 1475b411b363SPhilipp Reisner * from first_word up to, but not including, last_word */ 1476b411b363SPhilipp Reisner static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b, 1477b411b363SPhilipp Reisner int page_nr, int first_word, int last_word) 1478b411b363SPhilipp Reisner { 1479b411b363SPhilipp Reisner int i; 1480b411b363SPhilipp Reisner int bits; 148122d81140SLars Ellenberg int changed = 0; 1482829c6087SLars Ellenberg unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1); 1483b411b363SPhilipp Reisner for (i = first_word; i < last_word; i++) { 1484b411b363SPhilipp Reisner bits = hweight_long(paddr[i]); 1485b411b363SPhilipp Reisner paddr[i] = ~0UL; 148622d81140SLars Ellenberg changed += BITS_PER_LONG - bits; 1487b411b363SPhilipp Reisner } 1488829c6087SLars Ellenberg kunmap_atomic(paddr, 
KM_IRQ1); 148922d81140SLars Ellenberg if (changed) { 149022d81140SLars Ellenberg /* We only need lazy writeout, the information is still in the 149122d81140SLars Ellenberg * remote bitmap as well, and is reconstructed during the next 149222d81140SLars Ellenberg * bitmap exchange, if lost locally due to a crash. */ 149322d81140SLars Ellenberg bm_set_page_lazy_writeout(b->bm_pages[page_nr]); 149422d81140SLars Ellenberg b->bm_set += changed; 149522d81140SLars Ellenberg } 1496b411b363SPhilipp Reisner } 1497b411b363SPhilipp Reisner 1498829c6087SLars Ellenberg /* Same thing as drbd_bm_set_bits, 1499829c6087SLars Ellenberg * but more efficient for a large bit range. 1500b411b363SPhilipp Reisner * You must first drbd_bm_lock(). 1501b411b363SPhilipp Reisner * Can be called to set the whole bitmap in one go. 1502b411b363SPhilipp Reisner * Sets bits from s to e _inclusive_. */ 1503b411b363SPhilipp Reisner void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e) 1504b411b363SPhilipp Reisner { 1505b411b363SPhilipp Reisner /* First set_bit from the first bit (s) 1506b411b363SPhilipp Reisner * up to the next long boundary (sl), 1507b411b363SPhilipp Reisner * then assign full words up to the last long boundary (el), 1508b411b363SPhilipp Reisner * then set_bit up to and including the last bit (e). 1509b411b363SPhilipp Reisner * 1510b411b363SPhilipp Reisner * Do not use memset, because we must account for changes, 1511b411b363SPhilipp Reisner * so we need to loop over the words with hweight() anyways. 
1512b411b363SPhilipp Reisner */ 1513829c6087SLars Ellenberg struct drbd_bitmap *b = mdev->bitmap; 1514b411b363SPhilipp Reisner unsigned long sl = ALIGN(s,BITS_PER_LONG); 1515b411b363SPhilipp Reisner unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1); 1516b411b363SPhilipp Reisner int first_page; 1517b411b363SPhilipp Reisner int last_page; 1518b411b363SPhilipp Reisner int page_nr; 1519b411b363SPhilipp Reisner int first_word; 1520b411b363SPhilipp Reisner int last_word; 1521b411b363SPhilipp Reisner 1522b411b363SPhilipp Reisner if (e - s <= 3*BITS_PER_LONG) { 1523b411b363SPhilipp Reisner /* don't bother; el and sl may even be wrong. */ 1524829c6087SLars Ellenberg spin_lock_irq(&b->bm_lock); 1525829c6087SLars Ellenberg __bm_change_bits_to(mdev, s, e, 1); 1526829c6087SLars Ellenberg spin_unlock_irq(&b->bm_lock); 1527b411b363SPhilipp Reisner return; 1528b411b363SPhilipp Reisner } 1529b411b363SPhilipp Reisner 1530b411b363SPhilipp Reisner /* difference is large enough that we can trust sl and el */ 1531b411b363SPhilipp Reisner 1532829c6087SLars Ellenberg spin_lock_irq(&b->bm_lock); 1533829c6087SLars Ellenberg 1534b411b363SPhilipp Reisner /* bits filling the current long */ 1535b411b363SPhilipp Reisner if (sl) 1536829c6087SLars Ellenberg __bm_change_bits_to(mdev, s, sl-1, 1); 1537b411b363SPhilipp Reisner 1538b411b363SPhilipp Reisner first_page = sl >> (3 + PAGE_SHIFT); 1539b411b363SPhilipp Reisner last_page = el >> (3 + PAGE_SHIFT); 1540b411b363SPhilipp Reisner 1541b411b363SPhilipp Reisner /* MLPP: modulo longs per page */ 1542b411b363SPhilipp Reisner /* LWPP: long words per page */ 1543b411b363SPhilipp Reisner first_word = MLPP(sl >> LN2_BPL); 1544b411b363SPhilipp Reisner last_word = LWPP; 1545b411b363SPhilipp Reisner 1546b411b363SPhilipp Reisner /* first and full pages, unless first page == last page */ 1547b411b363SPhilipp Reisner for (page_nr = first_page; page_nr < last_page; page_nr++) { 1548b411b363SPhilipp Reisner 
bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word); 15498ccee20eSLars Ellenberg spin_unlock_irq(&b->bm_lock); 15508ccee20eSLars Ellenberg cond_resched(); 1551b411b363SPhilipp Reisner first_word = 0; 15528ccee20eSLars Ellenberg spin_lock_irq(&b->bm_lock); 1553b411b363SPhilipp Reisner } 1554b411b363SPhilipp Reisner /* last page (respectively only page, for first page == last page) */ 1555b411b363SPhilipp Reisner last_word = MLPP(el >> LN2_BPL); 15561b6f1974SLars Ellenberg 15571b6f1974SLars Ellenberg /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples). 15581b6f1974SLars Ellenberg * ==> e = 32767, el = 32768, last_page = 2, 15591b6f1974SLars Ellenberg * and now last_word = 0. 15601b6f1974SLars Ellenberg * We do not want to touch last_page in this case, 15611b6f1974SLars Ellenberg * as we did not allocate it, it is not present in bitmap->bm_pages. 15621b6f1974SLars Ellenberg */ 15631b6f1974SLars Ellenberg if (last_word) 1564b411b363SPhilipp Reisner bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word); 1565b411b363SPhilipp Reisner 1566b411b363SPhilipp Reisner /* possibly trailing bits. 1567b411b363SPhilipp Reisner * example: (e & 63) == 63, el will be e+1. 1568b411b363SPhilipp Reisner * if that even was the very last bit, 1569b411b363SPhilipp Reisner * it would trigger an assert in __bm_change_bits_to() 1570b411b363SPhilipp Reisner */ 1571b411b363SPhilipp Reisner if (el <= e) 1572829c6087SLars Ellenberg __bm_change_bits_to(mdev, el, e, 1); 1573829c6087SLars Ellenberg spin_unlock_irq(&b->bm_lock); 1574b411b363SPhilipp Reisner } 1575b411b363SPhilipp Reisner 1576b411b363SPhilipp Reisner /* returns bit state 1577b411b363SPhilipp Reisner * wants bitnr, NOT sector. 1578b411b363SPhilipp Reisner * inherently racy... area needs to be locked by means of {al,rs}_lru 1579b411b363SPhilipp Reisner * 1 ... bit set 1580b411b363SPhilipp Reisner * 0 ... 
bit not set 1581b411b363SPhilipp Reisner * -1 ... first out of bounds access, stop testing for bits! 1582b411b363SPhilipp Reisner */ 1583b411b363SPhilipp Reisner int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr) 1584b411b363SPhilipp Reisner { 1585b411b363SPhilipp Reisner unsigned long flags; 1586b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 1587b411b363SPhilipp Reisner unsigned long *p_addr; 1588b411b363SPhilipp Reisner int i; 1589b411b363SPhilipp Reisner 1590841ce241SAndreas Gruenbacher if (!expect(b)) 1591841ce241SAndreas Gruenbacher return 0; 1592841ce241SAndreas Gruenbacher if (!expect(b->bm_pages)) 1593841ce241SAndreas Gruenbacher return 0; 1594b411b363SPhilipp Reisner 1595b411b363SPhilipp Reisner spin_lock_irqsave(&b->bm_lock, flags); 159620ceb2b2SLars Ellenberg if (BM_DONT_TEST & b->bm_flags) 1597b411b363SPhilipp Reisner bm_print_lock_info(mdev); 1598b411b363SPhilipp Reisner if (bitnr < b->bm_bits) { 159919f843aaSLars Ellenberg p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); 16007e599e6eSLinus Torvalds i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 
1 : 0; 1601b411b363SPhilipp Reisner bm_unmap(p_addr); 1602b411b363SPhilipp Reisner } else if (bitnr == b->bm_bits) { 1603b411b363SPhilipp Reisner i = -1; 1604b411b363SPhilipp Reisner } else { /* (bitnr > b->bm_bits) */ 1605b411b363SPhilipp Reisner dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits); 1606b411b363SPhilipp Reisner i = 0; 1607b411b363SPhilipp Reisner } 1608b411b363SPhilipp Reisner 1609b411b363SPhilipp Reisner spin_unlock_irqrestore(&b->bm_lock, flags); 1610b411b363SPhilipp Reisner return i; 1611b411b363SPhilipp Reisner } 1612b411b363SPhilipp Reisner 1613b411b363SPhilipp Reisner /* returns number of bits set in the range [s, e] */ 1614b411b363SPhilipp Reisner int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e) 1615b411b363SPhilipp Reisner { 1616b411b363SPhilipp Reisner unsigned long flags; 1617b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 161819f843aaSLars Ellenberg unsigned long *p_addr = NULL; 1619b411b363SPhilipp Reisner unsigned long bitnr; 162019f843aaSLars Ellenberg unsigned int page_nr = -1U; 1621b411b363SPhilipp Reisner int c = 0; 1622b411b363SPhilipp Reisner 1623b411b363SPhilipp Reisner /* If this is called without a bitmap, that is a bug. 
But just to be 1624b411b363SPhilipp Reisner * robust in case we screwed up elsewhere, in that case pretend there 1625b411b363SPhilipp Reisner * was one dirty bit in the requested area, so we won't try to do a 1626b411b363SPhilipp Reisner * local read there (no bitmap probably implies no disk) */ 1627841ce241SAndreas Gruenbacher if (!expect(b)) 1628841ce241SAndreas Gruenbacher return 1; 1629841ce241SAndreas Gruenbacher if (!expect(b->bm_pages)) 1630841ce241SAndreas Gruenbacher return 1; 1631b411b363SPhilipp Reisner 1632b411b363SPhilipp Reisner spin_lock_irqsave(&b->bm_lock, flags); 163320ceb2b2SLars Ellenberg if (BM_DONT_TEST & b->bm_flags) 1634b411b363SPhilipp Reisner bm_print_lock_info(mdev); 1635b411b363SPhilipp Reisner for (bitnr = s; bitnr <= e; bitnr++) { 163619f843aaSLars Ellenberg unsigned int idx = bm_bit_to_page_idx(b, bitnr); 163719f843aaSLars Ellenberg if (page_nr != idx) { 163819f843aaSLars Ellenberg page_nr = idx; 1639b411b363SPhilipp Reisner if (p_addr) 1640b411b363SPhilipp Reisner bm_unmap(p_addr); 164119f843aaSLars Ellenberg p_addr = bm_map_pidx(b, idx); 1642b411b363SPhilipp Reisner } 1643841ce241SAndreas Gruenbacher if (expect(bitnr < b->bm_bits)) 16447e599e6eSLinus Torvalds c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr)); 1645841ce241SAndreas Gruenbacher else 1646841ce241SAndreas Gruenbacher dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); 1647b411b363SPhilipp Reisner } 1648b411b363SPhilipp Reisner if (p_addr) 1649b411b363SPhilipp Reisner bm_unmap(p_addr); 1650b411b363SPhilipp Reisner spin_unlock_irqrestore(&b->bm_lock, flags); 1651b411b363SPhilipp Reisner return c; 1652b411b363SPhilipp Reisner } 1653b411b363SPhilipp Reisner 1654b411b363SPhilipp Reisner 1655b411b363SPhilipp Reisner /* inherently racy... 1656b411b363SPhilipp Reisner * return value may be already out-of-date when this function returns. 
1657b411b363SPhilipp Reisner * but the general usage is that this is only use during a cstate when bits are 1658b411b363SPhilipp Reisner * only cleared, not set, and typically only care for the case when the return 1659b411b363SPhilipp Reisner * value is zero, or we already "locked" this "bitmap extent" by other means. 1660b411b363SPhilipp Reisner * 1661b411b363SPhilipp Reisner * enr is bm-extent number, since we chose to name one sector (512 bytes) 1662b411b363SPhilipp Reisner * worth of the bitmap a "bitmap extent". 1663b411b363SPhilipp Reisner * 1664b411b363SPhilipp Reisner * TODO 1665b411b363SPhilipp Reisner * I think since we use it like a reference count, we should use the real 1666b411b363SPhilipp Reisner * reference count of some bitmap extent element from some lru instead... 1667b411b363SPhilipp Reisner * 1668b411b363SPhilipp Reisner */ 1669b411b363SPhilipp Reisner int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr) 1670b411b363SPhilipp Reisner { 1671b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 1672b411b363SPhilipp Reisner int count, s, e; 1673b411b363SPhilipp Reisner unsigned long flags; 1674b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 1675b411b363SPhilipp Reisner 1676841ce241SAndreas Gruenbacher if (!expect(b)) 1677841ce241SAndreas Gruenbacher return 0; 1678841ce241SAndreas Gruenbacher if (!expect(b->bm_pages)) 1679841ce241SAndreas Gruenbacher return 0; 1680b411b363SPhilipp Reisner 1681b411b363SPhilipp Reisner spin_lock_irqsave(&b->bm_lock, flags); 168220ceb2b2SLars Ellenberg if (BM_DONT_TEST & b->bm_flags) 1683b411b363SPhilipp Reisner bm_print_lock_info(mdev); 1684b411b363SPhilipp Reisner 1685b411b363SPhilipp Reisner s = S2W(enr); 1686b411b363SPhilipp Reisner e = min((size_t)S2W(enr+1), b->bm_words); 1687b411b363SPhilipp Reisner count = 0; 1688b411b363SPhilipp Reisner if (s < b->bm_words) { 1689b411b363SPhilipp Reisner int n = e-s; 169019f843aaSLars Ellenberg p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); 
1691b411b363SPhilipp Reisner bm = p_addr + MLPP(s); 1692b411b363SPhilipp Reisner while (n--) 1693b411b363SPhilipp Reisner count += hweight_long(*bm++); 1694b411b363SPhilipp Reisner bm_unmap(p_addr); 1695b411b363SPhilipp Reisner } else { 1696b411b363SPhilipp Reisner dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s); 1697b411b363SPhilipp Reisner } 1698b411b363SPhilipp Reisner spin_unlock_irqrestore(&b->bm_lock, flags); 1699b411b363SPhilipp Reisner return count; 1700b411b363SPhilipp Reisner } 1701