1b411b363SPhilipp Reisner /* 2b411b363SPhilipp Reisner drbd_bitmap.c 3b411b363SPhilipp Reisner 4b411b363SPhilipp Reisner This file is part of DRBD by Philipp Reisner and Lars Ellenberg. 5b411b363SPhilipp Reisner 6b411b363SPhilipp Reisner Copyright (C) 2004-2008, LINBIT Information Technologies GmbH. 7b411b363SPhilipp Reisner Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>. 8b411b363SPhilipp Reisner Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. 9b411b363SPhilipp Reisner 10b411b363SPhilipp Reisner drbd is free software; you can redistribute it and/or modify 11b411b363SPhilipp Reisner it under the terms of the GNU General Public License as published by 12b411b363SPhilipp Reisner the Free Software Foundation; either version 2, or (at your option) 13b411b363SPhilipp Reisner any later version. 14b411b363SPhilipp Reisner 15b411b363SPhilipp Reisner drbd is distributed in the hope that it will be useful, 16b411b363SPhilipp Reisner but WITHOUT ANY WARRANTY; without even the implied warranty of 17b411b363SPhilipp Reisner MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18b411b363SPhilipp Reisner GNU General Public License for more details. 19b411b363SPhilipp Reisner 20b411b363SPhilipp Reisner You should have received a copy of the GNU General Public License 21b411b363SPhilipp Reisner along with drbd; see the file COPYING. If not, write to 22b411b363SPhilipp Reisner the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
23b411b363SPhilipp Reisner */ 24b411b363SPhilipp Reisner 25b411b363SPhilipp Reisner #include <linux/bitops.h> 26b411b363SPhilipp Reisner #include <linux/vmalloc.h> 27b411b363SPhilipp Reisner #include <linux/string.h> 28b411b363SPhilipp Reisner #include <linux/drbd.h> 295a0e3ad6STejun Heo #include <linux/slab.h> 30b411b363SPhilipp Reisner #include <asm/kmap_types.h> 31f0ff1357SStephen Rothwell 32b411b363SPhilipp Reisner #include "drbd_int.h" 33b411b363SPhilipp Reisner 3495a0f10cSLars Ellenberg 35b411b363SPhilipp Reisner /* OPAQUE outside this file! 36b411b363SPhilipp Reisner * interface defined in drbd_int.h 37b411b363SPhilipp Reisner 38b411b363SPhilipp Reisner * convention: 39b411b363SPhilipp Reisner * function name drbd_bm_... => used elsewhere, "public". 40b411b363SPhilipp Reisner * function name bm_... => internal to implementation, "private". 414b0715f0SLars Ellenberg */ 42b411b363SPhilipp Reisner 434b0715f0SLars Ellenberg 444b0715f0SLars Ellenberg /* 454b0715f0SLars Ellenberg * LIMITATIONS: 464b0715f0SLars Ellenberg * We want to support >= peta byte of backend storage, while for now still using 474b0715f0SLars Ellenberg * a granularity of one bit per 4KiB of storage. 484b0715f0SLars Ellenberg * 1 << 50 bytes backend storage (1 PiB) 494b0715f0SLars Ellenberg * 1 << (50 - 12) bits needed 504b0715f0SLars Ellenberg * 38 --> we need u64 to index and count bits 514b0715f0SLars Ellenberg * 1 << (38 - 3) bitmap bytes needed 524b0715f0SLars Ellenberg * 35 --> we still need u64 to index and count bytes 534b0715f0SLars Ellenberg * (that's 32 GiB of bitmap for 1 PiB storage) 544b0715f0SLars Ellenberg * 1 << (35 - 2) 32bit longs needed 554b0715f0SLars Ellenberg * 33 --> we'd even need u64 to index and count 32bit long words. 564b0715f0SLars Ellenberg * 1 << (35 - 3) 64bit longs needed 574b0715f0SLars Ellenberg * 32 --> we could get away with a 32bit unsigned int to index and count 584b0715f0SLars Ellenberg * 64bit long words, but I rather stay with unsigned long for now. 
594b0715f0SLars Ellenberg * We probably should neither count nor point to bytes or long words 604b0715f0SLars Ellenberg * directly, but either by bitnumber, or by page index and offset. 614b0715f0SLars Ellenberg * 1 << (35 - 12) 624b0715f0SLars Ellenberg * 22 --> we need that much 4KiB pages of bitmap. 634b0715f0SLars Ellenberg * 1 << (22 + 3) --> on a 64bit arch, 644b0715f0SLars Ellenberg * we need 32 MiB to store the array of page pointers. 654b0715f0SLars Ellenberg * 664b0715f0SLars Ellenberg * Because I'm lazy, and because the resulting patch was too large, too ugly 674b0715f0SLars Ellenberg * and still incomplete, on 32bit we still "only" support 16 TiB (minus some), 684b0715f0SLars Ellenberg * (1 << 32) bits * 4k storage. 694b0715f0SLars Ellenberg * 704b0715f0SLars Ellenberg 714b0715f0SLars Ellenberg * bitmap storage and IO: 724b0715f0SLars Ellenberg * Bitmap is stored little endian on disk, and is kept little endian in 734b0715f0SLars Ellenberg * core memory. Currently we still hold the full bitmap in core as long 744b0715f0SLars Ellenberg * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage 754b0715f0SLars Ellenberg * seems excessive. 764b0715f0SLars Ellenberg * 77*24c4830cSBart Van Assche * We plan to reduce the amount of in-core bitmap pages by paging them in 784b0715f0SLars Ellenberg * and out against their on-disk location as necessary, but need to make 794b0715f0SLars Ellenberg * sure we don't cause too much meta data IO, and must not deadlock in 804b0715f0SLars Ellenberg * tight memory situations. This needs some more work. 81b411b363SPhilipp Reisner */ 82b411b363SPhilipp Reisner 83b411b363SPhilipp Reisner /* 84b411b363SPhilipp Reisner * NOTE 85b411b363SPhilipp Reisner * Access to the *bm_pages is protected by bm_lock. 86b411b363SPhilipp Reisner * It is safe to read the other members within the lock. 
87b411b363SPhilipp Reisner * 88b411b363SPhilipp Reisner * drbd_bm_set_bits is called from bio_endio callbacks, 89b411b363SPhilipp Reisner * We may be called with irq already disabled, 90b411b363SPhilipp Reisner * so we need spin_lock_irqsave(). 91b411b363SPhilipp Reisner * And we need the kmap_atomic. 92b411b363SPhilipp Reisner */ 93b411b363SPhilipp Reisner struct drbd_bitmap { 94b411b363SPhilipp Reisner struct page **bm_pages; 95b411b363SPhilipp Reisner spinlock_t bm_lock; 964b0715f0SLars Ellenberg 974b0715f0SLars Ellenberg /* see LIMITATIONS: above */ 984b0715f0SLars Ellenberg 99b411b363SPhilipp Reisner unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */ 100b411b363SPhilipp Reisner unsigned long bm_bits; 101b411b363SPhilipp Reisner size_t bm_words; 102b411b363SPhilipp Reisner size_t bm_number_of_pages; 103b411b363SPhilipp Reisner sector_t bm_dev_capacity; 1048a03ae2aSThomas Gleixner struct mutex bm_change; /* serializes resize operations */ 105b411b363SPhilipp Reisner 10619f843aaSLars Ellenberg wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */ 107b411b363SPhilipp Reisner 10820ceb2b2SLars Ellenberg enum bm_flag bm_flags; 109b411b363SPhilipp Reisner 110b411b363SPhilipp Reisner /* debugging aid, in case we are still racy somewhere */ 111b411b363SPhilipp Reisner char *bm_why; 112b411b363SPhilipp Reisner struct task_struct *bm_task; 113b411b363SPhilipp Reisner }; 114b411b363SPhilipp Reisner 115b4ee79daSPhilipp Reisner static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s, 116fd76438cSPhilipp Reisner unsigned long e, int val, const enum km_type km); 117fd76438cSPhilipp Reisner 118b411b363SPhilipp Reisner #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) 119b411b363SPhilipp Reisner static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) 120b411b363SPhilipp Reisner { 121b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 122b411b363SPhilipp Reisner if 
(!__ratelimit(&drbd_ratelimit_state)) 123b411b363SPhilipp Reisner return; 124b411b363SPhilipp Reisner dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n", 125b411b363SPhilipp Reisner current == mdev->receiver.task ? "receiver" : 126b411b363SPhilipp Reisner current == mdev->asender.task ? "asender" : 127b411b363SPhilipp Reisner current == mdev->worker.task ? "worker" : current->comm, 128b411b363SPhilipp Reisner func, b->bm_why ?: "?", 129b411b363SPhilipp Reisner b->bm_task == mdev->receiver.task ? "receiver" : 130b411b363SPhilipp Reisner b->bm_task == mdev->asender.task ? "asender" : 131b411b363SPhilipp Reisner b->bm_task == mdev->worker.task ? "worker" : "?"); 132b411b363SPhilipp Reisner } 133b411b363SPhilipp Reisner 13420ceb2b2SLars Ellenberg void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags) 135b411b363SPhilipp Reisner { 136b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 137b411b363SPhilipp Reisner int trylock_failed; 138b411b363SPhilipp Reisner 139b411b363SPhilipp Reisner if (!b) { 140b411b363SPhilipp Reisner dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n"); 141b411b363SPhilipp Reisner return; 142b411b363SPhilipp Reisner } 143b411b363SPhilipp Reisner 1448a03ae2aSThomas Gleixner trylock_failed = !mutex_trylock(&b->bm_change); 145b411b363SPhilipp Reisner 146b411b363SPhilipp Reisner if (trylock_failed) { 147b411b363SPhilipp Reisner dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n", 148b411b363SPhilipp Reisner current == mdev->receiver.task ? "receiver" : 149b411b363SPhilipp Reisner current == mdev->asender.task ? "asender" : 150b411b363SPhilipp Reisner current == mdev->worker.task ? "worker" : current->comm, 151b411b363SPhilipp Reisner why, b->bm_why ?: "?", 152b411b363SPhilipp Reisner b->bm_task == mdev->receiver.task ? "receiver" : 153b411b363SPhilipp Reisner b->bm_task == mdev->asender.task ? "asender" : 154b411b363SPhilipp Reisner b->bm_task == mdev->worker.task ? 
"worker" : "?"); 1558a03ae2aSThomas Gleixner mutex_lock(&b->bm_change); 156b411b363SPhilipp Reisner } 15720ceb2b2SLars Ellenberg if (BM_LOCKED_MASK & b->bm_flags) 158b411b363SPhilipp Reisner dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); 15920ceb2b2SLars Ellenberg b->bm_flags |= flags & BM_LOCKED_MASK; 160b411b363SPhilipp Reisner 161b411b363SPhilipp Reisner b->bm_why = why; 162b411b363SPhilipp Reisner b->bm_task = current; 163b411b363SPhilipp Reisner } 164b411b363SPhilipp Reisner 165b411b363SPhilipp Reisner void drbd_bm_unlock(struct drbd_conf *mdev) 166b411b363SPhilipp Reisner { 167b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 168b411b363SPhilipp Reisner if (!b) { 169b411b363SPhilipp Reisner dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n"); 170b411b363SPhilipp Reisner return; 171b411b363SPhilipp Reisner } 172b411b363SPhilipp Reisner 17320ceb2b2SLars Ellenberg if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags)) 174b411b363SPhilipp Reisner dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n"); 175b411b363SPhilipp Reisner 17620ceb2b2SLars Ellenberg b->bm_flags &= ~BM_LOCKED_MASK; 177b411b363SPhilipp Reisner b->bm_why = NULL; 178b411b363SPhilipp Reisner b->bm_task = NULL; 1798a03ae2aSThomas Gleixner mutex_unlock(&b->bm_change); 180b411b363SPhilipp Reisner } 181b411b363SPhilipp Reisner 18219f843aaSLars Ellenberg /* we store some "meta" info about our pages in page->private */ 18319f843aaSLars Ellenberg /* at a granularity of 4k storage per bitmap bit: 18419f843aaSLars Ellenberg * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks 18519f843aaSLars Ellenberg * 1<<38 bits, 18619f843aaSLars Ellenberg * 1<<23 4k bitmap pages. 18719f843aaSLars Ellenberg * Use 24 bits as page index, covers 2 peta byte storage 18819f843aaSLars Ellenberg * at a granularity of 4k per bit. 18919f843aaSLars Ellenberg * Used to report the failed page idx on io error from the endio handlers. 
19019f843aaSLars Ellenberg */ 19119f843aaSLars Ellenberg #define BM_PAGE_IDX_MASK ((1UL<<24)-1) 19219f843aaSLars Ellenberg /* this page is currently read in, or written back */ 19319f843aaSLars Ellenberg #define BM_PAGE_IO_LOCK 31 19419f843aaSLars Ellenberg /* if there has been an IO error for this page */ 19519f843aaSLars Ellenberg #define BM_PAGE_IO_ERROR 30 19619f843aaSLars Ellenberg /* this is to be able to intelligently skip disk IO, 19719f843aaSLars Ellenberg * set if bits have been set since last IO. */ 19819f843aaSLars Ellenberg #define BM_PAGE_NEED_WRITEOUT 29 19919f843aaSLars Ellenberg /* to mark for lazy writeout once syncer cleared all clearable bits, 20019f843aaSLars Ellenberg * we if bits have been cleared since last IO. */ 20119f843aaSLars Ellenberg #define BM_PAGE_LAZY_WRITEOUT 28 20219f843aaSLars Ellenberg 203*24c4830cSBart Van Assche /* store_page_idx uses non-atomic assignment. It is only used directly after 20419f843aaSLars Ellenberg * allocating the page. All other bm_set_page_* and bm_clear_page_* need to 20519f843aaSLars Ellenberg * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap 20619f843aaSLars Ellenberg * changes) may happen from various contexts, and wait_on_bit/wake_up_bit 20719f843aaSLars Ellenberg * requires it all to be atomic as well. 
*/ 20819f843aaSLars Ellenberg static void bm_store_page_idx(struct page *page, unsigned long idx) 20919f843aaSLars Ellenberg { 21019f843aaSLars Ellenberg BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK)); 21119f843aaSLars Ellenberg page_private(page) |= idx; 21219f843aaSLars Ellenberg } 21319f843aaSLars Ellenberg 21419f843aaSLars Ellenberg static unsigned long bm_page_to_idx(struct page *page) 21519f843aaSLars Ellenberg { 21619f843aaSLars Ellenberg return page_private(page) & BM_PAGE_IDX_MASK; 21719f843aaSLars Ellenberg } 21819f843aaSLars Ellenberg 21919f843aaSLars Ellenberg /* As is very unlikely that the same page is under IO from more than one 22019f843aaSLars Ellenberg * context, we can get away with a bit per page and one wait queue per bitmap. 22119f843aaSLars Ellenberg */ 22219f843aaSLars Ellenberg static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr) 22319f843aaSLars Ellenberg { 22419f843aaSLars Ellenberg struct drbd_bitmap *b = mdev->bitmap; 22519f843aaSLars Ellenberg void *addr = &page_private(b->bm_pages[page_nr]); 22619f843aaSLars Ellenberg wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); 22719f843aaSLars Ellenberg } 22819f843aaSLars Ellenberg 22919f843aaSLars Ellenberg static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr) 23019f843aaSLars Ellenberg { 23119f843aaSLars Ellenberg struct drbd_bitmap *b = mdev->bitmap; 23219f843aaSLars Ellenberg void *addr = &page_private(b->bm_pages[page_nr]); 23319f843aaSLars Ellenberg clear_bit(BM_PAGE_IO_LOCK, addr); 23419f843aaSLars Ellenberg smp_mb__after_clear_bit(); 23519f843aaSLars Ellenberg wake_up(&mdev->bitmap->bm_io_wait); 23619f843aaSLars Ellenberg } 23719f843aaSLars Ellenberg 23819f843aaSLars Ellenberg /* set _before_ submit_io, so it may be reset due to being changed 23919f843aaSLars Ellenberg * while this page is in flight... 
will get submitted later again */ 24019f843aaSLars Ellenberg static void bm_set_page_unchanged(struct page *page) 24119f843aaSLars Ellenberg { 24219f843aaSLars Ellenberg /* use cmpxchg? */ 24319f843aaSLars Ellenberg clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); 24419f843aaSLars Ellenberg clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 24519f843aaSLars Ellenberg } 24619f843aaSLars Ellenberg 24719f843aaSLars Ellenberg static void bm_set_page_need_writeout(struct page *page) 24819f843aaSLars Ellenberg { 24919f843aaSLars Ellenberg set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); 25019f843aaSLars Ellenberg } 25119f843aaSLars Ellenberg 25219f843aaSLars Ellenberg static int bm_test_page_unchanged(struct page *page) 25319f843aaSLars Ellenberg { 25419f843aaSLars Ellenberg volatile const unsigned long *addr = &page_private(page); 25519f843aaSLars Ellenberg return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0; 25619f843aaSLars Ellenberg } 25719f843aaSLars Ellenberg 25819f843aaSLars Ellenberg static void bm_set_page_io_err(struct page *page) 25919f843aaSLars Ellenberg { 26019f843aaSLars Ellenberg set_bit(BM_PAGE_IO_ERROR, &page_private(page)); 26119f843aaSLars Ellenberg } 26219f843aaSLars Ellenberg 26319f843aaSLars Ellenberg static void bm_clear_page_io_err(struct page *page) 26419f843aaSLars Ellenberg { 26519f843aaSLars Ellenberg clear_bit(BM_PAGE_IO_ERROR, &page_private(page)); 26619f843aaSLars Ellenberg } 26719f843aaSLars Ellenberg 26819f843aaSLars Ellenberg static void bm_set_page_lazy_writeout(struct page *page) 26919f843aaSLars Ellenberg { 27019f843aaSLars Ellenberg set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 27119f843aaSLars Ellenberg } 27219f843aaSLars Ellenberg 27319f843aaSLars Ellenberg static int bm_test_page_lazy_writeout(struct page *page) 27419f843aaSLars Ellenberg { 27519f843aaSLars Ellenberg return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 27619f843aaSLars Ellenberg } 27719f843aaSLars 
Ellenberg 27819f843aaSLars Ellenberg /* on a 32bit box, this would allow for exactly (2<<38) bits. */ 27919f843aaSLars Ellenberg static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr) 28019f843aaSLars Ellenberg { 28119f843aaSLars Ellenberg /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */ 28219f843aaSLars Ellenberg unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3); 28319f843aaSLars Ellenberg BUG_ON(page_nr >= b->bm_number_of_pages); 28419f843aaSLars Ellenberg return page_nr; 28519f843aaSLars Ellenberg } 28619f843aaSLars Ellenberg 28795a0f10cSLars Ellenberg static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) 28895a0f10cSLars Ellenberg { 28995a0f10cSLars Ellenberg /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ 29095a0f10cSLars Ellenberg unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); 29195a0f10cSLars Ellenberg BUG_ON(page_nr >= b->bm_number_of_pages); 29295a0f10cSLars Ellenberg return page_nr; 29395a0f10cSLars Ellenberg } 29495a0f10cSLars Ellenberg 29595a0f10cSLars Ellenberg static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km) 29695a0f10cSLars Ellenberg { 29795a0f10cSLars Ellenberg struct page *page = b->bm_pages[idx]; 29895a0f10cSLars Ellenberg return (unsigned long *) kmap_atomic(page, km); 29995a0f10cSLars Ellenberg } 30095a0f10cSLars Ellenberg 30195a0f10cSLars Ellenberg static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) 30295a0f10cSLars Ellenberg { 30395a0f10cSLars Ellenberg return __bm_map_pidx(b, idx, KM_IRQ1); 30495a0f10cSLars Ellenberg } 30595a0f10cSLars Ellenberg 306b411b363SPhilipp Reisner static void __bm_unmap(unsigned long *p_addr, const enum km_type km) 307b411b363SPhilipp Reisner { 308b411b363SPhilipp Reisner kunmap_atomic(p_addr, km); 309b411b363SPhilipp Reisner }; 310b411b363SPhilipp Reisner 311b411b363SPhilipp Reisner static void bm_unmap(unsigned long *p_addr) 312b411b363SPhilipp Reisner { 313b411b363SPhilipp Reisner 
return __bm_unmap(p_addr, KM_IRQ1); 314b411b363SPhilipp Reisner } 315b411b363SPhilipp Reisner 316b411b363SPhilipp Reisner /* long word offset of _bitmap_ sector */ 317b411b363SPhilipp Reisner #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) 318b411b363SPhilipp Reisner /* word offset from start of bitmap to word number _in_page_ 319b411b363SPhilipp Reisner * modulo longs per page 320b411b363SPhilipp Reisner #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)) 321*24c4830cSBart Van Assche hm, well, Philipp thinks gcc might not optimize the % into & (... - 1) 322b411b363SPhilipp Reisner so do it explicitly: 323b411b363SPhilipp Reisner */ 324b411b363SPhilipp Reisner #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1)) 325b411b363SPhilipp Reisner 326b411b363SPhilipp Reisner /* Long words per page */ 327b411b363SPhilipp Reisner #define LWPP (PAGE_SIZE/sizeof(long)) 328b411b363SPhilipp Reisner 329b411b363SPhilipp Reisner /* 330b411b363SPhilipp Reisner * actually most functions herein should take a struct drbd_bitmap*, not a 331b411b363SPhilipp Reisner * struct drbd_conf*, but for the debug macros I like to have the mdev around 332b411b363SPhilipp Reisner * to be able to report device specific. 
333b411b363SPhilipp Reisner */ 334b411b363SPhilipp Reisner 33519f843aaSLars Ellenberg 336b411b363SPhilipp Reisner static void bm_free_pages(struct page **pages, unsigned long number) 337b411b363SPhilipp Reisner { 338b411b363SPhilipp Reisner unsigned long i; 339b411b363SPhilipp Reisner if (!pages) 340b411b363SPhilipp Reisner return; 341b411b363SPhilipp Reisner 342b411b363SPhilipp Reisner for (i = 0; i < number; i++) { 343b411b363SPhilipp Reisner if (!pages[i]) { 344b411b363SPhilipp Reisner printk(KERN_ALERT "drbd: bm_free_pages tried to free " 345b411b363SPhilipp Reisner "a NULL pointer; i=%lu n=%lu\n", 346b411b363SPhilipp Reisner i, number); 347b411b363SPhilipp Reisner continue; 348b411b363SPhilipp Reisner } 349b411b363SPhilipp Reisner __free_page(pages[i]); 350b411b363SPhilipp Reisner pages[i] = NULL; 351b411b363SPhilipp Reisner } 352b411b363SPhilipp Reisner } 353b411b363SPhilipp Reisner 354b411b363SPhilipp Reisner static void bm_vk_free(void *ptr, int v) 355b411b363SPhilipp Reisner { 356b411b363SPhilipp Reisner if (v) 357b411b363SPhilipp Reisner vfree(ptr); 358b411b363SPhilipp Reisner else 359b411b363SPhilipp Reisner kfree(ptr); 360b411b363SPhilipp Reisner } 361b411b363SPhilipp Reisner 362b411b363SPhilipp Reisner /* 363b411b363SPhilipp Reisner * "have" and "want" are NUMBER OF PAGES. 
364b411b363SPhilipp Reisner */ 365b411b363SPhilipp Reisner static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) 366b411b363SPhilipp Reisner { 367b411b363SPhilipp Reisner struct page **old_pages = b->bm_pages; 368b411b363SPhilipp Reisner struct page **new_pages, *page; 369b411b363SPhilipp Reisner unsigned int i, bytes, vmalloced = 0; 370b411b363SPhilipp Reisner unsigned long have = b->bm_number_of_pages; 371b411b363SPhilipp Reisner 372b411b363SPhilipp Reisner BUG_ON(have == 0 && old_pages != NULL); 373b411b363SPhilipp Reisner BUG_ON(have != 0 && old_pages == NULL); 374b411b363SPhilipp Reisner 375b411b363SPhilipp Reisner if (have == want) 376b411b363SPhilipp Reisner return old_pages; 377b411b363SPhilipp Reisner 378b411b363SPhilipp Reisner /* Trying kmalloc first, falling back to vmalloc. 379b411b363SPhilipp Reisner * GFP_KERNEL is ok, as this is done when a lower level disk is 380b411b363SPhilipp Reisner * "attached" to the drbd. Context is receiver thread or cqueue 381b411b363SPhilipp Reisner * thread. As we have no disk yet, we are not in the IO path, 382b411b363SPhilipp Reisner * not even the IO path of the peer. 
*/ 383b411b363SPhilipp Reisner bytes = sizeof(struct page *)*want; 384b411b363SPhilipp Reisner new_pages = kmalloc(bytes, GFP_KERNEL); 385b411b363SPhilipp Reisner if (!new_pages) { 386b411b363SPhilipp Reisner new_pages = vmalloc(bytes); 387b411b363SPhilipp Reisner if (!new_pages) 388b411b363SPhilipp Reisner return NULL; 389b411b363SPhilipp Reisner vmalloced = 1; 390b411b363SPhilipp Reisner } 391b411b363SPhilipp Reisner 392b411b363SPhilipp Reisner memset(new_pages, 0, bytes); 393b411b363SPhilipp Reisner if (want >= have) { 394b411b363SPhilipp Reisner for (i = 0; i < have; i++) 395b411b363SPhilipp Reisner new_pages[i] = old_pages[i]; 396b411b363SPhilipp Reisner for (; i < want; i++) { 397b411b363SPhilipp Reisner page = alloc_page(GFP_HIGHUSER); 398b411b363SPhilipp Reisner if (!page) { 399b411b363SPhilipp Reisner bm_free_pages(new_pages + have, i - have); 400b411b363SPhilipp Reisner bm_vk_free(new_pages, vmalloced); 401b411b363SPhilipp Reisner return NULL; 402b411b363SPhilipp Reisner } 40319f843aaSLars Ellenberg /* we want to know which page it is 40419f843aaSLars Ellenberg * from the endio handlers */ 40519f843aaSLars Ellenberg bm_store_page_idx(page, i); 406b411b363SPhilipp Reisner new_pages[i] = page; 407b411b363SPhilipp Reisner } 408b411b363SPhilipp Reisner } else { 409b411b363SPhilipp Reisner for (i = 0; i < want; i++) 410b411b363SPhilipp Reisner new_pages[i] = old_pages[i]; 411b411b363SPhilipp Reisner /* NOT HERE, we are outside the spinlock! 
412b411b363SPhilipp Reisner bm_free_pages(old_pages + want, have - want); 413b411b363SPhilipp Reisner */ 414b411b363SPhilipp Reisner } 415b411b363SPhilipp Reisner 416b411b363SPhilipp Reisner if (vmalloced) 41720ceb2b2SLars Ellenberg b->bm_flags |= BM_P_VMALLOCED; 418b411b363SPhilipp Reisner else 41920ceb2b2SLars Ellenberg b->bm_flags &= ~BM_P_VMALLOCED; 420b411b363SPhilipp Reisner 421b411b363SPhilipp Reisner return new_pages; 422b411b363SPhilipp Reisner } 423b411b363SPhilipp Reisner 424b411b363SPhilipp Reisner /* 425b411b363SPhilipp Reisner * called on driver init only. TODO call when a device is created. 426b411b363SPhilipp Reisner * allocates the drbd_bitmap, and stores it in mdev->bitmap. 427b411b363SPhilipp Reisner */ 428b411b363SPhilipp Reisner int drbd_bm_init(struct drbd_conf *mdev) 429b411b363SPhilipp Reisner { 430b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 431b411b363SPhilipp Reisner WARN_ON(b != NULL); 432b411b363SPhilipp Reisner b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL); 433b411b363SPhilipp Reisner if (!b) 434b411b363SPhilipp Reisner return -ENOMEM; 435b411b363SPhilipp Reisner spin_lock_init(&b->bm_lock); 4368a03ae2aSThomas Gleixner mutex_init(&b->bm_change); 437b411b363SPhilipp Reisner init_waitqueue_head(&b->bm_io_wait); 438b411b363SPhilipp Reisner 439b411b363SPhilipp Reisner mdev->bitmap = b; 440b411b363SPhilipp Reisner 441b411b363SPhilipp Reisner return 0; 442b411b363SPhilipp Reisner } 443b411b363SPhilipp Reisner 444b411b363SPhilipp Reisner sector_t drbd_bm_capacity(struct drbd_conf *mdev) 445b411b363SPhilipp Reisner { 446b411b363SPhilipp Reisner ERR_IF(!mdev->bitmap) return 0; 447b411b363SPhilipp Reisner return mdev->bitmap->bm_dev_capacity; 448b411b363SPhilipp Reisner } 449b411b363SPhilipp Reisner 450b411b363SPhilipp Reisner /* called on driver unload. TODO: call when a device is destroyed. 
451b411b363SPhilipp Reisner */ 452b411b363SPhilipp Reisner void drbd_bm_cleanup(struct drbd_conf *mdev) 453b411b363SPhilipp Reisner { 454b411b363SPhilipp Reisner ERR_IF (!mdev->bitmap) return; 455b411b363SPhilipp Reisner bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages); 45620ceb2b2SLars Ellenberg bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags)); 457b411b363SPhilipp Reisner kfree(mdev->bitmap); 458b411b363SPhilipp Reisner mdev->bitmap = NULL; 459b411b363SPhilipp Reisner } 460b411b363SPhilipp Reisner 461b411b363SPhilipp Reisner /* 462b411b363SPhilipp Reisner * since (b->bm_bits % BITS_PER_LONG) != 0, 463b411b363SPhilipp Reisner * this masks out the remaining bits. 464b411b363SPhilipp Reisner * Returns the number of bits cleared. 465b411b363SPhilipp Reisner */ 46695a0f10cSLars Ellenberg #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) 46795a0f10cSLars Ellenberg #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1) 46895a0f10cSLars Ellenberg #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1) 469b411b363SPhilipp Reisner static int bm_clear_surplus(struct drbd_bitmap *b) 470b411b363SPhilipp Reisner { 47195a0f10cSLars Ellenberg unsigned long mask; 472b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 47395a0f10cSLars Ellenberg int tmp; 47495a0f10cSLars Ellenberg int cleared = 0; 475b411b363SPhilipp Reisner 47695a0f10cSLars Ellenberg /* number of bits modulo bits per page */ 47795a0f10cSLars Ellenberg tmp = (b->bm_bits & BITS_PER_PAGE_MASK); 47895a0f10cSLars Ellenberg /* mask the used bits of the word containing the last bit */ 47995a0f10cSLars Ellenberg mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; 48095a0f10cSLars Ellenberg /* bitmap is always stored little endian, 48195a0f10cSLars Ellenberg * on disk and in core memory alike */ 48295a0f10cSLars Ellenberg mask = cpu_to_lel(mask); 48395a0f10cSLars Ellenberg 4846850c442SLars Ellenberg p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); 48595a0f10cSLars Ellenberg bm = p_addr + 
(tmp/BITS_PER_LONG); 48695a0f10cSLars Ellenberg if (mask) { 48795a0f10cSLars Ellenberg /* If mask != 0, we are not exactly aligned, so bm now points 48895a0f10cSLars Ellenberg * to the long containing the last bit. 48995a0f10cSLars Ellenberg * If mask == 0, bm already points to the word immediately 49095a0f10cSLars Ellenberg * after the last (long word aligned) bit. */ 491b411b363SPhilipp Reisner cleared = hweight_long(*bm & ~mask); 492b411b363SPhilipp Reisner *bm &= mask; 49395a0f10cSLars Ellenberg bm++; 494b411b363SPhilipp Reisner } 495b411b363SPhilipp Reisner 49695a0f10cSLars Ellenberg if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { 49795a0f10cSLars Ellenberg /* on a 32bit arch, we may need to zero out 49895a0f10cSLars Ellenberg * a padding long to align with a 64bit remote */ 499b411b363SPhilipp Reisner cleared += hweight_long(*bm); 500b411b363SPhilipp Reisner *bm = 0; 501b411b363SPhilipp Reisner } 502b411b363SPhilipp Reisner bm_unmap(p_addr); 503b411b363SPhilipp Reisner return cleared; 504b411b363SPhilipp Reisner } 505b411b363SPhilipp Reisner 506b411b363SPhilipp Reisner static void bm_set_surplus(struct drbd_bitmap *b) 507b411b363SPhilipp Reisner { 50895a0f10cSLars Ellenberg unsigned long mask; 509b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 51095a0f10cSLars Ellenberg int tmp; 511b411b363SPhilipp Reisner 51295a0f10cSLars Ellenberg /* number of bits modulo bits per page */ 51395a0f10cSLars Ellenberg tmp = (b->bm_bits & BITS_PER_PAGE_MASK); 51495a0f10cSLars Ellenberg /* mask the used bits of the word containing the last bit */ 51595a0f10cSLars Ellenberg mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; 51695a0f10cSLars Ellenberg /* bitmap is always stored little endian, 51795a0f10cSLars Ellenberg * on disk and in core memory alike */ 51895a0f10cSLars Ellenberg mask = cpu_to_lel(mask); 51995a0f10cSLars Ellenberg 5206850c442SLars Ellenberg p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); 52195a0f10cSLars Ellenberg bm = p_addr + (tmp/BITS_PER_LONG); 
52295a0f10cSLars Ellenberg if (mask) { 52395a0f10cSLars Ellenberg /* If mask != 0, we are not exactly aligned, so bm now points 52495a0f10cSLars Ellenberg * to the long containing the last bit. 52595a0f10cSLars Ellenberg * If mask == 0, bm already points to the word immediately 52695a0f10cSLars Ellenberg * after the last (long word aligned) bit. */ 527b411b363SPhilipp Reisner *bm |= ~mask; 52895a0f10cSLars Ellenberg bm++; 529b411b363SPhilipp Reisner } 530b411b363SPhilipp Reisner 53195a0f10cSLars Ellenberg if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { 53295a0f10cSLars Ellenberg /* on a 32bit arch, we may need to zero out 53395a0f10cSLars Ellenberg * a padding long to align with a 64bit remote */ 53495a0f10cSLars Ellenberg *bm = ~0UL; 535b411b363SPhilipp Reisner } 536b411b363SPhilipp Reisner bm_unmap(p_addr); 537b411b363SPhilipp Reisner } 538b411b363SPhilipp Reisner 5394b0715f0SLars Ellenberg /* you better not modify the bitmap while this is running, 5404b0715f0SLars Ellenberg * or its results will be stale */ 54195a0f10cSLars Ellenberg static unsigned long bm_count_bits(struct drbd_bitmap *b) 542b411b363SPhilipp Reisner { 5434b0715f0SLars Ellenberg unsigned long *p_addr; 544b411b363SPhilipp Reisner unsigned long bits = 0; 5454b0715f0SLars Ellenberg unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; 5466850c442SLars Ellenberg int idx, i, last_word; 5477777a8baSLars Ellenberg 5484b0715f0SLars Ellenberg /* all but last page */ 5496850c442SLars Ellenberg for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { 5504b0715f0SLars Ellenberg p_addr = __bm_map_pidx(b, idx, KM_USER0); 5514b0715f0SLars Ellenberg for (i = 0; i < LWPP; i++) 5524b0715f0SLars Ellenberg bits += hweight_long(p_addr[i]); 5537777a8baSLars Ellenberg __bm_unmap(p_addr, KM_USER0); 554b411b363SPhilipp Reisner cond_resched(); 555b411b363SPhilipp Reisner } 5564b0715f0SLars Ellenberg /* last (or only) page */ 5574b0715f0SLars Ellenberg last_word = ((b->bm_bits - 1) & 
BITS_PER_PAGE_MASK) >> LN2_BPL; 5584b0715f0SLars Ellenberg p_addr = __bm_map_pidx(b, idx, KM_USER0); 5594b0715f0SLars Ellenberg for (i = 0; i < last_word; i++) 5604b0715f0SLars Ellenberg bits += hweight_long(p_addr[i]); 5614b0715f0SLars Ellenberg p_addr[last_word] &= cpu_to_lel(mask); 5624b0715f0SLars Ellenberg bits += hweight_long(p_addr[last_word]); 5634b0715f0SLars Ellenberg /* 32bit arch, may have an unused padding long */ 5644b0715f0SLars Ellenberg if (BITS_PER_LONG == 32 && (last_word & 1) == 0) 5654b0715f0SLars Ellenberg p_addr[last_word+1] = 0; 5664b0715f0SLars Ellenberg __bm_unmap(p_addr, KM_USER0); 567b411b363SPhilipp Reisner return bits; 568b411b363SPhilipp Reisner } 569b411b363SPhilipp Reisner 570b411b363SPhilipp Reisner /* offset and len in long words.*/ 571b411b363SPhilipp Reisner static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) 572b411b363SPhilipp Reisner { 573b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 57419f843aaSLars Ellenberg unsigned int idx; 575b411b363SPhilipp Reisner size_t do_now, end; 576b411b363SPhilipp Reisner 577b411b363SPhilipp Reisner end = offset + len; 578b411b363SPhilipp Reisner 579b411b363SPhilipp Reisner if (end > b->bm_words) { 580b411b363SPhilipp Reisner printk(KERN_ALERT "drbd: bm_memset end > bm_words\n"); 581b411b363SPhilipp Reisner return; 582b411b363SPhilipp Reisner } 583b411b363SPhilipp Reisner 584b411b363SPhilipp Reisner while (offset < end) { 585b411b363SPhilipp Reisner do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset; 58619f843aaSLars Ellenberg idx = bm_word_to_page_idx(b, offset); 58719f843aaSLars Ellenberg p_addr = bm_map_pidx(b, idx); 588b411b363SPhilipp Reisner bm = p_addr + MLPP(offset); 589b411b363SPhilipp Reisner if (bm+do_now > p_addr + LWPP) { 590b411b363SPhilipp Reisner printk(KERN_ALERT "drbd: BUG BUG BUG! 
p_addr:%p bm:%p do_now:%d\n", 591b411b363SPhilipp Reisner p_addr, bm, (int)do_now); 59284e7c0f7SLars Ellenberg } else 593b411b363SPhilipp Reisner memset(bm, c, do_now * sizeof(long)); 594b411b363SPhilipp Reisner bm_unmap(p_addr); 59519f843aaSLars Ellenberg bm_set_page_need_writeout(b->bm_pages[idx]); 596b411b363SPhilipp Reisner offset += do_now; 597b411b363SPhilipp Reisner } 598b411b363SPhilipp Reisner } 599b411b363SPhilipp Reisner 600b411b363SPhilipp Reisner /* 601b411b363SPhilipp Reisner * make sure the bitmap has enough room for the attached storage, 602b411b363SPhilipp Reisner * if necessary, resize. 603b411b363SPhilipp Reisner * called whenever we may have changed the device size. 604b411b363SPhilipp Reisner * returns -ENOMEM if we could not allocate enough memory, 0 on success. 605b411b363SPhilipp Reisner * In case this is actually a resize, we copy the old bitmap into the new one. 606b411b363SPhilipp Reisner * Otherwise, the bitmap is initialized to all bits set. 607b411b363SPhilipp Reisner */ 60802d9a94bSPhilipp Reisner int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) 609b411b363SPhilipp Reisner { 610b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 6116850c442SLars Ellenberg unsigned long bits, words, owords, obits; 612b411b363SPhilipp Reisner unsigned long want, have, onpages; /* number of pages */ 613b411b363SPhilipp Reisner struct page **npages, **opages = NULL; 614b411b363SPhilipp Reisner int err = 0, growing; 615b411b363SPhilipp Reisner int opages_vmalloced; 616b411b363SPhilipp Reisner 617b411b363SPhilipp Reisner ERR_IF(!b) return -ENOMEM; 618b411b363SPhilipp Reisner 61920ceb2b2SLars Ellenberg drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK); 620b411b363SPhilipp Reisner 621b411b363SPhilipp Reisner dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n", 622b411b363SPhilipp Reisner (unsigned long long)capacity); 623b411b363SPhilipp Reisner 624b411b363SPhilipp Reisner if (capacity == b->bm_dev_capacity) 
625b411b363SPhilipp Reisner goto out; 626b411b363SPhilipp Reisner 62720ceb2b2SLars Ellenberg opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags); 628b411b363SPhilipp Reisner 629b411b363SPhilipp Reisner if (capacity == 0) { 630b411b363SPhilipp Reisner spin_lock_irq(&b->bm_lock); 631b411b363SPhilipp Reisner opages = b->bm_pages; 632b411b363SPhilipp Reisner onpages = b->bm_number_of_pages; 633b411b363SPhilipp Reisner owords = b->bm_words; 634b411b363SPhilipp Reisner b->bm_pages = NULL; 635b411b363SPhilipp Reisner b->bm_number_of_pages = 636b411b363SPhilipp Reisner b->bm_set = 637b411b363SPhilipp Reisner b->bm_bits = 638b411b363SPhilipp Reisner b->bm_words = 639b411b363SPhilipp Reisner b->bm_dev_capacity = 0; 640b411b363SPhilipp Reisner spin_unlock_irq(&b->bm_lock); 641b411b363SPhilipp Reisner bm_free_pages(opages, onpages); 642b411b363SPhilipp Reisner bm_vk_free(opages, opages_vmalloced); 643b411b363SPhilipp Reisner goto out; 644b411b363SPhilipp Reisner } 645b411b363SPhilipp Reisner bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT)); 646b411b363SPhilipp Reisner 647b411b363SPhilipp Reisner /* if we would use 648b411b363SPhilipp Reisner words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL; 649b411b363SPhilipp Reisner a 32bit host could present the wrong number of words 650b411b363SPhilipp Reisner to a 64bit host. 
651b411b363SPhilipp Reisner */ 652b411b363SPhilipp Reisner words = ALIGN(bits, 64) >> LN2_BPL; 653b411b363SPhilipp Reisner 654b411b363SPhilipp Reisner if (get_ldev(mdev)) { 6554b0715f0SLars Ellenberg u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12; 656b411b363SPhilipp Reisner put_ldev(mdev); 6574b0715f0SLars Ellenberg if (bits > bits_on_disk) { 6584b0715f0SLars Ellenberg dev_info(DEV, "bits = %lu\n", bits); 6594b0715f0SLars Ellenberg dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk); 6604b0715f0SLars Ellenberg err = -ENOSPC; 6614b0715f0SLars Ellenberg goto out; 6624b0715f0SLars Ellenberg } 663b411b363SPhilipp Reisner } 664b411b363SPhilipp Reisner 6656850c442SLars Ellenberg want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT; 666b411b363SPhilipp Reisner have = b->bm_number_of_pages; 667b411b363SPhilipp Reisner if (want == have) { 668b411b363SPhilipp Reisner D_ASSERT(b->bm_pages != NULL); 669b411b363SPhilipp Reisner npages = b->bm_pages; 670b411b363SPhilipp Reisner } else { 6710cf9d27eSAndreas Gruenbacher if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC)) 672b411b363SPhilipp Reisner npages = NULL; 673b411b363SPhilipp Reisner else 674b411b363SPhilipp Reisner npages = bm_realloc_pages(b, want); 675b411b363SPhilipp Reisner } 676b411b363SPhilipp Reisner 677b411b363SPhilipp Reisner if (!npages) { 678b411b363SPhilipp Reisner err = -ENOMEM; 679b411b363SPhilipp Reisner goto out; 680b411b363SPhilipp Reisner } 681b411b363SPhilipp Reisner 682b411b363SPhilipp Reisner spin_lock_irq(&b->bm_lock); 683b411b363SPhilipp Reisner opages = b->bm_pages; 684b411b363SPhilipp Reisner owords = b->bm_words; 685b411b363SPhilipp Reisner obits = b->bm_bits; 686b411b363SPhilipp Reisner 687b411b363SPhilipp Reisner growing = bits > obits; 6885223671bSPhilipp Reisner if (opages && growing && set_new_bits) 689b411b363SPhilipp Reisner bm_set_surplus(b); 690b411b363SPhilipp Reisner 691b411b363SPhilipp Reisner b->bm_pages = npages; 692b411b363SPhilipp Reisner 
b->bm_number_of_pages = want; 693b411b363SPhilipp Reisner b->bm_bits = bits; 694b411b363SPhilipp Reisner b->bm_words = words; 695b411b363SPhilipp Reisner b->bm_dev_capacity = capacity; 696b411b363SPhilipp Reisner 697b411b363SPhilipp Reisner if (growing) { 69802d9a94bSPhilipp Reisner if (set_new_bits) { 699b411b363SPhilipp Reisner bm_memset(b, owords, 0xff, words-owords); 700b411b363SPhilipp Reisner b->bm_set += bits - obits; 70102d9a94bSPhilipp Reisner } else 70202d9a94bSPhilipp Reisner bm_memset(b, owords, 0x00, words-owords); 70302d9a94bSPhilipp Reisner 704b411b363SPhilipp Reisner } 705b411b363SPhilipp Reisner 706b411b363SPhilipp Reisner if (want < have) { 707b411b363SPhilipp Reisner /* implicit: (opages != NULL) && (opages != npages) */ 708b411b363SPhilipp Reisner bm_free_pages(opages + want, have - want); 709b411b363SPhilipp Reisner } 710b411b363SPhilipp Reisner 711b411b363SPhilipp Reisner (void)bm_clear_surplus(b); 712b411b363SPhilipp Reisner 713b411b363SPhilipp Reisner spin_unlock_irq(&b->bm_lock); 714b411b363SPhilipp Reisner if (opages != npages) 715b411b363SPhilipp Reisner bm_vk_free(opages, opages_vmalloced); 716b411b363SPhilipp Reisner if (!growing) 717b411b363SPhilipp Reisner b->bm_set = bm_count_bits(b); 71819f843aaSLars Ellenberg dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want); 719b411b363SPhilipp Reisner 720b411b363SPhilipp Reisner out: 721b411b363SPhilipp Reisner drbd_bm_unlock(mdev); 722b411b363SPhilipp Reisner return err; 723b411b363SPhilipp Reisner } 724b411b363SPhilipp Reisner 725b411b363SPhilipp Reisner /* inherently racy: 726b411b363SPhilipp Reisner * if not protected by other means, return value may be out of date when 727b411b363SPhilipp Reisner * leaving this function... 728b411b363SPhilipp Reisner * we still need to lock it, since it is important that this returns 729b411b363SPhilipp Reisner * bm_set == 0 precisely. 
730b411b363SPhilipp Reisner * 731b411b363SPhilipp Reisner * maybe bm_set should be atomic_t ? 732b411b363SPhilipp Reisner */ 7330778286aSPhilipp Reisner unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev) 734b411b363SPhilipp Reisner { 735b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 736b411b363SPhilipp Reisner unsigned long s; 737b411b363SPhilipp Reisner unsigned long flags; 738b411b363SPhilipp Reisner 739b411b363SPhilipp Reisner ERR_IF(!b) return 0; 740b411b363SPhilipp Reisner ERR_IF(!b->bm_pages) return 0; 741b411b363SPhilipp Reisner 742b411b363SPhilipp Reisner spin_lock_irqsave(&b->bm_lock, flags); 743b411b363SPhilipp Reisner s = b->bm_set; 744b411b363SPhilipp Reisner spin_unlock_irqrestore(&b->bm_lock, flags); 745b411b363SPhilipp Reisner 746b411b363SPhilipp Reisner return s; 747b411b363SPhilipp Reisner } 748b411b363SPhilipp Reisner 749b411b363SPhilipp Reisner unsigned long drbd_bm_total_weight(struct drbd_conf *mdev) 750b411b363SPhilipp Reisner { 751b411b363SPhilipp Reisner unsigned long s; 752b411b363SPhilipp Reisner /* if I don't have a disk, I don't know about out-of-sync status */ 753b411b363SPhilipp Reisner if (!get_ldev_if_state(mdev, D_NEGOTIATING)) 754b411b363SPhilipp Reisner return 0; 755b411b363SPhilipp Reisner s = _drbd_bm_total_weight(mdev); 756b411b363SPhilipp Reisner put_ldev(mdev); 757b411b363SPhilipp Reisner return s; 758b411b363SPhilipp Reisner } 759b411b363SPhilipp Reisner 760b411b363SPhilipp Reisner size_t drbd_bm_words(struct drbd_conf *mdev) 761b411b363SPhilipp Reisner { 762b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 763b411b363SPhilipp Reisner ERR_IF(!b) return 0; 764b411b363SPhilipp Reisner ERR_IF(!b->bm_pages) return 0; 765b411b363SPhilipp Reisner 766b411b363SPhilipp Reisner return b->bm_words; 767b411b363SPhilipp Reisner } 768b411b363SPhilipp Reisner 769b411b363SPhilipp Reisner unsigned long drbd_bm_bits(struct drbd_conf *mdev) 770b411b363SPhilipp Reisner { 771b411b363SPhilipp Reisner struct 
drbd_bitmap *b = mdev->bitmap; 772b411b363SPhilipp Reisner ERR_IF(!b) return 0; 773b411b363SPhilipp Reisner 774b411b363SPhilipp Reisner return b->bm_bits; 775b411b363SPhilipp Reisner } 776b411b363SPhilipp Reisner 777b411b363SPhilipp Reisner /* merge number words from buffer into the bitmap starting at offset. 778b411b363SPhilipp Reisner * buffer[i] is expected to be little endian unsigned long. 779b411b363SPhilipp Reisner * bitmap must be locked by drbd_bm_lock. 780b411b363SPhilipp Reisner * currently only used from receive_bitmap. 781b411b363SPhilipp Reisner */ 782b411b363SPhilipp Reisner void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number, 783b411b363SPhilipp Reisner unsigned long *buffer) 784b411b363SPhilipp Reisner { 785b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 786b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 787b411b363SPhilipp Reisner unsigned long word, bits; 78819f843aaSLars Ellenberg unsigned int idx; 789b411b363SPhilipp Reisner size_t end, do_now; 790b411b363SPhilipp Reisner 791b411b363SPhilipp Reisner end = offset + number; 792b411b363SPhilipp Reisner 793b411b363SPhilipp Reisner ERR_IF(!b) return; 794b411b363SPhilipp Reisner ERR_IF(!b->bm_pages) return; 795b411b363SPhilipp Reisner if (number == 0) 796b411b363SPhilipp Reisner return; 797b411b363SPhilipp Reisner WARN_ON(offset >= b->bm_words); 798b411b363SPhilipp Reisner WARN_ON(end > b->bm_words); 799b411b363SPhilipp Reisner 800b411b363SPhilipp Reisner spin_lock_irq(&b->bm_lock); 801b411b363SPhilipp Reisner while (offset < end) { 802b411b363SPhilipp Reisner do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; 80319f843aaSLars Ellenberg idx = bm_word_to_page_idx(b, offset); 80419f843aaSLars Ellenberg p_addr = bm_map_pidx(b, idx); 805b411b363SPhilipp Reisner bm = p_addr + MLPP(offset); 806b411b363SPhilipp Reisner offset += do_now; 807b411b363SPhilipp Reisner while (do_now--) { 808b411b363SPhilipp Reisner bits = hweight_long(*bm); 80995a0f10cSLars 
Ellenberg word = *bm | *buffer++; 810b411b363SPhilipp Reisner *bm++ = word; 811b411b363SPhilipp Reisner b->bm_set += hweight_long(word) - bits; 812b411b363SPhilipp Reisner } 813b411b363SPhilipp Reisner bm_unmap(p_addr); 81419f843aaSLars Ellenberg bm_set_page_need_writeout(b->bm_pages[idx]); 815b411b363SPhilipp Reisner } 816b411b363SPhilipp Reisner /* with 32bit <-> 64bit cross-platform connect 817b411b363SPhilipp Reisner * this is only correct for current usage, 818b411b363SPhilipp Reisner * where we _know_ that we are 64 bit aligned, 819b411b363SPhilipp Reisner * and know that this function is used in this way, too... 820b411b363SPhilipp Reisner */ 821b411b363SPhilipp Reisner if (end == b->bm_words) 822b411b363SPhilipp Reisner b->bm_set -= bm_clear_surplus(b); 823b411b363SPhilipp Reisner spin_unlock_irq(&b->bm_lock); 824b411b363SPhilipp Reisner } 825b411b363SPhilipp Reisner 826b411b363SPhilipp Reisner /* copy number words from the bitmap starting at offset into the buffer. 827b411b363SPhilipp Reisner * buffer[i] will be little endian unsigned long. 
828b411b363SPhilipp Reisner */ 829b411b363SPhilipp Reisner void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number, 830b411b363SPhilipp Reisner unsigned long *buffer) 831b411b363SPhilipp Reisner { 832b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 833b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 834b411b363SPhilipp Reisner size_t end, do_now; 835b411b363SPhilipp Reisner 836b411b363SPhilipp Reisner end = offset + number; 837b411b363SPhilipp Reisner 838b411b363SPhilipp Reisner ERR_IF(!b) return; 839b411b363SPhilipp Reisner ERR_IF(!b->bm_pages) return; 840b411b363SPhilipp Reisner 841b411b363SPhilipp Reisner spin_lock_irq(&b->bm_lock); 842b411b363SPhilipp Reisner if ((offset >= b->bm_words) || 843b411b363SPhilipp Reisner (end > b->bm_words) || 844b411b363SPhilipp Reisner (number <= 0)) 845b411b363SPhilipp Reisner dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n", 846b411b363SPhilipp Reisner (unsigned long) offset, 847b411b363SPhilipp Reisner (unsigned long) number, 848b411b363SPhilipp Reisner (unsigned long) b->bm_words); 849b411b363SPhilipp Reisner else { 850b411b363SPhilipp Reisner while (offset < end) { 851b411b363SPhilipp Reisner do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset; 85219f843aaSLars Ellenberg p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset)); 853b411b363SPhilipp Reisner bm = p_addr + MLPP(offset); 854b411b363SPhilipp Reisner offset += do_now; 855b411b363SPhilipp Reisner while (do_now--) 85695a0f10cSLars Ellenberg *buffer++ = *bm++; 857b411b363SPhilipp Reisner bm_unmap(p_addr); 858b411b363SPhilipp Reisner } 859b411b363SPhilipp Reisner } 860b411b363SPhilipp Reisner spin_unlock_irq(&b->bm_lock); 861b411b363SPhilipp Reisner } 862b411b363SPhilipp Reisner 863b411b363SPhilipp Reisner /* set all bits in the bitmap */ 864b411b363SPhilipp Reisner void drbd_bm_set_all(struct drbd_conf *mdev) 865b411b363SPhilipp Reisner { 866b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 867b411b363SPhilipp 
Reisner ERR_IF(!b) return; 868b411b363SPhilipp Reisner ERR_IF(!b->bm_pages) return; 869b411b363SPhilipp Reisner 870b411b363SPhilipp Reisner spin_lock_irq(&b->bm_lock); 871b411b363SPhilipp Reisner bm_memset(b, 0, 0xff, b->bm_words); 872b411b363SPhilipp Reisner (void)bm_clear_surplus(b); 873b411b363SPhilipp Reisner b->bm_set = b->bm_bits; 874b411b363SPhilipp Reisner spin_unlock_irq(&b->bm_lock); 875b411b363SPhilipp Reisner } 876b411b363SPhilipp Reisner 877b411b363SPhilipp Reisner /* clear all bits in the bitmap */ 878b411b363SPhilipp Reisner void drbd_bm_clear_all(struct drbd_conf *mdev) 879b411b363SPhilipp Reisner { 880b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 881b411b363SPhilipp Reisner ERR_IF(!b) return; 882b411b363SPhilipp Reisner ERR_IF(!b->bm_pages) return; 883b411b363SPhilipp Reisner 884b411b363SPhilipp Reisner spin_lock_irq(&b->bm_lock); 885b411b363SPhilipp Reisner bm_memset(b, 0, 0, b->bm_words); 886b411b363SPhilipp Reisner b->bm_set = 0; 887b411b363SPhilipp Reisner spin_unlock_irq(&b->bm_lock); 888b411b363SPhilipp Reisner } 889b411b363SPhilipp Reisner 89019f843aaSLars Ellenberg struct bm_aio_ctx { 89119f843aaSLars Ellenberg struct drbd_conf *mdev; 89219f843aaSLars Ellenberg atomic_t in_flight; 893725a97e4SLars Ellenberg struct completion done; 89419f843aaSLars Ellenberg unsigned flags; 89519f843aaSLars Ellenberg #define BM_AIO_COPY_PAGES 1 89619f843aaSLars Ellenberg int error; 89719f843aaSLars Ellenberg }; 89819f843aaSLars Ellenberg 89919f843aaSLars Ellenberg /* bv_page may be a copy, or may be the original */ 900b411b363SPhilipp Reisner static void bm_async_io_complete(struct bio *bio, int error) 901b411b363SPhilipp Reisner { 90219f843aaSLars Ellenberg struct bm_aio_ctx *ctx = bio->bi_private; 90319f843aaSLars Ellenberg struct drbd_conf *mdev = ctx->mdev; 90419f843aaSLars Ellenberg struct drbd_bitmap *b = mdev->bitmap; 90519f843aaSLars Ellenberg unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); 906b411b363SPhilipp Reisner 
int uptodate = bio_flagged(bio, BIO_UPTODATE); 907b411b363SPhilipp Reisner 908b411b363SPhilipp Reisner 909b411b363SPhilipp Reisner /* strange behavior of some lower level drivers... 910b411b363SPhilipp Reisner * fail the request by clearing the uptodate flag, 911b411b363SPhilipp Reisner * but do not return any error?! 912b411b363SPhilipp Reisner * do we want to WARN() on this? */ 913b411b363SPhilipp Reisner if (!error && !uptodate) 914b411b363SPhilipp Reisner error = -EIO; 915b411b363SPhilipp Reisner 9167648cdfeSLars Ellenberg if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 && 9177648cdfeSLars Ellenberg !bm_test_page_unchanged(b->bm_pages[idx])) 9187648cdfeSLars Ellenberg dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx); 91919f843aaSLars Ellenberg 920b411b363SPhilipp Reisner if (error) { 92119f843aaSLars Ellenberg /* ctx error will hold the completed-last non-zero error code, 92219f843aaSLars Ellenberg * in case error codes differ. */ 92319f843aaSLars Ellenberg ctx->error = error; 92419f843aaSLars Ellenberg bm_set_page_io_err(b->bm_pages[idx]); 92519f843aaSLars Ellenberg /* Not identical to on disk version of it. 92619f843aaSLars Ellenberg * Is BM_PAGE_IO_ERROR enough? 
*/ 92719f843aaSLars Ellenberg if (__ratelimit(&drbd_ratelimit_state)) 92819f843aaSLars Ellenberg dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n", 92919f843aaSLars Ellenberg error, idx); 93019f843aaSLars Ellenberg } else { 93119f843aaSLars Ellenberg bm_clear_page_io_err(b->bm_pages[idx]); 93219f843aaSLars Ellenberg dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx); 933b411b363SPhilipp Reisner } 93419f843aaSLars Ellenberg 93519f843aaSLars Ellenberg bm_page_unlock_io(mdev, idx); 93619f843aaSLars Ellenberg 93719f843aaSLars Ellenberg /* FIXME give back to page pool */ 93819f843aaSLars Ellenberg if (ctx->flags & BM_AIO_COPY_PAGES) 93919f843aaSLars Ellenberg put_page(bio->bi_io_vec[0].bv_page); 940b411b363SPhilipp Reisner 941b411b363SPhilipp Reisner bio_put(bio); 94219f843aaSLars Ellenberg 94319f843aaSLars Ellenberg if (atomic_dec_and_test(&ctx->in_flight)) 944725a97e4SLars Ellenberg complete(&ctx->done); 945b411b363SPhilipp Reisner } 946b411b363SPhilipp Reisner 94719f843aaSLars Ellenberg static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local) 948b411b363SPhilipp Reisner { 949b411b363SPhilipp Reisner /* we are process context. 
we always get a bio */ 950b411b363SPhilipp Reisner struct bio *bio = bio_alloc(GFP_KERNEL, 1); 95119f843aaSLars Ellenberg struct drbd_conf *mdev = ctx->mdev; 95219f843aaSLars Ellenberg struct drbd_bitmap *b = mdev->bitmap; 95319f843aaSLars Ellenberg struct page *page; 954b411b363SPhilipp Reisner unsigned int len; 95519f843aaSLars Ellenberg 956b411b363SPhilipp Reisner sector_t on_disk_sector = 957b411b363SPhilipp Reisner mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset; 958b411b363SPhilipp Reisner on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9); 959b411b363SPhilipp Reisner 960b411b363SPhilipp Reisner /* this might happen with very small 96119f843aaSLars Ellenberg * flexible external meta data device, 96219f843aaSLars Ellenberg * or with PAGE_SIZE > 4k */ 963b411b363SPhilipp Reisner len = min_t(unsigned int, PAGE_SIZE, 964b411b363SPhilipp Reisner (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9); 965b411b363SPhilipp Reisner 96619f843aaSLars Ellenberg /* serialize IO on this page */ 96719f843aaSLars Ellenberg bm_page_lock_io(mdev, page_nr); 96819f843aaSLars Ellenberg /* before memcpy and submit, 96919f843aaSLars Ellenberg * so it can be redirtied any time */ 97019f843aaSLars Ellenberg bm_set_page_unchanged(b->bm_pages[page_nr]); 97119f843aaSLars Ellenberg 97219f843aaSLars Ellenberg if (ctx->flags & BM_AIO_COPY_PAGES) { 97319f843aaSLars Ellenberg /* FIXME alloc_page is good enough for now, but actually needs 97419f843aaSLars Ellenberg * to use pre-allocated page pool */ 97519f843aaSLars Ellenberg void *src, *dest; 97619f843aaSLars Ellenberg page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT); 97719f843aaSLars Ellenberg dest = kmap_atomic(page, KM_USER0); 97819f843aaSLars Ellenberg src = kmap_atomic(b->bm_pages[page_nr], KM_USER1); 97919f843aaSLars Ellenberg memcpy(dest, src, PAGE_SIZE); 98019f843aaSLars Ellenberg kunmap_atomic(src, KM_USER1); 98119f843aaSLars Ellenberg kunmap_atomic(dest, KM_USER0); 98219f843aaSLars Ellenberg bm_store_page_idx(page, 
page_nr); 98319f843aaSLars Ellenberg } else 98419f843aaSLars Ellenberg page = b->bm_pages[page_nr]; 98519f843aaSLars Ellenberg 986b411b363SPhilipp Reisner bio->bi_bdev = mdev->ldev->md_bdev; 987b411b363SPhilipp Reisner bio->bi_sector = on_disk_sector; 98819f843aaSLars Ellenberg bio_add_page(bio, page, len, 0); 98919f843aaSLars Ellenberg bio->bi_private = ctx; 990b411b363SPhilipp Reisner bio->bi_end_io = bm_async_io_complete; 991b411b363SPhilipp Reisner 9920cf9d27eSAndreas Gruenbacher if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) { 993b411b363SPhilipp Reisner bio->bi_rw |= rw; 994b411b363SPhilipp Reisner bio_endio(bio, -EIO); 995b411b363SPhilipp Reisner } else { 996b411b363SPhilipp Reisner submit_bio(rw, bio); 997b411b363SPhilipp Reisner } 998b411b363SPhilipp Reisner } 999b411b363SPhilipp Reisner 1000b411b363SPhilipp Reisner /* 1001b411b363SPhilipp Reisner * bm_rw: read/write the whole bitmap from/to its on disk location. 1002b411b363SPhilipp Reisner */ 100319f843aaSLars Ellenberg static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local) 1004b411b363SPhilipp Reisner { 1005725a97e4SLars Ellenberg struct bm_aio_ctx ctx = { 1006725a97e4SLars Ellenberg .mdev = mdev, 1007725a97e4SLars Ellenberg .in_flight = ATOMIC_INIT(1), 1008725a97e4SLars Ellenberg .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done), 1009725a97e4SLars Ellenberg .flags = lazy_writeout_upper_idx ? 
BM_AIO_COPY_PAGES : 0, 1010725a97e4SLars Ellenberg }; 1011b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 10126850c442SLars Ellenberg int num_pages, i, count = 0; 1013b411b363SPhilipp Reisner unsigned long now; 1014b411b363SPhilipp Reisner char ppb[10]; 1015b411b363SPhilipp Reisner int err = 0; 1016b411b363SPhilipp Reisner 101719f843aaSLars Ellenberg /* 101819f843aaSLars Ellenberg * We are protected against bitmap disappearing/resizing by holding an 101919f843aaSLars Ellenberg * ldev reference (caller must have called get_ldev()). 102019f843aaSLars Ellenberg * For read/write, we are protected against changes to the bitmap by 102119f843aaSLars Ellenberg * the bitmap lock (see drbd_bitmap_io). 102219f843aaSLars Ellenberg * For lazy writeout, we don't care for ongoing changes to the bitmap, 102319f843aaSLars Ellenberg * as we submit copies of pages anyways. 102419f843aaSLars Ellenberg */ 102519f843aaSLars Ellenberg if (!ctx.flags) 102620ceb2b2SLars Ellenberg WARN_ON(!(BM_LOCKED_MASK & b->bm_flags)); 1027b411b363SPhilipp Reisner 10286850c442SLars Ellenberg num_pages = b->bm_number_of_pages; 1029b411b363SPhilipp Reisner 1030b411b363SPhilipp Reisner now = jiffies; 1031b411b363SPhilipp Reisner 1032b411b363SPhilipp Reisner /* let the layers below us try to merge these bios... */ 10336850c442SLars Ellenberg for (i = 0; i < num_pages; i++) { 103419f843aaSLars Ellenberg /* ignore completely unchanged pages */ 103519f843aaSLars Ellenberg if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) 103619f843aaSLars Ellenberg break; 103719f843aaSLars Ellenberg if (rw & WRITE) { 103819f843aaSLars Ellenberg if (bm_test_page_unchanged(b->bm_pages[i])) { 103919f843aaSLars Ellenberg dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i); 104019f843aaSLars Ellenberg continue; 104119f843aaSLars Ellenberg } 104219f843aaSLars Ellenberg /* during lazy writeout, 104319f843aaSLars Ellenberg * ignore those pages not marked for lazy writeout. 
*/ 104419f843aaSLars Ellenberg if (lazy_writeout_upper_idx && 104519f843aaSLars Ellenberg !bm_test_page_lazy_writeout(b->bm_pages[i])) { 104619f843aaSLars Ellenberg dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i); 104719f843aaSLars Ellenberg continue; 104819f843aaSLars Ellenberg } 104919f843aaSLars Ellenberg } 105019f843aaSLars Ellenberg atomic_inc(&ctx.in_flight); 105119f843aaSLars Ellenberg bm_page_io_async(&ctx, i, rw); 105219f843aaSLars Ellenberg ++count; 105319f843aaSLars Ellenberg cond_resched(); 105419f843aaSLars Ellenberg } 1055b411b363SPhilipp Reisner 1056725a97e4SLars Ellenberg /* 1057725a97e4SLars Ellenberg * We initialize ctx.in_flight to one to make sure bm_async_io_complete 1058725a97e4SLars Ellenberg * will not complete() early, and decrement / test it here. If there 1059725a97e4SLars Ellenberg * are still some bios in flight, we need to wait for them here. 1060725a97e4SLars Ellenberg */ 1061725a97e4SLars Ellenberg if (!atomic_dec_and_test(&ctx.in_flight)) 1062725a97e4SLars Ellenberg wait_for_completion(&ctx.done); 106319f843aaSLars Ellenberg dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n", 106419f843aaSLars Ellenberg rw == WRITE ? "WRITE" : "READ", 106519f843aaSLars Ellenberg count, jiffies - now); 1066b411b363SPhilipp Reisner 106719f843aaSLars Ellenberg if (ctx.error) { 1068b411b363SPhilipp Reisner dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n"); 106981e84650SAndreas Gruenbacher drbd_chk_io_error(mdev, 1, true); 107019f843aaSLars Ellenberg err = -EIO; /* ctx.error ? 
*/ 1071b411b363SPhilipp Reisner } 1072b411b363SPhilipp Reisner 1073b411b363SPhilipp Reisner now = jiffies; 1074b411b363SPhilipp Reisner if (rw == WRITE) { 1075b411b363SPhilipp Reisner drbd_md_flush(mdev); 1076b411b363SPhilipp Reisner } else /* rw == READ */ { 107795a0f10cSLars Ellenberg b->bm_set = bm_count_bits(b); 1078b411b363SPhilipp Reisner dev_info(DEV, "recounting of set bits took additional %lu jiffies\n", 1079b411b363SPhilipp Reisner jiffies - now); 1080b411b363SPhilipp Reisner } 1081b411b363SPhilipp Reisner now = b->bm_set; 1082b411b363SPhilipp Reisner 1083b411b363SPhilipp Reisner dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n", 1084b411b363SPhilipp Reisner ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now); 1085b411b363SPhilipp Reisner 1086b411b363SPhilipp Reisner return err; 1087b411b363SPhilipp Reisner } 1088b411b363SPhilipp Reisner 1089b411b363SPhilipp Reisner /** 1090b411b363SPhilipp Reisner * drbd_bm_read() - Read the whole bitmap from its on disk location. 1091b411b363SPhilipp Reisner * @mdev: DRBD device. 1092b411b363SPhilipp Reisner */ 1093b411b363SPhilipp Reisner int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local) 1094b411b363SPhilipp Reisner { 109519f843aaSLars Ellenberg return bm_rw(mdev, READ, 0); 1096b411b363SPhilipp Reisner } 1097b411b363SPhilipp Reisner 1098b411b363SPhilipp Reisner /** 1099b411b363SPhilipp Reisner * drbd_bm_write() - Write the whole bitmap to its on disk location. 1100b411b363SPhilipp Reisner * @mdev: DRBD device. 110119f843aaSLars Ellenberg * 110219f843aaSLars Ellenberg * Will only write pages that have changed since last IO. 
1103b411b363SPhilipp Reisner */ 1104b411b363SPhilipp Reisner int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local) 1105b411b363SPhilipp Reisner { 110619f843aaSLars Ellenberg return bm_rw(mdev, WRITE, 0); 1107b411b363SPhilipp Reisner } 1108b411b363SPhilipp Reisner 1109b411b363SPhilipp Reisner /** 111019f843aaSLars Ellenberg * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed. 1111b411b363SPhilipp Reisner * @mdev: DRBD device. 111219f843aaSLars Ellenberg * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages 1113b411b363SPhilipp Reisner */ 111419f843aaSLars Ellenberg int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local) 1115b411b363SPhilipp Reisner { 111619f843aaSLars Ellenberg return bm_rw(mdev, WRITE, upper_idx); 1117b411b363SPhilipp Reisner } 111819f843aaSLars Ellenberg 111919f843aaSLars Ellenberg 112019f843aaSLars Ellenberg /** 112119f843aaSLars Ellenberg * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap 112219f843aaSLars Ellenberg * @mdev: DRBD device. 112319f843aaSLars Ellenberg * @idx: bitmap page index 112419f843aaSLars Ellenberg * 11254b0715f0SLars Ellenberg * We don't want to special case on logical_block_size of the backend device, 11264b0715f0SLars Ellenberg * so we submit PAGE_SIZE aligned pieces. 112719f843aaSLars Ellenberg * Note that on "most" systems, PAGE_SIZE is 4k. 11284b0715f0SLars Ellenberg * 11294b0715f0SLars Ellenberg * In case this becomes an issue on systems with larger PAGE_SIZE, 11304b0715f0SLars Ellenberg * we may want to change this again to write 4k aligned 4k pieces. 
113119f843aaSLars Ellenberg */ 113219f843aaSLars Ellenberg int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local) 113319f843aaSLars Ellenberg { 1134725a97e4SLars Ellenberg struct bm_aio_ctx ctx = { 1135725a97e4SLars Ellenberg .mdev = mdev, 1136725a97e4SLars Ellenberg .in_flight = ATOMIC_INIT(1), 1137725a97e4SLars Ellenberg .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done), 1138725a97e4SLars Ellenberg .flags = BM_AIO_COPY_PAGES, 1139725a97e4SLars Ellenberg }; 114019f843aaSLars Ellenberg 114119f843aaSLars Ellenberg if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) { 11427648cdfeSLars Ellenberg dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx); 114319f843aaSLars Ellenberg return 0; 114419f843aaSLars Ellenberg } 114519f843aaSLars Ellenberg 114619f843aaSLars Ellenberg bm_page_io_async(&ctx, idx, WRITE_SYNC); 1147725a97e4SLars Ellenberg wait_for_completion(&ctx.done); 114819f843aaSLars Ellenberg 114919f843aaSLars Ellenberg if (ctx.error) 115019f843aaSLars Ellenberg drbd_chk_io_error(mdev, 1, true); 115119f843aaSLars Ellenberg /* that should force detach, so the in memory bitmap will be 115219f843aaSLars Ellenberg * gone in a moment as well. */ 115319f843aaSLars Ellenberg 1154b411b363SPhilipp Reisner mdev->bm_writ_cnt++; 115519f843aaSLars Ellenberg return ctx.error; 1156b411b363SPhilipp Reisner } 1157b411b363SPhilipp Reisner 1158b411b363SPhilipp Reisner /* NOTE 1159b411b363SPhilipp Reisner * find_first_bit returns int, we return unsigned long. 11604b0715f0SLars Ellenberg * For this to work on 32bit arch with bitnumbers > (1<<32), 11614b0715f0SLars Ellenberg * we'd need to return u64, and get a whole lot of other places 11624b0715f0SLars Ellenberg * fixed where we still use unsigned long. 1163b411b363SPhilipp Reisner * 1164b411b363SPhilipp Reisner * this returns a bit number, NOT a sector! 
 */
/* Scan for the next set bit (or, with @find_zero_bit != 0, the next clear
 * bit) at or after bit number @bm_fo.
 *
 * Walks the bitmap page by page; each page is mapped with the kmap slot
 * given by @km (KM_IRQ1 from the spinlocked callers, KM_USER1 from the
 * drbd_bm_lock()ed ones) and unmapped again before moving on.
 *
 * Returns the found bit number, or DRBD_END_OF_BITMAP if no such bit
 * exists below b->bm_bits (also returned, after a dev_err, when @bm_fo
 * itself is already out of range). */
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
	const int find_zero_bit, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;

	if (bm_fo > b->bm_bits) {
		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);

			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr, km);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				/* match in the last, only partially valid
				 * page may lie beyond bm_bits: not a hit */
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			/* nothing in this page, continue with the next one */
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}

/* Locked wrapper around __bm_find_next(): takes bm_lock (irq variant)
 * and uses the irq-safe kmap slot.  Returns DRBD_END_OF_BITMAP when the
 * bitmap (pages) are not allocated, or when no matching bit exists. */
static unsigned long bm_find_next(struct drbd_conf *mdev,
	unsigned long bm_fo, const int find_zero_bit)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long i = DRBD_END_OF_BITMAP;

	ERR_IF(!b) return i;
	ERR_IF(!b->bm_pages) return i;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);

	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);

	spin_unlock_irq(&b->bm_lock);
	return i;
}

/* Find the next set bit at or after bit number @bm_fo (locked). */
unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 1);
}
#endif

/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
}

/* Like _drbd_bm_find_next(), but searches for the next clear bit.
 * Caller must hold drbd_bm_lock() as well. */
unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already.
 */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	unsigned long e, int val, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;	/* "no page mapped yet" sentinel */
	int c = 0;		/* per-page delta of set bits */
	int changed_total = 0;

	if (e >= b->bm_bits) {
		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr, km);
			/* flush accounting for the page we just left:
			 * bits cleared (c < 0) only need lazy writeout,
			 * bits set (c > 0) must be flagged for writeout.
			 * On the very first page c is still 0, so the
			 * bogus last_page_nr sentinel is never used. */
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr, km);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	/* same flush for the last page touched */
	if (p_addr)
		__bm_unmap(p_addr, km);
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	int c = 0;

	ERR_IF(!b) return 1;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	/* complain if this kind of modification is currently forbidden */
	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
		bm_print_lock_info(mdev);

	c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(mdev, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(mdev, s, e, 0);
}

/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
	int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
	for (i = first_word; i < last_word; i++) {
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		/* account only the bits that were not already set */
		b->bm_set += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr, KM_USER0);
}

/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	unsigned long sl = ALIGN(s,BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		__bm_change_bits_to(mdev, s, e, 1, KM_USER0);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
		cond_resched();
		first_word = 0;
	}

	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);
	bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(mdev, el, e, 1, KM_USER0);
}

/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	int i;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
	if (bitnr < b->bm_bits) {
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		/* first access one past the end: signal "stop testing" */
		i = -1;
	} else { /* (bitnr > b->bm_bits) */
		dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}

/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int page_nr = -1U;	/* "no page mapped yet" sentinel */
	int c = 0;

	/* If this is called without a bitmap, that is a bug.  But just to be
	 * robust in case we screwed up elsewhere, in that case pretend there
	 * was one dirty bit in the requested area, so we won't try to do a
	 * local read there (no bitmap probably implies no disk) */
	ERR_IF(!b) return 1;
	ERR_IF(!b->bm_pages) return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
		/* remap only when we cross into a different page */
		if (page_nr != idx) {
			page_nr = idx;
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_pidx(b, idx);
		}
		ERR_IF (bitnr >= b->bm_bits) {
			dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
		} else {
			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		}
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}


/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * but the general usage is that this is only use during a cstate when bits are
 * only cleared, not set, and typically only care for the case when the return
 * value is zero, or we already "locked" this "bitmap extent" by other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 *
 * TODO
 * I think since we use it like a reference count, we should use the real
 * reference count of some bitmap extent element from some lru instead...
 *
 */
int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);

	/* word range [s, e) covered by this bm-extent,
	 * clipped to the actual number of bitmap words */
	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		/* sum the population count of all words in the extent */
		while (n--)
			count += hweight_long(*bm++);
		bm_unmap(p_addr);
	} else {
		dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}

/* Set all bits covered by the AL-extent al_enr.
 * Returns number of bits changed. */
unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long weight;
	unsigned long s, e;
	int count, i, do_now;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_SET & b->bm_flags)
		bm_print_lock_info(mdev);
	/* remember current weight to compute the delta at the end */
	weight = b->bm_set;

	/* word range [s, e) covered by this AL-extent,
	 * clipped to the actual number of bitmap words */
	s = al_enr * BM_WORDS_PER_AL_EXT;
	e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
	/* assert that s and e are on the same page */
	D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
	      == s >> (PAGE_SHIFT - LN2_BPL + 3));
	count = 0;
	if (s < b->bm_words) {
		i = do_now = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		/* count already-set bits while filling every word */
		while (i--) {
			count += hweight_long(*bm);
			*bm = -1UL;
			bm++;
		}
		bm_unmap(p_addr);
		b->bm_set += do_now*BITS_PER_LONG - count;
		/* the last word may contain bits beyond bm_bits;
		 * clear those surplus bits again and fix the count */
		if (e == b->bm_words)
			b->bm_set -= bm_clear_surplus(b);
	} else {
		dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
	}
	/* number of bits that changed 0 -> 1 */
	weight = b->bm_set - weight;
	spin_unlock_irq(&b->bm_lock);
	return weight;
}