1b411b363SPhilipp Reisner /* 2b411b363SPhilipp Reisner drbd_bitmap.c 3b411b363SPhilipp Reisner 4b411b363SPhilipp Reisner This file is part of DRBD by Philipp Reisner and Lars Ellenberg. 5b411b363SPhilipp Reisner 6b411b363SPhilipp Reisner Copyright (C) 2004-2008, LINBIT Information Technologies GmbH. 7b411b363SPhilipp Reisner Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>. 8b411b363SPhilipp Reisner Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. 9b411b363SPhilipp Reisner 10b411b363SPhilipp Reisner drbd is free software; you can redistribute it and/or modify 11b411b363SPhilipp Reisner it under the terms of the GNU General Public License as published by 12b411b363SPhilipp Reisner the Free Software Foundation; either version 2, or (at your option) 13b411b363SPhilipp Reisner any later version. 14b411b363SPhilipp Reisner 15b411b363SPhilipp Reisner drbd is distributed in the hope that it will be useful, 16b411b363SPhilipp Reisner but WITHOUT ANY WARRANTY; without even the implied warranty of 17b411b363SPhilipp Reisner MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18b411b363SPhilipp Reisner GNU General Public License for more details. 19b411b363SPhilipp Reisner 20b411b363SPhilipp Reisner You should have received a copy of the GNU General Public License 21b411b363SPhilipp Reisner along with drbd; see the file COPYING. If not, write to 22b411b363SPhilipp Reisner the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
23b411b363SPhilipp Reisner */ 24b411b363SPhilipp Reisner 25b411b363SPhilipp Reisner #include <linux/bitops.h> 26b411b363SPhilipp Reisner #include <linux/vmalloc.h> 27b411b363SPhilipp Reisner #include <linux/string.h> 28b411b363SPhilipp Reisner #include <linux/drbd.h> 295a0e3ad6STejun Heo #include <linux/slab.h> 30b411b363SPhilipp Reisner #include <asm/kmap_types.h> 31f0ff1357SStephen Rothwell 32b411b363SPhilipp Reisner #include "drbd_int.h" 33b411b363SPhilipp Reisner 3495a0f10cSLars Ellenberg 35b411b363SPhilipp Reisner /* OPAQUE outside this file! 36b411b363SPhilipp Reisner * interface defined in drbd_int.h 37b411b363SPhilipp Reisner 38b411b363SPhilipp Reisner * convention: 39b411b363SPhilipp Reisner * function name drbd_bm_... => used elsewhere, "public". 40b411b363SPhilipp Reisner * function name bm_... => internal to implementation, "private". 414b0715f0SLars Ellenberg */ 42b411b363SPhilipp Reisner 434b0715f0SLars Ellenberg 444b0715f0SLars Ellenberg /* 454b0715f0SLars Ellenberg * LIMITATIONS: 464b0715f0SLars Ellenberg * We want to support >= peta byte of backend storage, while for now still using 474b0715f0SLars Ellenberg * a granularity of one bit per 4KiB of storage. 484b0715f0SLars Ellenberg * 1 << 50 bytes backend storage (1 PiB) 494b0715f0SLars Ellenberg * 1 << (50 - 12) bits needed 504b0715f0SLars Ellenberg * 38 --> we need u64 to index and count bits 514b0715f0SLars Ellenberg * 1 << (38 - 3) bitmap bytes needed 524b0715f0SLars Ellenberg * 35 --> we still need u64 to index and count bytes 534b0715f0SLars Ellenberg * (that's 32 GiB of bitmap for 1 PiB storage) 544b0715f0SLars Ellenberg * 1 << (35 - 2) 32bit longs needed 554b0715f0SLars Ellenberg * 33 --> we'd even need u64 to index and count 32bit long words. 564b0715f0SLars Ellenberg * 1 << (35 - 3) 64bit longs needed 574b0715f0SLars Ellenberg * 32 --> we could get away with a 32bit unsigned int to index and count 584b0715f0SLars Ellenberg * 64bit long words, but I rather stay with unsigned long for now. 
594b0715f0SLars Ellenberg * We probably should neither count nor point to bytes or long words 604b0715f0SLars Ellenberg * directly, but either by bitnumber, or by page index and offset. 614b0715f0SLars Ellenberg * 1 << (35 - 12) 624b0715f0SLars Ellenberg * 22 --> we need that much 4KiB pages of bitmap. 634b0715f0SLars Ellenberg * 1 << (22 + 3) --> on a 64bit arch, 644b0715f0SLars Ellenberg * we need 32 MiB to store the array of page pointers. 654b0715f0SLars Ellenberg * 664b0715f0SLars Ellenberg * Because I'm lazy, and because the resulting patch was too large, too ugly 674b0715f0SLars Ellenberg * and still incomplete, on 32bit we still "only" support 16 TiB (minus some), 684b0715f0SLars Ellenberg * (1 << 32) bits * 4k storage. 694b0715f0SLars Ellenberg * 704b0715f0SLars Ellenberg 714b0715f0SLars Ellenberg * bitmap storage and IO: 724b0715f0SLars Ellenberg * Bitmap is stored little endian on disk, and is kept little endian in 734b0715f0SLars Ellenberg * core memory. Currently we still hold the full bitmap in core as long 744b0715f0SLars Ellenberg * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage 754b0715f0SLars Ellenberg * seems excessive. 764b0715f0SLars Ellenberg * 7724c4830cSBart Van Assche * We plan to reduce the amount of in-core bitmap pages by paging them in 784b0715f0SLars Ellenberg * and out against their on-disk location as necessary, but need to make 794b0715f0SLars Ellenberg * sure we don't cause too much meta data IO, and must not deadlock in 804b0715f0SLars Ellenberg * tight memory situations. This needs some more work. 81b411b363SPhilipp Reisner */ 82b411b363SPhilipp Reisner 83b411b363SPhilipp Reisner /* 84b411b363SPhilipp Reisner * NOTE 85b411b363SPhilipp Reisner * Access to the *bm_pages is protected by bm_lock. 86b411b363SPhilipp Reisner * It is safe to read the other members within the lock. 
87b411b363SPhilipp Reisner * 88b411b363SPhilipp Reisner * drbd_bm_set_bits is called from bio_endio callbacks, 89b411b363SPhilipp Reisner * We may be called with irq already disabled, 90b411b363SPhilipp Reisner * so we need spin_lock_irqsave(). 91b411b363SPhilipp Reisner * And we need the kmap_atomic. 92b411b363SPhilipp Reisner */ 93b411b363SPhilipp Reisner struct drbd_bitmap { 94b411b363SPhilipp Reisner struct page **bm_pages; 95b411b363SPhilipp Reisner spinlock_t bm_lock; 964b0715f0SLars Ellenberg 974b0715f0SLars Ellenberg /* see LIMITATIONS: above */ 984b0715f0SLars Ellenberg 99b411b363SPhilipp Reisner unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */ 100b411b363SPhilipp Reisner unsigned long bm_bits; 101b411b363SPhilipp Reisner size_t bm_words; 102b411b363SPhilipp Reisner size_t bm_number_of_pages; 103b411b363SPhilipp Reisner sector_t bm_dev_capacity; 1048a03ae2aSThomas Gleixner struct mutex bm_change; /* serializes resize operations */ 105b411b363SPhilipp Reisner 10619f843aaSLars Ellenberg wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */ 107b411b363SPhilipp Reisner 10820ceb2b2SLars Ellenberg enum bm_flag bm_flags; 109b411b363SPhilipp Reisner 110b411b363SPhilipp Reisner /* debugging aid, in case we are still racy somewhere */ 111b411b363SPhilipp Reisner char *bm_why; 112b411b363SPhilipp Reisner struct task_struct *bm_task; 113b411b363SPhilipp Reisner }; 114b411b363SPhilipp Reisner 115b411b363SPhilipp Reisner #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) 116b411b363SPhilipp Reisner static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func) 117b411b363SPhilipp Reisner { 118b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 119b411b363SPhilipp Reisner if (!__ratelimit(&drbd_ratelimit_state)) 120b411b363SPhilipp Reisner return; 121b411b363SPhilipp Reisner dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n", 122b411b363SPhilipp Reisner current == mdev->receiver.task ? 
"receiver" : 123b411b363SPhilipp Reisner current == mdev->asender.task ? "asender" : 124b411b363SPhilipp Reisner current == mdev->worker.task ? "worker" : current->comm, 125b411b363SPhilipp Reisner func, b->bm_why ?: "?", 126b411b363SPhilipp Reisner b->bm_task == mdev->receiver.task ? "receiver" : 127b411b363SPhilipp Reisner b->bm_task == mdev->asender.task ? "asender" : 128b411b363SPhilipp Reisner b->bm_task == mdev->worker.task ? "worker" : "?"); 129b411b363SPhilipp Reisner } 130b411b363SPhilipp Reisner 13120ceb2b2SLars Ellenberg void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags) 132b411b363SPhilipp Reisner { 133b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 134b411b363SPhilipp Reisner int trylock_failed; 135b411b363SPhilipp Reisner 136b411b363SPhilipp Reisner if (!b) { 137b411b363SPhilipp Reisner dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n"); 138b411b363SPhilipp Reisner return; 139b411b363SPhilipp Reisner } 140b411b363SPhilipp Reisner 1418a03ae2aSThomas Gleixner trylock_failed = !mutex_trylock(&b->bm_change); 142b411b363SPhilipp Reisner 143b411b363SPhilipp Reisner if (trylock_failed) { 144b411b363SPhilipp Reisner dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n", 145b411b363SPhilipp Reisner current == mdev->receiver.task ? "receiver" : 146b411b363SPhilipp Reisner current == mdev->asender.task ? "asender" : 147b411b363SPhilipp Reisner current == mdev->worker.task ? "worker" : current->comm, 148b411b363SPhilipp Reisner why, b->bm_why ?: "?", 149b411b363SPhilipp Reisner b->bm_task == mdev->receiver.task ? "receiver" : 150b411b363SPhilipp Reisner b->bm_task == mdev->asender.task ? "asender" : 151b411b363SPhilipp Reisner b->bm_task == mdev->worker.task ? 
"worker" : "?"); 1528a03ae2aSThomas Gleixner mutex_lock(&b->bm_change); 153b411b363SPhilipp Reisner } 15420ceb2b2SLars Ellenberg if (BM_LOCKED_MASK & b->bm_flags) 155b411b363SPhilipp Reisner dev_err(DEV, "FIXME bitmap already locked in bm_lock\n"); 15620ceb2b2SLars Ellenberg b->bm_flags |= flags & BM_LOCKED_MASK; 157b411b363SPhilipp Reisner 158b411b363SPhilipp Reisner b->bm_why = why; 159b411b363SPhilipp Reisner b->bm_task = current; 160b411b363SPhilipp Reisner } 161b411b363SPhilipp Reisner 162b411b363SPhilipp Reisner void drbd_bm_unlock(struct drbd_conf *mdev) 163b411b363SPhilipp Reisner { 164b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 165b411b363SPhilipp Reisner if (!b) { 166b411b363SPhilipp Reisner dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n"); 167b411b363SPhilipp Reisner return; 168b411b363SPhilipp Reisner } 169b411b363SPhilipp Reisner 17020ceb2b2SLars Ellenberg if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags)) 171b411b363SPhilipp Reisner dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n"); 172b411b363SPhilipp Reisner 17320ceb2b2SLars Ellenberg b->bm_flags &= ~BM_LOCKED_MASK; 174b411b363SPhilipp Reisner b->bm_why = NULL; 175b411b363SPhilipp Reisner b->bm_task = NULL; 1768a03ae2aSThomas Gleixner mutex_unlock(&b->bm_change); 177b411b363SPhilipp Reisner } 178b411b363SPhilipp Reisner 17919f843aaSLars Ellenberg /* we store some "meta" info about our pages in page->private */ 18019f843aaSLars Ellenberg /* at a granularity of 4k storage per bitmap bit: 18119f843aaSLars Ellenberg * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks 18219f843aaSLars Ellenberg * 1<<38 bits, 18319f843aaSLars Ellenberg * 1<<23 4k bitmap pages. 18419f843aaSLars Ellenberg * Use 24 bits as page index, covers 2 peta byte storage 18519f843aaSLars Ellenberg * at a granularity of 4k per bit. 18619f843aaSLars Ellenberg * Used to report the failed page idx on io error from the endio handlers. 
18719f843aaSLars Ellenberg */ 18819f843aaSLars Ellenberg #define BM_PAGE_IDX_MASK ((1UL<<24)-1) 18919f843aaSLars Ellenberg /* this page is currently read in, or written back */ 19019f843aaSLars Ellenberg #define BM_PAGE_IO_LOCK 31 19119f843aaSLars Ellenberg /* if there has been an IO error for this page */ 19219f843aaSLars Ellenberg #define BM_PAGE_IO_ERROR 30 19319f843aaSLars Ellenberg /* this is to be able to intelligently skip disk IO, 19419f843aaSLars Ellenberg * set if bits have been set since last IO. */ 19519f843aaSLars Ellenberg #define BM_PAGE_NEED_WRITEOUT 29 19619f843aaSLars Ellenberg /* to mark for lazy writeout once syncer cleared all clearable bits, 19719f843aaSLars Ellenberg * we if bits have been cleared since last IO. */ 19819f843aaSLars Ellenberg #define BM_PAGE_LAZY_WRITEOUT 28 19919f843aaSLars Ellenberg 20024c4830cSBart Van Assche /* store_page_idx uses non-atomic assignment. It is only used directly after 20119f843aaSLars Ellenberg * allocating the page. All other bm_set_page_* and bm_clear_page_* need to 20219f843aaSLars Ellenberg * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap 20319f843aaSLars Ellenberg * changes) may happen from various contexts, and wait_on_bit/wake_up_bit 20419f843aaSLars Ellenberg * requires it all to be atomic as well. 
*/ 20519f843aaSLars Ellenberg static void bm_store_page_idx(struct page *page, unsigned long idx) 20619f843aaSLars Ellenberg { 20719f843aaSLars Ellenberg BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK)); 2080c7db279SArne Redlich set_page_private(page, idx); 20919f843aaSLars Ellenberg } 21019f843aaSLars Ellenberg 21119f843aaSLars Ellenberg static unsigned long bm_page_to_idx(struct page *page) 21219f843aaSLars Ellenberg { 21319f843aaSLars Ellenberg return page_private(page) & BM_PAGE_IDX_MASK; 21419f843aaSLars Ellenberg } 21519f843aaSLars Ellenberg 21619f843aaSLars Ellenberg /* As is very unlikely that the same page is under IO from more than one 21719f843aaSLars Ellenberg * context, we can get away with a bit per page and one wait queue per bitmap. 21819f843aaSLars Ellenberg */ 21919f843aaSLars Ellenberg static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr) 22019f843aaSLars Ellenberg { 22119f843aaSLars Ellenberg struct drbd_bitmap *b = mdev->bitmap; 22219f843aaSLars Ellenberg void *addr = &page_private(b->bm_pages[page_nr]); 22319f843aaSLars Ellenberg wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); 22419f843aaSLars Ellenberg } 22519f843aaSLars Ellenberg 22619f843aaSLars Ellenberg static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr) 22719f843aaSLars Ellenberg { 22819f843aaSLars Ellenberg struct drbd_bitmap *b = mdev->bitmap; 22919f843aaSLars Ellenberg void *addr = &page_private(b->bm_pages[page_nr]); 23019f843aaSLars Ellenberg clear_bit(BM_PAGE_IO_LOCK, addr); 23119f843aaSLars Ellenberg smp_mb__after_clear_bit(); 23219f843aaSLars Ellenberg wake_up(&mdev->bitmap->bm_io_wait); 23319f843aaSLars Ellenberg } 23419f843aaSLars Ellenberg 23519f843aaSLars Ellenberg /* set _before_ submit_io, so it may be reset due to being changed 23619f843aaSLars Ellenberg * while this page is in flight... 
will get submitted later again */ 23719f843aaSLars Ellenberg static void bm_set_page_unchanged(struct page *page) 23819f843aaSLars Ellenberg { 23919f843aaSLars Ellenberg /* use cmpxchg? */ 24019f843aaSLars Ellenberg clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); 24119f843aaSLars Ellenberg clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 24219f843aaSLars Ellenberg } 24319f843aaSLars Ellenberg 24419f843aaSLars Ellenberg static void bm_set_page_need_writeout(struct page *page) 24519f843aaSLars Ellenberg { 24619f843aaSLars Ellenberg set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); 24719f843aaSLars Ellenberg } 24819f843aaSLars Ellenberg 24919f843aaSLars Ellenberg static int bm_test_page_unchanged(struct page *page) 25019f843aaSLars Ellenberg { 25119f843aaSLars Ellenberg volatile const unsigned long *addr = &page_private(page); 25219f843aaSLars Ellenberg return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0; 25319f843aaSLars Ellenberg } 25419f843aaSLars Ellenberg 25519f843aaSLars Ellenberg static void bm_set_page_io_err(struct page *page) 25619f843aaSLars Ellenberg { 25719f843aaSLars Ellenberg set_bit(BM_PAGE_IO_ERROR, &page_private(page)); 25819f843aaSLars Ellenberg } 25919f843aaSLars Ellenberg 26019f843aaSLars Ellenberg static void bm_clear_page_io_err(struct page *page) 26119f843aaSLars Ellenberg { 26219f843aaSLars Ellenberg clear_bit(BM_PAGE_IO_ERROR, &page_private(page)); 26319f843aaSLars Ellenberg } 26419f843aaSLars Ellenberg 26519f843aaSLars Ellenberg static void bm_set_page_lazy_writeout(struct page *page) 26619f843aaSLars Ellenberg { 26719f843aaSLars Ellenberg set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 26819f843aaSLars Ellenberg } 26919f843aaSLars Ellenberg 27019f843aaSLars Ellenberg static int bm_test_page_lazy_writeout(struct page *page) 27119f843aaSLars Ellenberg { 27219f843aaSLars Ellenberg return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 27319f843aaSLars Ellenberg } 27419f843aaSLars 
Ellenberg 27519f843aaSLars Ellenberg /* on a 32bit box, this would allow for exactly (2<<38) bits. */ 27619f843aaSLars Ellenberg static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr) 27719f843aaSLars Ellenberg { 27819f843aaSLars Ellenberg /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */ 27919f843aaSLars Ellenberg unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3); 28019f843aaSLars Ellenberg BUG_ON(page_nr >= b->bm_number_of_pages); 28119f843aaSLars Ellenberg return page_nr; 28219f843aaSLars Ellenberg } 28319f843aaSLars Ellenberg 28495a0f10cSLars Ellenberg static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) 28595a0f10cSLars Ellenberg { 28695a0f10cSLars Ellenberg /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ 28795a0f10cSLars Ellenberg unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); 28895a0f10cSLars Ellenberg BUG_ON(page_nr >= b->bm_number_of_pages); 28995a0f10cSLars Ellenberg return page_nr; 29095a0f10cSLars Ellenberg } 29195a0f10cSLars Ellenberg 292589973a7SCong Wang static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) 29395a0f10cSLars Ellenberg { 29495a0f10cSLars Ellenberg struct page *page = b->bm_pages[idx]; 295cfd8005cSCong Wang return (unsigned long *) kmap_atomic(page); 29695a0f10cSLars Ellenberg } 29795a0f10cSLars Ellenberg 29895a0f10cSLars Ellenberg static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) 29995a0f10cSLars Ellenberg { 300cfd8005cSCong Wang return __bm_map_pidx(b, idx); 30195a0f10cSLars Ellenberg } 30295a0f10cSLars Ellenberg 303cfd8005cSCong Wang static void __bm_unmap(unsigned long *p_addr) 304b411b363SPhilipp Reisner { 305cfd8005cSCong Wang kunmap_atomic(p_addr); 306b411b363SPhilipp Reisner }; 307b411b363SPhilipp Reisner 308b411b363SPhilipp Reisner static void bm_unmap(unsigned long *p_addr) 309b411b363SPhilipp Reisner { 310cfd8005cSCong Wang return __bm_unmap(p_addr); 311b411b363SPhilipp Reisner } 312b411b363SPhilipp Reisner 
313b411b363SPhilipp Reisner /* long word offset of _bitmap_ sector */ 314b411b363SPhilipp Reisner #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) 315b411b363SPhilipp Reisner /* word offset from start of bitmap to word number _in_page_ 316b411b363SPhilipp Reisner * modulo longs per page 317b411b363SPhilipp Reisner #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)) 31824c4830cSBart Van Assche hm, well, Philipp thinks gcc might not optimize the % into & (... - 1) 319b411b363SPhilipp Reisner so do it explicitly: 320b411b363SPhilipp Reisner */ 321b411b363SPhilipp Reisner #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1)) 322b411b363SPhilipp Reisner 323b411b363SPhilipp Reisner /* Long words per page */ 324b411b363SPhilipp Reisner #define LWPP (PAGE_SIZE/sizeof(long)) 325b411b363SPhilipp Reisner 326b411b363SPhilipp Reisner /* 327b411b363SPhilipp Reisner * actually most functions herein should take a struct drbd_bitmap*, not a 328b411b363SPhilipp Reisner * struct drbd_conf*, but for the debug macros I like to have the mdev around 329b411b363SPhilipp Reisner * to be able to report device specific. 
330b411b363SPhilipp Reisner */ 331b411b363SPhilipp Reisner 33219f843aaSLars Ellenberg 333b411b363SPhilipp Reisner static void bm_free_pages(struct page **pages, unsigned long number) 334b411b363SPhilipp Reisner { 335b411b363SPhilipp Reisner unsigned long i; 336b411b363SPhilipp Reisner if (!pages) 337b411b363SPhilipp Reisner return; 338b411b363SPhilipp Reisner 339b411b363SPhilipp Reisner for (i = 0; i < number; i++) { 340b411b363SPhilipp Reisner if (!pages[i]) { 341b411b363SPhilipp Reisner printk(KERN_ALERT "drbd: bm_free_pages tried to free " 342b411b363SPhilipp Reisner "a NULL pointer; i=%lu n=%lu\n", 343b411b363SPhilipp Reisner i, number); 344b411b363SPhilipp Reisner continue; 345b411b363SPhilipp Reisner } 346b411b363SPhilipp Reisner __free_page(pages[i]); 347b411b363SPhilipp Reisner pages[i] = NULL; 348b411b363SPhilipp Reisner } 349b411b363SPhilipp Reisner } 350b411b363SPhilipp Reisner 351b411b363SPhilipp Reisner static void bm_vk_free(void *ptr, int v) 352b411b363SPhilipp Reisner { 353b411b363SPhilipp Reisner if (v) 354b411b363SPhilipp Reisner vfree(ptr); 355b411b363SPhilipp Reisner else 356b411b363SPhilipp Reisner kfree(ptr); 357b411b363SPhilipp Reisner } 358b411b363SPhilipp Reisner 359b411b363SPhilipp Reisner /* 360b411b363SPhilipp Reisner * "have" and "want" are NUMBER OF PAGES. 
361b411b363SPhilipp Reisner */ 362b411b363SPhilipp Reisner static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) 363b411b363SPhilipp Reisner { 364b411b363SPhilipp Reisner struct page **old_pages = b->bm_pages; 365b411b363SPhilipp Reisner struct page **new_pages, *page; 366b411b363SPhilipp Reisner unsigned int i, bytes, vmalloced = 0; 367b411b363SPhilipp Reisner unsigned long have = b->bm_number_of_pages; 368b411b363SPhilipp Reisner 369b411b363SPhilipp Reisner BUG_ON(have == 0 && old_pages != NULL); 370b411b363SPhilipp Reisner BUG_ON(have != 0 && old_pages == NULL); 371b411b363SPhilipp Reisner 372b411b363SPhilipp Reisner if (have == want) 373b411b363SPhilipp Reisner return old_pages; 374b411b363SPhilipp Reisner 375b411b363SPhilipp Reisner /* Trying kmalloc first, falling back to vmalloc. 3760b143d43SLars Ellenberg * GFP_NOIO, as this is called while drbd IO is "suspended", 3770b143d43SLars Ellenberg * and during resize or attach on diskless Primary, 3780b143d43SLars Ellenberg * we must not block on IO to ourselves. 3790b143d43SLars Ellenberg * Context is receiver thread or cqueue thread/dmsetup. 
*/ 380b411b363SPhilipp Reisner bytes = sizeof(struct page *)*want; 3810b143d43SLars Ellenberg new_pages = kzalloc(bytes, GFP_NOIO); 382b411b363SPhilipp Reisner if (!new_pages) { 3830b143d43SLars Ellenberg new_pages = __vmalloc(bytes, 3840b143d43SLars Ellenberg GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO, 3850b143d43SLars Ellenberg PAGE_KERNEL); 386b411b363SPhilipp Reisner if (!new_pages) 387b411b363SPhilipp Reisner return NULL; 388b411b363SPhilipp Reisner vmalloced = 1; 389b411b363SPhilipp Reisner } 390b411b363SPhilipp Reisner 391b411b363SPhilipp Reisner if (want >= have) { 392b411b363SPhilipp Reisner for (i = 0; i < have; i++) 393b411b363SPhilipp Reisner new_pages[i] = old_pages[i]; 394b411b363SPhilipp Reisner for (; i < want; i++) { 3950b143d43SLars Ellenberg page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); 396b411b363SPhilipp Reisner if (!page) { 397b411b363SPhilipp Reisner bm_free_pages(new_pages + have, i - have); 398b411b363SPhilipp Reisner bm_vk_free(new_pages, vmalloced); 399b411b363SPhilipp Reisner return NULL; 400b411b363SPhilipp Reisner } 40119f843aaSLars Ellenberg /* we want to know which page it is 40219f843aaSLars Ellenberg * from the endio handlers */ 40319f843aaSLars Ellenberg bm_store_page_idx(page, i); 404b411b363SPhilipp Reisner new_pages[i] = page; 405b411b363SPhilipp Reisner } 406b411b363SPhilipp Reisner } else { 407b411b363SPhilipp Reisner for (i = 0; i < want; i++) 408b411b363SPhilipp Reisner new_pages[i] = old_pages[i]; 409b411b363SPhilipp Reisner /* NOT HERE, we are outside the spinlock! 
410b411b363SPhilipp Reisner bm_free_pages(old_pages + want, have - want); 411b411b363SPhilipp Reisner */ 412b411b363SPhilipp Reisner } 413b411b363SPhilipp Reisner 414b411b363SPhilipp Reisner if (vmalloced) 41520ceb2b2SLars Ellenberg b->bm_flags |= BM_P_VMALLOCED; 416b411b363SPhilipp Reisner else 41720ceb2b2SLars Ellenberg b->bm_flags &= ~BM_P_VMALLOCED; 418b411b363SPhilipp Reisner 419b411b363SPhilipp Reisner return new_pages; 420b411b363SPhilipp Reisner } 421b411b363SPhilipp Reisner 422b411b363SPhilipp Reisner /* 423b411b363SPhilipp Reisner * called on driver init only. TODO call when a device is created. 424b411b363SPhilipp Reisner * allocates the drbd_bitmap, and stores it in mdev->bitmap. 425b411b363SPhilipp Reisner */ 426b411b363SPhilipp Reisner int drbd_bm_init(struct drbd_conf *mdev) 427b411b363SPhilipp Reisner { 428b411b363SPhilipp Reisner struct drbd_bitmap *b = mdev->bitmap; 429b411b363SPhilipp Reisner WARN_ON(b != NULL); 430b411b363SPhilipp Reisner b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL); 431b411b363SPhilipp Reisner if (!b) 432b411b363SPhilipp Reisner return -ENOMEM; 433b411b363SPhilipp Reisner spin_lock_init(&b->bm_lock); 4348a03ae2aSThomas Gleixner mutex_init(&b->bm_change); 435b411b363SPhilipp Reisner init_waitqueue_head(&b->bm_io_wait); 436b411b363SPhilipp Reisner 437b411b363SPhilipp Reisner mdev->bitmap = b; 438b411b363SPhilipp Reisner 439b411b363SPhilipp Reisner return 0; 440b411b363SPhilipp Reisner } 441b411b363SPhilipp Reisner 442b411b363SPhilipp Reisner sector_t drbd_bm_capacity(struct drbd_conf *mdev) 443b411b363SPhilipp Reisner { 444b411b363SPhilipp Reisner ERR_IF(!mdev->bitmap) return 0; 445b411b363SPhilipp Reisner return mdev->bitmap->bm_dev_capacity; 446b411b363SPhilipp Reisner } 447b411b363SPhilipp Reisner 448b411b363SPhilipp Reisner /* called on driver unload. TODO: call when a device is destroyed. 
449b411b363SPhilipp Reisner */ 450b411b363SPhilipp Reisner void drbd_bm_cleanup(struct drbd_conf *mdev) 451b411b363SPhilipp Reisner { 452b411b363SPhilipp Reisner ERR_IF (!mdev->bitmap) return; 453b411b363SPhilipp Reisner bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages); 45420ceb2b2SLars Ellenberg bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags)); 455b411b363SPhilipp Reisner kfree(mdev->bitmap); 456b411b363SPhilipp Reisner mdev->bitmap = NULL; 457b411b363SPhilipp Reisner } 458b411b363SPhilipp Reisner 459b411b363SPhilipp Reisner /* 460b411b363SPhilipp Reisner * since (b->bm_bits % BITS_PER_LONG) != 0, 461b411b363SPhilipp Reisner * this masks out the remaining bits. 462b411b363SPhilipp Reisner * Returns the number of bits cleared. 463b411b363SPhilipp Reisner */ 46495a0f10cSLars Ellenberg #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) 46595a0f10cSLars Ellenberg #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1) 46695a0f10cSLars Ellenberg #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1) 467b411b363SPhilipp Reisner static int bm_clear_surplus(struct drbd_bitmap *b) 468b411b363SPhilipp Reisner { 46995a0f10cSLars Ellenberg unsigned long mask; 470b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 47195a0f10cSLars Ellenberg int tmp; 47295a0f10cSLars Ellenberg int cleared = 0; 473b411b363SPhilipp Reisner 47495a0f10cSLars Ellenberg /* number of bits modulo bits per page */ 47595a0f10cSLars Ellenberg tmp = (b->bm_bits & BITS_PER_PAGE_MASK); 47695a0f10cSLars Ellenberg /* mask the used bits of the word containing the last bit */ 47795a0f10cSLars Ellenberg mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; 47895a0f10cSLars Ellenberg /* bitmap is always stored little endian, 47995a0f10cSLars Ellenberg * on disk and in core memory alike */ 48095a0f10cSLars Ellenberg mask = cpu_to_lel(mask); 48195a0f10cSLars Ellenberg 4826850c442SLars Ellenberg p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); 48395a0f10cSLars Ellenberg bm = p_addr + 
(tmp/BITS_PER_LONG); 48495a0f10cSLars Ellenberg if (mask) { 48595a0f10cSLars Ellenberg /* If mask != 0, we are not exactly aligned, so bm now points 48695a0f10cSLars Ellenberg * to the long containing the last bit. 48795a0f10cSLars Ellenberg * If mask == 0, bm already points to the word immediately 48895a0f10cSLars Ellenberg * after the last (long word aligned) bit. */ 489b411b363SPhilipp Reisner cleared = hweight_long(*bm & ~mask); 490b411b363SPhilipp Reisner *bm &= mask; 49195a0f10cSLars Ellenberg bm++; 492b411b363SPhilipp Reisner } 493b411b363SPhilipp Reisner 49495a0f10cSLars Ellenberg if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { 49595a0f10cSLars Ellenberg /* on a 32bit arch, we may need to zero out 49695a0f10cSLars Ellenberg * a padding long to align with a 64bit remote */ 497b411b363SPhilipp Reisner cleared += hweight_long(*bm); 498b411b363SPhilipp Reisner *bm = 0; 499b411b363SPhilipp Reisner } 500b411b363SPhilipp Reisner bm_unmap(p_addr); 501b411b363SPhilipp Reisner return cleared; 502b411b363SPhilipp Reisner } 503b411b363SPhilipp Reisner 504b411b363SPhilipp Reisner static void bm_set_surplus(struct drbd_bitmap *b) 505b411b363SPhilipp Reisner { 50695a0f10cSLars Ellenberg unsigned long mask; 507b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 50895a0f10cSLars Ellenberg int tmp; 509b411b363SPhilipp Reisner 51095a0f10cSLars Ellenberg /* number of bits modulo bits per page */ 51195a0f10cSLars Ellenberg tmp = (b->bm_bits & BITS_PER_PAGE_MASK); 51295a0f10cSLars Ellenberg /* mask the used bits of the word containing the last bit */ 51395a0f10cSLars Ellenberg mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; 51495a0f10cSLars Ellenberg /* bitmap is always stored little endian, 51595a0f10cSLars Ellenberg * on disk and in core memory alike */ 51695a0f10cSLars Ellenberg mask = cpu_to_lel(mask); 51795a0f10cSLars Ellenberg 5186850c442SLars Ellenberg p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); 51995a0f10cSLars Ellenberg bm = p_addr + (tmp/BITS_PER_LONG); 
52095a0f10cSLars Ellenberg if (mask) { 52195a0f10cSLars Ellenberg /* If mask != 0, we are not exactly aligned, so bm now points 52295a0f10cSLars Ellenberg * to the long containing the last bit. 52395a0f10cSLars Ellenberg * If mask == 0, bm already points to the word immediately 52495a0f10cSLars Ellenberg * after the last (long word aligned) bit. */ 525b411b363SPhilipp Reisner *bm |= ~mask; 52695a0f10cSLars Ellenberg bm++; 527b411b363SPhilipp Reisner } 528b411b363SPhilipp Reisner 52995a0f10cSLars Ellenberg if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { 53095a0f10cSLars Ellenberg /* on a 32bit arch, we may need to zero out 53195a0f10cSLars Ellenberg * a padding long to align with a 64bit remote */ 53295a0f10cSLars Ellenberg *bm = ~0UL; 533b411b363SPhilipp Reisner } 534b411b363SPhilipp Reisner bm_unmap(p_addr); 535b411b363SPhilipp Reisner } 536b411b363SPhilipp Reisner 5374b0715f0SLars Ellenberg /* you better not modify the bitmap while this is running, 5384b0715f0SLars Ellenberg * or its results will be stale */ 53995a0f10cSLars Ellenberg static unsigned long bm_count_bits(struct drbd_bitmap *b) 540b411b363SPhilipp Reisner { 5414b0715f0SLars Ellenberg unsigned long *p_addr; 542b411b363SPhilipp Reisner unsigned long bits = 0; 5434b0715f0SLars Ellenberg unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; 5446850c442SLars Ellenberg int idx, i, last_word; 5457777a8baSLars Ellenberg 5464b0715f0SLars Ellenberg /* all but last page */ 5476850c442SLars Ellenberg for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { 548cfd8005cSCong Wang p_addr = __bm_map_pidx(b, idx); 5494b0715f0SLars Ellenberg for (i = 0; i < LWPP; i++) 5504b0715f0SLars Ellenberg bits += hweight_long(p_addr[i]); 551cfd8005cSCong Wang __bm_unmap(p_addr); 552b411b363SPhilipp Reisner cond_resched(); 553b411b363SPhilipp Reisner } 5544b0715f0SLars Ellenberg /* last (or only) page */ 5554b0715f0SLars Ellenberg last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL; 
556589973a7SCong Wang p_addr = __bm_map_pidx(b, idx); 5574b0715f0SLars Ellenberg for (i = 0; i < last_word; i++) 5584b0715f0SLars Ellenberg bits += hweight_long(p_addr[i]); 5594b0715f0SLars Ellenberg p_addr[last_word] &= cpu_to_lel(mask); 5604b0715f0SLars Ellenberg bits += hweight_long(p_addr[last_word]); 5614b0715f0SLars Ellenberg /* 32bit arch, may have an unused padding long */ 5624b0715f0SLars Ellenberg if (BITS_PER_LONG == 32 && (last_word & 1) == 0) 5634b0715f0SLars Ellenberg p_addr[last_word+1] = 0; 564589973a7SCong Wang __bm_unmap(p_addr); 565b411b363SPhilipp Reisner return bits; 566b411b363SPhilipp Reisner } 567b411b363SPhilipp Reisner 568b411b363SPhilipp Reisner /* offset and len in long words.*/ 569b411b363SPhilipp Reisner static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) 570b411b363SPhilipp Reisner { 571b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 57219f843aaSLars Ellenberg unsigned int idx; 573b411b363SPhilipp Reisner size_t do_now, end; 574b411b363SPhilipp Reisner 575b411b363SPhilipp Reisner end = offset + len; 576b411b363SPhilipp Reisner 577b411b363SPhilipp Reisner if (end > b->bm_words) { 578b411b363SPhilipp Reisner printk(KERN_ALERT "drbd: bm_memset end > bm_words\n"); 579b411b363SPhilipp Reisner return; 580b411b363SPhilipp Reisner } 581b411b363SPhilipp Reisner 582b411b363SPhilipp Reisner while (offset < end) { 583b411b363SPhilipp Reisner do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset; 58419f843aaSLars Ellenberg idx = bm_word_to_page_idx(b, offset); 58519f843aaSLars Ellenberg p_addr = bm_map_pidx(b, idx); 586b411b363SPhilipp Reisner bm = p_addr + MLPP(offset); 587b411b363SPhilipp Reisner if (bm+do_now > p_addr + LWPP) { 588b411b363SPhilipp Reisner printk(KERN_ALERT "drbd: BUG BUG BUG! 
 p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		/* page content changed: mark it dirty for the next writeout */
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}

/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	ERR_IF(!b) return -ENOMEM;

	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	/* nothing to do if the size did not actually change */
	if (capacity == b->bm_dev_capacity)
		goto out;

	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);

	if (capacity == 0) {
		/* shrinking to zero: detach the page array under the lock,
		 * then free it outside of the spinlock */
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set =
		b->bm_bits =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		/* sectors reserved on disk for the bitmap, in bits:
		 * sectors * 512 bytes * 8 bits/byte  ==  << 12 */
		u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
		put_ldev(mdev);
		if (bits > bits_on_disk) {
			dev_info(DEV, "bits = %lu\n", bits);
			dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		/* same number of pages: reuse the existing page array */
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits = b->bm_bits;

	growing = bits > obits;
	/* when growing with set_new_bits, first set the formerly unused
	 * surplus bits of the old last word, so the newly covered range
	 * starts out all set */
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	/* bits beyond bm_bits in the last word must always read as 0 */
	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	/* when shrinking, re-count: bits may have been cut off */
	if (!growing)
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(mdev);
	return err;
}

/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 *
 * maybe bm_set should be atomic_t ?
 */
unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long s;
	unsigned long flags;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	/* read the cached count under the lock (see comment above) */
	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

/* like _drbd_bm_total_weight(), but takes/drops an ldev reference first */
unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync status */
	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(mdev);
	put_ldev(mdev);
	return s;
}

/* number of long words in the bitmap */
size_t drbd_bm_words(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	return b->bm_words;
}

/* number of bits in the bitmap */
unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;

	return b->bm_bits;
}

/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		/* work page by page (LWPP = long words per page) */
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			/* OR in the received word, and keep the cached
			 * set-bit count (bm_set) in sync incrementally */
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}

/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	/* NOTE(review): number is size_t (unsigned), so "number <= 0" is
	 * effectively "number == 0" -- kept as-is, behavior unchanged. */
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_conf *mdev)
{
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	/* surplus bits past bm_bits must stay clear */
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}

/* Context shared between a bitmap IO submitter and its bio completions. */
struct bm_aio_ctx {
	struct drbd_conf *mdev;
	atomic_t in_flight;	/* bios not yet completed, +1 for the submitter */
	unsigned int done;	/* set to 1 once in_flight reaches zero */
	unsigned flags;
#define BM_AIO_COPY_PAGES	1	/* submit copies, not the live pages */
#define BM_WRITE_ALL_PAGES	2	/* write even unchanged pages */
	int error;		/* last non-zero bio completion error, if any */
	/* two references: one for the submitter, one for the completion
	 * path (initialized to ATOMIC_INIT(2) by both users) */
	struct kref kref;
};

/* kref release: drop the ldev reference taken by the submitter, free ctx */
static void bm_aio_ctx_destroy(struct kref *kref)
{
	struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);

	put_ldev(ctx->mdev);
	kfree(ctx);
}

/* bio completion callback.
 * bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
	struct bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	/* the page index was stored in/derived from the page itself */
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
	int uptodate = bio_flagged(bio, BIO_UPTODATE);


	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	/* when writing the live page (no copy), it must not have been
	 * redirtied while the IO was in flight */
	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);

	if (error) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough?
		 */
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
					error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(mdev, idx);

	/* if we wrote a copy, return it to the mempool */
	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);

	bio_put(bio);

	/* last completion wakes the submitter and drops the completion-side
	 * kref (see bm_aio_ctx.kref) */
	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&mdev->misc_wait);
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	}
}

/* Submit one bitmap page (or a copy of it) as async meta-data IO. */
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	struct page *page;
	unsigned int len;

	/* on-disk start of the bitmap area, plus this page's offset;
	 * each page covers PAGE_SIZE/512 sectors, hence << (PAGE_SHIFT-9) */
	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(mdev, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		/* snapshot the page content so the live bitmap may keep
		 * changing while this copy is in flight */
		void *src, *dest;
		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
		dest = kmap_atomic(page);
		src = kmap_atomic(b->bm_pages[page_nr]);
		memcpy(dest, src, PAGE_SIZE);
		kunmap_atomic(src);
		kunmap_atomic(dest);
		/* remember which bitmap page this copy stands for, so the
		 * completion handler can find it again */
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];

	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api.  Do we want to assert that? */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		/* fault injection: complete the bio with -EIO immediately */
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &mdev->rs_sect_ev);
	}
}

/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */
static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct bm_aio_ctx *ctx;
	struct drbd_bitmap *b = mdev->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */

	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	/* in_flight starts at 1 (the submitter's reference), kref at 2:
	 * one for this function, one for the completion path */
	*ctx = (struct bm_aio_ctx) {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = flags,
		.error = 0,
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
		kfree(ctx);
		return -ENODEV;
	}

	/* plain read/write (no flags) requires the bitmap to be locked */
	if (!ctx->flags)
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++) {
		/* ignore completely unchanged pages */
		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
			break;
		if (rw & WRITE) {
			if (!(flags & BM_WRITE_ALL_PAGES) &&
			    bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
		}
		atomic_inc(&ctx->in_flight);
		bm_page_io_async(ctx, i, rw);
		++count;
		cond_resched();
	}

	/*
	 * We initialize ctx->in_flight to one to make sure bm_async_io_complete
	 * will not set ctx->done early, and decrement / test it here.  If there
	 * are still some bios in flight, we need to wait for them here.
	 * If all IO is done already (or nothing had been submitted), there is
	 * no need to wait.  Still, we need to put the kref associated with the
	 * "in_flight reached zero, all done" event.
	 */
	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
	else
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);

	dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
			rw == WRITE ? "WRITE" : "READ",
			count, jiffies - now);

	if (ctx->error) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
		err = -EIO; /* ctx->error ? */
	}

	/* in_flight still non-zero: we gave up waiting (force-detach) */
	if (atomic_read(&ctx->in_flight))
		err = -EIO; /* Disk timeout/force-detach during IO... */

	now = jiffies;
	if (rw == WRITE) {
		drbd_md_flush(mdev);
	} else /* rw == READ */ {
		/* freshly read bitmap: re-derive the cached set-bit count */
		b->bm_set = bm_count_bits(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
	     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	/* drop the submitter-side kref taken at allocation */
	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	return err;
}

/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, READ, 0, 0);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
1141b411b363SPhilipp Reisner */ 1142b411b363SPhilipp Reisner int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local) 1143b411b363SPhilipp Reisner { 11440e8488adSLars Ellenberg return bm_rw(mdev, WRITE, 0, 0); 1145b411b363SPhilipp Reisner } 1146b411b363SPhilipp Reisner 1147b411b363SPhilipp Reisner /** 1148d1aa4d04SPhilipp Reisner * drbd_bm_write_all() - Write the whole bitmap to its on disk location. 1149d1aa4d04SPhilipp Reisner * @mdev: DRBD device. 1150d1aa4d04SPhilipp Reisner * 1151d1aa4d04SPhilipp Reisner * Will write all pages. 1152d1aa4d04SPhilipp Reisner */ 1153d1aa4d04SPhilipp Reisner int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local) 1154d1aa4d04SPhilipp Reisner { 1155d1aa4d04SPhilipp Reisner return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0); 1156d1aa4d04SPhilipp Reisner } 1157d1aa4d04SPhilipp Reisner 1158d1aa4d04SPhilipp Reisner /** 115919f843aaSLars Ellenberg * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed. 1160b411b363SPhilipp Reisner * @mdev: DRBD device. 116119f843aaSLars Ellenberg * @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages 1162b411b363SPhilipp Reisner */ 116319f843aaSLars Ellenberg int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local) 1164b411b363SPhilipp Reisner { 11650e8488adSLars Ellenberg return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx); 11660e8488adSLars Ellenberg } 11670e8488adSLars Ellenberg 11680e8488adSLars Ellenberg /** 11690e8488adSLars Ellenberg * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location. 11700e8488adSLars Ellenberg * @mdev: DRBD device. 11710e8488adSLars Ellenberg * 11720e8488adSLars Ellenberg * Will only write pages that have changed since last IO. 11730e8488adSLars Ellenberg * In contrast to drbd_bm_write(), this will copy the bitmap pages 11740e8488adSLars Ellenberg * to temporary writeout pages. 
It is intended to trigger a full write-out 11750e8488adSLars Ellenberg * while still allowing the bitmap to change, for example if a resync or online 11760e8488adSLars Ellenberg * verify is aborted due to a failed peer disk, while local IO continues, or 11770e8488adSLars Ellenberg * pending resync acks are still being processed. 11780e8488adSLars Ellenberg */ 11790e8488adSLars Ellenberg int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local) 11800e8488adSLars Ellenberg { 11810e8488adSLars Ellenberg return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0); 1182b411b363SPhilipp Reisner } 118319f843aaSLars Ellenberg 118419f843aaSLars Ellenberg 118519f843aaSLars Ellenberg /** 118619f843aaSLars Ellenberg * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap 118719f843aaSLars Ellenberg * @mdev: DRBD device. 118819f843aaSLars Ellenberg * @idx: bitmap page index 118919f843aaSLars Ellenberg * 11904b0715f0SLars Ellenberg * We don't want to special case on logical_block_size of the backend device, 11914b0715f0SLars Ellenberg * so we submit PAGE_SIZE aligned pieces. 119219f843aaSLars Ellenberg * Note that on "most" systems, PAGE_SIZE is 4k. 11934b0715f0SLars Ellenberg * 11944b0715f0SLars Ellenberg * In case this becomes an issue on systems with larger PAGE_SIZE, 11954b0715f0SLars Ellenberg * we may want to change this again to write 4k aligned 4k pieces. 
 */
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
{
	struct bm_aio_ctx *ctx;
	int err;

	/* nothing to do if this page was not touched since the last writeout */
	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
		dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
		return 0;
	}

	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	*ctx = (struct bm_aio_ctx) {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		/* submit a copy of the page, so the live bitmap may keep
		 * changing while the IO is in flight */
		.flags = BM_AIO_COPY_PAGES,
		.error = 0,
		/* two refs: one for the IO in flight, one for us;
		 * ours is dropped via kref_put() below */
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
		kfree(ctx);
		return -ENODEV;
	}

	bm_page_io_async(ctx, idx, WRITE_SYNC);
	wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);

	if (ctx->error)
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
		/* that causes us to detach, so the in memory bitmap will be
		 * gone in a moment as well. */

	mdev->bm_writ_cnt++;
	/* if the IO never completed (we were forced to stop waiting),
	 * report -EIO; otherwise propagate the completion status */
	err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	return err;
}

/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 */
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
	const int find_zero_bit)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;


	if (bm_fo > b->bm_bits) {
		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		/* scan page by page; each iteration maps one bitmap page,
		 * searches it, and unmaps it again */
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));

			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr);
			if (i < PAGE_SIZE*8) {
				/* hit within this page; bits past bm_bits in the
				 * last page do not count */
				bm_fo = bit_offset + i;
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			/* nothing in this page, continue with the next one */
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}

/* locked variant of __bm_find_next(); takes bm_lock and checks lock flags */
static unsigned long bm_find_next(struct drbd_conf *mdev,
	unsigned long bm_fo, const int find_zero_bit)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long i = DRBD_END_OF_BITMAP;

	ERR_IF(!b) return i;
	ERR_IF(!b->bm_pages) return i;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);

	i = __bm_find_next(mdev, bm_fo, find_zero_bit);

	spin_unlock_irq(&b->bm_lock);
	return i;
}

/* find the next set bit at or after bm_fo; returns DRBD_END_OF_BITMAP if none */
unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 1);
}
#endif

/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 0);
}

/* unlocked variant searching for the next clear bit;
 * caller must hold drbd_bm_lock() */
unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 1);
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already.
 */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	unsigned long e, int val)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;	/* -1U: no page mapped yet */
	int c = 0;		/* net change within the currently mapped page */
	int changed_total = 0;

	if (e >= b->bm_bits) {
		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != page_nr) /* placeholder, see real check below */;
		if (page_nr != last_page_nr) {
			/* crossing into a new page: flush the per-page change
			 * accounting of the previous one first */
			if (p_addr)
				__bm_unmap(p_addr);
			/* bits cleared (c < 0) only need a lazy writeout;
			 * bits set (c > 0) must be written out */
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	/* flush accounting for the last mapped page */
	if (p_addr)
		__bm_unmap(p_addr);
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	int c = 0;

	ERR_IF(!b) return 1;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	/* complain if the bitmap is locked against the operation we are
	 * about to perform (set vs. clear) */
	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
		bm_print_lock_info(mdev);

	c = __bm_change_bits_to(mdev, s, e, val);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(mdev, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	/* bm_change_bits_to() returns a negative count for cleared bits */
	return -bm_change_bits_to(mdev, s, e, 0);
}

/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
		int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
	for (i = first_word; i < last_word; i++) {
		/* count bits that were already set, to keep bm_set accurate */
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		b->bm_set += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr);
}

/* Same thing as drbd_bm_set_bits,
 * but more efficient for a large bit range.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long sl = ALIGN(s,BITS_PER_LONG);	/* s rounded up to a long boundary */
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1); /* e+1 rounded down */
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(mdev, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	spin_lock_irq(&b->bm_lock);

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(mdev, s, sl-1, 1);

	/* 3 + PAGE_SHIFT: a page holds PAGE_SIZE * 8 bits */
	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
		/* drop the lock between pages so we don't hold it (and keep
		 * interrupts off) for too long; give others a chance to run */
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}
	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);

	/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
	 * ==> e = 32767, el = 32768, last_page = 2,
	 * and now last_word = 0.
	 * We do not want to touch last_page in this case,
	 * as we did not allocate it, it is not present in bitmap->bm_pages.
	 */
	if (last_word)
		bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(mdev, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}

/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	int i;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
	if (bitnr < b->bm_bits) {
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		/* exactly one past the end: legitimate "stop" signal */
		i = -1;
	} else { /* (bitnr > b->bm_bits) */
		dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}

/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int page_nr = -1U;	/* -1U: no page mapped yet */
	int c = 0;

	/* If this is called without a bitmap, that is a bug.  But just to be
	 * robust in case we screwed up elsewhere, in that case pretend there
	 * was one dirty bit in the requested area, so we won't try to do a
	 * local read there (no bitmap probably implies no disk) */
	ERR_IF(!b) return 1;
	ERR_IF(!b->bm_pages) return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
		/* only remap when we cross a page boundary */
		if (page_nr != idx) {
			page_nr = idx;
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_pidx(b, idx);
		}
		ERR_IF (bitnr >= b->bm_bits) {
			dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
		} else {
			/* bit index relative to the start of the mapped page */
			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		}
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}


/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * but the general usage is that this is only use during a cstate when bits are
 * only cleared, not set, and typically only care for the case when the return
 * value is zero, or we already "locked" this "bitmap extent" by other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 *
 * TODO
 * I think since we use it like a reference count, we should use the real
 * reference count of some bitmap extent element from some lru instead...
 *
 */
int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);

	/* word range [s, e) covered by this bitmap extent;
	 * clamp e to the end of the bitmap */
	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		/* popcount each word in the extent */
		while (n--)
			count += hweight_long(*bm++);
		bm_unmap(p_addr);
	} else {
		dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}

/* Set all bits covered by the AL-extent al_enr.
 * Returns number of bits changed. */
unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long weight;
	unsigned long s, e;
	int count, i, do_now;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_SET & b->bm_flags)
		bm_print_lock_info(mdev);
	/* remember bm_set so we can report the delta at the end */
	weight = b->bm_set;

	/* word range [s, e) covered by this AL extent,
	 * clamped to the end of the bitmap */
	s = al_enr * BM_WORDS_PER_AL_EXT;
	e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
	/* assert that s and e are on the same page */
	D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
	      ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
	count = 0;
	if (s < b->bm_words) {
		i = do_now = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		while (i--) {
			/* count already-set bits, then set the whole word */
			count += hweight_long(*bm);
			*bm = -1UL;
			bm++;
		}
		bm_unmap(p_addr);
		b->bm_set += do_now*BITS_PER_LONG - count;
		/* if we just set the very last words, clear the bits beyond
		 * bm_bits again, they are not part of the bitmap */
		if (e == b->bm_words)
			b->bm_set -= bm_clear_surplus(b);
	} else {
		dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
	}
	weight = b->bm_set - weight;
	spin_unlock_irq(&b->bm_lock);
	return weight;
}