1b411b363SPhilipp Reisner /* 2b411b363SPhilipp Reisner drbd_bitmap.c 3b411b363SPhilipp Reisner 4b411b363SPhilipp Reisner This file is part of DRBD by Philipp Reisner and Lars Ellenberg. 5b411b363SPhilipp Reisner 6b411b363SPhilipp Reisner Copyright (C) 2004-2008, LINBIT Information Technologies GmbH. 7b411b363SPhilipp Reisner Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>. 8b411b363SPhilipp Reisner Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. 9b411b363SPhilipp Reisner 10b411b363SPhilipp Reisner drbd is free software; you can redistribute it and/or modify 11b411b363SPhilipp Reisner it under the terms of the GNU General Public License as published by 12b411b363SPhilipp Reisner the Free Software Foundation; either version 2, or (at your option) 13b411b363SPhilipp Reisner any later version. 14b411b363SPhilipp Reisner 15b411b363SPhilipp Reisner drbd is distributed in the hope that it will be useful, 16b411b363SPhilipp Reisner but WITHOUT ANY WARRANTY; without even the implied warranty of 17b411b363SPhilipp Reisner MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18b411b363SPhilipp Reisner GNU General Public License for more details. 19b411b363SPhilipp Reisner 20b411b363SPhilipp Reisner You should have received a copy of the GNU General Public License 21b411b363SPhilipp Reisner along with drbd; see the file COPYING. If not, write to 22b411b363SPhilipp Reisner the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
23b411b363SPhilipp Reisner */ 24b411b363SPhilipp Reisner 25f88c5d90SLars Ellenberg #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 26f88c5d90SLars Ellenberg 275fb3bc4dSLars Ellenberg #include <linux/bitmap.h> 28b411b363SPhilipp Reisner #include <linux/vmalloc.h> 29b411b363SPhilipp Reisner #include <linux/string.h> 30b411b363SPhilipp Reisner #include <linux/drbd.h> 315a0e3ad6STejun Heo #include <linux/slab.h> 32dbcbdc43SChristoph Hellwig #include <linux/highmem.h> 33f0ff1357SStephen Rothwell 34b411b363SPhilipp Reisner #include "drbd_int.h" 35b411b363SPhilipp Reisner 3695a0f10cSLars Ellenberg 37b411b363SPhilipp Reisner /* OPAQUE outside this file! 38b411b363SPhilipp Reisner * interface defined in drbd_int.h 39b411b363SPhilipp Reisner 40b411b363SPhilipp Reisner * convention: 41b411b363SPhilipp Reisner * function name drbd_bm_... => used elsewhere, "public". 42b411b363SPhilipp Reisner * function name bm_... => internal to implementation, "private". 434b0715f0SLars Ellenberg */ 44b411b363SPhilipp Reisner 454b0715f0SLars Ellenberg 464b0715f0SLars Ellenberg /* 474b0715f0SLars Ellenberg * LIMITATIONS: 484b0715f0SLars Ellenberg * We want to support >= peta byte of backend storage, while for now still using 494b0715f0SLars Ellenberg * a granularity of one bit per 4KiB of storage. 504b0715f0SLars Ellenberg * 1 << 50 bytes backend storage (1 PiB) 514b0715f0SLars Ellenberg * 1 << (50 - 12) bits needed 524b0715f0SLars Ellenberg * 38 --> we need u64 to index and count bits 534b0715f0SLars Ellenberg * 1 << (38 - 3) bitmap bytes needed 544b0715f0SLars Ellenberg * 35 --> we still need u64 to index and count bytes 554b0715f0SLars Ellenberg * (that's 32 GiB of bitmap for 1 PiB storage) 564b0715f0SLars Ellenberg * 1 << (35 - 2) 32bit longs needed 574b0715f0SLars Ellenberg * 33 --> we'd even need u64 to index and count 32bit long words. 
584b0715f0SLars Ellenberg * 1 << (35 - 3) 64bit longs needed 594b0715f0SLars Ellenberg * 32 --> we could get away with a 32bit unsigned int to index and count 604b0715f0SLars Ellenberg * 64bit long words, but I rather stay with unsigned long for now. 614b0715f0SLars Ellenberg * We probably should neither count nor point to bytes or long words 624b0715f0SLars Ellenberg * directly, but either by bitnumber, or by page index and offset. 634b0715f0SLars Ellenberg * 1 << (35 - 12) 644b0715f0SLars Ellenberg * 22 --> we need that much 4KiB pages of bitmap. 654b0715f0SLars Ellenberg * 1 << (22 + 3) --> on a 64bit arch, 664b0715f0SLars Ellenberg * we need 32 MiB to store the array of page pointers. 674b0715f0SLars Ellenberg * 684b0715f0SLars Ellenberg * Because I'm lazy, and because the resulting patch was too large, too ugly 694b0715f0SLars Ellenberg * and still incomplete, on 32bit we still "only" support 16 TiB (minus some), 704b0715f0SLars Ellenberg * (1 << 32) bits * 4k storage. 714b0715f0SLars Ellenberg * 724b0715f0SLars Ellenberg 734b0715f0SLars Ellenberg * bitmap storage and IO: 744b0715f0SLars Ellenberg * Bitmap is stored little endian on disk, and is kept little endian in 754b0715f0SLars Ellenberg * core memory. Currently we still hold the full bitmap in core as long 764b0715f0SLars Ellenberg * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage 774b0715f0SLars Ellenberg * seems excessive. 784b0715f0SLars Ellenberg * 7924c4830cSBart Van Assche * We plan to reduce the amount of in-core bitmap pages by paging them in 804b0715f0SLars Ellenberg * and out against their on-disk location as necessary, but need to make 814b0715f0SLars Ellenberg * sure we don't cause too much meta data IO, and must not deadlock in 824b0715f0SLars Ellenberg * tight memory situations. This needs some more work. 
83b411b363SPhilipp Reisner */ 84b411b363SPhilipp Reisner 85b411b363SPhilipp Reisner /* 86b411b363SPhilipp Reisner * NOTE 87b411b363SPhilipp Reisner * Access to the *bm_pages is protected by bm_lock. 88b411b363SPhilipp Reisner * It is safe to read the other members within the lock. 89b411b363SPhilipp Reisner * 90b411b363SPhilipp Reisner * drbd_bm_set_bits is called from bio_endio callbacks, 91b411b363SPhilipp Reisner * We may be called with irq already disabled, 92b411b363SPhilipp Reisner * so we need spin_lock_irqsave(). 93b411b363SPhilipp Reisner * And we need the kmap_atomic. 94b411b363SPhilipp Reisner */ 95b411b363SPhilipp Reisner struct drbd_bitmap { 96b411b363SPhilipp Reisner struct page **bm_pages; 97b411b363SPhilipp Reisner spinlock_t bm_lock; 984b0715f0SLars Ellenberg 994b0715f0SLars Ellenberg /* see LIMITATIONS: above */ 1004b0715f0SLars Ellenberg 101b411b363SPhilipp Reisner unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */ 102b411b363SPhilipp Reisner unsigned long bm_bits; 103b411b363SPhilipp Reisner size_t bm_words; 104b411b363SPhilipp Reisner size_t bm_number_of_pages; 105b411b363SPhilipp Reisner sector_t bm_dev_capacity; 1068a03ae2aSThomas Gleixner struct mutex bm_change; /* serializes resize operations */ 107b411b363SPhilipp Reisner 10819f843aaSLars Ellenberg wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */ 109b411b363SPhilipp Reisner 11020ceb2b2SLars Ellenberg enum bm_flag bm_flags; 111b411b363SPhilipp Reisner 112b411b363SPhilipp Reisner /* debugging aid, in case we are still racy somewhere */ 113b411b363SPhilipp Reisner char *bm_why; 114b411b363SPhilipp Reisner struct task_struct *bm_task; 115b411b363SPhilipp Reisner }; 116b411b363SPhilipp Reisner 117b411b363SPhilipp Reisner #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__) 118b30ab791SAndreas Gruenbacher static void __bm_print_lock_info(struct drbd_device *device, const char *func) 119b411b363SPhilipp Reisner { 120b30ab791SAndreas Gruenbacher 
struct drbd_bitmap *b = device->bitmap; 121b411b363SPhilipp Reisner if (!__ratelimit(&drbd_ratelimit_state)) 122b411b363SPhilipp Reisner return; 123c60b0251SAndreas Gruenbacher drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n", 124c60b0251SAndreas Gruenbacher current->comm, task_pid_nr(current), 125b411b363SPhilipp Reisner func, b->bm_why ?: "?", 126c60b0251SAndreas Gruenbacher b->bm_task->comm, task_pid_nr(b->bm_task)); 127b411b363SPhilipp Reisner } 128b411b363SPhilipp Reisner 129b30ab791SAndreas Gruenbacher void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags) 130b411b363SPhilipp Reisner { 131b30ab791SAndreas Gruenbacher struct drbd_bitmap *b = device->bitmap; 132b411b363SPhilipp Reisner int trylock_failed; 133b411b363SPhilipp Reisner 134b411b363SPhilipp Reisner if (!b) { 135d0180171SAndreas Gruenbacher drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n"); 136b411b363SPhilipp Reisner return; 137b411b363SPhilipp Reisner } 138b411b363SPhilipp Reisner 1398a03ae2aSThomas Gleixner trylock_failed = !mutex_trylock(&b->bm_change); 140b411b363SPhilipp Reisner 141b411b363SPhilipp Reisner if (trylock_failed) { 142c60b0251SAndreas Gruenbacher drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n", 143c60b0251SAndreas Gruenbacher current->comm, task_pid_nr(current), 144b411b363SPhilipp Reisner why, b->bm_why ?: "?", 145c60b0251SAndreas Gruenbacher b->bm_task->comm, task_pid_nr(b->bm_task)); 1468a03ae2aSThomas Gleixner mutex_lock(&b->bm_change); 147b411b363SPhilipp Reisner } 14820ceb2b2SLars Ellenberg if (BM_LOCKED_MASK & b->bm_flags) 149d0180171SAndreas Gruenbacher drbd_err(device, "FIXME bitmap already locked in bm_lock\n"); 15020ceb2b2SLars Ellenberg b->bm_flags |= flags & BM_LOCKED_MASK; 151b411b363SPhilipp Reisner 152b411b363SPhilipp Reisner b->bm_why = why; 153b411b363SPhilipp Reisner b->bm_task = current; 154b411b363SPhilipp Reisner } 155b411b363SPhilipp Reisner 156b30ab791SAndreas 
Gruenbacher void drbd_bm_unlock(struct drbd_device *device) 157b411b363SPhilipp Reisner { 158b30ab791SAndreas Gruenbacher struct drbd_bitmap *b = device->bitmap; 159b411b363SPhilipp Reisner if (!b) { 160d0180171SAndreas Gruenbacher drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n"); 161b411b363SPhilipp Reisner return; 162b411b363SPhilipp Reisner } 163b411b363SPhilipp Reisner 164b30ab791SAndreas Gruenbacher if (!(BM_LOCKED_MASK & device->bitmap->bm_flags)) 165d0180171SAndreas Gruenbacher drbd_err(device, "FIXME bitmap not locked in bm_unlock\n"); 166b411b363SPhilipp Reisner 16720ceb2b2SLars Ellenberg b->bm_flags &= ~BM_LOCKED_MASK; 168b411b363SPhilipp Reisner b->bm_why = NULL; 169b411b363SPhilipp Reisner b->bm_task = NULL; 1708a03ae2aSThomas Gleixner mutex_unlock(&b->bm_change); 171b411b363SPhilipp Reisner } 172b411b363SPhilipp Reisner 17319f843aaSLars Ellenberg /* we store some "meta" info about our pages in page->private */ 17419f843aaSLars Ellenberg /* at a granularity of 4k storage per bitmap bit: 17519f843aaSLars Ellenberg * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks 17619f843aaSLars Ellenberg * 1<<38 bits, 17719f843aaSLars Ellenberg * 1<<23 4k bitmap pages. 17819f843aaSLars Ellenberg * Use 24 bits as page index, covers 2 peta byte storage 17919f843aaSLars Ellenberg * at a granularity of 4k per bit. 18019f843aaSLars Ellenberg * Used to report the failed page idx on io error from the endio handlers. 18119f843aaSLars Ellenberg */ 18219f843aaSLars Ellenberg #define BM_PAGE_IDX_MASK ((1UL<<24)-1) 18319f843aaSLars Ellenberg /* this page is currently read in, or written back */ 18419f843aaSLars Ellenberg #define BM_PAGE_IO_LOCK 31 18519f843aaSLars Ellenberg /* if there has been an IO error for this page */ 18619f843aaSLars Ellenberg #define BM_PAGE_IO_ERROR 30 18719f843aaSLars Ellenberg /* this is to be able to intelligently skip disk IO, 18819f843aaSLars Ellenberg * set if bits have been set since last IO. 
*/ 18919f843aaSLars Ellenberg #define BM_PAGE_NEED_WRITEOUT 29 19019f843aaSLars Ellenberg /* to mark for lazy writeout once syncer cleared all clearable bits, 19119f843aaSLars Ellenberg * we if bits have been cleared since last IO. */ 19219f843aaSLars Ellenberg #define BM_PAGE_LAZY_WRITEOUT 28 19345dfffebSLars Ellenberg /* pages marked with this "HINT" will be considered for writeout 19445dfffebSLars Ellenberg * on activity log transactions */ 19545dfffebSLars Ellenberg #define BM_PAGE_HINT_WRITEOUT 27 19619f843aaSLars Ellenberg 19724c4830cSBart Van Assche /* store_page_idx uses non-atomic assignment. It is only used directly after 19819f843aaSLars Ellenberg * allocating the page. All other bm_set_page_* and bm_clear_page_* need to 19919f843aaSLars Ellenberg * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap 20019f843aaSLars Ellenberg * changes) may happen from various contexts, and wait_on_bit/wake_up_bit 20119f843aaSLars Ellenberg * requires it all to be atomic as well. */ 20219f843aaSLars Ellenberg static void bm_store_page_idx(struct page *page, unsigned long idx) 20319f843aaSLars Ellenberg { 20419f843aaSLars Ellenberg BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK)); 2050c7db279SArne Redlich set_page_private(page, idx); 20619f843aaSLars Ellenberg } 20719f843aaSLars Ellenberg 20819f843aaSLars Ellenberg static unsigned long bm_page_to_idx(struct page *page) 20919f843aaSLars Ellenberg { 21019f843aaSLars Ellenberg return page_private(page) & BM_PAGE_IDX_MASK; 21119f843aaSLars Ellenberg } 21219f843aaSLars Ellenberg 21319f843aaSLars Ellenberg /* As is very unlikely that the same page is under IO from more than one 21419f843aaSLars Ellenberg * context, we can get away with a bit per page and one wait queue per bitmap. 
21519f843aaSLars Ellenberg */ 216b30ab791SAndreas Gruenbacher static void bm_page_lock_io(struct drbd_device *device, int page_nr) 21719f843aaSLars Ellenberg { 218b30ab791SAndreas Gruenbacher struct drbd_bitmap *b = device->bitmap; 21919f843aaSLars Ellenberg void *addr = &page_private(b->bm_pages[page_nr]); 22019f843aaSLars Ellenberg wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); 22119f843aaSLars Ellenberg } 22219f843aaSLars Ellenberg 223b30ab791SAndreas Gruenbacher static void bm_page_unlock_io(struct drbd_device *device, int page_nr) 22419f843aaSLars Ellenberg { 225b30ab791SAndreas Gruenbacher struct drbd_bitmap *b = device->bitmap; 22619f843aaSLars Ellenberg void *addr = &page_private(b->bm_pages[page_nr]); 2274738fa16SLars Ellenberg clear_bit_unlock(BM_PAGE_IO_LOCK, addr); 228b30ab791SAndreas Gruenbacher wake_up(&device->bitmap->bm_io_wait); 22919f843aaSLars Ellenberg } 23019f843aaSLars Ellenberg 23119f843aaSLars Ellenberg /* set _before_ submit_io, so it may be reset due to being changed 23219f843aaSLars Ellenberg * while this page is in flight... will get submitted later again */ 23319f843aaSLars Ellenberg static void bm_set_page_unchanged(struct page *page) 23419f843aaSLars Ellenberg { 23519f843aaSLars Ellenberg /* use cmpxchg? */ 23619f843aaSLars Ellenberg clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); 23719f843aaSLars Ellenberg clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 23819f843aaSLars Ellenberg } 23919f843aaSLars Ellenberg 24019f843aaSLars Ellenberg static void bm_set_page_need_writeout(struct page *page) 24119f843aaSLars Ellenberg { 24219f843aaSLars Ellenberg set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page)); 24319f843aaSLars Ellenberg } 24419f843aaSLars Ellenberg 24545dfffebSLars Ellenberg /** 24645dfffebSLars Ellenberg * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout 247b30ab791SAndreas Gruenbacher * @device: DRBD device. 
24845dfffebSLars Ellenberg * @page_nr: the bitmap page to mark with the "hint" flag 24945dfffebSLars Ellenberg * 25045dfffebSLars Ellenberg * From within an activity log transaction, we mark a few pages with these 25145dfffebSLars Ellenberg * hints, then call drbd_bm_write_hinted(), which will only write out changed 25245dfffebSLars Ellenberg * pages which are flagged with this mark. 25345dfffebSLars Ellenberg */ 254b30ab791SAndreas Gruenbacher void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr) 25545dfffebSLars Ellenberg { 25645dfffebSLars Ellenberg struct page *page; 257b30ab791SAndreas Gruenbacher if (page_nr >= device->bitmap->bm_number_of_pages) { 258d0180171SAndreas Gruenbacher drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n", 259b30ab791SAndreas Gruenbacher page_nr, (int)device->bitmap->bm_number_of_pages); 26045dfffebSLars Ellenberg return; 26145dfffebSLars Ellenberg } 262b30ab791SAndreas Gruenbacher page = device->bitmap->bm_pages[page_nr]; 26345dfffebSLars Ellenberg set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)); 26445dfffebSLars Ellenberg } 26545dfffebSLars Ellenberg 26619f843aaSLars Ellenberg static int bm_test_page_unchanged(struct page *page) 26719f843aaSLars Ellenberg { 26819f843aaSLars Ellenberg volatile const unsigned long *addr = &page_private(page); 26919f843aaSLars Ellenberg return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0; 27019f843aaSLars Ellenberg } 27119f843aaSLars Ellenberg 27219f843aaSLars Ellenberg static void bm_set_page_io_err(struct page *page) 27319f843aaSLars Ellenberg { 27419f843aaSLars Ellenberg set_bit(BM_PAGE_IO_ERROR, &page_private(page)); 27519f843aaSLars Ellenberg } 27619f843aaSLars Ellenberg 27719f843aaSLars Ellenberg static void bm_clear_page_io_err(struct page *page) 27819f843aaSLars Ellenberg { 27919f843aaSLars Ellenberg clear_bit(BM_PAGE_IO_ERROR, &page_private(page)); 28019f843aaSLars Ellenberg } 28119f843aaSLars Ellenberg 28219f843aaSLars Ellenberg 
static void bm_set_page_lazy_writeout(struct page *page) 28319f843aaSLars Ellenberg { 28419f843aaSLars Ellenberg set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 28519f843aaSLars Ellenberg } 28619f843aaSLars Ellenberg 28719f843aaSLars Ellenberg static int bm_test_page_lazy_writeout(struct page *page) 28819f843aaSLars Ellenberg { 28919f843aaSLars Ellenberg return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page)); 29019f843aaSLars Ellenberg } 29119f843aaSLars Ellenberg 29219f843aaSLars Ellenberg /* on a 32bit box, this would allow for exactly (2<<38) bits. */ 29319f843aaSLars Ellenberg static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr) 29419f843aaSLars Ellenberg { 29519f843aaSLars Ellenberg /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */ 29619f843aaSLars Ellenberg unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3); 29719f843aaSLars Ellenberg BUG_ON(page_nr >= b->bm_number_of_pages); 29819f843aaSLars Ellenberg return page_nr; 29919f843aaSLars Ellenberg } 30019f843aaSLars Ellenberg 30195a0f10cSLars Ellenberg static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) 30295a0f10cSLars Ellenberg { 30395a0f10cSLars Ellenberg /* page_nr = (bitnr/8) >> PAGE_SHIFT; */ 30495a0f10cSLars Ellenberg unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3); 30595a0f10cSLars Ellenberg BUG_ON(page_nr >= b->bm_number_of_pages); 30695a0f10cSLars Ellenberg return page_nr; 30795a0f10cSLars Ellenberg } 30895a0f10cSLars Ellenberg 309589973a7SCong Wang static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) 31095a0f10cSLars Ellenberg { 31195a0f10cSLars Ellenberg struct page *page = b->bm_pages[idx]; 312cfd8005cSCong Wang return (unsigned long *) kmap_atomic(page); 31395a0f10cSLars Ellenberg } 31495a0f10cSLars Ellenberg 31595a0f10cSLars Ellenberg static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) 31695a0f10cSLars Ellenberg { 317cfd8005cSCong Wang return __bm_map_pidx(b, idx); 
31895a0f10cSLars Ellenberg } 31995a0f10cSLars Ellenberg 320cfd8005cSCong Wang static void __bm_unmap(unsigned long *p_addr) 321b411b363SPhilipp Reisner { 322cfd8005cSCong Wang kunmap_atomic(p_addr); 323b411b363SPhilipp Reisner }; 324b411b363SPhilipp Reisner 325b411b363SPhilipp Reisner static void bm_unmap(unsigned long *p_addr) 326b411b363SPhilipp Reisner { 327cfd8005cSCong Wang return __bm_unmap(p_addr); 328b411b363SPhilipp Reisner } 329b411b363SPhilipp Reisner 330b411b363SPhilipp Reisner /* long word offset of _bitmap_ sector */ 331b411b363SPhilipp Reisner #define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL)) 332b411b363SPhilipp Reisner /* word offset from start of bitmap to word number _in_page_ 333b411b363SPhilipp Reisner * modulo longs per page 334b411b363SPhilipp Reisner #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)) 33524c4830cSBart Van Assche hm, well, Philipp thinks gcc might not optimize the % into & (... - 1) 336b411b363SPhilipp Reisner so do it explicitly: 337b411b363SPhilipp Reisner */ 338b411b363SPhilipp Reisner #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1)) 339b411b363SPhilipp Reisner 340b411b363SPhilipp Reisner /* Long words per page */ 341b411b363SPhilipp Reisner #define LWPP (PAGE_SIZE/sizeof(long)) 342b411b363SPhilipp Reisner 343b411b363SPhilipp Reisner /* 344b411b363SPhilipp Reisner * actually most functions herein should take a struct drbd_bitmap*, not a 345b30ab791SAndreas Gruenbacher * struct drbd_device*, but for the debug macros I like to have the device around 346b411b363SPhilipp Reisner * to be able to report device specific. 
347b411b363SPhilipp Reisner */ 348b411b363SPhilipp Reisner 34919f843aaSLars Ellenberg 350b411b363SPhilipp Reisner static void bm_free_pages(struct page **pages, unsigned long number) 351b411b363SPhilipp Reisner { 352b411b363SPhilipp Reisner unsigned long i; 353b411b363SPhilipp Reisner if (!pages) 354b411b363SPhilipp Reisner return; 355b411b363SPhilipp Reisner 356b411b363SPhilipp Reisner for (i = 0; i < number; i++) { 357b411b363SPhilipp Reisner if (!pages[i]) { 358f88c5d90SLars Ellenberg pr_alert("bm_free_pages tried to free a NULL pointer; i=%lu n=%lu\n", 359b411b363SPhilipp Reisner i, number); 360b411b363SPhilipp Reisner continue; 361b411b363SPhilipp Reisner } 362b411b363SPhilipp Reisner __free_page(pages[i]); 363b411b363SPhilipp Reisner pages[i] = NULL; 364b411b363SPhilipp Reisner } 365b411b363SPhilipp Reisner } 366b411b363SPhilipp Reisner 3671d5cfdb0STetsuo Handa static inline void bm_vk_free(void *ptr) 368b411b363SPhilipp Reisner { 3691d5cfdb0STetsuo Handa kvfree(ptr); 370b411b363SPhilipp Reisner } 371b411b363SPhilipp Reisner 372b411b363SPhilipp Reisner /* 373b411b363SPhilipp Reisner * "have" and "want" are NUMBER OF PAGES. 374b411b363SPhilipp Reisner */ 375b411b363SPhilipp Reisner static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) 376b411b363SPhilipp Reisner { 377b411b363SPhilipp Reisner struct page **old_pages = b->bm_pages; 378b411b363SPhilipp Reisner struct page **new_pages, *page; 3791d5cfdb0STetsuo Handa unsigned int i, bytes; 380b411b363SPhilipp Reisner unsigned long have = b->bm_number_of_pages; 381b411b363SPhilipp Reisner 382b411b363SPhilipp Reisner BUG_ON(have == 0 && old_pages != NULL); 383b411b363SPhilipp Reisner BUG_ON(have != 0 && old_pages == NULL); 384b411b363SPhilipp Reisner 385b411b363SPhilipp Reisner if (have == want) 386b411b363SPhilipp Reisner return old_pages; 387b411b363SPhilipp Reisner 388b411b363SPhilipp Reisner /* Trying kmalloc first, falling back to vmalloc. 
3890b143d43SLars Ellenberg * GFP_NOIO, as this is called while drbd IO is "suspended", 3900b143d43SLars Ellenberg * and during resize or attach on diskless Primary, 3910b143d43SLars Ellenberg * we must not block on IO to ourselves. 392bc891c9aSLars Ellenberg * Context is receiver thread or dmsetup. */ 393b411b363SPhilipp Reisner bytes = sizeof(struct page *)*want; 3948be04b93SJoe Perches new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN); 395b411b363SPhilipp Reisner if (!new_pages) { 3960b143d43SLars Ellenberg new_pages = __vmalloc(bytes, 3970b143d43SLars Ellenberg GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO, 3980b143d43SLars Ellenberg PAGE_KERNEL); 399b411b363SPhilipp Reisner if (!new_pages) 400b411b363SPhilipp Reisner return NULL; 401b411b363SPhilipp Reisner } 402b411b363SPhilipp Reisner 403b411b363SPhilipp Reisner if (want >= have) { 404b411b363SPhilipp Reisner for (i = 0; i < have; i++) 405b411b363SPhilipp Reisner new_pages[i] = old_pages[i]; 406b411b363SPhilipp Reisner for (; i < want; i++) { 4070b143d43SLars Ellenberg page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); 408b411b363SPhilipp Reisner if (!page) { 409b411b363SPhilipp Reisner bm_free_pages(new_pages + have, i - have); 4101d5cfdb0STetsuo Handa bm_vk_free(new_pages); 411b411b363SPhilipp Reisner return NULL; 412b411b363SPhilipp Reisner } 41319f843aaSLars Ellenberg /* we want to know which page it is 41419f843aaSLars Ellenberg * from the endio handlers */ 41519f843aaSLars Ellenberg bm_store_page_idx(page, i); 416b411b363SPhilipp Reisner new_pages[i] = page; 417b411b363SPhilipp Reisner } 418b411b363SPhilipp Reisner } else { 419b411b363SPhilipp Reisner for (i = 0; i < want; i++) 420b411b363SPhilipp Reisner new_pages[i] = old_pages[i]; 421b411b363SPhilipp Reisner /* NOT HERE, we are outside the spinlock! 
422b411b363SPhilipp Reisner bm_free_pages(old_pages + want, have - want); 423b411b363SPhilipp Reisner */ 424b411b363SPhilipp Reisner } 425b411b363SPhilipp Reisner 426b411b363SPhilipp Reisner return new_pages; 427b411b363SPhilipp Reisner } 428b411b363SPhilipp Reisner 429b411b363SPhilipp Reisner /* 430*7e5fec31SFabian Frederick * allocates the drbd_bitmap and stores it in device->bitmap. 431b411b363SPhilipp Reisner */ 432b30ab791SAndreas Gruenbacher int drbd_bm_init(struct drbd_device *device) 433b411b363SPhilipp Reisner { 434b30ab791SAndreas Gruenbacher struct drbd_bitmap *b = device->bitmap; 435b411b363SPhilipp Reisner WARN_ON(b != NULL); 436b411b363SPhilipp Reisner b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL); 437b411b363SPhilipp Reisner if (!b) 438b411b363SPhilipp Reisner return -ENOMEM; 439b411b363SPhilipp Reisner spin_lock_init(&b->bm_lock); 4408a03ae2aSThomas Gleixner mutex_init(&b->bm_change); 441b411b363SPhilipp Reisner init_waitqueue_head(&b->bm_io_wait); 442b411b363SPhilipp Reisner 443b30ab791SAndreas Gruenbacher device->bitmap = b; 444b411b363SPhilipp Reisner 445b411b363SPhilipp Reisner return 0; 446b411b363SPhilipp Reisner } 447b411b363SPhilipp Reisner 448b30ab791SAndreas Gruenbacher sector_t drbd_bm_capacity(struct drbd_device *device) 449b411b363SPhilipp Reisner { 450b30ab791SAndreas Gruenbacher if (!expect(device->bitmap)) 451841ce241SAndreas Gruenbacher return 0; 452b30ab791SAndreas Gruenbacher return device->bitmap->bm_dev_capacity; 453b411b363SPhilipp Reisner } 454b411b363SPhilipp Reisner 455b411b363SPhilipp Reisner /* called on driver unload. TODO: call when a device is destroyed. 
456b411b363SPhilipp Reisner */ 457b30ab791SAndreas Gruenbacher void drbd_bm_cleanup(struct drbd_device *device) 458b411b363SPhilipp Reisner { 459b30ab791SAndreas Gruenbacher if (!expect(device->bitmap)) 460841ce241SAndreas Gruenbacher return; 461b30ab791SAndreas Gruenbacher bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages); 4621d5cfdb0STetsuo Handa bm_vk_free(device->bitmap->bm_pages); 463b30ab791SAndreas Gruenbacher kfree(device->bitmap); 464b30ab791SAndreas Gruenbacher device->bitmap = NULL; 465b411b363SPhilipp Reisner } 466b411b363SPhilipp Reisner 467b411b363SPhilipp Reisner /* 468b411b363SPhilipp Reisner * since (b->bm_bits % BITS_PER_LONG) != 0, 469b411b363SPhilipp Reisner * this masks out the remaining bits. 470b411b363SPhilipp Reisner * Returns the number of bits cleared. 471b411b363SPhilipp Reisner */ 4722630628bSLars Ellenberg #ifndef BITS_PER_PAGE 47395a0f10cSLars Ellenberg #define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3)) 47495a0f10cSLars Ellenberg #define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1) 4752630628bSLars Ellenberg #else 4762630628bSLars Ellenberg # if BITS_PER_PAGE != (1UL << (PAGE_SHIFT + 3)) 4772630628bSLars Ellenberg # error "ambiguous BITS_PER_PAGE" 4782630628bSLars Ellenberg # endif 4792630628bSLars Ellenberg #endif 48095a0f10cSLars Ellenberg #define BITS_PER_LONG_MASK (BITS_PER_LONG - 1) 481b411b363SPhilipp Reisner static int bm_clear_surplus(struct drbd_bitmap *b) 482b411b363SPhilipp Reisner { 48395a0f10cSLars Ellenberg unsigned long mask; 484b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 48595a0f10cSLars Ellenberg int tmp; 48695a0f10cSLars Ellenberg int cleared = 0; 487b411b363SPhilipp Reisner 48895a0f10cSLars Ellenberg /* number of bits modulo bits per page */ 48995a0f10cSLars Ellenberg tmp = (b->bm_bits & BITS_PER_PAGE_MASK); 49095a0f10cSLars Ellenberg /* mask the used bits of the word containing the last bit */ 49195a0f10cSLars Ellenberg mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; 49295a0f10cSLars 
Ellenberg /* bitmap is always stored little endian, 49395a0f10cSLars Ellenberg * on disk and in core memory alike */ 49495a0f10cSLars Ellenberg mask = cpu_to_lel(mask); 49595a0f10cSLars Ellenberg 4966850c442SLars Ellenberg p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); 49795a0f10cSLars Ellenberg bm = p_addr + (tmp/BITS_PER_LONG); 49895a0f10cSLars Ellenberg if (mask) { 49995a0f10cSLars Ellenberg /* If mask != 0, we are not exactly aligned, so bm now points 50095a0f10cSLars Ellenberg * to the long containing the last bit. 50195a0f10cSLars Ellenberg * If mask == 0, bm already points to the word immediately 50295a0f10cSLars Ellenberg * after the last (long word aligned) bit. */ 503b411b363SPhilipp Reisner cleared = hweight_long(*bm & ~mask); 504b411b363SPhilipp Reisner *bm &= mask; 50595a0f10cSLars Ellenberg bm++; 506b411b363SPhilipp Reisner } 507b411b363SPhilipp Reisner 50895a0f10cSLars Ellenberg if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) { 50995a0f10cSLars Ellenberg /* on a 32bit arch, we may need to zero out 51095a0f10cSLars Ellenberg * a padding long to align with a 64bit remote */ 511b411b363SPhilipp Reisner cleared += hweight_long(*bm); 512b411b363SPhilipp Reisner *bm = 0; 513b411b363SPhilipp Reisner } 514b411b363SPhilipp Reisner bm_unmap(p_addr); 515b411b363SPhilipp Reisner return cleared; 516b411b363SPhilipp Reisner } 517b411b363SPhilipp Reisner 518b411b363SPhilipp Reisner static void bm_set_surplus(struct drbd_bitmap *b) 519b411b363SPhilipp Reisner { 52095a0f10cSLars Ellenberg unsigned long mask; 521b411b363SPhilipp Reisner unsigned long *p_addr, *bm; 52295a0f10cSLars Ellenberg int tmp; 523b411b363SPhilipp Reisner 52495a0f10cSLars Ellenberg /* number of bits modulo bits per page */ 52595a0f10cSLars Ellenberg tmp = (b->bm_bits & BITS_PER_PAGE_MASK); 52695a0f10cSLars Ellenberg /* mask the used bits of the word containing the last bit */ 52795a0f10cSLars Ellenberg mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1; 52895a0f10cSLars Ellenberg /* bitmap 
	 * is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}

/* you better not modify the bitmap while this is running,
 * or its results will be stale */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	/* mask selects the valid bits of the last (partially used) long word;
	 * all-ones (mask == -1) when bm_bits happens to be long-aligned */
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
	int idx, last_word;

	/* all but last page: every bit in these pages is in range */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx);
		bits += bitmap_weight(p_addr, BITS_PER_PAGE);
		__bm_unmap(p_addr);
		cond_resched();
	}
	/* last (or only) page: count full words up to last_word, then mask
	 * off any surplus bits in the final word before counting it.
	 * Note: this also *clears* the surplus bits in core, as a side effect. */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx);
	bits += bitmap_weight(p_addr, last_word * BITS_PER_LONG);
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr);
	return bits;
}

/* offset and len in long words.*/
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		pr_alert("bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		/* work page by page: do_now is the number of long words that
		 * still fit into the current page (LWPP = long words per page) */
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			pr_alert("BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
				p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		/* page content changed; mark it for the next writeout */
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}

/* For the layout, see comment above drbd_md_set_sector_offsets().
 * Returns the on-disk capacity of the bitmap area in *bits*:
 * sectors << 9 gives bytes, << 3 more gives bits. */
static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
{
	u64 bitmap_sectors;
	/* NOTE(review): al_offset == 8 presumably identifies the fixed
	 * internal meta data layout -- confirm against
	 * drbd_md_set_sector_offsets() */
	if (ldev->md.al_offset == 8)
		bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
	else
		bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
	return bitmap_sectors << (9 + 3);
}

/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0;
	bool growing;

	if (!expect(b))
		return -ENOMEM;

	drbd_bm_lock(device, "resize", BM_LOCKED_MASK);

	drbd_info(device, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	/* capacity == 0: tear the bitmap down completely and release all
	 * pages; bm fields are zeroed under the spinlock, freeing happens
	 * outside of it. */
	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set =
		b->bm_bits =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages);
		goto out;
	}
	bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	/* refuse to grow beyond what the on-disk meta data area can hold */
	if (get_ldev(device)) {
		u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
		put_ldev(device);
		if (bits > bits_on_disk) {
			drbd_info(device, "bits = %lu\n", bits);
			drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		/* same page count: keep the existing page array */
		D_ASSERT(device, b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits = b->bm_bits;

	growing = bits > obits;
	/* when growing with set_new_bits, first set the (old) surplus bits
	 * so the newly covered range ends up all-set after the switch */
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages);
	/* when shrinking, bm_set can only be recomputed by counting */
	if (!growing)
		b->bm_set = bm_count_bits(b);
	drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(device);
	return err;
}

/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 *
 * maybe bm_set should be atomic_t ?
 */
unsigned long _drbd_bm_total_weight(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long s;
	unsigned long flags;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	/* snapshot bm_set under the lock; see comment above about why the
	 * lock matters even though the value may be stale on return */
	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

/* like _drbd_bm_total_weight(), but guarded by a local disk reference */
unsigned long drbd_bm_total_weight(struct drbd_device *device)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync status */
	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(device);
	put_ldev(device);
	return s;
}

/* number of (64bit-aligned) long words backing the bitmap */
size_t drbd_bm_words(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	return b->bm_words;
}

/* number of valid bits in the bitmap */
unsigned long drbd_bm_bits(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return 0;

	return b->bm_bits;
}

/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end > b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		/* process at most up to the end of the current page */
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			/* OR in the received word, and account the delta of
			 * set bits into bm_set */
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}

/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	/* 0xff also set the surplus bits past bm_bits; clear them again */
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}

/* final kref release: unlink the context from the device's pending list,
 * drop the ldev reference taken in bm_rw(), and free the context */
static void drbd_bm_aio_ctx_destroy(struct kref *kref)
{
	struct drbd_bm_aio_ctx *ctx = container_of(kref, struct drbd_bm_aio_ctx, kref);
	unsigned long flags;

	spin_lock_irqsave(&ctx->device->resource->req_lock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&ctx->device->resource->req_lock, flags);
	put_ldev(ctx->device);
	kfree(ctx);
}

/* bv_page may be a copy, or may be the original */
static void drbd_bm_endio(struct bio *bio)
{
	struct drbd_bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);

	/* without BM_AIO_COPY_PAGES we wrote the live page; warn if it was
	 * dirtied while the IO was in flight */
	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);

	if (bio->bi_error) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = bio->bi_error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
					bio->bi_error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(device, idx);

	/* the copy page was allocated in bm_page_io_async() */
	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);

	bio_put(bio);

	/* last completion: flag done, wake the waiter in bm_rw(), and drop
	 * the "in flight" kref */
	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&device->misc_wait);
		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
	}
}

/* submit asynchronous read/write of a single bitmap page (direction and
 * copy-pages behaviour come from ctx->flags) */
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	struct page *page;
	unsigned int len;
	unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;

	sector_t on_disk_sector =
		device->ldev->md.md_offset + device->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(device, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		/* write out a stable snapshot; stash page_nr in the copy so
		 * the endio handler can map it back */
		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM);
		copy_highpage(page, b->bm_pages[page_nr]);
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];
	bio->bi_bdev = device->ldev->md_bdev;
	bio->bi_iter.bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api. Do we want to assert that?
	 */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = drbd_bm_endio;
	bio_set_op_attrs(bio, op, 0);

	if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio_io_error(bio);
	} else {
		submit_bio(bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &device->rs_sect_ev);
	}
}

/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 * flags: BM_AIO_* bits selecting direction and writeout policy;
 * lazy_writeout_upper_idx: if non-zero, only consider pages below this index.
 * Returns 0, or -ENOMEM / -ENODEV / -EIO.
 */
static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct drbd_bm_aio_ctx *ctx;
	struct drbd_bitmap *b = device->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */

	ctx = kmalloc(sizeof(struct drbd_bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	/* kref starts at 2: one for the "in_flight reached zero" event,
	 * one for this function's own reference, dropped at the end */
	*ctx = (struct drbd_bm_aio_ctx) {
		.device = device,
		.start_jif = jiffies,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = flags,
		.error = 0,
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(device, D_ATTACHING)) {  /* put is in drbd_bm_aio_ctx_destroy() */
		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
		kfree(ctx);
		return -ENODEV;
	}
	/* Here D_ATTACHING is sufficient since drbd_bm_read() is called only from
	   drbd_adm_attach(), after device->ldev was assigned. */

	/* plain read or plain write: the bitmap must be locked */
	if (0 == (ctx->flags & ~BM_AIO_READ))
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&ctx->list, &device->pending_bitmap_io);
	spin_unlock_irq(&device->resource->req_lock);

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++) {
		/* ignore completely unchanged pages */
		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
			break;
		if (!(flags & BM_AIO_READ)) {
			/* hinted writeout: only pages explicitly marked */
			if ((flags & BM_AIO_WRITE_HINTED) &&
			    !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
				    &page_private(b->bm_pages[i])))
				continue;

			if (!(flags & BM_AIO_WRITE_ALL_PAGES) &&
			    bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
		}
		atomic_inc(&ctx->in_flight);
		bm_page_io_async(ctx, i);
		++count;
		cond_resched();
	}

	/*
	 * We initialize ctx->in_flight to one to make sure drbd_bm_endio
	 * will not set ctx->done early, and decrement / test it here. If there
	 * are still some bios in flight, we need to wait for them here.
	 * If all IO is done already (or nothing had been submitted), there is
	 * no need to wait. Still, we need to put the kref associated with the
	 * "in_flight reached zero, all done" event.
	 */
	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
	else
		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);

	/* summary for global bitmap IO */
	if (flags == 0)
		drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n",
			 (flags & BM_AIO_READ) ? "READ" : "WRITE",
			 count, jiffies - now);

	if (ctx->error) {
		drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
		err = -EIO; /* ctx->error ? */
	}

	if (atomic_read(&ctx->in_flight))
		err = -EIO; /* Disk timeout/force-detach during IO... */

	now = jiffies;
	if (flags & BM_AIO_READ) {
		/* after reading from disk, the in-core set-bit count must be
		 * recomputed */
		b->bm_set = bm_count_bits(b);
		drbd_info(device, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	if ((flags & ~BM_AIO_READ) == 0)
		drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
		     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
	return err;
}

/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @device:	DRBD device.
 */
int drbd_bm_read(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_READ, 0);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 */
int drbd_bm_write(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, 0, 0);
}

/**
 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will write all pages.
 */
int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0);
}

/**
 * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
 * @device:	DRBD device.
 * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
 */
int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local)
{
	return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx);
}

/**
 * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 * In contrast to drbd_bm_write(), this will copy the bitmap pages
 * to temporary writeout pages. It is intended to trigger a full write-out
 * while still allowing the bitmap to change, for example if a resync or online
 * verify is aborted due to a failed peer disk, while local IO continues, or
 * pending resync acks are still being processed.
 */
int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_COPY_PAGES, 0);
}

/**
 * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
 * @device:	DRBD device.
121445dfffebSLars Ellenberg */ 1215b30ab791SAndreas Gruenbacher int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local) 121645dfffebSLars Ellenberg { 12174ce49266SLars Ellenberg return bm_rw(device, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0); 121845dfffebSLars Ellenberg } 121919f843aaSLars Ellenberg 1220b411b363SPhilipp Reisner /* NOTE 1221b411b363SPhilipp Reisner * find_first_bit returns int, we return unsigned long. 12224b0715f0SLars Ellenberg * For this to work on 32bit arch with bitnumbers > (1<<32), 12234b0715f0SLars Ellenberg * we'd need to return u64, and get a whole lot of other places 12244b0715f0SLars Ellenberg * fixed where we still use unsigned long. 1225b411b363SPhilipp Reisner * 1226b411b363SPhilipp Reisner * this returns a bit number, NOT a sector! 1227b411b363SPhilipp Reisner */ 1228b30ab791SAndreas Gruenbacher static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo, 1229cfd8005cSCong Wang const int find_zero_bit) 1230b411b363SPhilipp Reisner { 1231b30ab791SAndreas Gruenbacher struct drbd_bitmap *b = device->bitmap; 1232b411b363SPhilipp Reisner unsigned long *p_addr; 12334b0715f0SLars Ellenberg unsigned long bit_offset; 12344b0715f0SLars Ellenberg unsigned i; 12354b0715f0SLars Ellenberg 1236b411b363SPhilipp Reisner 1237b411b363SPhilipp Reisner if (bm_fo > b->bm_bits) { 1238d0180171SAndreas Gruenbacher drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); 12394b0715f0SLars Ellenberg bm_fo = DRBD_END_OF_BITMAP; 1240b411b363SPhilipp Reisner } else { 1241b411b363SPhilipp Reisner while (bm_fo < b->bm_bits) { 124219f843aaSLars Ellenberg /* bit offset of the first bit in the page */ 12434b0715f0SLars Ellenberg bit_offset = bm_fo & ~BITS_PER_PAGE_MASK; 1244cfd8005cSCong Wang p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo)); 1245b411b363SPhilipp Reisner 1246b411b363SPhilipp Reisner if (find_zero_bit) 12477e599e6eSLinus Torvalds i = find_next_zero_bit_le(p_addr, 12484b0715f0SLars Ellenberg 
PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); 1249b411b363SPhilipp Reisner else 12507e599e6eSLinus Torvalds i = find_next_bit_le(p_addr, 12514b0715f0SLars Ellenberg PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK); 1252b411b363SPhilipp Reisner 1253cfd8005cSCong Wang __bm_unmap(p_addr); 1254b411b363SPhilipp Reisner if (i < PAGE_SIZE*8) { 12554b0715f0SLars Ellenberg bm_fo = bit_offset + i; 12564b0715f0SLars Ellenberg if (bm_fo >= b->bm_bits) 1257b411b363SPhilipp Reisner break; 1258b411b363SPhilipp Reisner goto found; 1259b411b363SPhilipp Reisner } 1260b411b363SPhilipp Reisner bm_fo = bit_offset + PAGE_SIZE*8; 1261b411b363SPhilipp Reisner } 12624b0715f0SLars Ellenberg bm_fo = DRBD_END_OF_BITMAP; 1263b411b363SPhilipp Reisner } 1264b411b363SPhilipp Reisner found: 12654b0715f0SLars Ellenberg return bm_fo; 1266b411b363SPhilipp Reisner } 1267b411b363SPhilipp Reisner 1268b30ab791SAndreas Gruenbacher static unsigned long bm_find_next(struct drbd_device *device, 1269b411b363SPhilipp Reisner unsigned long bm_fo, const int find_zero_bit) 1270b411b363SPhilipp Reisner { 1271b30ab791SAndreas Gruenbacher struct drbd_bitmap *b = device->bitmap; 12724b0715f0SLars Ellenberg unsigned long i = DRBD_END_OF_BITMAP; 1273b411b363SPhilipp Reisner 1274841ce241SAndreas Gruenbacher if (!expect(b)) 1275841ce241SAndreas Gruenbacher return i; 1276841ce241SAndreas Gruenbacher if (!expect(b->bm_pages)) 1277841ce241SAndreas Gruenbacher return i; 1278b411b363SPhilipp Reisner 1279b411b363SPhilipp Reisner spin_lock_irq(&b->bm_lock); 128020ceb2b2SLars Ellenberg if (BM_DONT_TEST & b->bm_flags) 1281b30ab791SAndreas Gruenbacher bm_print_lock_info(device); 1282b411b363SPhilipp Reisner 1283b30ab791SAndreas Gruenbacher i = __bm_find_next(device, bm_fo, find_zero_bit); 1284b411b363SPhilipp Reisner 1285b411b363SPhilipp Reisner spin_unlock_irq(&b->bm_lock); 1286b411b363SPhilipp Reisner return i; 1287b411b363SPhilipp Reisner } 1288b411b363SPhilipp Reisner 1289b30ab791SAndreas Gruenbacher unsigned long drbd_bm_find_next(struct 
drbd_device *device, unsigned long bm_fo) 1290b411b363SPhilipp Reisner { 1291b30ab791SAndreas Gruenbacher return bm_find_next(device, bm_fo, 0); 1292b411b363SPhilipp Reisner } 1293b411b363SPhilipp Reisner 1294b411b363SPhilipp Reisner #if 0 1295b411b363SPhilipp Reisner /* not yet needed for anything. */ 1296b30ab791SAndreas Gruenbacher unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo) 1297b411b363SPhilipp Reisner { 1298b30ab791SAndreas Gruenbacher return bm_find_next(device, bm_fo, 1); 1299b411b363SPhilipp Reisner } 1300b411b363SPhilipp Reisner #endif 1301b411b363SPhilipp Reisner 1302b411b363SPhilipp Reisner /* does not spin_lock_irqsave. 1303b411b363SPhilipp Reisner * you must take drbd_bm_lock() first */ 1304b30ab791SAndreas Gruenbacher unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo) 1305b411b363SPhilipp Reisner { 1306b30ab791SAndreas Gruenbacher /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */ 1307b30ab791SAndreas Gruenbacher return __bm_find_next(device, bm_fo, 0); 1308b411b363SPhilipp Reisner } 1309b411b363SPhilipp Reisner 1310b30ab791SAndreas Gruenbacher unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo) 1311b411b363SPhilipp Reisner { 1312b30ab791SAndreas Gruenbacher /* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */ 1313b30ab791SAndreas Gruenbacher return __bm_find_next(device, bm_fo, 1); 1314b411b363SPhilipp Reisner } 1315b411b363SPhilipp Reisner 1316b411b363SPhilipp Reisner /* returns number of bits actually changed. 1317b411b363SPhilipp Reisner * for val != 0, we change 0 -> 1, return code positive 1318b411b363SPhilipp Reisner * for val == 0, we change 1 -> 0, return code negative 1319b411b363SPhilipp Reisner * wants bitnr, not sector. 1320b411b363SPhilipp Reisner * expected to be called for only a few bits (e - s about BITS_PER_LONG). 1321b411b363SPhilipp Reisner * Must hold bitmap lock already. 
 */
static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
	unsigned long e, int val)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;
	int c = 0;		/* per-page change count (sign encodes direction) */
	int changed_total = 0;

	if (e >= b->bm_bits) {
		drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr);
			/* flush the writeout hint for the page we just left:
			 * bits cleared (c < 0) only need lazy writeout,
			 * bits set (c > 0) need a real writeout */
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr);
	/* final flush for the last page touched by the loop above */
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
	const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	int c = 0;

	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	/* setting bits is forbidden under BM_DONT_SET,
	 * clearing under BM_DONT_CLEAR; complain accordingly */
	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
		bm_print_lock_info(device);

	c = __bm_change_bits_to(device, s, e, val);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(device, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(device, s, e, 0);
}

/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
		int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	int changed = 0;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);

	/* I think it is more cache line friendly to hweight_long then set to ~0UL,
	 * than to first bitmap_weight() all words, then bitmap_fill() all words */
	for (i = first_word; i < last_word; i++) {
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		changed += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr);
	if (changed) {
		/* We only need lazy writeout, the information is still in the
		 * remote bitmap as well, and is reconstructed during the next
		 * bitmap exchange, if lost locally due to a crash. */
		bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
		b->bm_set += changed;
	}
}

/* Same thing as drbd_bm_set_bits,
 * but more efficient for a large bit range.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	struct drbd_bitmap *b = device->bitmap;
	unsigned long sl = ALIGN(s,BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(device, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	spin_lock_irq(&b->bm_lock);

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(device, s, sl-1, 1);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
		/* deliberately drop the lock between pages so we may
		 * reschedule on long runs; callers hold drbd_bm_lock() */
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}
	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);

	/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
	 * ==> e = 32767, el = 32768, last_page = 2,
	 * and now last_word = 0.
	 * We do not want to touch last_page in this case,
	 * as we did not allocate it, it is not present in bitmap->bm_pages.
	 */
	if (last_word)
		bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(device, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}

/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 * 1 ... bit set
 * 0 ...
bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	int i;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	if (bitnr < b->bm_bits) {
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		/* exactly one past the end: -1 signals "stop testing" */
		i = -1;
	} else { /* (bitnr > b->bm_bits) */
		drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}

/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int page_nr = -1U;
	int c = 0;

	/* If this is called without a bitmap, that is a bug.  But just to be
	 * robust in case we screwed up elsewhere, in that case pretend there
	 * was one dirty bit in the requested area, so we won't try to do a
	 * local read there (no bitmap probably implies no disk) */
	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
		/* remap only when we cross into a different bitmap page */
		if (page_nr != idx) {
			page_nr = idx;
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_pidx(b, idx);
		}
		if (expect(bitnr < b->bm_bits))
			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		else
			drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}


/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * but the general usage is that this is only use during a cstate when bits are
 * only cleared, not set, and typically only care for the case when the return
 * value is zero, or we already "locked" this "bitmap extent" by other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 *
 * TODO
 * I think since we use it like a reference count, we should use the real
 * reference count of some bitmap extent element from some lru instead...
 *
 */
int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
{
	struct drbd_bitmap *b = device->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);

	/* convert the extent number to a word range [s, e) within the bitmap */
	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		count += bitmap_weight(bm, n * BITS_PER_LONG);
		bm_unmap(p_addr);
	} else {
		drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}