xref: /openbmc/linux/drivers/block/drbd/drbd_bitmap.c (revision d1aa4d04da8de5c89d73859e077d89c4c71d8ed1)
1b411b363SPhilipp Reisner /*
2b411b363SPhilipp Reisner    drbd_bitmap.c
3b411b363SPhilipp Reisner 
4b411b363SPhilipp Reisner    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5b411b363SPhilipp Reisner 
6b411b363SPhilipp Reisner    Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
7b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9b411b363SPhilipp Reisner 
10b411b363SPhilipp Reisner    drbd is free software; you can redistribute it and/or modify
11b411b363SPhilipp Reisner    it under the terms of the GNU General Public License as published by
12b411b363SPhilipp Reisner    the Free Software Foundation; either version 2, or (at your option)
13b411b363SPhilipp Reisner    any later version.
14b411b363SPhilipp Reisner 
15b411b363SPhilipp Reisner    drbd is distributed in the hope that it will be useful,
16b411b363SPhilipp Reisner    but WITHOUT ANY WARRANTY; without even the implied warranty of
17b411b363SPhilipp Reisner    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18b411b363SPhilipp Reisner    GNU General Public License for more details.
19b411b363SPhilipp Reisner 
20b411b363SPhilipp Reisner    You should have received a copy of the GNU General Public License
21b411b363SPhilipp Reisner    along with drbd; see the file COPYING.  If not, write to
22b411b363SPhilipp Reisner    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23b411b363SPhilipp Reisner  */
24b411b363SPhilipp Reisner 
25b411b363SPhilipp Reisner #include <linux/bitops.h>
26b411b363SPhilipp Reisner #include <linux/vmalloc.h>
27b411b363SPhilipp Reisner #include <linux/string.h>
28b411b363SPhilipp Reisner #include <linux/drbd.h>
295a0e3ad6STejun Heo #include <linux/slab.h>
30b411b363SPhilipp Reisner #include <asm/kmap_types.h>
31f0ff1357SStephen Rothwell 
32b411b363SPhilipp Reisner #include "drbd_int.h"
33b411b363SPhilipp Reisner 
3495a0f10cSLars Ellenberg 
35b411b363SPhilipp Reisner /* OPAQUE outside this file!
36b411b363SPhilipp Reisner  * interface defined in drbd_int.h
37b411b363SPhilipp Reisner 
38b411b363SPhilipp Reisner  * convention:
39b411b363SPhilipp Reisner  * function name drbd_bm_... => used elsewhere, "public".
40b411b363SPhilipp Reisner  * function name      bm_... => internal to implementation, "private".
414b0715f0SLars Ellenberg  */
42b411b363SPhilipp Reisner 
434b0715f0SLars Ellenberg 
444b0715f0SLars Ellenberg /*
454b0715f0SLars Ellenberg  * LIMITATIONS:
464b0715f0SLars Ellenberg  * We want to support >= peta byte of backend storage, while for now still using
474b0715f0SLars Ellenberg  * a granularity of one bit per 4KiB of storage.
484b0715f0SLars Ellenberg  * 1 << 50		bytes backend storage (1 PiB)
494b0715f0SLars Ellenberg  * 1 << (50 - 12)	bits needed
504b0715f0SLars Ellenberg  *	38 --> we need u64 to index and count bits
514b0715f0SLars Ellenberg  * 1 << (38 - 3)	bitmap bytes needed
524b0715f0SLars Ellenberg  *	35 --> we still need u64 to index and count bytes
534b0715f0SLars Ellenberg  *			(that's 32 GiB of bitmap for 1 PiB storage)
544b0715f0SLars Ellenberg  * 1 << (35 - 2)	32bit longs needed
554b0715f0SLars Ellenberg  *	33 --> we'd even need u64 to index and count 32bit long words.
564b0715f0SLars Ellenberg  * 1 << (35 - 3)	64bit longs needed
574b0715f0SLars Ellenberg  *	32 --> we could get away with a 32bit unsigned int to index and count
584b0715f0SLars Ellenberg  *	64bit long words, but I rather stay with unsigned long for now.
594b0715f0SLars Ellenberg  *	We probably should neither count nor point to bytes or long words
604b0715f0SLars Ellenberg  *	directly, but either by bitnumber, or by page index and offset.
614b0715f0SLars Ellenberg  * 1 << (35 - 12)
624b0715f0SLars Ellenberg  *	22 --> we need that much 4KiB pages of bitmap.
634b0715f0SLars Ellenberg  *	1 << (22 + 3) --> on a 64bit arch,
644b0715f0SLars Ellenberg  *	we need 32 MiB to store the array of page pointers.
654b0715f0SLars Ellenberg  *
664b0715f0SLars Ellenberg  * Because I'm lazy, and because the resulting patch was too large, too ugly
674b0715f0SLars Ellenberg  * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
684b0715f0SLars Ellenberg  * (1 << 32) bits * 4k storage.
694b0715f0SLars Ellenberg  *
704b0715f0SLars Ellenberg 
714b0715f0SLars Ellenberg  * bitmap storage and IO:
724b0715f0SLars Ellenberg  *	Bitmap is stored little endian on disk, and is kept little endian in
734b0715f0SLars Ellenberg  *	core memory. Currently we still hold the full bitmap in core as long
744b0715f0SLars Ellenberg  *	as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
754b0715f0SLars Ellenberg  *	seems excessive.
764b0715f0SLars Ellenberg  *
7724c4830cSBart Van Assche  *	We plan to reduce the amount of in-core bitmap pages by paging them in
784b0715f0SLars Ellenberg  *	and out against their on-disk location as necessary, but need to make
794b0715f0SLars Ellenberg  *	sure we don't cause too much meta data IO, and must not deadlock in
804b0715f0SLars Ellenberg  *	tight memory situations. This needs some more work.
81b411b363SPhilipp Reisner  */
82b411b363SPhilipp Reisner 
83b411b363SPhilipp Reisner /*
84b411b363SPhilipp Reisner  * NOTE
85b411b363SPhilipp Reisner  *  Access to the *bm_pages is protected by bm_lock.
86b411b363SPhilipp Reisner  *  It is safe to read the other members within the lock.
87b411b363SPhilipp Reisner  *
88b411b363SPhilipp Reisner  *  drbd_bm_set_bits is called from bio_endio callbacks,
89b411b363SPhilipp Reisner  *  We may be called with irq already disabled,
90b411b363SPhilipp Reisner  *  so we need spin_lock_irqsave().
91b411b363SPhilipp Reisner  *  And we need the kmap_atomic.
92b411b363SPhilipp Reisner  */
struct drbd_bitmap {
	struct page **bm_pages;	/* array of pages backing the bitmap; protected by bm_lock */
	spinlock_t bm_lock;	/* protects *bm_pages and guards reads of the members below */

	/* see LIMITATIONS: above */

	unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
	unsigned long bm_bits;      /* total number of bits tracked by this bitmap */
	size_t   bm_words;          /* number of unsigned longs covering bm_bits */
	size_t   bm_number_of_pages; /* number of entries in bm_pages[] */
	sector_t bm_dev_capacity;   /* capacity of the backing device this bitmap was sized for */
	struct mutex bm_change; /* serializes resize operations */

	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */

	enum bm_flag bm_flags;	/* BM_LOCKED_* / BM_P_VMALLOCED state bits */

	/* debugging aid, in case we are still racy somewhere */
	char          *bm_why;	/* reason string recorded by drbd_bm_lock() */
	struct task_struct *bm_task;	/* task that currently holds the bitmap lock */
};
114b411b363SPhilipp Reisner 
115b411b363SPhilipp Reisner #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
116b411b363SPhilipp Reisner static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
117b411b363SPhilipp Reisner {
118b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
119b411b363SPhilipp Reisner 	if (!__ratelimit(&drbd_ratelimit_state))
120b411b363SPhilipp Reisner 		return;
121b411b363SPhilipp Reisner 	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
122b411b363SPhilipp Reisner 	    current == mdev->receiver.task ? "receiver" :
123b411b363SPhilipp Reisner 	    current == mdev->asender.task  ? "asender"  :
124b411b363SPhilipp Reisner 	    current == mdev->worker.task   ? "worker"   : current->comm,
125b411b363SPhilipp Reisner 	    func, b->bm_why ?: "?",
126b411b363SPhilipp Reisner 	    b->bm_task == mdev->receiver.task ? "receiver" :
127b411b363SPhilipp Reisner 	    b->bm_task == mdev->asender.task  ? "asender"  :
128b411b363SPhilipp Reisner 	    b->bm_task == mdev->worker.task   ? "worker"   : "?");
129b411b363SPhilipp Reisner }
130b411b363SPhilipp Reisner 
13120ceb2b2SLars Ellenberg void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
132b411b363SPhilipp Reisner {
133b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
134b411b363SPhilipp Reisner 	int trylock_failed;
135b411b363SPhilipp Reisner 
136b411b363SPhilipp Reisner 	if (!b) {
137b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
138b411b363SPhilipp Reisner 		return;
139b411b363SPhilipp Reisner 	}
140b411b363SPhilipp Reisner 
1418a03ae2aSThomas Gleixner 	trylock_failed = !mutex_trylock(&b->bm_change);
142b411b363SPhilipp Reisner 
143b411b363SPhilipp Reisner 	if (trylock_failed) {
144b411b363SPhilipp Reisner 		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
145b411b363SPhilipp Reisner 		    current == mdev->receiver.task ? "receiver" :
146b411b363SPhilipp Reisner 		    current == mdev->asender.task  ? "asender"  :
147b411b363SPhilipp Reisner 		    current == mdev->worker.task   ? "worker"   : current->comm,
148b411b363SPhilipp Reisner 		    why, b->bm_why ?: "?",
149b411b363SPhilipp Reisner 		    b->bm_task == mdev->receiver.task ? "receiver" :
150b411b363SPhilipp Reisner 		    b->bm_task == mdev->asender.task  ? "asender"  :
151b411b363SPhilipp Reisner 		    b->bm_task == mdev->worker.task   ? "worker"   : "?");
1528a03ae2aSThomas Gleixner 		mutex_lock(&b->bm_change);
153b411b363SPhilipp Reisner 	}
15420ceb2b2SLars Ellenberg 	if (BM_LOCKED_MASK & b->bm_flags)
155b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
15620ceb2b2SLars Ellenberg 	b->bm_flags |= flags & BM_LOCKED_MASK;
157b411b363SPhilipp Reisner 
158b411b363SPhilipp Reisner 	b->bm_why  = why;
159b411b363SPhilipp Reisner 	b->bm_task = current;
160b411b363SPhilipp Reisner }
161b411b363SPhilipp Reisner 
162b411b363SPhilipp Reisner void drbd_bm_unlock(struct drbd_conf *mdev)
163b411b363SPhilipp Reisner {
164b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
165b411b363SPhilipp Reisner 	if (!b) {
166b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
167b411b363SPhilipp Reisner 		return;
168b411b363SPhilipp Reisner 	}
169b411b363SPhilipp Reisner 
17020ceb2b2SLars Ellenberg 	if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
171b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");
172b411b363SPhilipp Reisner 
17320ceb2b2SLars Ellenberg 	b->bm_flags &= ~BM_LOCKED_MASK;
174b411b363SPhilipp Reisner 	b->bm_why  = NULL;
175b411b363SPhilipp Reisner 	b->bm_task = NULL;
1768a03ae2aSThomas Gleixner 	mutex_unlock(&b->bm_change);
177b411b363SPhilipp Reisner }
178b411b363SPhilipp Reisner 
17919f843aaSLars Ellenberg /* we store some "meta" info about our pages in page->private */
18019f843aaSLars Ellenberg /* at a granularity of 4k storage per bitmap bit:
18119f843aaSLars Ellenberg  * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
18219f843aaSLars Ellenberg  *  1<<38 bits,
18319f843aaSLars Ellenberg  *  1<<23 4k bitmap pages.
18419f843aaSLars Ellenberg  * Use 24 bits as page index, covers 2 peta byte storage
18519f843aaSLars Ellenberg  * at a granularity of 4k per bit.
18619f843aaSLars Ellenberg  * Used to report the failed page idx on io error from the endio handlers.
18719f843aaSLars Ellenberg  */
18819f843aaSLars Ellenberg #define BM_PAGE_IDX_MASK	((1UL<<24)-1)
18919f843aaSLars Ellenberg /* this page is currently read in, or written back */
19019f843aaSLars Ellenberg #define BM_PAGE_IO_LOCK		31
19119f843aaSLars Ellenberg /* if there has been an IO error for this page */
19219f843aaSLars Ellenberg #define BM_PAGE_IO_ERROR	30
19319f843aaSLars Ellenberg /* this is to be able to intelligently skip disk IO,
19419f843aaSLars Ellenberg  * set if bits have been set since last IO. */
19519f843aaSLars Ellenberg #define BM_PAGE_NEED_WRITEOUT	29
19619f843aaSLars Ellenberg /* to mark for lazy writeout once syncer cleared all clearable bits,
19719f843aaSLars Ellenberg  * set if bits have been cleared since last IO. */
19819f843aaSLars Ellenberg #define BM_PAGE_LAZY_WRITEOUT	28
19919f843aaSLars Ellenberg 
20024c4830cSBart Van Assche /* store_page_idx uses non-atomic assignment. It is only used directly after
20119f843aaSLars Ellenberg  * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
20219f843aaSLars Ellenberg  * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
20319f843aaSLars Ellenberg  * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
20419f843aaSLars Ellenberg  * requires it all to be atomic as well. */
20519f843aaSLars Ellenberg static void bm_store_page_idx(struct page *page, unsigned long idx)
20619f843aaSLars Ellenberg {
20719f843aaSLars Ellenberg 	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
2080c7db279SArne Redlich 	set_page_private(page, idx);
20919f843aaSLars Ellenberg }
21019f843aaSLars Ellenberg 
21119f843aaSLars Ellenberg static unsigned long bm_page_to_idx(struct page *page)
21219f843aaSLars Ellenberg {
21319f843aaSLars Ellenberg 	return page_private(page) & BM_PAGE_IDX_MASK;
21419f843aaSLars Ellenberg }
21519f843aaSLars Ellenberg 
21619f843aaSLars Ellenberg /* As is very unlikely that the same page is under IO from more than one
21719f843aaSLars Ellenberg  * context, we can get away with a bit per page and one wait queue per bitmap.
21819f843aaSLars Ellenberg  */
21919f843aaSLars Ellenberg static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
22019f843aaSLars Ellenberg {
22119f843aaSLars Ellenberg 	struct drbd_bitmap *b = mdev->bitmap;
22219f843aaSLars Ellenberg 	void *addr = &page_private(b->bm_pages[page_nr]);
22319f843aaSLars Ellenberg 	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
22419f843aaSLars Ellenberg }
22519f843aaSLars Ellenberg 
22619f843aaSLars Ellenberg static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
22719f843aaSLars Ellenberg {
22819f843aaSLars Ellenberg 	struct drbd_bitmap *b = mdev->bitmap;
22919f843aaSLars Ellenberg 	void *addr = &page_private(b->bm_pages[page_nr]);
23019f843aaSLars Ellenberg 	clear_bit(BM_PAGE_IO_LOCK, addr);
23119f843aaSLars Ellenberg 	smp_mb__after_clear_bit();
23219f843aaSLars Ellenberg 	wake_up(&mdev->bitmap->bm_io_wait);
23319f843aaSLars Ellenberg }
23419f843aaSLars Ellenberg 
23519f843aaSLars Ellenberg /* set _before_ submit_io, so it may be reset due to being changed
23619f843aaSLars Ellenberg  * while this page is in flight... will get submitted later again */
23719f843aaSLars Ellenberg static void bm_set_page_unchanged(struct page *page)
23819f843aaSLars Ellenberg {
23919f843aaSLars Ellenberg 	/* use cmpxchg? */
24019f843aaSLars Ellenberg 	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
24119f843aaSLars Ellenberg 	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
24219f843aaSLars Ellenberg }
24319f843aaSLars Ellenberg 
24419f843aaSLars Ellenberg static void bm_set_page_need_writeout(struct page *page)
24519f843aaSLars Ellenberg {
24619f843aaSLars Ellenberg 	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
24719f843aaSLars Ellenberg }
24819f843aaSLars Ellenberg 
24919f843aaSLars Ellenberg static int bm_test_page_unchanged(struct page *page)
25019f843aaSLars Ellenberg {
25119f843aaSLars Ellenberg 	volatile const unsigned long *addr = &page_private(page);
25219f843aaSLars Ellenberg 	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
25319f843aaSLars Ellenberg }
25419f843aaSLars Ellenberg 
25519f843aaSLars Ellenberg static void bm_set_page_io_err(struct page *page)
25619f843aaSLars Ellenberg {
25719f843aaSLars Ellenberg 	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
25819f843aaSLars Ellenberg }
25919f843aaSLars Ellenberg 
26019f843aaSLars Ellenberg static void bm_clear_page_io_err(struct page *page)
26119f843aaSLars Ellenberg {
26219f843aaSLars Ellenberg 	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
26319f843aaSLars Ellenberg }
26419f843aaSLars Ellenberg 
26519f843aaSLars Ellenberg static void bm_set_page_lazy_writeout(struct page *page)
26619f843aaSLars Ellenberg {
26719f843aaSLars Ellenberg 	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
26819f843aaSLars Ellenberg }
26919f843aaSLars Ellenberg 
27019f843aaSLars Ellenberg static int bm_test_page_lazy_writeout(struct page *page)
27119f843aaSLars Ellenberg {
27219f843aaSLars Ellenberg 	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
27319f843aaSLars Ellenberg }
27419f843aaSLars Ellenberg 
27519f843aaSLars Ellenberg /* on a 32bit box, this would allow for exactly (2<<38) bits. */
27619f843aaSLars Ellenberg static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
27719f843aaSLars Ellenberg {
27819f843aaSLars Ellenberg 	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
27919f843aaSLars Ellenberg 	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
28019f843aaSLars Ellenberg 	BUG_ON(page_nr >= b->bm_number_of_pages);
28119f843aaSLars Ellenberg 	return page_nr;
28219f843aaSLars Ellenberg }
28319f843aaSLars Ellenberg 
28495a0f10cSLars Ellenberg static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
28595a0f10cSLars Ellenberg {
28695a0f10cSLars Ellenberg 	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
28795a0f10cSLars Ellenberg 	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
28895a0f10cSLars Ellenberg 	BUG_ON(page_nr >= b->bm_number_of_pages);
28995a0f10cSLars Ellenberg 	return page_nr;
29095a0f10cSLars Ellenberg }
29195a0f10cSLars Ellenberg 
292589973a7SCong Wang static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
29395a0f10cSLars Ellenberg {
29495a0f10cSLars Ellenberg 	struct page *page = b->bm_pages[idx];
295cfd8005cSCong Wang 	return (unsigned long *) kmap_atomic(page);
29695a0f10cSLars Ellenberg }
29795a0f10cSLars Ellenberg 
/* Public-side wrapper around __bm_map_pidx(); counterpart of bm_unmap(). */
static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx);
}
30295a0f10cSLars Ellenberg 
/* Undo a __bm_map_pidx() mapping. */
static void __bm_unmap(unsigned long *p_addr)
{
	kunmap_atomic(p_addr);
}
/* fix: removed the stray ';' that followed the closing brace */
307b411b363SPhilipp Reisner 
/* Counterpart of bm_map_pidx(). */
static void bm_unmap(unsigned long *p_addr)
{
	/* fix: "return <void expr>;" in a void function is a C constraint
	 * violation (C11 6.8.6.4); call the helper as a plain statement */
	__bm_unmap(p_addr);
}
312b411b363SPhilipp Reisner 
313b411b363SPhilipp Reisner /* long word offset of _bitmap_ sector */
314b411b363SPhilipp Reisner #define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
315b411b363SPhilipp Reisner /* word offset from start of bitmap to word number _in_page_
316b411b363SPhilipp Reisner  * modulo longs per page
317b411b363SPhilipp Reisner #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
31824c4830cSBart Van Assche  hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
319b411b363SPhilipp Reisner  so do it explicitly:
320b411b363SPhilipp Reisner  */
321b411b363SPhilipp Reisner #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
322b411b363SPhilipp Reisner 
323b411b363SPhilipp Reisner /* Long words per page */
324b411b363SPhilipp Reisner #define LWPP (PAGE_SIZE/sizeof(long))
325b411b363SPhilipp Reisner 
326b411b363SPhilipp Reisner /*
327b411b363SPhilipp Reisner  * actually most functions herein should take a struct drbd_bitmap*, not a
328b411b363SPhilipp Reisner  * struct drbd_conf*, but for the debug macros I like to have the mdev around
329b411b363SPhilipp Reisner  * to be able to report device specific.
330b411b363SPhilipp Reisner  */
331b411b363SPhilipp Reisner 
33219f843aaSLars Ellenberg 
333b411b363SPhilipp Reisner static void bm_free_pages(struct page **pages, unsigned long number)
334b411b363SPhilipp Reisner {
335b411b363SPhilipp Reisner 	unsigned long i;
336b411b363SPhilipp Reisner 	if (!pages)
337b411b363SPhilipp Reisner 		return;
338b411b363SPhilipp Reisner 
339b411b363SPhilipp Reisner 	for (i = 0; i < number; i++) {
340b411b363SPhilipp Reisner 		if (!pages[i]) {
341b411b363SPhilipp Reisner 			printk(KERN_ALERT "drbd: bm_free_pages tried to free "
342b411b363SPhilipp Reisner 					  "a NULL pointer; i=%lu n=%lu\n",
343b411b363SPhilipp Reisner 					  i, number);
344b411b363SPhilipp Reisner 			continue;
345b411b363SPhilipp Reisner 		}
346b411b363SPhilipp Reisner 		__free_page(pages[i]);
347b411b363SPhilipp Reisner 		pages[i] = NULL;
348b411b363SPhilipp Reisner 	}
349b411b363SPhilipp Reisner }
350b411b363SPhilipp Reisner 
/* Free memory obtained by bm_realloc_pages(): @v nonzero means it came
 * from vmalloc, otherwise from kmalloc. */
static void bm_vk_free(void *ptr, int v)
{
	if (!v)
		kfree(ptr);
	else
		vfree(ptr);
}
358b411b363SPhilipp Reisner 
359b411b363SPhilipp Reisner /*
360b411b363SPhilipp Reisner  * "have" and "want" are NUMBER OF PAGES.
361b411b363SPhilipp Reisner  */
/*
 * "have" and "want" are NUMBER OF PAGES.
 *
 * Resize the page-pointer array to @want entries, reusing the already
 * allocated pages.  Returns the (possibly new) array, or NULL on
 * allocation failure; on failure the old array and its pages are left
 * untouched.  When shrinking, the surplus pages are NOT freed here
 * (see the comment below) -- the caller must free old_pages later.
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes, vmalloced = 0;
	unsigned long have = b->bm_number_of_pages;

	/* array pointer and page count must agree */
	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_KERNEL is ok, as this is done when a lower level disk is
	 * "attached" to the drbd.  Context is receiver thread or cqueue
	 * thread.  As we have no disk yet, we are not in the IO path,
	 * not even the IO path of the peer. */
	bytes = sizeof(struct page *)*want;
	new_pages = kzalloc(bytes, GFP_KERNEL);
	if (!new_pages) {
		new_pages = vzalloc(bytes);
		if (!new_pages)
			return NULL;
		vmalloced = 1;
	}

	if (want >= have) {
		/* growing: keep existing pages, allocate the rest */
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_HIGHUSER);
			if (!page) {
				/* undo partial allocation; the old array
				 * remains valid and is returned to service */
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages, vmalloced);
				return NULL;
			}
			/* we want to know which page it is
			 * from the endio handlers */
			bm_store_page_idx(page, i);
			new_pages[i] = page;
		}
	} else {
		/* shrinking: just reference the first @want pages */
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	/* remember how the array itself was allocated, so bm_vk_free()
	 * can pick the matching free routine later */
	if (vmalloced)
		b->bm_flags |= BM_P_VMALLOCED;
	else
		b->bm_flags &= ~BM_P_VMALLOCED;

	return new_pages;
}
419b411b363SPhilipp Reisner 
420b411b363SPhilipp Reisner /*
421b411b363SPhilipp Reisner  * called on driver init only. TODO call when a device is created.
422b411b363SPhilipp Reisner  * allocates the drbd_bitmap, and stores it in mdev->bitmap.
423b411b363SPhilipp Reisner  */
424b411b363SPhilipp Reisner int drbd_bm_init(struct drbd_conf *mdev)
425b411b363SPhilipp Reisner {
426b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
427b411b363SPhilipp Reisner 	WARN_ON(b != NULL);
428b411b363SPhilipp Reisner 	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
429b411b363SPhilipp Reisner 	if (!b)
430b411b363SPhilipp Reisner 		return -ENOMEM;
431b411b363SPhilipp Reisner 	spin_lock_init(&b->bm_lock);
4328a03ae2aSThomas Gleixner 	mutex_init(&b->bm_change);
433b411b363SPhilipp Reisner 	init_waitqueue_head(&b->bm_io_wait);
434b411b363SPhilipp Reisner 
435b411b363SPhilipp Reisner 	mdev->bitmap = b;
436b411b363SPhilipp Reisner 
437b411b363SPhilipp Reisner 	return 0;
438b411b363SPhilipp Reisner }
439b411b363SPhilipp Reisner 
/* Return the device capacity the bitmap was last sized for
 * (bm_dev_capacity), or 0 if no bitmap is allocated. */
sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
	ERR_IF(!mdev->bitmap) return 0;
	return mdev->bitmap->bm_dev_capacity;
}
445b411b363SPhilipp Reisner 
446b411b363SPhilipp Reisner /* called on driver unload. TODO: call when a device is destroyed.
447b411b363SPhilipp Reisner  */
448b411b363SPhilipp Reisner void drbd_bm_cleanup(struct drbd_conf *mdev)
449b411b363SPhilipp Reisner {
450b411b363SPhilipp Reisner 	ERR_IF (!mdev->bitmap) return;
451b411b363SPhilipp Reisner 	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
45220ceb2b2SLars Ellenberg 	bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
453b411b363SPhilipp Reisner 	kfree(mdev->bitmap);
454b411b363SPhilipp Reisner 	mdev->bitmap = NULL;
455b411b363SPhilipp Reisner }
456b411b363SPhilipp Reisner 
/*
 * since (b->bm_bits % BITS_PER_LONG) != 0,
 * this masks out the remaining bits.
 * Returns the number of bits cleared.
 */
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	/* the surplus bits live in the last bitmap page */
	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}
501b411b363SPhilipp Reisner 
/* Set all surplus bits past bm_bits in the last word (and, on 32bit,
 * the padding long).  Counterpart of bm_clear_surplus(). */
static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to set all bits of
		 * a padding long to align with a 64bit remote
		 * (the inherited comment said "zero out", but the code
		 * stores all ones here -- the mirror of bm_clear_surplus) */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}
534b411b363SPhilipp Reisner 
/* you better not modify the bitmap while this is running,
 * or its results will be stale */
/* Count all set bits in the bitmap.  Note: this also sanitizes the last
 * page in passing -- it masks the surplus bits of the last word and, on
 * 32bit, zeroes the unused padding long. */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	/* mask of the valid bits in the last (partial) long word */
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
	int idx, i, last_word;

	/* all but last page */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx);
		for (i = 0; i < LWPP; i++)
			bits += hweight_long(p_addr[i]);
		__bm_unmap(p_addr);
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx);
	for (i = 0; i < last_word; i++)
		bits += hweight_long(p_addr[i]);
	/* mask out the surplus bits of the final word before counting it */
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr);
	return bits;
}
565b411b363SPhilipp Reisner 
/* offset and len in long words.*/
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	/* refuse to write past the end of the word array */
	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		/* chunk: at most up to the next page boundary
		 * (LWPP = long words per page) */
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		/* paranoia: a chunk must never cross a page boundary */
		if (bm+do_now > p_addr + LWPP) {
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		/* page content changed: flag it for (lazy) writeout */
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}
595b411b363SPhilipp Reisner 
/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	ERR_IF(!b) return -ENOMEM;

	/* serialize against all other bitmap operations for the duration */
	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	/* nothing to do if the capacity did not change */
	if (capacity == b->bm_dev_capacity)
		goto out;

	/* remember how the old page array was allocated (vmalloc vs kmalloc),
	 * so it can be freed with the matching function below */
	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);

	if (capacity == 0) {
		/* capacity 0 means "free everything": detach the bitmap */
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set   =
		b->bm_bits  =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	/* one bit per BM_SECT_PER_BIT sectors, rounded up */
	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		/* sectors -> bits: <<9 (bytes per sector) then <<3 (bits per byte) */
		u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
		put_ldev(mdev);
		if (bits > bits_on_disk) {
			dev_info(DEV, "bits = %lu\n", bits);
			dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		/* same number of pages: reuse the existing page array */
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	if (opages && growing && set_new_bits)
		/* set the previously unused (surplus) bits of the old last
		 * word, so the newly covered area reads as "out of sync" */
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			/* mark all newly covered blocks out-of-sync */
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	/* clear bits beyond the new bm_bits in the (new) last word */
	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	if (!growing)
		/* shrunk: recount; still serialized by drbd_bm_lock above */
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(mdev);
	return err;
}
720b411b363SPhilipp Reisner 
721b411b363SPhilipp Reisner /* inherently racy:
722b411b363SPhilipp Reisner  * if not protected by other means, return value may be out of date when
723b411b363SPhilipp Reisner  * leaving this function...
724b411b363SPhilipp Reisner  * we still need to lock it, since it is important that this returns
725b411b363SPhilipp Reisner  * bm_set == 0 precisely.
726b411b363SPhilipp Reisner  *
727b411b363SPhilipp Reisner  * maybe bm_set should be atomic_t ?
728b411b363SPhilipp Reisner  */
7290778286aSPhilipp Reisner unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
730b411b363SPhilipp Reisner {
731b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
732b411b363SPhilipp Reisner 	unsigned long s;
733b411b363SPhilipp Reisner 	unsigned long flags;
734b411b363SPhilipp Reisner 
735b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
736b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
737b411b363SPhilipp Reisner 
738b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
739b411b363SPhilipp Reisner 	s = b->bm_set;
740b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
741b411b363SPhilipp Reisner 
742b411b363SPhilipp Reisner 	return s;
743b411b363SPhilipp Reisner }
744b411b363SPhilipp Reisner 
745b411b363SPhilipp Reisner unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
746b411b363SPhilipp Reisner {
747b411b363SPhilipp Reisner 	unsigned long s;
748b411b363SPhilipp Reisner 	/* if I don't have a disk, I don't know about out-of-sync status */
749b411b363SPhilipp Reisner 	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
750b411b363SPhilipp Reisner 		return 0;
751b411b363SPhilipp Reisner 	s = _drbd_bm_total_weight(mdev);
752b411b363SPhilipp Reisner 	put_ldev(mdev);
753b411b363SPhilipp Reisner 	return s;
754b411b363SPhilipp Reisner }
755b411b363SPhilipp Reisner 
756b411b363SPhilipp Reisner size_t drbd_bm_words(struct drbd_conf *mdev)
757b411b363SPhilipp Reisner {
758b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
759b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
760b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
761b411b363SPhilipp Reisner 
762b411b363SPhilipp Reisner 	return b->bm_words;
763b411b363SPhilipp Reisner }
764b411b363SPhilipp Reisner 
765b411b363SPhilipp Reisner unsigned long drbd_bm_bits(struct drbd_conf *mdev)
766b411b363SPhilipp Reisner {
767b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
768b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
769b411b363SPhilipp Reisner 
770b411b363SPhilipp Reisner 	return b->bm_bits;
771b411b363SPhilipp Reisner }
772b411b363SPhilipp Reisner 
/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		/* chunk: at most up to the next page boundary */
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			/* OR-merge the word and account the change of its
			 * population count in bm_set */
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		/* merged page must eventually hit disk again */
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}
821b411b363SPhilipp Reisner 
/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	/* NOTE(review): number is size_t (unsigned), so "number <= 0"
	 * only catches number == 0 -- confirm that is the intent */
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		/* out-of-range request: complain, copy nothing */
		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			/* chunk: at most up to the next page boundary */
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}
858b411b363SPhilipp Reisner 
859b411b363SPhilipp Reisner /* set all bits in the bitmap */
860b411b363SPhilipp Reisner void drbd_bm_set_all(struct drbd_conf *mdev)
861b411b363SPhilipp Reisner {
862b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
863b411b363SPhilipp Reisner 	ERR_IF(!b) return;
864b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return;
865b411b363SPhilipp Reisner 
866b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
867b411b363SPhilipp Reisner 	bm_memset(b, 0, 0xff, b->bm_words);
868b411b363SPhilipp Reisner 	(void)bm_clear_surplus(b);
869b411b363SPhilipp Reisner 	b->bm_set = b->bm_bits;
870b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
871b411b363SPhilipp Reisner }
872b411b363SPhilipp Reisner 
873b411b363SPhilipp Reisner /* clear all bits in the bitmap */
874b411b363SPhilipp Reisner void drbd_bm_clear_all(struct drbd_conf *mdev)
875b411b363SPhilipp Reisner {
876b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
877b411b363SPhilipp Reisner 	ERR_IF(!b) return;
878b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return;
879b411b363SPhilipp Reisner 
880b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
881b411b363SPhilipp Reisner 	bm_memset(b, 0, 0, b->bm_words);
882b411b363SPhilipp Reisner 	b->bm_set = 0;
883b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
884b411b363SPhilipp Reisner }
885b411b363SPhilipp Reisner 
/* per-bm_rw() context, shared by all bios of one bitmap read/write run;
 * refcounted so it outlives both the submitter and the last completion */
struct bm_aio_ctx {
	struct drbd_conf *mdev;
	atomic_t in_flight;	/* bios not yet completed, +1 held by the submitter */
	unsigned int done;	/* set to 1 once in_flight drops to 0; wait condition */
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_WRITE_ALL_PAGES	2
	int error;		/* last non-zero bio completion error, if any */
	struct kref kref;	/* initialized to 2: submitter + "all IO done" event */
};
89619f843aaSLars Ellenberg 
/* kref release function: return the ldev reference taken in bm_rw()
 * and free the context */
static void bm_aio_ctx_destroy(struct kref *kref)
{
	struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);

	put_ldev(ctx->mdev);
	kfree(ctx);
}
904d1f3779bSPhilipp Reisner 
/* bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
	struct bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	/* the page "knows" which bitmap page index it holds
	 * (stored via bm_store_page_idx for copies) */
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
	int uptodate = bio_flagged(bio, BIO_UPTODATE);


	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	/* when writing in place (not from a copy), the page should not have
	 * been redirtied while its IO was in flight */
	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);

	if (error) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
					error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(mdev, idx);

	/* temporary copy page goes back to the mempool */
	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);

	bio_put(bio);

	if (atomic_dec_and_test(&ctx->in_flight)) {
		/* last completion: wake the submitter waiting in bm_rw(),
		 * then drop the completion-side kref */
		ctx->done = 1;
		wake_up(&mdev->misc_wait);
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	}
}
954b411b363SPhilipp Reisner 
/* submit one bitmap page as async IO to the meta data area;
 * completion is handled in bm_async_io_complete() */
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	struct page *page;
	unsigned int len;

	/* on-disk sector of this bitmap page within the meta data area */
	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(mdev, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		/* write out a stable snapshot, so the live bitmap page may
		 * keep changing while this IO is in flight */
		void *src, *dest;
		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
		dest = kmap_atomic(page);
		src = kmap_atomic(b->bm_pages[page_nr]);
		memcpy(dest, src, PAGE_SIZE);
		kunmap_atomic(src);
		kunmap_atomic(dest);
		/* remember which bitmap page this copy belongs to,
		 * for bm_page_to_idx() in the completion handler */
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];

	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api.  Do we want to assert that? */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		/* fault injection: complete with -EIO without touching disk */
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &mdev->rs_sect_ev);
	}
}
1009b411b363SPhilipp Reisner 
/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 * Returns 0 on success, -ENOMEM/-ENODEV/-EIO on failure.
 */
static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct bm_aio_ctx *ctx;
	struct drbd_bitmap *b = mdev->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */

	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	*ctx = (struct bm_aio_ctx) {
		.mdev = mdev,
		/* starts at 1: the submitter's own reference, dropped after
		 * the submission loop, so completions cannot signal "done"
		 * before all pages have been submitted */
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = flags,
		.error = 0,
		/* one ref for this function, one for the completion path */
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
		kfree(ctx);
		return -ENODEV;
	}

	/* without BM_AIO_COPY_PAGES, the bitmap must be locked against
	 * modifications for the duration of the IO */
	if (!ctx->flags)
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++) {
		/* ignore completely unchanged pages */
		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
			break;
		if (rw & WRITE) {
			if (!(flags & BM_WRITE_ALL_PAGES) &&
			    bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
		}
		atomic_inc(&ctx->in_flight);
		bm_page_io_async(ctx, i, rw);
		++count;
		cond_resched();
	}

	/*
	 * We initialize ctx->in_flight to one to make sure bm_async_io_complete
	 * will not set ctx->done early, and decrement / test it here.  If there
	 * are still some bios in flight, we need to wait for them here.
	 * If all IO is done already (or nothing had been submitted), there is
	 * no need to wait.  Still, we need to put the kref associated with the
	 * "in_flight reached zero, all done" event.
	 */
	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
	else
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);

	dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
			rw == WRITE ? "WRITE" : "READ",
			count, jiffies - now);

	if (ctx->error) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
		err = -EIO; /* ctx->error ? */
	}

	/* non-zero here means we gave up waiting because the disk failed */
	if (atomic_read(&ctx->in_flight))
		err = -EIO; /* Disk failed during IO... */

	now = jiffies;
	if (rw == WRITE) {
		/* make sure the on-disk bitmap reached stable storage */
		drbd_md_flush(mdev);
	} else /* rw == READ */ {
		/* after a read, bm_set must reflect the on-disk contents */
		b->bm_set = bm_count_bits(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
	     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	return err;
}
1124b411b363SPhilipp Reisner 
/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 *
 * Also recounts the set bits: bm_rw() refreshes bm_set after a READ.
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, READ, 0, 0);
}
1133b411b363SPhilipp Reisner 
/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will only write pages that have changed since last IO
 * (pages still flagged "unchanged" are skipped in bm_rw()).
 */
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, 0, 0);
}
1144b411b363SPhilipp Reisner 
1145b411b363SPhilipp Reisner /**
1146*d1aa4d04SPhilipp Reisner  * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
1147*d1aa4d04SPhilipp Reisner  * @mdev:	DRBD device.
1148*d1aa4d04SPhilipp Reisner  *
1149*d1aa4d04SPhilipp Reisner  * Will write all pages.
1150*d1aa4d04SPhilipp Reisner  */
1151*d1aa4d04SPhilipp Reisner int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
1152*d1aa4d04SPhilipp Reisner {
1153*d1aa4d04SPhilipp Reisner 	return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
1154*d1aa4d04SPhilipp Reisner }
1155*d1aa4d04SPhilipp Reisner 
1156*d1aa4d04SPhilipp Reisner /**
115719f843aaSLars Ellenberg  * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
1158b411b363SPhilipp Reisner  * @mdev:	DRBD device.
115919f843aaSLars Ellenberg  * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
1160b411b363SPhilipp Reisner  */
116119f843aaSLars Ellenberg int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
1162b411b363SPhilipp Reisner {
11630e8488adSLars Ellenberg 	return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
11640e8488adSLars Ellenberg }
11650e8488adSLars Ellenberg 
11660e8488adSLars Ellenberg /**
11670e8488adSLars Ellenberg  * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
11680e8488adSLars Ellenberg  * @mdev:	DRBD device.
11690e8488adSLars Ellenberg  *
11700e8488adSLars Ellenberg  * Will only write pages that have changed since last IO.
11710e8488adSLars Ellenberg  * In contrast to drbd_bm_write(), this will copy the bitmap pages
11720e8488adSLars Ellenberg  * to temporary writeout pages. It is intended to trigger a full write-out
11730e8488adSLars Ellenberg  * while still allowing the bitmap to change, for example if a resync or online
11740e8488adSLars Ellenberg  * verify is aborted due to a failed peer disk, while local IO continues, or
11750e8488adSLars Ellenberg  * pending resync acks are still being processed.
11760e8488adSLars Ellenberg  */
11770e8488adSLars Ellenberg int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
11780e8488adSLars Ellenberg {
11790e8488adSLars Ellenberg 	return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
1180b411b363SPhilipp Reisner }
118119f843aaSLars Ellenberg 
118219f843aaSLars Ellenberg 
118319f843aaSLars Ellenberg /**
118419f843aaSLars Ellenberg  * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap
118519f843aaSLars Ellenberg  * @mdev:	DRBD device.
118619f843aaSLars Ellenberg  * @idx:	bitmap page index
118719f843aaSLars Ellenberg  *
11884b0715f0SLars Ellenberg  * We don't want to special case on logical_block_size of the backend device,
11894b0715f0SLars Ellenberg  * so we submit PAGE_SIZE aligned pieces.
119019f843aaSLars Ellenberg  * Note that on "most" systems, PAGE_SIZE is 4k.
11914b0715f0SLars Ellenberg  *
11924b0715f0SLars Ellenberg  * In case this becomes an issue on systems with larger PAGE_SIZE,
11934b0715f0SLars Ellenberg  * we may want to change this again to write 4k aligned 4k pieces.
119419f843aaSLars Ellenberg  */
119519f843aaSLars Ellenberg int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
119619f843aaSLars Ellenberg {
1197d1f3779bSPhilipp Reisner 	struct bm_aio_ctx *ctx;
1198d1f3779bSPhilipp Reisner 	int err;
119919f843aaSLars Ellenberg 
120019f843aaSLars Ellenberg 	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
12017648cdfeSLars Ellenberg 		dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
120219f843aaSLars Ellenberg 		return 0;
120319f843aaSLars Ellenberg 	}
120419f843aaSLars Ellenberg 
120522f46ce2SLars Ellenberg 	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
1206d1f3779bSPhilipp Reisner 	if (!ctx)
1207d1f3779bSPhilipp Reisner 		return -ENOMEM;
120819f843aaSLars Ellenberg 
1209d1f3779bSPhilipp Reisner 	*ctx = (struct bm_aio_ctx) {
1210d1f3779bSPhilipp Reisner 		.mdev = mdev,
1211d1f3779bSPhilipp Reisner 		.in_flight = ATOMIC_INIT(1),
12129e58c4daSPhilipp Reisner 		.done = 0,
1213d1f3779bSPhilipp Reisner 		.flags = BM_AIO_COPY_PAGES,
1214d1f3779bSPhilipp Reisner 		.error = 0,
1215d1f3779bSPhilipp Reisner 		.kref = { ATOMIC_INIT(2) },
1216d1f3779bSPhilipp Reisner 	};
1217d1f3779bSPhilipp Reisner 
12189e58c4daSPhilipp Reisner 	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
12199e58c4daSPhilipp Reisner 		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
12209e58c4daSPhilipp Reisner 		kfree(ctx);
12219e58c4daSPhilipp Reisner 		return -ENODEV;
12229e58c4daSPhilipp Reisner 	}
12239e58c4daSPhilipp Reisner 
1224d1f3779bSPhilipp Reisner 	bm_page_io_async(ctx, idx, WRITE_SYNC);
12257caacb69SPhilipp Reisner 	wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
1226d1f3779bSPhilipp Reisner 
1227d1f3779bSPhilipp Reisner 	if (ctx->error)
1228383606e0SLars Ellenberg 		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
122919f843aaSLars Ellenberg 		/* that should force detach, so the in memory bitmap will be
123019f843aaSLars Ellenberg 		 * gone in a moment as well. */
123119f843aaSLars Ellenberg 
1232b411b363SPhilipp Reisner 	mdev->bm_writ_cnt++;
12339e58c4daSPhilipp Reisner 	err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
1234d1f3779bSPhilipp Reisner 	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
1235d1f3779bSPhilipp Reisner 	return err;
1236b411b363SPhilipp Reisner }
1237b411b363SPhilipp Reisner 
1238b411b363SPhilipp Reisner /* NOTE
1239b411b363SPhilipp Reisner  * find_first_bit returns int, we return unsigned long.
12404b0715f0SLars Ellenberg  * For this to work on 32bit arch with bitnumbers > (1<<32),
12414b0715f0SLars Ellenberg  * we'd need to return u64, and get a whole lot of other places
12424b0715f0SLars Ellenberg  * fixed where we still use unsigned long.
1243b411b363SPhilipp Reisner  *
1244b411b363SPhilipp Reisner  * this returns a bit number, NOT a sector!
1245b411b363SPhilipp Reisner  */
1246b411b363SPhilipp Reisner static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
1247cfd8005cSCong Wang 	const int find_zero_bit)
1248b411b363SPhilipp Reisner {
1249b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1250b411b363SPhilipp Reisner 	unsigned long *p_addr;
12514b0715f0SLars Ellenberg 	unsigned long bit_offset;
12524b0715f0SLars Ellenberg 	unsigned i;
12534b0715f0SLars Ellenberg 
1254b411b363SPhilipp Reisner 
1255b411b363SPhilipp Reisner 	if (bm_fo > b->bm_bits) {
1256b411b363SPhilipp Reisner 		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
12574b0715f0SLars Ellenberg 		bm_fo = DRBD_END_OF_BITMAP;
1258b411b363SPhilipp Reisner 	} else {
1259b411b363SPhilipp Reisner 		while (bm_fo < b->bm_bits) {
126019f843aaSLars Ellenberg 			/* bit offset of the first bit in the page */
12614b0715f0SLars Ellenberg 			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
1262cfd8005cSCong Wang 			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));
1263b411b363SPhilipp Reisner 
1264b411b363SPhilipp Reisner 			if (find_zero_bit)
12657e599e6eSLinus Torvalds 				i = find_next_zero_bit_le(p_addr,
12664b0715f0SLars Ellenberg 						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
1267b411b363SPhilipp Reisner 			else
12687e599e6eSLinus Torvalds 				i = find_next_bit_le(p_addr,
12694b0715f0SLars Ellenberg 						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
1270b411b363SPhilipp Reisner 
1271cfd8005cSCong Wang 			__bm_unmap(p_addr);
1272b411b363SPhilipp Reisner 			if (i < PAGE_SIZE*8) {
12734b0715f0SLars Ellenberg 				bm_fo = bit_offset + i;
12744b0715f0SLars Ellenberg 				if (bm_fo >= b->bm_bits)
1275b411b363SPhilipp Reisner 					break;
1276b411b363SPhilipp Reisner 				goto found;
1277b411b363SPhilipp Reisner 			}
1278b411b363SPhilipp Reisner 			bm_fo = bit_offset + PAGE_SIZE*8;
1279b411b363SPhilipp Reisner 		}
12804b0715f0SLars Ellenberg 		bm_fo = DRBD_END_OF_BITMAP;
1281b411b363SPhilipp Reisner 	}
1282b411b363SPhilipp Reisner  found:
12834b0715f0SLars Ellenberg 	return bm_fo;
1284b411b363SPhilipp Reisner }
1285b411b363SPhilipp Reisner 
1286b411b363SPhilipp Reisner static unsigned long bm_find_next(struct drbd_conf *mdev,
1287b411b363SPhilipp Reisner 	unsigned long bm_fo, const int find_zero_bit)
1288b411b363SPhilipp Reisner {
1289b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
12904b0715f0SLars Ellenberg 	unsigned long i = DRBD_END_OF_BITMAP;
1291b411b363SPhilipp Reisner 
1292b411b363SPhilipp Reisner 	ERR_IF(!b) return i;
1293b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return i;
1294b411b363SPhilipp Reisner 
1295b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
129620ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1297b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1298b411b363SPhilipp Reisner 
1299cfd8005cSCong Wang 	i = __bm_find_next(mdev, bm_fo, find_zero_bit);
1300b411b363SPhilipp Reisner 
1301b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
1302b411b363SPhilipp Reisner 	return i;
1303b411b363SPhilipp Reisner }
1304b411b363SPhilipp Reisner 
1305b411b363SPhilipp Reisner unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
1306b411b363SPhilipp Reisner {
1307b411b363SPhilipp Reisner 	return bm_find_next(mdev, bm_fo, 0);
1308b411b363SPhilipp Reisner }
1309b411b363SPhilipp Reisner 
1310b411b363SPhilipp Reisner #if 0
1311b411b363SPhilipp Reisner /* not yet needed for anything. */
1312b411b363SPhilipp Reisner unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
1313b411b363SPhilipp Reisner {
1314b411b363SPhilipp Reisner 	return bm_find_next(mdev, bm_fo, 1);
1315b411b363SPhilipp Reisner }
1316b411b363SPhilipp Reisner #endif
1317b411b363SPhilipp Reisner 
1318b411b363SPhilipp Reisner /* does not spin_lock_irqsave.
1319b411b363SPhilipp Reisner  * you must take drbd_bm_lock() first */
1320b411b363SPhilipp Reisner unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
1321b411b363SPhilipp Reisner {
132220ceb2b2SLars Ellenberg 	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
1323cfd8005cSCong Wang 	return __bm_find_next(mdev, bm_fo, 0);
1324b411b363SPhilipp Reisner }
1325b411b363SPhilipp Reisner 
1326b411b363SPhilipp Reisner unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
1327b411b363SPhilipp Reisner {
132820ceb2b2SLars Ellenberg 	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
1329cfd8005cSCong Wang 	return __bm_find_next(mdev, bm_fo, 1);
1330b411b363SPhilipp Reisner }
1331b411b363SPhilipp Reisner 
1332b411b363SPhilipp Reisner /* returns number of bits actually changed.
1333b411b363SPhilipp Reisner  * for val != 0, we change 0 -> 1, return code positive
1334b411b363SPhilipp Reisner  * for val == 0, we change 1 -> 0, return code negative
1335b411b363SPhilipp Reisner  * wants bitnr, not sector.
1336b411b363SPhilipp Reisner  * expected to be called for only a few bits (e - s about BITS_PER_LONG).
1337b411b363SPhilipp Reisner  * Must hold bitmap lock already. */
1338b4ee79daSPhilipp Reisner static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
1339829c6087SLars Ellenberg 	unsigned long e, int val)
1340b411b363SPhilipp Reisner {
1341b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1342b411b363SPhilipp Reisner 	unsigned long *p_addr = NULL;
1343b411b363SPhilipp Reisner 	unsigned long bitnr;
134419f843aaSLars Ellenberg 	unsigned int last_page_nr = -1U;
1345b411b363SPhilipp Reisner 	int c = 0;
134619f843aaSLars Ellenberg 	int changed_total = 0;
1347b411b363SPhilipp Reisner 
1348b411b363SPhilipp Reisner 	if (e >= b->bm_bits) {
1349b411b363SPhilipp Reisner 		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
1350b411b363SPhilipp Reisner 				s, e, b->bm_bits);
1351b411b363SPhilipp Reisner 		e = b->bm_bits ? b->bm_bits -1 : 0;
1352b411b363SPhilipp Reisner 	}
1353b411b363SPhilipp Reisner 	for (bitnr = s; bitnr <= e; bitnr++) {
135419f843aaSLars Ellenberg 		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
1355b411b363SPhilipp Reisner 		if (page_nr != last_page_nr) {
1356b411b363SPhilipp Reisner 			if (p_addr)
1357cfd8005cSCong Wang 				__bm_unmap(p_addr);
135819f843aaSLars Ellenberg 			if (c < 0)
135919f843aaSLars Ellenberg 				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
136019f843aaSLars Ellenberg 			else if (c > 0)
136119f843aaSLars Ellenberg 				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
136219f843aaSLars Ellenberg 			changed_total += c;
136319f843aaSLars Ellenberg 			c = 0;
1364cfd8005cSCong Wang 			p_addr = __bm_map_pidx(b, page_nr);
1365b411b363SPhilipp Reisner 			last_page_nr = page_nr;
1366b411b363SPhilipp Reisner 		}
1367b411b363SPhilipp Reisner 		if (val)
13687e599e6eSLinus Torvalds 			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
1369b411b363SPhilipp Reisner 		else
13707e599e6eSLinus Torvalds 			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
1371b411b363SPhilipp Reisner 	}
1372b411b363SPhilipp Reisner 	if (p_addr)
1373cfd8005cSCong Wang 		__bm_unmap(p_addr);
137419f843aaSLars Ellenberg 	if (c < 0)
137519f843aaSLars Ellenberg 		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
137619f843aaSLars Ellenberg 	else if (c > 0)
137719f843aaSLars Ellenberg 		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
137819f843aaSLars Ellenberg 	changed_total += c;
137919f843aaSLars Ellenberg 	b->bm_set += changed_total;
138019f843aaSLars Ellenberg 	return changed_total;
1381b411b363SPhilipp Reisner }
1382b411b363SPhilipp Reisner 
1383b411b363SPhilipp Reisner /* returns number of bits actually changed.
1384b411b363SPhilipp Reisner  * for val != 0, we change 0 -> 1, return code positive
1385b411b363SPhilipp Reisner  * for val == 0, we change 1 -> 0, return code negative
1386b411b363SPhilipp Reisner  * wants bitnr, not sector */
1387b4ee79daSPhilipp Reisner static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
1388b411b363SPhilipp Reisner 	const unsigned long e, int val)
1389b411b363SPhilipp Reisner {
1390b411b363SPhilipp Reisner 	unsigned long flags;
1391b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1392b411b363SPhilipp Reisner 	int c = 0;
1393b411b363SPhilipp Reisner 
1394b411b363SPhilipp Reisner 	ERR_IF(!b) return 1;
1395b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
1396b411b363SPhilipp Reisner 
1397b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
139820ceb2b2SLars Ellenberg 	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
1399b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1400b411b363SPhilipp Reisner 
1401829c6087SLars Ellenberg 	c = __bm_change_bits_to(mdev, s, e, val);
1402b411b363SPhilipp Reisner 
1403b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1404b411b363SPhilipp Reisner 	return c;
1405b411b363SPhilipp Reisner }
1406b411b363SPhilipp Reisner 
1407b411b363SPhilipp Reisner /* returns number of bits changed 0 -> 1 */
1408b411b363SPhilipp Reisner int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1409b411b363SPhilipp Reisner {
1410b411b363SPhilipp Reisner 	return bm_change_bits_to(mdev, s, e, 1);
1411b411b363SPhilipp Reisner }
1412b411b363SPhilipp Reisner 
1413b411b363SPhilipp Reisner /* returns number of bits changed 1 -> 0 */
1414b411b363SPhilipp Reisner int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1415b411b363SPhilipp Reisner {
1416b411b363SPhilipp Reisner 	return -bm_change_bits_to(mdev, s, e, 0);
1417b411b363SPhilipp Reisner }
1418b411b363SPhilipp Reisner 
1419b411b363SPhilipp Reisner /* sets all bits in full words,
1420b411b363SPhilipp Reisner  * from first_word up to, but not including, last_word */
1421b411b363SPhilipp Reisner static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
1422b411b363SPhilipp Reisner 		int page_nr, int first_word, int last_word)
1423b411b363SPhilipp Reisner {
1424b411b363SPhilipp Reisner 	int i;
1425b411b363SPhilipp Reisner 	int bits;
1426cfd8005cSCong Wang 	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
1427b411b363SPhilipp Reisner 	for (i = first_word; i < last_word; i++) {
1428b411b363SPhilipp Reisner 		bits = hweight_long(paddr[i]);
1429b411b363SPhilipp Reisner 		paddr[i] = ~0UL;
1430b411b363SPhilipp Reisner 		b->bm_set += BITS_PER_LONG - bits;
1431b411b363SPhilipp Reisner 	}
1432cfd8005cSCong Wang 	kunmap_atomic(paddr);
1433b411b363SPhilipp Reisner }
1434b411b363SPhilipp Reisner 
1435829c6087SLars Ellenberg /* Same thing as drbd_bm_set_bits,
1436829c6087SLars Ellenberg  * but more efficient for a large bit range.
1437b411b363SPhilipp Reisner  * You must first drbd_bm_lock().
1438b411b363SPhilipp Reisner  * Can be called to set the whole bitmap in one go.
1439b411b363SPhilipp Reisner  * Sets bits from s to e _inclusive_. */
1440b411b363SPhilipp Reisner void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1441b411b363SPhilipp Reisner {
1442b411b363SPhilipp Reisner 	/* First set_bit from the first bit (s)
1443b411b363SPhilipp Reisner 	 * up to the next long boundary (sl),
1444b411b363SPhilipp Reisner 	 * then assign full words up to the last long boundary (el),
1445b411b363SPhilipp Reisner 	 * then set_bit up to and including the last bit (e).
1446b411b363SPhilipp Reisner 	 *
1447b411b363SPhilipp Reisner 	 * Do not use memset, because we must account for changes,
1448b411b363SPhilipp Reisner 	 * so we need to loop over the words with hweight() anyways.
1449b411b363SPhilipp Reisner 	 */
1450829c6087SLars Ellenberg 	struct drbd_bitmap *b = mdev->bitmap;
1451b411b363SPhilipp Reisner 	unsigned long sl = ALIGN(s,BITS_PER_LONG);
1452b411b363SPhilipp Reisner 	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
1453b411b363SPhilipp Reisner 	int first_page;
1454b411b363SPhilipp Reisner 	int last_page;
1455b411b363SPhilipp Reisner 	int page_nr;
1456b411b363SPhilipp Reisner 	int first_word;
1457b411b363SPhilipp Reisner 	int last_word;
1458b411b363SPhilipp Reisner 
1459b411b363SPhilipp Reisner 	if (e - s <= 3*BITS_PER_LONG) {
1460b411b363SPhilipp Reisner 		/* don't bother; el and sl may even be wrong. */
1461829c6087SLars Ellenberg 		spin_lock_irq(&b->bm_lock);
1462829c6087SLars Ellenberg 		__bm_change_bits_to(mdev, s, e, 1);
1463829c6087SLars Ellenberg 		spin_unlock_irq(&b->bm_lock);
1464b411b363SPhilipp Reisner 		return;
1465b411b363SPhilipp Reisner 	}
1466b411b363SPhilipp Reisner 
1467b411b363SPhilipp Reisner 	/* difference is large enough that we can trust sl and el */
1468b411b363SPhilipp Reisner 
1469829c6087SLars Ellenberg 	spin_lock_irq(&b->bm_lock);
1470829c6087SLars Ellenberg 
1471b411b363SPhilipp Reisner 	/* bits filling the current long */
1472b411b363SPhilipp Reisner 	if (sl)
1473829c6087SLars Ellenberg 		__bm_change_bits_to(mdev, s, sl-1, 1);
1474b411b363SPhilipp Reisner 
1475b411b363SPhilipp Reisner 	first_page = sl >> (3 + PAGE_SHIFT);
1476b411b363SPhilipp Reisner 	last_page = el >> (3 + PAGE_SHIFT);
1477b411b363SPhilipp Reisner 
1478b411b363SPhilipp Reisner 	/* MLPP: modulo longs per page */
1479b411b363SPhilipp Reisner 	/* LWPP: long words per page */
1480b411b363SPhilipp Reisner 	first_word = MLPP(sl >> LN2_BPL);
1481b411b363SPhilipp Reisner 	last_word = LWPP;
1482b411b363SPhilipp Reisner 
1483b411b363SPhilipp Reisner 	/* first and full pages, unless first page == last page */
1484b411b363SPhilipp Reisner 	for (page_nr = first_page; page_nr < last_page; page_nr++) {
1485b411b363SPhilipp Reisner 		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
14868ccee20eSLars Ellenberg 		spin_unlock_irq(&b->bm_lock);
14878ccee20eSLars Ellenberg 		cond_resched();
1488b411b363SPhilipp Reisner 		first_word = 0;
14898ccee20eSLars Ellenberg 		spin_lock_irq(&b->bm_lock);
1490b411b363SPhilipp Reisner 	}
1491b411b363SPhilipp Reisner 	/* last page (respectively only page, for first page == last page) */
1492b411b363SPhilipp Reisner 	last_word = MLPP(el >> LN2_BPL);
14934eccc579SLars Ellenberg 
14944eccc579SLars Ellenberg 	/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
14954eccc579SLars Ellenberg 	 * ==> e = 32767, el = 32768, last_page = 2,
14964eccc579SLars Ellenberg 	 * and now last_word = 0.
14974eccc579SLars Ellenberg 	 * We do not want to touch last_page in this case,
14984eccc579SLars Ellenberg 	 * as we did not allocate it, it is not present in bitmap->bm_pages.
14994eccc579SLars Ellenberg 	 */
15004eccc579SLars Ellenberg 	if (last_word)
1501b411b363SPhilipp Reisner 		bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
1502b411b363SPhilipp Reisner 
1503b411b363SPhilipp Reisner 	/* possibly trailing bits.
1504b411b363SPhilipp Reisner 	 * example: (e & 63) == 63, el will be e+1.
1505b411b363SPhilipp Reisner 	 * if that even was the very last bit,
1506b411b363SPhilipp Reisner 	 * it would trigger an assert in __bm_change_bits_to()
1507b411b363SPhilipp Reisner 	 */
1508b411b363SPhilipp Reisner 	if (el <= e)
1509829c6087SLars Ellenberg 		__bm_change_bits_to(mdev, el, e, 1);
1510829c6087SLars Ellenberg 	spin_unlock_irq(&b->bm_lock);
1511b411b363SPhilipp Reisner }
1512b411b363SPhilipp Reisner 
1513b411b363SPhilipp Reisner /* returns bit state
1514b411b363SPhilipp Reisner  * wants bitnr, NOT sector.
1515b411b363SPhilipp Reisner  * inherently racy... area needs to be locked by means of {al,rs}_lru
1516b411b363SPhilipp Reisner  *  1 ... bit set
1517b411b363SPhilipp Reisner  *  0 ... bit not set
1518b411b363SPhilipp Reisner  * -1 ... first out of bounds access, stop testing for bits!
1519b411b363SPhilipp Reisner  */
1520b411b363SPhilipp Reisner int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
1521b411b363SPhilipp Reisner {
1522b411b363SPhilipp Reisner 	unsigned long flags;
1523b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1524b411b363SPhilipp Reisner 	unsigned long *p_addr;
1525b411b363SPhilipp Reisner 	int i;
1526b411b363SPhilipp Reisner 
1527b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
1528b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
1529b411b363SPhilipp Reisner 
1530b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
153120ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1532b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1533b411b363SPhilipp Reisner 	if (bitnr < b->bm_bits) {
153419f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
15357e599e6eSLinus Torvalds 		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
1536b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1537b411b363SPhilipp Reisner 	} else if (bitnr == b->bm_bits) {
1538b411b363SPhilipp Reisner 		i = -1;
1539b411b363SPhilipp Reisner 	} else { /* (bitnr > b->bm_bits) */
1540b411b363SPhilipp Reisner 		dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
1541b411b363SPhilipp Reisner 		i = 0;
1542b411b363SPhilipp Reisner 	}
1543b411b363SPhilipp Reisner 
1544b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1545b411b363SPhilipp Reisner 	return i;
1546b411b363SPhilipp Reisner }
1547b411b363SPhilipp Reisner 
1548b411b363SPhilipp Reisner /* returns number of bits set in the range [s, e] */
1549b411b363SPhilipp Reisner int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1550b411b363SPhilipp Reisner {
1551b411b363SPhilipp Reisner 	unsigned long flags;
1552b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
155319f843aaSLars Ellenberg 	unsigned long *p_addr = NULL;
1554b411b363SPhilipp Reisner 	unsigned long bitnr;
155519f843aaSLars Ellenberg 	unsigned int page_nr = -1U;
1556b411b363SPhilipp Reisner 	int c = 0;
1557b411b363SPhilipp Reisner 
1558b411b363SPhilipp Reisner 	/* If this is called without a bitmap, that is a bug.  But just to be
1559b411b363SPhilipp Reisner 	 * robust in case we screwed up elsewhere, in that case pretend there
1560b411b363SPhilipp Reisner 	 * was one dirty bit in the requested area, so we won't try to do a
1561b411b363SPhilipp Reisner 	 * local read there (no bitmap probably implies no disk) */
1562b411b363SPhilipp Reisner 	ERR_IF(!b) return 1;
1563b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 1;
1564b411b363SPhilipp Reisner 
1565b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
156620ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1567b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1568b411b363SPhilipp Reisner 	for (bitnr = s; bitnr <= e; bitnr++) {
156919f843aaSLars Ellenberg 		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
157019f843aaSLars Ellenberg 		if (page_nr != idx) {
157119f843aaSLars Ellenberg 			page_nr = idx;
1572b411b363SPhilipp Reisner 			if (p_addr)
1573b411b363SPhilipp Reisner 				bm_unmap(p_addr);
157419f843aaSLars Ellenberg 			p_addr = bm_map_pidx(b, idx);
1575b411b363SPhilipp Reisner 		}
1576b411b363SPhilipp Reisner 		ERR_IF (bitnr >= b->bm_bits) {
1577b411b363SPhilipp Reisner 			dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
1578b411b363SPhilipp Reisner 		} else {
15797e599e6eSLinus Torvalds 			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
1580b411b363SPhilipp Reisner 		}
1581b411b363SPhilipp Reisner 	}
1582b411b363SPhilipp Reisner 	if (p_addr)
1583b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1584b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1585b411b363SPhilipp Reisner 	return c;
1586b411b363SPhilipp Reisner }
1587b411b363SPhilipp Reisner 
1588b411b363SPhilipp Reisner 
1589b411b363SPhilipp Reisner /* inherently racy...
1590b411b363SPhilipp Reisner  * return value may be already out-of-date when this function returns.
1591b411b363SPhilipp Reisner  * but the general usage is that this is only use during a cstate when bits are
1592b411b363SPhilipp Reisner  * only cleared, not set, and typically only care for the case when the return
1593b411b363SPhilipp Reisner  * value is zero, or we already "locked" this "bitmap extent" by other means.
1594b411b363SPhilipp Reisner  *
1595b411b363SPhilipp Reisner  * enr is bm-extent number, since we chose to name one sector (512 bytes)
1596b411b363SPhilipp Reisner  * worth of the bitmap a "bitmap extent".
1597b411b363SPhilipp Reisner  *
1598b411b363SPhilipp Reisner  * TODO
1599b411b363SPhilipp Reisner  * I think since we use it like a reference count, we should use the real
1600b411b363SPhilipp Reisner  * reference count of some bitmap extent element from some lru instead...
1601b411b363SPhilipp Reisner  *
1602b411b363SPhilipp Reisner  */
1603b411b363SPhilipp Reisner int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
1604b411b363SPhilipp Reisner {
1605b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1606b411b363SPhilipp Reisner 	int count, s, e;
1607b411b363SPhilipp Reisner 	unsigned long flags;
1608b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
1609b411b363SPhilipp Reisner 
1610b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
1611b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
1612b411b363SPhilipp Reisner 
1613b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
161420ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1615b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1616b411b363SPhilipp Reisner 
1617b411b363SPhilipp Reisner 	s = S2W(enr);
1618b411b363SPhilipp Reisner 	e = min((size_t)S2W(enr+1), b->bm_words);
1619b411b363SPhilipp Reisner 	count = 0;
1620b411b363SPhilipp Reisner 	if (s < b->bm_words) {
1621b411b363SPhilipp Reisner 		int n = e-s;
162219f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
1623b411b363SPhilipp Reisner 		bm = p_addr + MLPP(s);
1624b411b363SPhilipp Reisner 		while (n--)
1625b411b363SPhilipp Reisner 			count += hweight_long(*bm++);
1626b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1627b411b363SPhilipp Reisner 	} else {
1628b411b363SPhilipp Reisner 		dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
1629b411b363SPhilipp Reisner 	}
1630b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1631b411b363SPhilipp Reisner 	return count;
1632b411b363SPhilipp Reisner }
1633b411b363SPhilipp Reisner 
16344b0715f0SLars Ellenberg /* Set all bits covered by the AL-extent al_enr.
16354b0715f0SLars Ellenberg  * Returns number of bits changed. */
1636b411b363SPhilipp Reisner unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
1637b411b363SPhilipp Reisner {
1638b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1639b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
1640b411b363SPhilipp Reisner 	unsigned long weight;
16414b0715f0SLars Ellenberg 	unsigned long s, e;
16424b0715f0SLars Ellenberg 	int count, i, do_now;
1643b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
1644b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
1645b411b363SPhilipp Reisner 
1646b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
164720ceb2b2SLars Ellenberg 	if (BM_DONT_SET & b->bm_flags)
1648b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1649b411b363SPhilipp Reisner 	weight = b->bm_set;
1650b411b363SPhilipp Reisner 
1651b411b363SPhilipp Reisner 	s = al_enr * BM_WORDS_PER_AL_EXT;
1652b411b363SPhilipp Reisner 	e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
1653b411b363SPhilipp Reisner 	/* assert that s and e are on the same page */
1654b411b363SPhilipp Reisner 	D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
1655b411b363SPhilipp Reisner 	      ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
1656b411b363SPhilipp Reisner 	count = 0;
1657b411b363SPhilipp Reisner 	if (s < b->bm_words) {
1658b411b363SPhilipp Reisner 		i = do_now = e-s;
165919f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
1660b411b363SPhilipp Reisner 		bm = p_addr + MLPP(s);
1661b411b363SPhilipp Reisner 		while (i--) {
1662b411b363SPhilipp Reisner 			count += hweight_long(*bm);
1663b411b363SPhilipp Reisner 			*bm = -1UL;
1664b411b363SPhilipp Reisner 			bm++;
1665b411b363SPhilipp Reisner 		}
1666b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1667b411b363SPhilipp Reisner 		b->bm_set += do_now*BITS_PER_LONG - count;
1668b411b363SPhilipp Reisner 		if (e == b->bm_words)
1669b411b363SPhilipp Reisner 			b->bm_set -= bm_clear_surplus(b);
1670b411b363SPhilipp Reisner 	} else {
16714b0715f0SLars Ellenberg 		dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
1672b411b363SPhilipp Reisner 	}
1673b411b363SPhilipp Reisner 	weight = b->bm_set - weight;
1674b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
1675b411b363SPhilipp Reisner 	return weight;
1676b411b363SPhilipp Reisner }
1677