xref: /openbmc/linux/drivers/block/drbd/drbd_bitmap.c (revision 5fb3bc4ddcdda8d2a6b2185075d140b9009f99b5)
1b411b363SPhilipp Reisner /*
2b411b363SPhilipp Reisner    drbd_bitmap.c
3b411b363SPhilipp Reisner 
4b411b363SPhilipp Reisner    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5b411b363SPhilipp Reisner 
6b411b363SPhilipp Reisner    Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
7b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9b411b363SPhilipp Reisner 
10b411b363SPhilipp Reisner    drbd is free software; you can redistribute it and/or modify
11b411b363SPhilipp Reisner    it under the terms of the GNU General Public License as published by
12b411b363SPhilipp Reisner    the Free Software Foundation; either version 2, or (at your option)
13b411b363SPhilipp Reisner    any later version.
14b411b363SPhilipp Reisner 
15b411b363SPhilipp Reisner    drbd is distributed in the hope that it will be useful,
16b411b363SPhilipp Reisner    but WITHOUT ANY WARRANTY; without even the implied warranty of
17b411b363SPhilipp Reisner    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18b411b363SPhilipp Reisner    GNU General Public License for more details.
19b411b363SPhilipp Reisner 
20b411b363SPhilipp Reisner    You should have received a copy of the GNU General Public License
21b411b363SPhilipp Reisner    along with drbd; see the file COPYING.  If not, write to
22b411b363SPhilipp Reisner    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23b411b363SPhilipp Reisner  */
24b411b363SPhilipp Reisner 
25f88c5d90SLars Ellenberg #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
26f88c5d90SLars Ellenberg 
27*5fb3bc4dSLars Ellenberg #include <linux/bitmap.h>
28b411b363SPhilipp Reisner #include <linux/vmalloc.h>
29b411b363SPhilipp Reisner #include <linux/string.h>
30b411b363SPhilipp Reisner #include <linux/drbd.h>
315a0e3ad6STejun Heo #include <linux/slab.h>
32dbcbdc43SChristoph Hellwig #include <linux/highmem.h>
33f0ff1357SStephen Rothwell 
34b411b363SPhilipp Reisner #include "drbd_int.h"
35b411b363SPhilipp Reisner 
3695a0f10cSLars Ellenberg 
37b411b363SPhilipp Reisner /* OPAQUE outside this file!
38b411b363SPhilipp Reisner  * interface defined in drbd_int.h
39b411b363SPhilipp Reisner 
40b411b363SPhilipp Reisner  * convention:
41b411b363SPhilipp Reisner  * function name drbd_bm_... => used elsewhere, "public".
42b411b363SPhilipp Reisner  * function name      bm_... => internal to implementation, "private".
434b0715f0SLars Ellenberg  */
44b411b363SPhilipp Reisner 
454b0715f0SLars Ellenberg 
464b0715f0SLars Ellenberg /*
474b0715f0SLars Ellenberg  * LIMITATIONS:
484b0715f0SLars Ellenberg  * We want to support >= peta byte of backend storage, while for now still using
494b0715f0SLars Ellenberg  * a granularity of one bit per 4KiB of storage.
504b0715f0SLars Ellenberg  * 1 << 50		bytes backend storage (1 PiB)
514b0715f0SLars Ellenberg  * 1 << (50 - 12)	bits needed
524b0715f0SLars Ellenberg  *	38 --> we need u64 to index and count bits
534b0715f0SLars Ellenberg  * 1 << (38 - 3)	bitmap bytes needed
544b0715f0SLars Ellenberg  *	35 --> we still need u64 to index and count bytes
554b0715f0SLars Ellenberg  *			(that's 32 GiB of bitmap for 1 PiB storage)
564b0715f0SLars Ellenberg  * 1 << (35 - 2)	32bit longs needed
574b0715f0SLars Ellenberg  *	33 --> we'd even need u64 to index and count 32bit long words.
584b0715f0SLars Ellenberg  * 1 << (35 - 3)	64bit longs needed
594b0715f0SLars Ellenberg  *	32 --> we could get away with a 32bit unsigned int to index and count
604b0715f0SLars Ellenberg  *	64bit long words, but I rather stay with unsigned long for now.
614b0715f0SLars Ellenberg  *	We probably should neither count nor point to bytes or long words
624b0715f0SLars Ellenberg  *	directly, but either by bitnumber, or by page index and offset.
634b0715f0SLars Ellenberg  * 1 << (35 - 12)
644b0715f0SLars Ellenberg  *	22 --> we need that much 4KiB pages of bitmap.
654b0715f0SLars Ellenberg  *	1 << (22 + 3) --> on a 64bit arch,
664b0715f0SLars Ellenberg  *	we need 32 MiB to store the array of page pointers.
674b0715f0SLars Ellenberg  *
684b0715f0SLars Ellenberg  * Because I'm lazy, and because the resulting patch was too large, too ugly
694b0715f0SLars Ellenberg  * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
704b0715f0SLars Ellenberg  * (1 << 32) bits * 4k storage.
714b0715f0SLars Ellenberg  *
724b0715f0SLars Ellenberg 
734b0715f0SLars Ellenberg  * bitmap storage and IO:
744b0715f0SLars Ellenberg  *	Bitmap is stored little endian on disk, and is kept little endian in
754b0715f0SLars Ellenberg  *	core memory. Currently we still hold the full bitmap in core as long
764b0715f0SLars Ellenberg  *	as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
774b0715f0SLars Ellenberg  *	seems excessive.
784b0715f0SLars Ellenberg  *
7924c4830cSBart Van Assche  *	We plan to reduce the amount of in-core bitmap pages by paging them in
804b0715f0SLars Ellenberg  *	and out against their on-disk location as necessary, but need to make
814b0715f0SLars Ellenberg  *	sure we don't cause too much meta data IO, and must not deadlock in
824b0715f0SLars Ellenberg  *	tight memory situations. This needs some more work.
83b411b363SPhilipp Reisner  */
84b411b363SPhilipp Reisner 
85b411b363SPhilipp Reisner /*
86b411b363SPhilipp Reisner  * NOTE
87b411b363SPhilipp Reisner  *  Access to the *bm_pages is protected by bm_lock.
88b411b363SPhilipp Reisner  *  It is safe to read the other members within the lock.
89b411b363SPhilipp Reisner  *
90b411b363SPhilipp Reisner  *  drbd_bm_set_bits is called from bio_endio callbacks,
91b411b363SPhilipp Reisner  *  We may be called with irq already disabled,
92b411b363SPhilipp Reisner  *  so we need spin_lock_irqsave().
93b411b363SPhilipp Reisner  *  And we need the kmap_atomic.
94b411b363SPhilipp Reisner  */
struct drbd_bitmap {
	struct page **bm_pages;     /* array of pages backing the in-core bitmap */
	spinlock_t bm_lock;         /* protects *bm_pages; other members may be read under it */

	/* see LIMITATIONS: above */

	unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
	unsigned long bm_bits;      /* total number of valid bits in the bitmap */
	size_t   bm_words;          /* bitmap size in words; presumably longs — confirm against resize code */
	size_t   bm_number_of_pages; /* number of entries in bm_pages[] */
	sector_t bm_dev_capacity;   /* device capacity this bitmap was sized for,
				     * reported by drbd_bm_capacity() */
	struct mutex bm_change; /* serializes resize operations */

	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */

	enum bm_flag bm_flags;      /* BM_LOCKED_MASK lock state and BM_P_VMALLOCED */

	/* debugging aid, in case we are still racy somewhere */
	char          *bm_why;      /* reason string recorded by drbd_bm_lock() */
	struct task_struct *bm_task; /* task that currently holds bm_change */
};
116b411b363SPhilipp Reisner 
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
/* Rate-limited complaint used when code touches the bitmap while it is
 * locked by someone else: logs the current task, the function that
 * noticed, and the holder/reason recorded by drbd_bm_lock(). */
static void __bm_print_lock_info(struct drbd_device *device, const char *func)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n",
		 current->comm, task_pid_nr(current),
		 func, b->bm_why ?: "?",
		 b->bm_task->comm, task_pid_nr(b->bm_task));
}
128b411b363SPhilipp Reisner 
129b30ab791SAndreas Gruenbacher void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
130b411b363SPhilipp Reisner {
131b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
132b411b363SPhilipp Reisner 	int trylock_failed;
133b411b363SPhilipp Reisner 
134b411b363SPhilipp Reisner 	if (!b) {
135d0180171SAndreas Gruenbacher 		drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n");
136b411b363SPhilipp Reisner 		return;
137b411b363SPhilipp Reisner 	}
138b411b363SPhilipp Reisner 
1398a03ae2aSThomas Gleixner 	trylock_failed = !mutex_trylock(&b->bm_change);
140b411b363SPhilipp Reisner 
141b411b363SPhilipp Reisner 	if (trylock_failed) {
142c60b0251SAndreas Gruenbacher 		drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n",
143c60b0251SAndreas Gruenbacher 			  current->comm, task_pid_nr(current),
144b411b363SPhilipp Reisner 			  why, b->bm_why ?: "?",
145c60b0251SAndreas Gruenbacher 			  b->bm_task->comm, task_pid_nr(b->bm_task));
1468a03ae2aSThomas Gleixner 		mutex_lock(&b->bm_change);
147b411b363SPhilipp Reisner 	}
14820ceb2b2SLars Ellenberg 	if (BM_LOCKED_MASK & b->bm_flags)
149d0180171SAndreas Gruenbacher 		drbd_err(device, "FIXME bitmap already locked in bm_lock\n");
15020ceb2b2SLars Ellenberg 	b->bm_flags |= flags & BM_LOCKED_MASK;
151b411b363SPhilipp Reisner 
152b411b363SPhilipp Reisner 	b->bm_why  = why;
153b411b363SPhilipp Reisner 	b->bm_task = current;
154b411b363SPhilipp Reisner }
155b411b363SPhilipp Reisner 
156b30ab791SAndreas Gruenbacher void drbd_bm_unlock(struct drbd_device *device)
157b411b363SPhilipp Reisner {
158b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
159b411b363SPhilipp Reisner 	if (!b) {
160d0180171SAndreas Gruenbacher 		drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n");
161b411b363SPhilipp Reisner 		return;
162b411b363SPhilipp Reisner 	}
163b411b363SPhilipp Reisner 
164b30ab791SAndreas Gruenbacher 	if (!(BM_LOCKED_MASK & device->bitmap->bm_flags))
165d0180171SAndreas Gruenbacher 		drbd_err(device, "FIXME bitmap not locked in bm_unlock\n");
166b411b363SPhilipp Reisner 
16720ceb2b2SLars Ellenberg 	b->bm_flags &= ~BM_LOCKED_MASK;
168b411b363SPhilipp Reisner 	b->bm_why  = NULL;
169b411b363SPhilipp Reisner 	b->bm_task = NULL;
1708a03ae2aSThomas Gleixner 	mutex_unlock(&b->bm_change);
171b411b363SPhilipp Reisner }
172b411b363SPhilipp Reisner 
17319f843aaSLars Ellenberg /* we store some "meta" info about our pages in page->private */
17419f843aaSLars Ellenberg /* at a granularity of 4k storage per bitmap bit:
17519f843aaSLars Ellenberg  * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
17619f843aaSLars Ellenberg  *  1<<38 bits,
17719f843aaSLars Ellenberg  *  1<<23 4k bitmap pages.
17819f843aaSLars Ellenberg  * Use 24 bits as page index, covers 2 peta byte storage
17919f843aaSLars Ellenberg  * at a granularity of 4k per bit.
18019f843aaSLars Ellenberg  * Used to report the failed page idx on io error from the endio handlers.
18119f843aaSLars Ellenberg  */
18219f843aaSLars Ellenberg #define BM_PAGE_IDX_MASK	((1UL<<24)-1)
18319f843aaSLars Ellenberg /* this page is currently read in, or written back */
18419f843aaSLars Ellenberg #define BM_PAGE_IO_LOCK		31
18519f843aaSLars Ellenberg /* if there has been an IO error for this page */
18619f843aaSLars Ellenberg #define BM_PAGE_IO_ERROR	30
18719f843aaSLars Ellenberg /* this is to be able to intelligently skip disk IO,
18819f843aaSLars Ellenberg  * set if bits have been set since last IO. */
18919f843aaSLars Ellenberg #define BM_PAGE_NEED_WRITEOUT	29
19019f843aaSLars Ellenberg /* to mark for lazy writeout once syncer cleared all clearable bits,
19119f843aaSLars Ellenberg  * set if bits have been cleared since last IO. */
19219f843aaSLars Ellenberg #define BM_PAGE_LAZY_WRITEOUT	28
19345dfffebSLars Ellenberg /* pages marked with this "HINT" will be considered for writeout
19445dfffebSLars Ellenberg  * on activity log transactions */
19545dfffebSLars Ellenberg #define BM_PAGE_HINT_WRITEOUT	27
19619f843aaSLars Ellenberg 
/* store_page_idx uses non-atomic assignment. It is only used directly after
 * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
 * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
 * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
 * requires it all to be atomic as well. */
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
	/* idx must fit in the low 24 bits (BM_PAGE_IDX_MASK); the high
	 * bits of page->private are used as per-page flag bits. */
	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
	set_page_private(page, idx);
}
20719f843aaSLars Ellenberg 
/* Recover the bitmap page index stored by bm_store_page_idx(),
 * masking off the flag bits kept in the high bits of page->private. */
static unsigned long bm_page_to_idx(struct page *page)
{
	return page_private(page) & BM_PAGE_IDX_MASK;
}
21219f843aaSLars Ellenberg 
/* As it is very unlikely that the same page is under IO from more than one
 * context, we can get away with a bit per page and one wait queue per bitmap.
 */
static void bm_page_lock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	/* sleep on the shared wait queue until we win the per-page IO-lock bit */
	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}
22219f843aaSLars Ellenberg 
/* Drop the per-page IO-lock bit taken by bm_page_lock_io() and wake
 * any waiters.  clear_bit_unlock provides the needed release ordering. */
static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
	wake_up(&device->bitmap->bm_io_wait);
}
23019f843aaSLars Ellenberg 
/* set _before_ submit_io, so it may be reset due to being changed
 * while this page is in flight... will get submitted later again */
static void bm_set_page_unchanged(struct page *page)
{
	/* use cmpxchg? */
	/* NOTE: the two clears are individually atomic, but not atomic as
	 * a pair; a concurrent setter may observe an intermediate state. */
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
23919f843aaSLars Ellenberg 
/* Mark this page dirty: bits were set since the last IO, so it must be
 * written out on the next writeout pass. */
static void bm_set_page_need_writeout(struct page *page)
{
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
24419f843aaSLars Ellenberg 
24545dfffebSLars Ellenberg /**
24645dfffebSLars Ellenberg  * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
247b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
24845dfffebSLars Ellenberg  * @page_nr:	the bitmap page to mark with the "hint" flag
24945dfffebSLars Ellenberg  *
25045dfffebSLars Ellenberg  * From within an activity log transaction, we mark a few pages with these
25145dfffebSLars Ellenberg  * hints, then call drbd_bm_write_hinted(), which will only write out changed
25245dfffebSLars Ellenberg  * pages which are flagged with this mark.
25345dfffebSLars Ellenberg  */
254b30ab791SAndreas Gruenbacher void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
25545dfffebSLars Ellenberg {
25645dfffebSLars Ellenberg 	struct page *page;
257b30ab791SAndreas Gruenbacher 	if (page_nr >= device->bitmap->bm_number_of_pages) {
258d0180171SAndreas Gruenbacher 		drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
259b30ab791SAndreas Gruenbacher 			 page_nr, (int)device->bitmap->bm_number_of_pages);
26045dfffebSLars Ellenberg 		return;
26145dfffebSLars Ellenberg 	}
262b30ab791SAndreas Gruenbacher 	page = device->bitmap->bm_pages[page_nr];
26345dfffebSLars Ellenberg 	set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
26445dfffebSLars Ellenberg }
26545dfffebSLars Ellenberg 
/* Return true if neither NEED_WRITEOUT nor LAZY_WRITEOUT is set, i.e.
 * this page has not changed since its last IO and may be skipped. */
static int bm_test_page_unchanged(struct page *page)
{
	/* volatile: force a single fresh read of both flag bits together */
	volatile const unsigned long *addr = &page_private(page);
	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}
27119f843aaSLars Ellenberg 
/* Record an IO error on this page (set from the endio handlers). */
static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
27619f843aaSLars Ellenberg 
/* Clear a previously recorded per-page IO error. */
static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
28119f843aaSLars Ellenberg 
/* Mark this page for lazy writeout: bits were cleared since last IO. */
static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
28619f843aaSLars Ellenberg 
/* Return nonzero if this page is flagged for lazy writeout. */
static int bm_test_page_lazy_writeout(struct page *page)
{
	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
29119f843aaSLars Ellenberg 
/* on a 32bit box, this would allow for exactly (2<<38) bits. */
static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
	/* LN2_BPL - 3 is log2 of bytes per long, so shift converts a
	 * long-word index to a page index. */
	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
30019f843aaSLars Ellenberg 
/* Map a bit number to the index of the bitmap page containing it. */
static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
30895a0f10cSLars Ellenberg 
/* Map bitmap page @idx into kernel address space (atomic mapping;
 * caller must not sleep until the matching __bm_unmap()). */
static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	struct page *page = b->bm_pages[idx];
	return (unsigned long *) kmap_atomic(page);
}
31495a0f10cSLars Ellenberg 
/* Public-ish wrapper around __bm_map_pidx(); kept separate so the two
 * call sites can diverge again if a non-atomic variant is ever needed. */
static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx);
}
31995a0f10cSLars Ellenberg 
/* Undo a __bm_map_pidx() atomic mapping. */
static void __bm_unmap(unsigned long *p_addr)
{
	kunmap_atomic(p_addr);
}
/* Note: the stray ';' that used to follow this function body was removed;
 * an extraneous file-scope semicolon is rejected by -pedantic builds. */
324b411b363SPhilipp Reisner 
/* Wrapper around __bm_unmap(), mirroring bm_map_pidx().
 * Fixed: the former "return __bm_unmap(...);" returned an expression
 * from a void function, which violates C99 6.8.6.4 (GNU extension only). */
static void bm_unmap(unsigned long *p_addr)
{
	__bm_unmap(p_addr);
}
329b411b363SPhilipp Reisner 
330b411b363SPhilipp Reisner /* long word offset of _bitmap_ sector */
331b411b363SPhilipp Reisner #define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
332b411b363SPhilipp Reisner /* word offset from start of bitmap to word number _in_page_
333b411b363SPhilipp Reisner  * modulo longs per page
334b411b363SPhilipp Reisner #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
33524c4830cSBart Van Assche  hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
336b411b363SPhilipp Reisner  so do it explicitly:
337b411b363SPhilipp Reisner  */
338b411b363SPhilipp Reisner #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
339b411b363SPhilipp Reisner 
340b411b363SPhilipp Reisner /* Long words per page */
341b411b363SPhilipp Reisner #define LWPP (PAGE_SIZE/sizeof(long))
342b411b363SPhilipp Reisner 
343b411b363SPhilipp Reisner /*
344b411b363SPhilipp Reisner  * actually most functions herein should take a struct drbd_bitmap*, not a
345b30ab791SAndreas Gruenbacher  * struct drbd_device*, but for the debug macros I like to have the device around
346b411b363SPhilipp Reisner  * to be able to report device specific.
347b411b363SPhilipp Reisner  */
348b411b363SPhilipp Reisner 
34919f843aaSLars Ellenberg 
350b411b363SPhilipp Reisner static void bm_free_pages(struct page **pages, unsigned long number)
351b411b363SPhilipp Reisner {
352b411b363SPhilipp Reisner 	unsigned long i;
353b411b363SPhilipp Reisner 	if (!pages)
354b411b363SPhilipp Reisner 		return;
355b411b363SPhilipp Reisner 
356b411b363SPhilipp Reisner 	for (i = 0; i < number; i++) {
357b411b363SPhilipp Reisner 		if (!pages[i]) {
358f88c5d90SLars Ellenberg 			pr_alert("bm_free_pages tried to free a NULL pointer; i=%lu n=%lu\n",
359b411b363SPhilipp Reisner 				 i, number);
360b411b363SPhilipp Reisner 			continue;
361b411b363SPhilipp Reisner 		}
362b411b363SPhilipp Reisner 		__free_page(pages[i]);
363b411b363SPhilipp Reisner 		pages[i] = NULL;
364b411b363SPhilipp Reisner 	}
365b411b363SPhilipp Reisner }
366b411b363SPhilipp Reisner 
/* Free a buffer that was obtained from either kmalloc or __vmalloc;
 * @v is nonzero when it came from __vmalloc (BM_P_VMALLOCED). */
static void bm_vk_free(void *ptr, int v)
{
	if (!v)
		kfree(ptr);
	else
		vfree(ptr);
}
374b411b363SPhilipp Reisner 
/*
 * "have" and "want" are NUMBER OF PAGES.
 *
 * Grow or shrink the page array to @want pages.  Returns the (possibly
 * new) array, the unchanged old array if have == want, or NULL on
 * allocation failure (the old array is left intact in that case).
 * On shrink, the surplus struct pages are deliberately NOT freed here
 * (see the comment below); the caller owns that cleanup, and also owns
 * freeing the old pointer array once it has swapped in the new one.
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes, vmalloced = 0;
	unsigned long have = b->bm_number_of_pages;

	/* have and old_pages must agree: both empty or both populated */
	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_NOIO, as this is called while drbd IO is "suspended",
	 * and during resize or attach on diskless Primary,
	 * we must not block on IO to ourselves.
	 * Context is receiver thread or dmsetup. */
	bytes = sizeof(struct page *)*want;
	new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
	if (!new_pages) {
		new_pages = __vmalloc(bytes,
				GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
				PAGE_KERNEL);
		if (!new_pages)
			return NULL;
		vmalloced = 1;
	}

	if (want >= have) {
		/* growing: reuse the existing pages, allocate the rest */
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (!page) {
				/* roll back: free only the pages WE allocated,
				 * then the new array itself */
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages, vmalloced);
				return NULL;
			}
			/* we want to know which page it is
			 * from the endio handlers */
			bm_store_page_idx(page, i);
			new_pages[i] = page;
		}
	} else {
		/* shrinking: keep the first @want pages */
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	/* remember how the array was allocated, so bm_vk_free() picks
	 * the matching free routine later */
	if (vmalloced)
		b->bm_flags |= BM_P_VMALLOCED;
	else
		b->bm_flags &= ~BM_P_VMALLOCED;

	return new_pages;
}
437b411b363SPhilipp Reisner 
438b411b363SPhilipp Reisner /*
439b411b363SPhilipp Reisner  * called on driver init only. TODO call when a device is created.
440b30ab791SAndreas Gruenbacher  * allocates the drbd_bitmap, and stores it in device->bitmap.
441b411b363SPhilipp Reisner  */
442b30ab791SAndreas Gruenbacher int drbd_bm_init(struct drbd_device *device)
443b411b363SPhilipp Reisner {
444b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
445b411b363SPhilipp Reisner 	WARN_ON(b != NULL);
446b411b363SPhilipp Reisner 	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
447b411b363SPhilipp Reisner 	if (!b)
448b411b363SPhilipp Reisner 		return -ENOMEM;
449b411b363SPhilipp Reisner 	spin_lock_init(&b->bm_lock);
4508a03ae2aSThomas Gleixner 	mutex_init(&b->bm_change);
451b411b363SPhilipp Reisner 	init_waitqueue_head(&b->bm_io_wait);
452b411b363SPhilipp Reisner 
453b30ab791SAndreas Gruenbacher 	device->bitmap = b;
454b411b363SPhilipp Reisner 
455b411b363SPhilipp Reisner 	return 0;
456b411b363SPhilipp Reisner }
457b411b363SPhilipp Reisner 
/* Return the device capacity the bitmap was last sized for,
 * or 0 if there is no bitmap (which expect() will have logged). */
sector_t drbd_bm_capacity(struct drbd_device *device)
{
	if (!expect(device->bitmap))
		return 0;
	return device->bitmap->bm_dev_capacity;
}
464b411b363SPhilipp Reisner 
465b411b363SPhilipp Reisner /* called on driver unload. TODO: call when a device is destroyed.
466b411b363SPhilipp Reisner  */
467b30ab791SAndreas Gruenbacher void drbd_bm_cleanup(struct drbd_device *device)
468b411b363SPhilipp Reisner {
469b30ab791SAndreas Gruenbacher 	if (!expect(device->bitmap))
470841ce241SAndreas Gruenbacher 		return;
471b30ab791SAndreas Gruenbacher 	bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
472b30ab791SAndreas Gruenbacher 	bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags));
473b30ab791SAndreas Gruenbacher 	kfree(device->bitmap);
474b30ab791SAndreas Gruenbacher 	device->bitmap = NULL;
475b411b363SPhilipp Reisner }
476b411b363SPhilipp Reisner 
477b411b363SPhilipp Reisner /*
478b411b363SPhilipp Reisner  * since (b->bm_bits % BITS_PER_LONG) != 0,
479b411b363SPhilipp Reisner  * this masks out the remaining bits.
480b411b363SPhilipp Reisner  * Returns the number of bits cleared.
481b411b363SPhilipp Reisner  */
4822630628bSLars Ellenberg #ifndef BITS_PER_PAGE
48395a0f10cSLars Ellenberg #define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
48495a0f10cSLars Ellenberg #define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
4852630628bSLars Ellenberg #else
4862630628bSLars Ellenberg # if BITS_PER_PAGE != (1UL << (PAGE_SHIFT + 3))
4872630628bSLars Ellenberg #  error "ambiguous BITS_PER_PAGE"
4882630628bSLars Ellenberg # endif
4892630628bSLars Ellenberg #endif
49095a0f10cSLars Ellenberg #define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
/* Clear the unused ("surplus") bits beyond bm_bits in the last bitmap
 * page and return how many of them had been set.  Counterpart of
 * bm_set_surplus().  Caller must hold the bitmap appropriately locked. */
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}
527b411b363SPhilipp Reisner 
/* Set all surplus bits beyond bm_bits in the last bitmap page (and the
 * 32bit padding long, if any).  Mirror image of bm_clear_surplus();
 * presumably used to catch code that strays past the valid bit range —
 * confirm against the callers, which are outside this view. */
static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}
560b411b363SPhilipp Reisner 
5614b0715f0SLars Ellenberg /* you better not modify the bitmap while this is running,
5624b0715f0SLars Ellenberg  * or its results will be stale */
56395a0f10cSLars Ellenberg static unsigned long bm_count_bits(struct drbd_bitmap *b)
564b411b363SPhilipp Reisner {
5654b0715f0SLars Ellenberg 	unsigned long *p_addr;
566b411b363SPhilipp Reisner 	unsigned long bits = 0;
5674b0715f0SLars Ellenberg 	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
568*5fb3bc4dSLars Ellenberg 	int idx, last_word;
5697777a8baSLars Ellenberg 
5704b0715f0SLars Ellenberg 	/* all but last page */
5716850c442SLars Ellenberg 	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
572cfd8005cSCong Wang 		p_addr = __bm_map_pidx(b, idx);
573*5fb3bc4dSLars Ellenberg 		bits += bitmap_weight(p_addr, BITS_PER_PAGE);
574cfd8005cSCong Wang 		__bm_unmap(p_addr);
575b411b363SPhilipp Reisner 		cond_resched();
576b411b363SPhilipp Reisner 	}
5774b0715f0SLars Ellenberg 	/* last (or only) page */
5784b0715f0SLars Ellenberg 	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
579589973a7SCong Wang 	p_addr = __bm_map_pidx(b, idx);
580*5fb3bc4dSLars Ellenberg 	bits += bitmap_weight(p_addr, last_word * BITS_PER_LONG);
5814b0715f0SLars Ellenberg 	p_addr[last_word] &= cpu_to_lel(mask);
5824b0715f0SLars Ellenberg 	bits += hweight_long(p_addr[last_word]);
5834b0715f0SLars Ellenberg 	/* 32bit arch, may have an unused padding long */
5844b0715f0SLars Ellenberg 	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
5854b0715f0SLars Ellenberg 		p_addr[last_word+1] = 0;
586589973a7SCong Wang 	__bm_unmap(p_addr);
587b411b363SPhilipp Reisner 	return bits;
588b411b363SPhilipp Reisner }
589b411b363SPhilipp Reisner 
590b411b363SPhilipp Reisner /* offset and len in long words.*/
591b411b363SPhilipp Reisner static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
592b411b363SPhilipp Reisner {
593b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
59419f843aaSLars Ellenberg 	unsigned int idx;
595b411b363SPhilipp Reisner 	size_t do_now, end;
596b411b363SPhilipp Reisner 
597b411b363SPhilipp Reisner 	end = offset + len;
598b411b363SPhilipp Reisner 
599b411b363SPhilipp Reisner 	if (end > b->bm_words) {
600f88c5d90SLars Ellenberg 		pr_alert("bm_memset end > bm_words\n");
601b411b363SPhilipp Reisner 		return;
602b411b363SPhilipp Reisner 	}
603b411b363SPhilipp Reisner 
604b411b363SPhilipp Reisner 	while (offset < end) {
605b411b363SPhilipp Reisner 		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
60619f843aaSLars Ellenberg 		idx = bm_word_to_page_idx(b, offset);
60719f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, idx);
608b411b363SPhilipp Reisner 		bm = p_addr + MLPP(offset);
609b411b363SPhilipp Reisner 		if (bm+do_now > p_addr + LWPP) {
610f88c5d90SLars Ellenberg 			pr_alert("BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
611b411b363SPhilipp Reisner 			       p_addr, bm, (int)do_now);
61284e7c0f7SLars Ellenberg 		} else
613b411b363SPhilipp Reisner 			memset(bm, c, do_now * sizeof(long));
614b411b363SPhilipp Reisner 		bm_unmap(p_addr);
61519f843aaSLars Ellenberg 		bm_set_page_need_writeout(b->bm_pages[idx]);
616b411b363SPhilipp Reisner 		offset += do_now;
617b411b363SPhilipp Reisner 	}
618b411b363SPhilipp Reisner }
619b411b363SPhilipp Reisner 
620ae8bf312SLars Ellenberg /* For the layout, see comment above drbd_md_set_sector_offsets(). */
621ae8bf312SLars Ellenberg static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
622ae8bf312SLars Ellenberg {
623ae8bf312SLars Ellenberg 	u64 bitmap_sectors;
624ae8bf312SLars Ellenberg 	if (ldev->md.al_offset == 8)
625ae8bf312SLars Ellenberg 		bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
626ae8bf312SLars Ellenberg 	else
627ae8bf312SLars Ellenberg 		bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
628ae8bf312SLars Ellenberg 	return bitmap_sectors << (9 + 3);
629ae8bf312SLars Ellenberg }
630ae8bf312SLars Ellenberg 
631b411b363SPhilipp Reisner /*
632b411b363SPhilipp Reisner  * make sure the bitmap has enough room for the attached storage,
633b411b363SPhilipp Reisner  * if necessary, resize.
634b411b363SPhilipp Reisner  * called whenever we may have changed the device size.
635b411b363SPhilipp Reisner  * returns -ENOMEM if we could not allocate enough memory, 0 on success.
636b411b363SPhilipp Reisner  * In case this is actually a resize, we copy the old bitmap into the new one.
637b411b363SPhilipp Reisner  * Otherwise, the bitmap is initialized to all bits set.
638b411b363SPhilipp Reisner  */
639b30ab791SAndreas Gruenbacher int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
640b411b363SPhilipp Reisner {
641b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
6426850c442SLars Ellenberg 	unsigned long bits, words, owords, obits;
643b411b363SPhilipp Reisner 	unsigned long want, have, onpages; /* number of pages */
644b411b363SPhilipp Reisner 	struct page **npages, **opages = NULL;
645b411b363SPhilipp Reisner 	int err = 0, growing;
646b411b363SPhilipp Reisner 	int opages_vmalloced;
647b411b363SPhilipp Reisner 
648841ce241SAndreas Gruenbacher 	if (!expect(b))
649841ce241SAndreas Gruenbacher 		return -ENOMEM;
650b411b363SPhilipp Reisner 
651b30ab791SAndreas Gruenbacher 	drbd_bm_lock(device, "resize", BM_LOCKED_MASK);
652b411b363SPhilipp Reisner 
653d0180171SAndreas Gruenbacher 	drbd_info(device, "drbd_bm_resize called with capacity == %llu\n",
654b411b363SPhilipp Reisner 			(unsigned long long)capacity);
655b411b363SPhilipp Reisner 
656b411b363SPhilipp Reisner 	if (capacity == b->bm_dev_capacity)
657b411b363SPhilipp Reisner 		goto out;
658b411b363SPhilipp Reisner 
65920ceb2b2SLars Ellenberg 	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);
660b411b363SPhilipp Reisner 
661b411b363SPhilipp Reisner 	if (capacity == 0) {
662b411b363SPhilipp Reisner 		spin_lock_irq(&b->bm_lock);
663b411b363SPhilipp Reisner 		opages = b->bm_pages;
664b411b363SPhilipp Reisner 		onpages = b->bm_number_of_pages;
665b411b363SPhilipp Reisner 		owords = b->bm_words;
666b411b363SPhilipp Reisner 		b->bm_pages = NULL;
667b411b363SPhilipp Reisner 		b->bm_number_of_pages =
668b411b363SPhilipp Reisner 		b->bm_set   =
669b411b363SPhilipp Reisner 		b->bm_bits  =
670b411b363SPhilipp Reisner 		b->bm_words =
671b411b363SPhilipp Reisner 		b->bm_dev_capacity = 0;
672b411b363SPhilipp Reisner 		spin_unlock_irq(&b->bm_lock);
673b411b363SPhilipp Reisner 		bm_free_pages(opages, onpages);
674b411b363SPhilipp Reisner 		bm_vk_free(opages, opages_vmalloced);
675b411b363SPhilipp Reisner 		goto out;
676b411b363SPhilipp Reisner 	}
677b411b363SPhilipp Reisner 	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));
678b411b363SPhilipp Reisner 
679b411b363SPhilipp Reisner 	/* if we would use
680b411b363SPhilipp Reisner 	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
681b411b363SPhilipp Reisner 	   a 32bit host could present the wrong number of words
682b411b363SPhilipp Reisner 	   to a 64bit host.
683b411b363SPhilipp Reisner 	*/
684b411b363SPhilipp Reisner 	words = ALIGN(bits, 64) >> LN2_BPL;
685b411b363SPhilipp Reisner 
686b30ab791SAndreas Gruenbacher 	if (get_ldev(device)) {
687b30ab791SAndreas Gruenbacher 		u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
688b30ab791SAndreas Gruenbacher 		put_ldev(device);
6894b0715f0SLars Ellenberg 		if (bits > bits_on_disk) {
690d0180171SAndreas Gruenbacher 			drbd_info(device, "bits = %lu\n", bits);
691d0180171SAndreas Gruenbacher 			drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk);
6924b0715f0SLars Ellenberg 			err = -ENOSPC;
6934b0715f0SLars Ellenberg 			goto out;
6944b0715f0SLars Ellenberg 		}
695b411b363SPhilipp Reisner 	}
696b411b363SPhilipp Reisner 
6976850c442SLars Ellenberg 	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
698b411b363SPhilipp Reisner 	have = b->bm_number_of_pages;
699b411b363SPhilipp Reisner 	if (want == have) {
7000b0ba1efSAndreas Gruenbacher 		D_ASSERT(device, b->bm_pages != NULL);
701b411b363SPhilipp Reisner 		npages = b->bm_pages;
702b411b363SPhilipp Reisner 	} else {
703b30ab791SAndreas Gruenbacher 		if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
704b411b363SPhilipp Reisner 			npages = NULL;
705b411b363SPhilipp Reisner 		else
706b411b363SPhilipp Reisner 			npages = bm_realloc_pages(b, want);
707b411b363SPhilipp Reisner 	}
708b411b363SPhilipp Reisner 
709b411b363SPhilipp Reisner 	if (!npages) {
710b411b363SPhilipp Reisner 		err = -ENOMEM;
711b411b363SPhilipp Reisner 		goto out;
712b411b363SPhilipp Reisner 	}
713b411b363SPhilipp Reisner 
714b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
715b411b363SPhilipp Reisner 	opages = b->bm_pages;
716b411b363SPhilipp Reisner 	owords = b->bm_words;
717b411b363SPhilipp Reisner 	obits  = b->bm_bits;
718b411b363SPhilipp Reisner 
719b411b363SPhilipp Reisner 	growing = bits > obits;
7205223671bSPhilipp Reisner 	if (opages && growing && set_new_bits)
721b411b363SPhilipp Reisner 		bm_set_surplus(b);
722b411b363SPhilipp Reisner 
723b411b363SPhilipp Reisner 	b->bm_pages = npages;
724b411b363SPhilipp Reisner 	b->bm_number_of_pages = want;
725b411b363SPhilipp Reisner 	b->bm_bits  = bits;
726b411b363SPhilipp Reisner 	b->bm_words = words;
727b411b363SPhilipp Reisner 	b->bm_dev_capacity = capacity;
728b411b363SPhilipp Reisner 
729b411b363SPhilipp Reisner 	if (growing) {
73002d9a94bSPhilipp Reisner 		if (set_new_bits) {
731b411b363SPhilipp Reisner 			bm_memset(b, owords, 0xff, words-owords);
732b411b363SPhilipp Reisner 			b->bm_set += bits - obits;
73302d9a94bSPhilipp Reisner 		} else
73402d9a94bSPhilipp Reisner 			bm_memset(b, owords, 0x00, words-owords);
73502d9a94bSPhilipp Reisner 
736b411b363SPhilipp Reisner 	}
737b411b363SPhilipp Reisner 
738b411b363SPhilipp Reisner 	if (want < have) {
739b411b363SPhilipp Reisner 		/* implicit: (opages != NULL) && (opages != npages) */
740b411b363SPhilipp Reisner 		bm_free_pages(opages + want, have - want);
741b411b363SPhilipp Reisner 	}
742b411b363SPhilipp Reisner 
743b411b363SPhilipp Reisner 	(void)bm_clear_surplus(b);
744b411b363SPhilipp Reisner 
745b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
746b411b363SPhilipp Reisner 	if (opages != npages)
747b411b363SPhilipp Reisner 		bm_vk_free(opages, opages_vmalloced);
748b411b363SPhilipp Reisner 	if (!growing)
749b411b363SPhilipp Reisner 		b->bm_set = bm_count_bits(b);
750d0180171SAndreas Gruenbacher 	drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);
751b411b363SPhilipp Reisner 
752b411b363SPhilipp Reisner  out:
753b30ab791SAndreas Gruenbacher 	drbd_bm_unlock(device);
754b411b363SPhilipp Reisner 	return err;
755b411b363SPhilipp Reisner }
756b411b363SPhilipp Reisner 
757b411b363SPhilipp Reisner /* inherently racy:
758b411b363SPhilipp Reisner  * if not protected by other means, return value may be out of date when
759b411b363SPhilipp Reisner  * leaving this function...
760b411b363SPhilipp Reisner  * we still need to lock it, since it is important that this returns
761b411b363SPhilipp Reisner  * bm_set == 0 precisely.
762b411b363SPhilipp Reisner  *
763b411b363SPhilipp Reisner  * maybe bm_set should be atomic_t ?
764b411b363SPhilipp Reisner  */
765b30ab791SAndreas Gruenbacher unsigned long _drbd_bm_total_weight(struct drbd_device *device)
766b411b363SPhilipp Reisner {
767b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
768b411b363SPhilipp Reisner 	unsigned long s;
769b411b363SPhilipp Reisner 	unsigned long flags;
770b411b363SPhilipp Reisner 
771841ce241SAndreas Gruenbacher 	if (!expect(b))
772841ce241SAndreas Gruenbacher 		return 0;
773841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
774841ce241SAndreas Gruenbacher 		return 0;
775b411b363SPhilipp Reisner 
776b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
777b411b363SPhilipp Reisner 	s = b->bm_set;
778b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
779b411b363SPhilipp Reisner 
780b411b363SPhilipp Reisner 	return s;
781b411b363SPhilipp Reisner }
782b411b363SPhilipp Reisner 
783b30ab791SAndreas Gruenbacher unsigned long drbd_bm_total_weight(struct drbd_device *device)
784b411b363SPhilipp Reisner {
785b411b363SPhilipp Reisner 	unsigned long s;
786b411b363SPhilipp Reisner 	/* if I don't have a disk, I don't know about out-of-sync status */
787b30ab791SAndreas Gruenbacher 	if (!get_ldev_if_state(device, D_NEGOTIATING))
788b411b363SPhilipp Reisner 		return 0;
789b30ab791SAndreas Gruenbacher 	s = _drbd_bm_total_weight(device);
790b30ab791SAndreas Gruenbacher 	put_ldev(device);
791b411b363SPhilipp Reisner 	return s;
792b411b363SPhilipp Reisner }
793b411b363SPhilipp Reisner 
794b30ab791SAndreas Gruenbacher size_t drbd_bm_words(struct drbd_device *device)
795b411b363SPhilipp Reisner {
796b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
797841ce241SAndreas Gruenbacher 	if (!expect(b))
798841ce241SAndreas Gruenbacher 		return 0;
799841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
800841ce241SAndreas Gruenbacher 		return 0;
801b411b363SPhilipp Reisner 
802b411b363SPhilipp Reisner 	return b->bm_words;
803b411b363SPhilipp Reisner }
804b411b363SPhilipp Reisner 
805b30ab791SAndreas Gruenbacher unsigned long drbd_bm_bits(struct drbd_device *device)
806b411b363SPhilipp Reisner {
807b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
808841ce241SAndreas Gruenbacher 	if (!expect(b))
809841ce241SAndreas Gruenbacher 		return 0;
810b411b363SPhilipp Reisner 
811b411b363SPhilipp Reisner 	return b->bm_bits;
812b411b363SPhilipp Reisner }
813b411b363SPhilipp Reisner 
814b411b363SPhilipp Reisner /* merge number words from buffer into the bitmap starting at offset.
815b411b363SPhilipp Reisner  * buffer[i] is expected to be little endian unsigned long.
816b411b363SPhilipp Reisner  * bitmap must be locked by drbd_bm_lock.
817b411b363SPhilipp Reisner  * currently only used from receive_bitmap.
818b411b363SPhilipp Reisner  */
819b30ab791SAndreas Gruenbacher void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
820b411b363SPhilipp Reisner 			unsigned long *buffer)
821b411b363SPhilipp Reisner {
822b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
823b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
824b411b363SPhilipp Reisner 	unsigned long word, bits;
82519f843aaSLars Ellenberg 	unsigned int idx;
826b411b363SPhilipp Reisner 	size_t end, do_now;
827b411b363SPhilipp Reisner 
828b411b363SPhilipp Reisner 	end = offset + number;
829b411b363SPhilipp Reisner 
830841ce241SAndreas Gruenbacher 	if (!expect(b))
831841ce241SAndreas Gruenbacher 		return;
832841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
833841ce241SAndreas Gruenbacher 		return;
834b411b363SPhilipp Reisner 	if (number == 0)
835b411b363SPhilipp Reisner 		return;
836b411b363SPhilipp Reisner 	WARN_ON(offset >= b->bm_words);
837b411b363SPhilipp Reisner 	WARN_ON(end    >  b->bm_words);
838b411b363SPhilipp Reisner 
839b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
840b411b363SPhilipp Reisner 	while (offset < end) {
841b411b363SPhilipp Reisner 		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
84219f843aaSLars Ellenberg 		idx = bm_word_to_page_idx(b, offset);
84319f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, idx);
844b411b363SPhilipp Reisner 		bm = p_addr + MLPP(offset);
845b411b363SPhilipp Reisner 		offset += do_now;
846b411b363SPhilipp Reisner 		while (do_now--) {
847b411b363SPhilipp Reisner 			bits = hweight_long(*bm);
84895a0f10cSLars Ellenberg 			word = *bm | *buffer++;
849b411b363SPhilipp Reisner 			*bm++ = word;
850b411b363SPhilipp Reisner 			b->bm_set += hweight_long(word) - bits;
851b411b363SPhilipp Reisner 		}
852b411b363SPhilipp Reisner 		bm_unmap(p_addr);
85319f843aaSLars Ellenberg 		bm_set_page_need_writeout(b->bm_pages[idx]);
854b411b363SPhilipp Reisner 	}
855b411b363SPhilipp Reisner 	/* with 32bit <-> 64bit cross-platform connect
856b411b363SPhilipp Reisner 	 * this is only correct for current usage,
857b411b363SPhilipp Reisner 	 * where we _know_ that we are 64 bit aligned,
858b411b363SPhilipp Reisner 	 * and know that this function is used in this way, too...
859b411b363SPhilipp Reisner 	 */
860b411b363SPhilipp Reisner 	if (end == b->bm_words)
861b411b363SPhilipp Reisner 		b->bm_set -= bm_clear_surplus(b);
862b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
863b411b363SPhilipp Reisner }
864b411b363SPhilipp Reisner 
865b411b363SPhilipp Reisner /* copy number words from the bitmap starting at offset into the buffer.
866b411b363SPhilipp Reisner  * buffer[i] will be little endian unsigned long.
867b411b363SPhilipp Reisner  */
868b30ab791SAndreas Gruenbacher void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
869b411b363SPhilipp Reisner 		     unsigned long *buffer)
870b411b363SPhilipp Reisner {
871b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
872b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
873b411b363SPhilipp Reisner 	size_t end, do_now;
874b411b363SPhilipp Reisner 
875b411b363SPhilipp Reisner 	end = offset + number;
876b411b363SPhilipp Reisner 
877841ce241SAndreas Gruenbacher 	if (!expect(b))
878841ce241SAndreas Gruenbacher 		return;
879841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
880841ce241SAndreas Gruenbacher 		return;
881b411b363SPhilipp Reisner 
882b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
883b411b363SPhilipp Reisner 	if ((offset >= b->bm_words) ||
884b411b363SPhilipp Reisner 	    (end    >  b->bm_words) ||
885b411b363SPhilipp Reisner 	    (number <= 0))
886d0180171SAndreas Gruenbacher 		drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n",
887b411b363SPhilipp Reisner 			(unsigned long)	offset,
888b411b363SPhilipp Reisner 			(unsigned long)	number,
889b411b363SPhilipp Reisner 			(unsigned long) b->bm_words);
890b411b363SPhilipp Reisner 	else {
891b411b363SPhilipp Reisner 		while (offset < end) {
892b411b363SPhilipp Reisner 			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
89319f843aaSLars Ellenberg 			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
894b411b363SPhilipp Reisner 			bm = p_addr + MLPP(offset);
895b411b363SPhilipp Reisner 			offset += do_now;
896b411b363SPhilipp Reisner 			while (do_now--)
89795a0f10cSLars Ellenberg 				*buffer++ = *bm++;
898b411b363SPhilipp Reisner 			bm_unmap(p_addr);
899b411b363SPhilipp Reisner 		}
900b411b363SPhilipp Reisner 	}
901b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
902b411b363SPhilipp Reisner }
903b411b363SPhilipp Reisner 
904b411b363SPhilipp Reisner /* set all bits in the bitmap */
905b30ab791SAndreas Gruenbacher void drbd_bm_set_all(struct drbd_device *device)
906b411b363SPhilipp Reisner {
907b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
908841ce241SAndreas Gruenbacher 	if (!expect(b))
909841ce241SAndreas Gruenbacher 		return;
910841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
911841ce241SAndreas Gruenbacher 		return;
912b411b363SPhilipp Reisner 
913b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
914b411b363SPhilipp Reisner 	bm_memset(b, 0, 0xff, b->bm_words);
915b411b363SPhilipp Reisner 	(void)bm_clear_surplus(b);
916b411b363SPhilipp Reisner 	b->bm_set = b->bm_bits;
917b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
918b411b363SPhilipp Reisner }
919b411b363SPhilipp Reisner 
920b411b363SPhilipp Reisner /* clear all bits in the bitmap */
921b30ab791SAndreas Gruenbacher void drbd_bm_clear_all(struct drbd_device *device)
922b411b363SPhilipp Reisner {
923b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
924841ce241SAndreas Gruenbacher 	if (!expect(b))
925841ce241SAndreas Gruenbacher 		return;
926841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
927841ce241SAndreas Gruenbacher 		return;
928b411b363SPhilipp Reisner 
929b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
930b411b363SPhilipp Reisner 	bm_memset(b, 0, 0, b->bm_words);
931b411b363SPhilipp Reisner 	b->bm_set = 0;
932b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
933b411b363SPhilipp Reisner }
934b411b363SPhilipp Reisner 
9354ce49266SLars Ellenberg static void drbd_bm_aio_ctx_destroy(struct kref *kref)
936d1f3779bSPhilipp Reisner {
9374ce49266SLars Ellenberg 	struct drbd_bm_aio_ctx *ctx = container_of(kref, struct drbd_bm_aio_ctx, kref);
9384ce49266SLars Ellenberg 	unsigned long flags;
939d1f3779bSPhilipp Reisner 
9404ce49266SLars Ellenberg 	spin_lock_irqsave(&ctx->device->resource->req_lock, flags);
9414ce49266SLars Ellenberg 	list_del(&ctx->list);
9424ce49266SLars Ellenberg 	spin_unlock_irqrestore(&ctx->device->resource->req_lock, flags);
943b30ab791SAndreas Gruenbacher 	put_ldev(ctx->device);
944d1f3779bSPhilipp Reisner 	kfree(ctx);
945d1f3779bSPhilipp Reisner }
946d1f3779bSPhilipp Reisner 
94719f843aaSLars Ellenberg /* bv_page may be a copy, or may be the original */
9484246a0b6SChristoph Hellwig static void drbd_bm_endio(struct bio *bio)
949b411b363SPhilipp Reisner {
9504ce49266SLars Ellenberg 	struct drbd_bm_aio_ctx *ctx = bio->bi_private;
951b30ab791SAndreas Gruenbacher 	struct drbd_device *device = ctx->device;
952b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
95319f843aaSLars Ellenberg 	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
954b411b363SPhilipp Reisner 
9557648cdfeSLars Ellenberg 	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
9567648cdfeSLars Ellenberg 	    !bm_test_page_unchanged(b->bm_pages[idx]))
957d0180171SAndreas Gruenbacher 		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
95819f843aaSLars Ellenberg 
9594246a0b6SChristoph Hellwig 	if (bio->bi_error) {
96019f843aaSLars Ellenberg 		/* ctx error will hold the completed-last non-zero error code,
96119f843aaSLars Ellenberg 		 * in case error codes differ. */
9624246a0b6SChristoph Hellwig 		ctx->error = bio->bi_error;
96319f843aaSLars Ellenberg 		bm_set_page_io_err(b->bm_pages[idx]);
96419f843aaSLars Ellenberg 		/* Not identical to on disk version of it.
96519f843aaSLars Ellenberg 		 * Is BM_PAGE_IO_ERROR enough? */
96619f843aaSLars Ellenberg 		if (__ratelimit(&drbd_ratelimit_state))
967d0180171SAndreas Gruenbacher 			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
9684246a0b6SChristoph Hellwig 					bio->bi_error, idx);
96919f843aaSLars Ellenberg 	} else {
97019f843aaSLars Ellenberg 		bm_clear_page_io_err(b->bm_pages[idx]);
971d0180171SAndreas Gruenbacher 		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
972b411b363SPhilipp Reisner 	}
97319f843aaSLars Ellenberg 
974b30ab791SAndreas Gruenbacher 	bm_page_unlock_io(device, idx);
97519f843aaSLars Ellenberg 
97619f843aaSLars Ellenberg 	if (ctx->flags & BM_AIO_COPY_PAGES)
9774d95a10fSLars Ellenberg 		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
978b411b363SPhilipp Reisner 
979b411b363SPhilipp Reisner 	bio_put(bio);
98019f843aaSLars Ellenberg 
981d1f3779bSPhilipp Reisner 	if (atomic_dec_and_test(&ctx->in_flight)) {
9829e58c4daSPhilipp Reisner 		ctx->done = 1;
983b30ab791SAndreas Gruenbacher 		wake_up(&device->misc_wait);
9844ce49266SLars Ellenberg 		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
985d1f3779bSPhilipp Reisner 	}
986b411b363SPhilipp Reisner }
987b411b363SPhilipp Reisner 
9884ce49266SLars Ellenberg static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
989b411b363SPhilipp Reisner {
9909476f39dSLars Ellenberg 	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
991b30ab791SAndreas Gruenbacher 	struct drbd_device *device = ctx->device;
992b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
99319f843aaSLars Ellenberg 	struct page *page;
994b411b363SPhilipp Reisner 	unsigned int len;
9954ce49266SLars Ellenberg 	unsigned int rw = (ctx->flags & BM_AIO_READ) ? READ : WRITE;
99619f843aaSLars Ellenberg 
997b411b363SPhilipp Reisner 	sector_t on_disk_sector =
998b30ab791SAndreas Gruenbacher 		device->ldev->md.md_offset + device->ldev->md.bm_offset;
999b411b363SPhilipp Reisner 	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
1000b411b363SPhilipp Reisner 
1001b411b363SPhilipp Reisner 	/* this might happen with very small
100219f843aaSLars Ellenberg 	 * flexible external meta data device,
100319f843aaSLars Ellenberg 	 * or with PAGE_SIZE > 4k */
1004b411b363SPhilipp Reisner 	len = min_t(unsigned int, PAGE_SIZE,
1005b30ab791SAndreas Gruenbacher 		(drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);
1006b411b363SPhilipp Reisner 
100719f843aaSLars Ellenberg 	/* serialize IO on this page */
1008b30ab791SAndreas Gruenbacher 	bm_page_lock_io(device, page_nr);
100919f843aaSLars Ellenberg 	/* before memcpy and submit,
101019f843aaSLars Ellenberg 	 * so it can be redirtied any time */
101119f843aaSLars Ellenberg 	bm_set_page_unchanged(b->bm_pages[page_nr]);
101219f843aaSLars Ellenberg 
101319f843aaSLars Ellenberg 	if (ctx->flags & BM_AIO_COPY_PAGES) {
101471baba4bSMel Gorman 		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM);
1015f1d6a328SAkinobu Mita 		copy_highpage(page, b->bm_pages[page_nr]);
101619f843aaSLars Ellenberg 		bm_store_page_idx(page, page_nr);
101719f843aaSLars Ellenberg 	} else
101819f843aaSLars Ellenberg 		page = b->bm_pages[page_nr];
1019b30ab791SAndreas Gruenbacher 	bio->bi_bdev = device->ldev->md_bdev;
10204f024f37SKent Overstreet 	bio->bi_iter.bi_sector = on_disk_sector;
10214d95a10fSLars Ellenberg 	/* bio_add_page of a single page to an empty bio will always succeed,
10224d95a10fSLars Ellenberg 	 * according to api.  Do we want to assert that? */
102319f843aaSLars Ellenberg 	bio_add_page(bio, page, len, 0);
102419f843aaSLars Ellenberg 	bio->bi_private = ctx;
1025ed15b795SAndreas Gruenbacher 	bio->bi_end_io = drbd_bm_endio;
1026b411b363SPhilipp Reisner 
1027b30ab791SAndreas Gruenbacher 	if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
1028b411b363SPhilipp Reisner 		bio->bi_rw |= rw;
10294246a0b6SChristoph Hellwig 		bio_io_error(bio);
1030b411b363SPhilipp Reisner 	} else {
1031b411b363SPhilipp Reisner 		submit_bio(rw, bio);
10325a8b4242SLars Ellenberg 		/* this should not count as user activity and cause the
10335a8b4242SLars Ellenberg 		 * resync to throttle -- see drbd_rs_should_slow_down(). */
1034b30ab791SAndreas Gruenbacher 		atomic_add(len >> 9, &device->rs_sect_ev);
1035b411b363SPhilipp Reisner 	}
1036b411b363SPhilipp Reisner }
1037b411b363SPhilipp Reisner 
1038b411b363SPhilipp Reisner /*
1039b411b363SPhilipp Reisner  * bm_rw: read/write the whole bitmap from/to its on disk location.
1040b411b363SPhilipp Reisner  */
10414ce49266SLars Ellenberg static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
1042b411b363SPhilipp Reisner {
10434ce49266SLars Ellenberg 	struct drbd_bm_aio_ctx *ctx;
1044b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
10456850c442SLars Ellenberg 	int num_pages, i, count = 0;
1046b411b363SPhilipp Reisner 	unsigned long now;
1047b411b363SPhilipp Reisner 	char ppb[10];
1048b411b363SPhilipp Reisner 	int err = 0;
1049b411b363SPhilipp Reisner 
105019f843aaSLars Ellenberg 	/*
105119f843aaSLars Ellenberg 	 * We are protected against bitmap disappearing/resizing by holding an
105219f843aaSLars Ellenberg 	 * ldev reference (caller must have called get_ldev()).
105319f843aaSLars Ellenberg 	 * For read/write, we are protected against changes to the bitmap by
105419f843aaSLars Ellenberg 	 * the bitmap lock (see drbd_bitmap_io).
105519f843aaSLars Ellenberg 	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
105619f843aaSLars Ellenberg 	 * as we submit copies of pages anyways.
105719f843aaSLars Ellenberg 	 */
1058d1f3779bSPhilipp Reisner 
10594ce49266SLars Ellenberg 	ctx = kmalloc(sizeof(struct drbd_bm_aio_ctx), GFP_NOIO);
1060d1f3779bSPhilipp Reisner 	if (!ctx)
1061d1f3779bSPhilipp Reisner 		return -ENOMEM;
1062d1f3779bSPhilipp Reisner 
10634ce49266SLars Ellenberg 	*ctx = (struct drbd_bm_aio_ctx) {
1064b30ab791SAndreas Gruenbacher 		.device = device,
10654ce49266SLars Ellenberg 		.start_jif = jiffies,
1066d1f3779bSPhilipp Reisner 		.in_flight = ATOMIC_INIT(1),
10679e58c4daSPhilipp Reisner 		.done = 0,
10680e8488adSLars Ellenberg 		.flags = flags,
1069d1f3779bSPhilipp Reisner 		.error = 0,
1070d1f3779bSPhilipp Reisner 		.kref = { ATOMIC_INIT(2) },
1071d1f3779bSPhilipp Reisner 	};
1072d1f3779bSPhilipp Reisner 
10734ce49266SLars Ellenberg 	if (!get_ldev_if_state(device, D_ATTACHING)) {  /* put is in drbd_bm_aio_ctx_destroy() */
1074d0180171SAndreas Gruenbacher 		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
10759e58c4daSPhilipp Reisner 		kfree(ctx);
10769e58c4daSPhilipp Reisner 		return -ENODEV;
10779e58c4daSPhilipp Reisner 	}
10788fe39aacSPhilipp Reisner 	/* Here D_ATTACHING is sufficient since drbd_bm_read() is called only from
10798fe39aacSPhilipp Reisner 	   drbd_adm_attach(), after device->ldev was assigned. */
10809e58c4daSPhilipp Reisner 
10814ce49266SLars Ellenberg 	if (0 == (ctx->flags & ~BM_AIO_READ))
108220ceb2b2SLars Ellenberg 		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
1083b411b363SPhilipp Reisner 
10844ce49266SLars Ellenberg 	spin_lock_irq(&device->resource->req_lock);
10854ce49266SLars Ellenberg 	list_add_tail(&ctx->list, &device->pending_bitmap_io);
10864ce49266SLars Ellenberg 	spin_unlock_irq(&device->resource->req_lock);
10874ce49266SLars Ellenberg 
10886850c442SLars Ellenberg 	num_pages = b->bm_number_of_pages;
1089b411b363SPhilipp Reisner 
1090b411b363SPhilipp Reisner 	now = jiffies;
1091b411b363SPhilipp Reisner 
1092b411b363SPhilipp Reisner 	/* let the layers below us try to merge these bios... */
10936850c442SLars Ellenberg 	for (i = 0; i < num_pages; i++) {
109419f843aaSLars Ellenberg 		/* ignore completely unchanged pages */
109519f843aaSLars Ellenberg 		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
109619f843aaSLars Ellenberg 			break;
10974ce49266SLars Ellenberg 		if (!(flags & BM_AIO_READ)) {
109845dfffebSLars Ellenberg 			if ((flags & BM_AIO_WRITE_HINTED) &&
109945dfffebSLars Ellenberg 			    !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
110045dfffebSLars Ellenberg 				    &page_private(b->bm_pages[i])))
110145dfffebSLars Ellenberg 				continue;
1102fef45d29SPhilipp Reisner 
11034ce49266SLars Ellenberg 			if (!(flags & BM_AIO_WRITE_ALL_PAGES) &&
1104d1aa4d04SPhilipp Reisner 			    bm_test_page_unchanged(b->bm_pages[i])) {
1105d0180171SAndreas Gruenbacher 				dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
110619f843aaSLars Ellenberg 				continue;
110719f843aaSLars Ellenberg 			}
110819f843aaSLars Ellenberg 			/* during lazy writeout,
110919f843aaSLars Ellenberg 			 * ignore those pages not marked for lazy writeout. */
111019f843aaSLars Ellenberg 			if (lazy_writeout_upper_idx &&
111119f843aaSLars Ellenberg 			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
1112d0180171SAndreas Gruenbacher 				dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
111319f843aaSLars Ellenberg 				continue;
111419f843aaSLars Ellenberg 			}
111519f843aaSLars Ellenberg 		}
1116d1f3779bSPhilipp Reisner 		atomic_inc(&ctx->in_flight);
11174ce49266SLars Ellenberg 		bm_page_io_async(ctx, i);
111819f843aaSLars Ellenberg 		++count;
111919f843aaSLars Ellenberg 		cond_resched();
112019f843aaSLars Ellenberg 	}
1121b411b363SPhilipp Reisner 
1122725a97e4SLars Ellenberg 	/*
1123ed15b795SAndreas Gruenbacher 	 * We initialize ctx->in_flight to one to make sure drbd_bm_endio
11249e58c4daSPhilipp Reisner 	 * will not set ctx->done early, and decrement / test it here.  If there
1125725a97e4SLars Ellenberg 	 * are still some bios in flight, we need to wait for them here.
11269e58c4daSPhilipp Reisner 	 * If all IO is done already (or nothing had been submitted), there is
11279e58c4daSPhilipp Reisner 	 * no need to wait.  Still, we need to put the kref associated with the
11289e58c4daSPhilipp Reisner 	 * "in_flight reached zero, all done" event.
1129725a97e4SLars Ellenberg 	 */
1130d1f3779bSPhilipp Reisner 	if (!atomic_dec_and_test(&ctx->in_flight))
1131b30ab791SAndreas Gruenbacher 		wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
11329e58c4daSPhilipp Reisner 	else
11334ce49266SLars Ellenberg 		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
1134d1f3779bSPhilipp Reisner 
1135c9d963a4SLars Ellenberg 	/* summary for global bitmap IO */
1136c9d963a4SLars Ellenberg 	if (flags == 0)
1137d0180171SAndreas Gruenbacher 		drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n",
11384ce49266SLars Ellenberg 			 (flags & BM_AIO_READ) ? "READ" : "WRITE",
113919f843aaSLars Ellenberg 			 count, jiffies - now);
1140b411b363SPhilipp Reisner 
1141d1f3779bSPhilipp Reisner 	if (ctx->error) {
1142d0180171SAndreas Gruenbacher 		drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
1143b30ab791SAndreas Gruenbacher 		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
1144d1f3779bSPhilipp Reisner 		err = -EIO; /* ctx->error ? */
1145b411b363SPhilipp Reisner 	}
1146b411b363SPhilipp Reisner 
11479e58c4daSPhilipp Reisner 	if (atomic_read(&ctx->in_flight))
114844edfb0dSLars Ellenberg 		err = -EIO; /* Disk timeout/force-detach during IO... */
11499e58c4daSPhilipp Reisner 
1150b411b363SPhilipp Reisner 	now = jiffies;
11514ce49266SLars Ellenberg 	if (flags & BM_AIO_READ) {
115295a0f10cSLars Ellenberg 		b->bm_set = bm_count_bits(b);
1153d0180171SAndreas Gruenbacher 		drbd_info(device, "recounting of set bits took additional %lu jiffies\n",
1154b411b363SPhilipp Reisner 		     jiffies - now);
1155b411b363SPhilipp Reisner 	}
1156b411b363SPhilipp Reisner 	now = b->bm_set;
1157b411b363SPhilipp Reisner 
11584ce49266SLars Ellenberg 	if ((flags & ~BM_AIO_READ) == 0)
1159d0180171SAndreas Gruenbacher 		drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
1160b411b363SPhilipp Reisner 		     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
1161b411b363SPhilipp Reisner 
11624ce49266SLars Ellenberg 	kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
1163b411b363SPhilipp Reisner 	return err;
1164b411b363SPhilipp Reisner }
1165b411b363SPhilipp Reisner 
/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @device:	DRBD device.
 *
 * Return: 0 on success, -EIO on meta-data IO error (see bm_rw()).
 */
int drbd_bm_read(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_READ, 0);
}
1174b411b363SPhilipp Reisner 
/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 *
 * Return: 0 on success, -EIO on meta-data IO error (see bm_rw()).
 */
int drbd_bm_write(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, 0, 0);
}
1185b411b363SPhilipp Reisner 
/**
 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will write all pages, even those bm_rw() would otherwise skip as
 * unchanged (BM_AIO_WRITE_ALL_PAGES).
 *
 * Return: 0 on success, -EIO on meta-data IO error (see bm_rw()).
 */
int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0);
}
1196d1aa4d04SPhilipp Reisner 
1197d1aa4d04SPhilipp Reisner /**
1198c7a58db4SLars Ellenberg  * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
1199c7a58db4SLars Ellenberg  * @device:	DRBD device.
1200c7a58db4SLars Ellenberg  * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
1201c7a58db4SLars Ellenberg  */
1202c7a58db4SLars Ellenberg int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local)
1203c7a58db4SLars Ellenberg {
1204c7a58db4SLars Ellenberg 	return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx);
1205c7a58db4SLars Ellenberg }
1206c7a58db4SLars Ellenberg 
/**
 * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 * In contrast to drbd_bm_write(), this will copy the bitmap pages
 * to temporary writeout pages. It is intended to trigger a full write-out
 * while still allowing the bitmap to change, for example if a resync or online
 * verify is aborted due to a failed peer disk, while local IO continues, or
 * pending resync acks are still being processed.
 *
 * Return: 0 on success, -EIO on meta-data IO error (see bm_rw()).
 */
int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_COPY_PAGES, 0);
}
122219f843aaSLars Ellenberg 
/**
 * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
 * @device:	DRBD device.
 *
 * Only pages whose BM_PAGE_HINT_WRITEOUT bit is set are submitted (and the
 * hint is consumed, see bm_rw()); writeout goes through temporary page
 * copies (BM_AIO_COPY_PAGES).
 */
int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}
123119f843aaSLars Ellenberg 
/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 */
/* Find the first set (or, with find_zero_bit, clear) bit at or after bm_fo,
 * scanning one bitmap page at a time.  Returns DRBD_END_OF_BITMAP if no such
 * bit exists below b->bm_bits.  Callers either hold bm_lock (bm_find_next)
 * or have taken drbd_bm_lock() (_drbd_bm_find_next*). */
static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo,
	const int find_zero_bit)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;


	if (bm_fo > b->bm_bits) {
		drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));

			/* search only within this page, starting at the
			 * in-page offset of bm_fo */
			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				/* a hit at or beyond bm_bits is outside the
				 * valid bitmap: report "none found" */
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			/* nothing in this page; continue with the next one */
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}
1279b411b363SPhilipp Reisner 
1280b30ab791SAndreas Gruenbacher static unsigned long bm_find_next(struct drbd_device *device,
1281b411b363SPhilipp Reisner 	unsigned long bm_fo, const int find_zero_bit)
1282b411b363SPhilipp Reisner {
1283b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
12844b0715f0SLars Ellenberg 	unsigned long i = DRBD_END_OF_BITMAP;
1285b411b363SPhilipp Reisner 
1286841ce241SAndreas Gruenbacher 	if (!expect(b))
1287841ce241SAndreas Gruenbacher 		return i;
1288841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1289841ce241SAndreas Gruenbacher 		return i;
1290b411b363SPhilipp Reisner 
1291b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
129220ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1293b30ab791SAndreas Gruenbacher 		bm_print_lock_info(device);
1294b411b363SPhilipp Reisner 
1295b30ab791SAndreas Gruenbacher 	i = __bm_find_next(device, bm_fo, find_zero_bit);
1296b411b363SPhilipp Reisner 
1297b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
1298b411b363SPhilipp Reisner 	return i;
1299b411b363SPhilipp Reisner }
1300b411b363SPhilipp Reisner 
/* Return the bit number of the first set bit at or after bm_fo,
 * or DRBD_END_OF_BITMAP if there is none.  Takes bm_lock. */
unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(device, bm_fo, 0);
}
1305b411b363SPhilipp Reisner 
#if 0
/* not yet needed for anything. */
/* Locked search for the first clear bit at or after bm_fo;
 * dead code, kept for symmetry with drbd_bm_find_next(). */
unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(device, bm_fo, 1);
}
#endif
1313b411b363SPhilipp Reisner 
/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
/* Return bit number of first set bit at or after bm_fo,
 * or DRBD_END_OF_BITMAP if none. */
unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	return __bm_find_next(device, bm_fo, 0);
}
1321b411b363SPhilipp Reisner 
/* Like _drbd_bm_find_next(), but searches for the first *clear* bit.
 * Does not take bm_lock; you must take drbd_bm_lock() first. */
unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	return __bm_find_next(device, bm_fo, 1);
}
1327b411b363SPhilipp Reisner 
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
	unsigned long e, int val)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;	/* impossible index: forces a map on first iteration */
	int c = 0;		/* net bit change within the currently mapped page */
	int changed_total = 0;

	if (e >= b->bm_bits) {
		drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		/* clamp to the last valid bit and carry on */
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr);
			/* account for the page we are leaving:
			 * net clear (c < 0) -> mark for lazy writeout,
			 * net set (c > 0) -> mark as needing writeout */
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr);
	/* flush the accounting for the last page touched
	 * (c is still 0 if the loop body never ran) */
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}
1378b411b363SPhilipp Reisner 
1379b411b363SPhilipp Reisner /* returns number of bits actually changed.
1380b411b363SPhilipp Reisner  * for val != 0, we change 0 -> 1, return code positive
1381b411b363SPhilipp Reisner  * for val == 0, we change 1 -> 0, return code negative
1382b411b363SPhilipp Reisner  * wants bitnr, not sector */
1383b30ab791SAndreas Gruenbacher static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
1384b411b363SPhilipp Reisner 	const unsigned long e, int val)
1385b411b363SPhilipp Reisner {
1386b411b363SPhilipp Reisner 	unsigned long flags;
1387b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
1388b411b363SPhilipp Reisner 	int c = 0;
1389b411b363SPhilipp Reisner 
1390841ce241SAndreas Gruenbacher 	if (!expect(b))
1391841ce241SAndreas Gruenbacher 		return 1;
1392841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1393841ce241SAndreas Gruenbacher 		return 0;
1394b411b363SPhilipp Reisner 
1395b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
139620ceb2b2SLars Ellenberg 	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
1397b30ab791SAndreas Gruenbacher 		bm_print_lock_info(device);
1398b411b363SPhilipp Reisner 
1399b30ab791SAndreas Gruenbacher 	c = __bm_change_bits_to(device, s, e, val);
1400b411b363SPhilipp Reisner 
1401b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1402b411b363SPhilipp Reisner 	return c;
1403b411b363SPhilipp Reisner }
1404b411b363SPhilipp Reisner 
/* returns number of bits changed 0 -> 1 */
/* Set bits s..e inclusive; takes bm_lock. */
int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(device, s, e, 1);
}
1410b411b363SPhilipp Reisner 
/* returns number of bits changed 1 -> 0 */
/* Clear bits s..e inclusive; takes bm_lock.  bm_change_bits_to() returns
 * a negative count for clears, hence the negation. */
int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(device, s, e, 0);
}
1416b411b363SPhilipp Reisner 
1417b411b363SPhilipp Reisner /* sets all bits in full words,
1418b411b363SPhilipp Reisner  * from first_word up to, but not including, last_word */
1419b411b363SPhilipp Reisner static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
1420b411b363SPhilipp Reisner 		int page_nr, int first_word, int last_word)
1421b411b363SPhilipp Reisner {
1422b411b363SPhilipp Reisner 	int i;
1423b411b363SPhilipp Reisner 	int bits;
142422d81140SLars Ellenberg 	int changed = 0;
1425cfd8005cSCong Wang 	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
1426*5fb3bc4dSLars Ellenberg 
1427*5fb3bc4dSLars Ellenberg 	/* I think it is more cache line friendly to hweight_long then set to ~0UL,
1428*5fb3bc4dSLars Ellenberg 	 * than to first bitmap_weight() all words, then bitmap_fill() all words */
1429b411b363SPhilipp Reisner 	for (i = first_word; i < last_word; i++) {
1430b411b363SPhilipp Reisner 		bits = hweight_long(paddr[i]);
1431b411b363SPhilipp Reisner 		paddr[i] = ~0UL;
143222d81140SLars Ellenberg 		changed += BITS_PER_LONG - bits;
1433b411b363SPhilipp Reisner 	}
1434cfd8005cSCong Wang 	kunmap_atomic(paddr);
143522d81140SLars Ellenberg 	if (changed) {
143622d81140SLars Ellenberg 		/* We only need lazy writeout, the information is still in the
143722d81140SLars Ellenberg 		 * remote bitmap as well, and is reconstructed during the next
143822d81140SLars Ellenberg 		 * bitmap exchange, if lost locally due to a crash. */
143922d81140SLars Ellenberg 		bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
144022d81140SLars Ellenberg 		b->bm_set += changed;
144122d81140SLars Ellenberg 	}
1442b411b363SPhilipp Reisner }
1443b411b363SPhilipp Reisner 
/* Same thing as drbd_bm_set_bits,
 * but more efficient for a large bit range.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	struct drbd_bitmap *b = device->bitmap;
	unsigned long sl = ALIGN(s,BITS_PER_LONG);	/* first long-aligned bit >= s */
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);	/* first long-aligned bit > e */
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(device, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	spin_lock_irq(&b->bm_lock);

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(device, s, sl-1, 1);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
		/* briefly drop the lock between pages so this potentially
		 * long-running bulk set does not keep IRQs disabled and can
		 * reschedule if needed */
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}
	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);

	/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
	 * ==> e = 32767, el = 32768, last_page = 2,
	 * and now last_word = 0.
	 * We do not want to touch last_page in this case,
	 * as we did not allocate it, it is not present in bitmap->bm_pages.
	 */
	if (last_word)
		bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(device, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}
1521b411b363SPhilipp Reisner 
/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	int i;

	/* defensive: without a bitmap (or pages), report "bit not set" */
	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	if (bitnr < b->bm_bits) {
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		/* exactly one past the end: expected sentinel, no error log */
		i = -1;
	} else { /* (bitnr > b->bm_bits) */
		drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}
1558b411b363SPhilipp Reisner 
/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int page_nr = -1U;	/* impossible index: forces a map on first iteration */
	int c = 0;

	/* If this is called without a bitmap, that is a bug.  But just to be
	 * robust in case we screwed up elsewhere, in that case pretend there
	 * was one dirty bit in the requested area, so we won't try to do a
	 * local read there (no bitmap probably implies no disk) */
	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
		/* keep the current page mapped across consecutive bits;
		 * remap only when the bit falls into a different page */
		if (page_nr != idx) {
			page_nr = idx;
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_pidx(b, idx);
		}
		if (expect(bitnr < b->bm_bits))
			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		else
			drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}
1599b411b363SPhilipp Reisner 
1600b411b363SPhilipp Reisner 
/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * but the general usage is that this is only use during a cstate when bits are
 * only cleared, not set, and typically only care for the case when the return
 * value is zero, or we already "locked" this "bitmap extent" by other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 *
 * TODO
 * I think since we use it like a reference count, we should use the real
 * reference count of some bitmap extent element from some lru instead...
 *
 */
int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
{
	struct drbd_bitmap *b = device->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	/* defensive: no bitmap / no pages -> weight 0 */
	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);

	/* word range [S2W(enr), S2W(enr+1)) is one bitmap extent
	 * (one 512-byte sector worth of bitmap, see comment above),
	 * clipped to the actual bitmap size */
	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		count += bitmap_weight(bm, n * BITS_PER_LONG);
		bm_unmap(p_addr);
	} else {
		drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}
1646