xref: /openbmc/linux/drivers/block/drbd/drbd_bitmap.c (revision 8fe39aac0578cbb0abf27e1be70ff581e0c1d836)
1b411b363SPhilipp Reisner /*
2b411b363SPhilipp Reisner    drbd_bitmap.c
3b411b363SPhilipp Reisner 
4b411b363SPhilipp Reisner    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5b411b363SPhilipp Reisner 
6b411b363SPhilipp Reisner    Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
7b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9b411b363SPhilipp Reisner 
10b411b363SPhilipp Reisner    drbd is free software; you can redistribute it and/or modify
11b411b363SPhilipp Reisner    it under the terms of the GNU General Public License as published by
12b411b363SPhilipp Reisner    the Free Software Foundation; either version 2, or (at your option)
13b411b363SPhilipp Reisner    any later version.
14b411b363SPhilipp Reisner 
15b411b363SPhilipp Reisner    drbd is distributed in the hope that it will be useful,
16b411b363SPhilipp Reisner    but WITHOUT ANY WARRANTY; without even the implied warranty of
17b411b363SPhilipp Reisner    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18b411b363SPhilipp Reisner    GNU General Public License for more details.
19b411b363SPhilipp Reisner 
20b411b363SPhilipp Reisner    You should have received a copy of the GNU General Public License
21b411b363SPhilipp Reisner    along with drbd; see the file COPYING.  If not, write to
22b411b363SPhilipp Reisner    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23b411b363SPhilipp Reisner  */
24b411b363SPhilipp Reisner 
25b411b363SPhilipp Reisner #include <linux/bitops.h>
26b411b363SPhilipp Reisner #include <linux/vmalloc.h>
27b411b363SPhilipp Reisner #include <linux/string.h>
28b411b363SPhilipp Reisner #include <linux/drbd.h>
295a0e3ad6STejun Heo #include <linux/slab.h>
30b411b363SPhilipp Reisner #include <asm/kmap_types.h>
31f0ff1357SStephen Rothwell 
32b411b363SPhilipp Reisner #include "drbd_int.h"
33b411b363SPhilipp Reisner 
3495a0f10cSLars Ellenberg 
35b411b363SPhilipp Reisner /* OPAQUE outside this file!
36b411b363SPhilipp Reisner  * interface defined in drbd_int.h
37b411b363SPhilipp Reisner 
38b411b363SPhilipp Reisner  * convention:
39b411b363SPhilipp Reisner  * function name drbd_bm_... => used elsewhere, "public".
40b411b363SPhilipp Reisner  * function name      bm_... => internal to implementation, "private".
414b0715f0SLars Ellenberg  */
42b411b363SPhilipp Reisner 
434b0715f0SLars Ellenberg 
444b0715f0SLars Ellenberg /*
454b0715f0SLars Ellenberg  * LIMITATIONS:
464b0715f0SLars Ellenberg  * We want to support >= peta byte of backend storage, while for now still using
474b0715f0SLars Ellenberg  * a granularity of one bit per 4KiB of storage.
484b0715f0SLars Ellenberg  * 1 << 50		bytes backend storage (1 PiB)
494b0715f0SLars Ellenberg  * 1 << (50 - 12)	bits needed
504b0715f0SLars Ellenberg  *	38 --> we need u64 to index and count bits
514b0715f0SLars Ellenberg  * 1 << (38 - 3)	bitmap bytes needed
524b0715f0SLars Ellenberg  *	35 --> we still need u64 to index and count bytes
534b0715f0SLars Ellenberg  *			(that's 32 GiB of bitmap for 1 PiB storage)
544b0715f0SLars Ellenberg  * 1 << (35 - 2)	32bit longs needed
554b0715f0SLars Ellenberg  *	33 --> we'd even need u64 to index and count 32bit long words.
564b0715f0SLars Ellenberg  * 1 << (35 - 3)	64bit longs needed
574b0715f0SLars Ellenberg  *	32 --> we could get away with a 32bit unsigned int to index and count
584b0715f0SLars Ellenberg  *	64bit long words, but I rather stay with unsigned long for now.
594b0715f0SLars Ellenberg  *	We probably should neither count nor point to bytes or long words
604b0715f0SLars Ellenberg  *	directly, but either by bitnumber, or by page index and offset.
614b0715f0SLars Ellenberg  * 1 << (35 - 12)
624b0715f0SLars Ellenberg  *	22 --> we need that much 4KiB pages of bitmap.
634b0715f0SLars Ellenberg  *	1 << (22 + 3) --> on a 64bit arch,
644b0715f0SLars Ellenberg  *	we need 32 MiB to store the array of page pointers.
654b0715f0SLars Ellenberg  *
664b0715f0SLars Ellenberg  * Because I'm lazy, and because the resulting patch was too large, too ugly
674b0715f0SLars Ellenberg  * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
684b0715f0SLars Ellenberg  * (1 << 32) bits * 4k storage.
694b0715f0SLars Ellenberg  *
704b0715f0SLars Ellenberg 
714b0715f0SLars Ellenberg  * bitmap storage and IO:
724b0715f0SLars Ellenberg  *	Bitmap is stored little endian on disk, and is kept little endian in
734b0715f0SLars Ellenberg  *	core memory. Currently we still hold the full bitmap in core as long
744b0715f0SLars Ellenberg  *	as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
754b0715f0SLars Ellenberg  *	seems excessive.
764b0715f0SLars Ellenberg  *
7724c4830cSBart Van Assche  *	We plan to reduce the amount of in-core bitmap pages by paging them in
784b0715f0SLars Ellenberg  *	and out against their on-disk location as necessary, but need to make
794b0715f0SLars Ellenberg  *	sure we don't cause too much meta data IO, and must not deadlock in
804b0715f0SLars Ellenberg  *	tight memory situations. This needs some more work.
81b411b363SPhilipp Reisner  */
82b411b363SPhilipp Reisner 
83b411b363SPhilipp Reisner /*
84b411b363SPhilipp Reisner  * NOTE
85b411b363SPhilipp Reisner  *  Access to the *bm_pages is protected by bm_lock.
86b411b363SPhilipp Reisner  *  It is safe to read the other members within the lock.
87b411b363SPhilipp Reisner  *
88b411b363SPhilipp Reisner  *  drbd_bm_set_bits is called from bio_endio callbacks,
89b411b363SPhilipp Reisner  *  We may be called with irq already disabled,
90b411b363SPhilipp Reisner  *  so we need spin_lock_irqsave().
91b411b363SPhilipp Reisner  *  And we need the kmap_atomic.
92b411b363SPhilipp Reisner  */
/*
 * In-core representation of the on-disk bitmap.
 * Opaque outside this file; see the locking NOTE above.
 */
struct drbd_bitmap {
	struct page **bm_pages;     /* array of pages backing the bitmap */
	spinlock_t bm_lock;         /* protects *bm_pages (see NOTE above) */

	/* see LIMITATIONS: above */

	unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
	unsigned long bm_bits;      /* total number of bits in the bitmap */
	size_t   bm_words;          /* number of longs covering bm_bits */
	size_t   bm_number_of_pages; /* number of entries in bm_pages[] */
	sector_t bm_dev_capacity;   /* device size the bitmap was sized for */
	struct mutex bm_change; /* serializes resize operations */

	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */

	enum bm_flag bm_flags;      /* BM_LOCKED_* / BM_P_VMALLOCED state */

	/* debugging aid, in case we are still racy somewhere */
	char          *bm_why;      /* reason string given to drbd_bm_lock() */
	struct task_struct *bm_task; /* task that currently holds bm_change */
};
114b411b363SPhilipp Reisner 
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
/* Rate-limited complaint: the bitmap is being accessed while locked.
 * Reports the current task, the calling function, and who locked it why. */
static void __bm_print_lock_info(struct drbd_device *device, const char *func)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n",
		 current->comm, task_pid_nr(current),
		 func, b->bm_why ?: "?",
		 b->bm_task->comm, task_pid_nr(b->bm_task));
}
126b411b363SPhilipp Reisner 
/*
 * drbd_bm_lock() - serialize "big" bitmap operations
 * @device:	DRBD device.
 * @why:	human readable reason; kept around for lock-debug output.
 * @flags:	which kinds of bitmap modifications remain allowed while
 *		locked (subset of BM_LOCKED_MASK).
 *
 * Takes b->bm_change.  If it is already held, loudly report the holder
 * (that indicates a locking bug elsewhere) and then block until free.
 */
void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
{
	struct drbd_bitmap *b = device->bitmap;
	int trylock_failed;

	if (!b) {
		drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n");
		return;
	}

	/* try non-blocking first so we can name the contender before sleeping */
	trylock_failed = !mutex_trylock(&b->bm_change);

	if (trylock_failed) {
		drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n",
			  current->comm, task_pid_nr(current),
			  why, b->bm_why ?: "?",
			  b->bm_task->comm, task_pid_nr(b->bm_task));
		mutex_lock(&b->bm_change);
	}
	if (BM_LOCKED_MASK & b->bm_flags)
		drbd_err(device, "FIXME bitmap already locked in bm_lock\n");
	b->bm_flags |= flags & BM_LOCKED_MASK;

	/* remember holder and reason for the debug helpers above */
	b->bm_why  = why;
	b->bm_task = current;
}
153b411b363SPhilipp Reisner 
154b30ab791SAndreas Gruenbacher void drbd_bm_unlock(struct drbd_device *device)
155b411b363SPhilipp Reisner {
156b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
157b411b363SPhilipp Reisner 	if (!b) {
158d0180171SAndreas Gruenbacher 		drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n");
159b411b363SPhilipp Reisner 		return;
160b411b363SPhilipp Reisner 	}
161b411b363SPhilipp Reisner 
162b30ab791SAndreas Gruenbacher 	if (!(BM_LOCKED_MASK & device->bitmap->bm_flags))
163d0180171SAndreas Gruenbacher 		drbd_err(device, "FIXME bitmap not locked in bm_unlock\n");
164b411b363SPhilipp Reisner 
16520ceb2b2SLars Ellenberg 	b->bm_flags &= ~BM_LOCKED_MASK;
166b411b363SPhilipp Reisner 	b->bm_why  = NULL;
167b411b363SPhilipp Reisner 	b->bm_task = NULL;
1688a03ae2aSThomas Gleixner 	mutex_unlock(&b->bm_change);
169b411b363SPhilipp Reisner }
170b411b363SPhilipp Reisner 
/* we store some "meta" info about our pages in page->private */
/* at a granularity of 4k storage per bitmap bit:
 * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
 *  1<<38 bits,
 *  1<<23 4k bitmap pages.
 * Use 24 bits as page index, covers 2 peta byte storage
 * at a granularity of 4k per bit.
 * Used to report the failed page idx on io error from the endio handlers.
 */
#define BM_PAGE_IDX_MASK	((1UL<<24)-1)
/* this page is currently read in, or written back */
#define BM_PAGE_IO_LOCK		31
/* if there has been an IO error for this page */
#define BM_PAGE_IO_ERROR	30
/* this is to be able to intelligently skip disk IO,
 * set if bits have been set since last IO. */
#define BM_PAGE_NEED_WRITEOUT	29
/* to mark for lazy writeout once syncer cleared all clearable bits,
 * set if bits have been cleared since last IO. */
#define BM_PAGE_LAZY_WRITEOUT	28
/* pages marked with this "HINT" will be considered for writeout
 * on activity log transactions */
#define BM_PAGE_HINT_WRITEOUT	27

/* store_page_idx uses non-atomic assignment. It is only used directly after
 * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
 * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
 * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
 * requires it all to be atomic as well. */
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
	/* index must fit in the low 24 bits; the upper bits are flag bits */
	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
	set_page_private(page, idx);
}
20519f843aaSLars Ellenberg 
20619f843aaSLars Ellenberg static unsigned long bm_page_to_idx(struct page *page)
20719f843aaSLars Ellenberg {
20819f843aaSLars Ellenberg 	return page_private(page) & BM_PAGE_IDX_MASK;
20919f843aaSLars Ellenberg }
21019f843aaSLars Ellenberg 
/* As is very unlikely that the same page is under IO from more than one
 * context, we can get away with a bit per page and one wait queue per bitmap.
 */
/* Sleep on bm_io_wait until we win the BM_PAGE_IO_LOCK bit of this page. */
static void bm_page_lock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}
22019f843aaSLars Ellenberg 
/* Drop the per-page IO lock (with release semantics) and wake any
 * waiter sleeping in bm_page_lock_io(). */
static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
	wake_up(&device->bitmap->bm_io_wait);
}
22819f843aaSLars Ellenberg 
/* set _before_ submit_io, so it may be reset due to being changed
 * while this page is in flight... will get submitted later again */
static void bm_set_page_unchanged(struct page *page)
{
	/* use cmpxchg? */
	/* two separate atomic clears; a concurrent setter in between simply
	 * re-dirties the page, which is the intended behavior */
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
23719f843aaSLars Ellenberg 
/* Flag this page as dirty: bits were set since the last writeout. */
static void bm_set_page_need_writeout(struct page *page)
{
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
24219f843aaSLars Ellenberg 
/**
 * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
 * @device:	DRBD device.
 * @page_nr:	the bitmap page to mark with the "hint" flag
 *
 * From within an activity log transaction, we mark a few pages with these
 * hints, then call drbd_bm_write_hinted(), which will only write out changed
 * pages which are flagged with this mark.
 */
void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
{
	struct page *page;
	/* NOTE(review): page_nr is int vs. size_t bm_number_of_pages; on usual
	 * configs a negative page_nr converts to a huge value and is rejected
	 * by this check too — confirm for all supported arches */
	if (page_nr >= device->bitmap->bm_number_of_pages) {
		drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
			 page_nr, (int)device->bitmap->bm_number_of_pages);
		return;
	}
	page = device->bitmap->bm_pages[page_nr];
	set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
}
26345dfffebSLars Ellenberg 
/* Return 1 iff neither NEED_WRITEOUT nor LAZY_WRITEOUT is set for this page.
 * The volatile-qualified pointer forces a fresh load of page->private. */
static int bm_test_page_unchanged(struct page *page)
{
	volatile const unsigned long *addr = &page_private(page);
	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}
26919f843aaSLars Ellenberg 
/* Record an IO error for this page (set from the endio handlers). */
static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
27419f843aaSLars Ellenberg 
/* Forget a previously recorded IO error for this page. */
static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
27919f843aaSLars Ellenberg 
/* Mark page for lazy writeout: bits were cleared since the last IO. */
static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
28419f843aaSLars Ellenberg 
/* Return non-zero if this page is flagged for lazy writeout. */
static int bm_test_page_lazy_writeout(struct page *page)
{
	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
28919f843aaSLars Ellenberg 
/* on a 32bit box, this would allow for exactly (2<<38) bits. */
/* Map a long-word number to the index of the bitmap page containing it. */
static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
29819f843aaSLars Ellenberg 
/* Map a bit number to the index of the bitmap page containing it. */
static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
30695a0f10cSLars Ellenberg 
/* Map bitmap page @idx into low memory; must be paired with __bm_unmap(). */
static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	struct page *page = b->bm_pages[idx];
	return (unsigned long *) kmap_atomic(page);
}
31295a0f10cSLars Ellenberg 
/* Convenience wrapper around __bm_map_pidx(); pair with bm_unmap(). */
static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx);
}
31795a0f10cSLars Ellenberg 
/* Undo the atomic kmap of __bm_map_pidx(). */
static void __bm_unmap(unsigned long *p_addr)
{
	kunmap_atomic(p_addr);
}
322b411b363SPhilipp Reisner 
/* Convenience wrapper around __bm_unmap(); pairs with bm_map_pidx().
 * Note: dropped the bogus "return" of a void expression — a C99 6.8.6.4
 * constraint violation that GCC only tolerates as an extension. */
static void bm_unmap(unsigned long *p_addr)
{
	__bm_unmap(p_addr);
}
327b411b363SPhilipp Reisner 
/* long word offset of _bitmap_ sector */
#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
/* word offset from start of bitmap to word number _in_page_
 * modulo longs per page
#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
 hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
 so do it explicitly:
 */
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))

/* Long words per page */
#define LWPP (PAGE_SIZE/sizeof(long))
340b411b363SPhilipp Reisner 
341b411b363SPhilipp Reisner /*
342b411b363SPhilipp Reisner  * actually most functions herein should take a struct drbd_bitmap*, not a
343b30ab791SAndreas Gruenbacher  * struct drbd_device*, but for the debug macros I like to have the device around
344b411b363SPhilipp Reisner  * to be able to report device specific.
345b411b363SPhilipp Reisner  */
346b411b363SPhilipp Reisner 
34719f843aaSLars Ellenberg 
348b411b363SPhilipp Reisner static void bm_free_pages(struct page **pages, unsigned long number)
349b411b363SPhilipp Reisner {
350b411b363SPhilipp Reisner 	unsigned long i;
351b411b363SPhilipp Reisner 	if (!pages)
352b411b363SPhilipp Reisner 		return;
353b411b363SPhilipp Reisner 
354b411b363SPhilipp Reisner 	for (i = 0; i < number; i++) {
355b411b363SPhilipp Reisner 		if (!pages[i]) {
356b411b363SPhilipp Reisner 			printk(KERN_ALERT "drbd: bm_free_pages tried to free "
357b411b363SPhilipp Reisner 					  "a NULL pointer; i=%lu n=%lu\n",
358b411b363SPhilipp Reisner 					  i, number);
359b411b363SPhilipp Reisner 			continue;
360b411b363SPhilipp Reisner 		}
361b411b363SPhilipp Reisner 		__free_page(pages[i]);
362b411b363SPhilipp Reisner 		pages[i] = NULL;
363b411b363SPhilipp Reisner 	}
364b411b363SPhilipp Reisner }
365b411b363SPhilipp Reisner 
/* Free @ptr with the matching allocator: vfree() if @v says it came from
 * __vmalloc(), kfree() otherwise. */
static void bm_vk_free(void *ptr, int v)
{
	if (!v) {
		kfree(ptr);
		return;
	}
	vfree(ptr);
}
373b411b363SPhilipp Reisner 
/*
 * "have" and "want" are NUMBER OF PAGES.
 *
 * Grow or shrink the page pointer array to @want entries, keeping the
 * existing page pointers.  Returns the new array (== old array if the size
 * is unchanged), or NULL on allocation failure.  Surplus pages of a
 * shrinking bitmap are deliberately NOT freed here; the caller does that
 * outside the spinlock (see comment in the shrink branch).
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes, vmalloced = 0;
	unsigned long have = b->bm_number_of_pages;

	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_NOIO, as this is called while drbd IO is "suspended",
	 * and during resize or attach on diskless Primary,
	 * we must not block on IO to ourselves.
	 * Context is receiver thread or dmsetup. */
	bytes = sizeof(struct page *)*want;
	new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
	if (!new_pages) {
		new_pages = __vmalloc(bytes,
				GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
				PAGE_KERNEL);
		if (!new_pages)
			return NULL;
		vmalloced = 1;
	}

	if (want >= have) {
		/* growing: reuse old pages, allocate only the missing tail */
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
			if (!page) {
				/* roll back: free only the pages WE allocated */
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages, vmalloced);
				return NULL;
			}
			/* we want to know which page it is
			 * from the endio handlers */
			bm_store_page_idx(page, i);
			new_pages[i] = page;
		}
	} else {
		/* shrinking: copy the surviving prefix only */
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	/* remember which allocator the array came from, for bm_vk_free() */
	if (vmalloced)
		b->bm_flags |= BM_P_VMALLOCED;
	else
		b->bm_flags &= ~BM_P_VMALLOCED;

	return new_pages;
}
436b411b363SPhilipp Reisner 
/*
 * called on driver init only. TODO call when a device is created.
 * allocates the drbd_bitmap, and stores it in device->bitmap.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
int drbd_bm_init(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	WARN_ON(b != NULL);	/* must not be called twice for a device */
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	spin_lock_init(&b->bm_lock);
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	device->bitmap = b;

	return 0;
}
456b411b363SPhilipp Reisner 
/* Return the device capacity (in sectors) the bitmap was sized for,
 * or 0 (after complaining via expect()) if there is no bitmap. */
sector_t drbd_bm_capacity(struct drbd_device *device)
{
	if (!expect(device->bitmap))
		return 0;
	return device->bitmap->bm_dev_capacity;
}
463b411b363SPhilipp Reisner 
464b411b363SPhilipp Reisner /* called on driver unload. TODO: call when a device is destroyed.
465b411b363SPhilipp Reisner  */
466b30ab791SAndreas Gruenbacher void drbd_bm_cleanup(struct drbd_device *device)
467b411b363SPhilipp Reisner {
468b30ab791SAndreas Gruenbacher 	if (!expect(device->bitmap))
469841ce241SAndreas Gruenbacher 		return;
470b30ab791SAndreas Gruenbacher 	bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
471b30ab791SAndreas Gruenbacher 	bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags));
472b30ab791SAndreas Gruenbacher 	kfree(device->bitmap);
473b30ab791SAndreas Gruenbacher 	device->bitmap = NULL;
474b411b363SPhilipp Reisner }
475b411b363SPhilipp Reisner 
/*
 * since (b->bm_bits % BITS_PER_LONG) != 0,
 * this masks out the remaining bits.
 * Returns the number of bits cleared.
 */
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	/* surplus bits can only live in the very last page */
	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}
520b411b363SPhilipp Reisner 
/* Inverse of bm_clear_surplus(): set all bits beyond bm_bits in the last
 * word (and, on 32bit, the 64bit-alignment padding long). */
static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	/* surplus bits can only live in the very last page */
	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}
553b411b363SPhilipp Reisner 
/* you better not modify the bitmap while this is running,
 * or its results will be stale */
/* Count all set bits in the bitmap.  Side effect: masks surplus bits of
 * the last word (and zeroes the 32bit padding long, if any) so they never
 * contribute to the count — i.e. it also performs bm_clear_surplus()'s job
 * on the final word. */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
	int idx, i, last_word;

	/* all but last page */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx);
		for (i = 0; i < LWPP; i++)
			bits += hweight_long(p_addr[i]);
		__bm_unmap(p_addr);
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx);
	for (i = 0; i < last_word; i++)
		bits += hweight_long(p_addr[i]);
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr);
	return bits;
}
584b411b363SPhilipp Reisner 
/* Fill a range of the bitmap with byte value c, page by page.
 * offset and len in long words. */
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		/* process at most up to the end of the current page
		 * (LWPP: long words per page) */
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		/* page content changed; schedule it for writeout */
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}
614b411b363SPhilipp Reisner 
615ae8bf312SLars Ellenberg /* For the layout, see comment above drbd_md_set_sector_offsets(). */
616ae8bf312SLars Ellenberg static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
617ae8bf312SLars Ellenberg {
618ae8bf312SLars Ellenberg 	u64 bitmap_sectors;
619ae8bf312SLars Ellenberg 	if (ldev->md.al_offset == 8)
620ae8bf312SLars Ellenberg 		bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
621ae8bf312SLars Ellenberg 	else
622ae8bf312SLars Ellenberg 		bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
623ae8bf312SLars Ellenberg 	return bitmap_sectors << (9 + 3);
624ae8bf312SLars Ellenberg }
625ae8bf312SLars Ellenberg 
/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	if (!expect(b))
		return -ENOMEM;

	drbd_bm_lock(device, "resize", BM_LOCKED_MASK);

	drbd_info(device, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	/* nothing to do if the capacity did not actually change */
	if (capacity == b->bm_dev_capacity)
		goto out;

	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);

	if (capacity == 0) {
		/* shrink to nothing: detach the page array under the
		 * spinlock, free it afterwards outside the lock */
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set   =
		b->bm_bits  =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(device)) {
		u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
		put_ldev(device);
		/* the on-disk meta data area limits how many bits we can track */
		if (bits > bits_on_disk) {
			drbd_info(device, "bits = %lu\n", bits);
			drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		/* same number of pages: reuse the existing page array */
		D_ASSERT(device, b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	/* when growing with set_new_bits, first set the surplus bits of the
	 * old last word/page, so the reused tail is already "all set" up to
	 * the old boundary before bm_memset() fills the new words below */
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			/* newly added area starts out-of-sync (all bits set) */
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	if (!growing)
		/* NOTE(review): bm_count_bits() runs outside bm_lock here;
		 * presumably drbd_bm_lock() above serializes modifications */
		b->bm_set = bm_count_bits(b);
	drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(device);
	return err;
}
751b411b363SPhilipp Reisner 
752b411b363SPhilipp Reisner /* inherently racy:
753b411b363SPhilipp Reisner  * if not protected by other means, return value may be out of date when
754b411b363SPhilipp Reisner  * leaving this function...
755b411b363SPhilipp Reisner  * we still need to lock it, since it is important that this returns
756b411b363SPhilipp Reisner  * bm_set == 0 precisely.
757b411b363SPhilipp Reisner  *
758b411b363SPhilipp Reisner  * maybe bm_set should be atomic_t ?
759b411b363SPhilipp Reisner  */
760b30ab791SAndreas Gruenbacher unsigned long _drbd_bm_total_weight(struct drbd_device *device)
761b411b363SPhilipp Reisner {
762b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
763b411b363SPhilipp Reisner 	unsigned long s;
764b411b363SPhilipp Reisner 	unsigned long flags;
765b411b363SPhilipp Reisner 
766841ce241SAndreas Gruenbacher 	if (!expect(b))
767841ce241SAndreas Gruenbacher 		return 0;
768841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
769841ce241SAndreas Gruenbacher 		return 0;
770b411b363SPhilipp Reisner 
771b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
772b411b363SPhilipp Reisner 	s = b->bm_set;
773b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
774b411b363SPhilipp Reisner 
775b411b363SPhilipp Reisner 	return s;
776b411b363SPhilipp Reisner }
777b411b363SPhilipp Reisner 
778b30ab791SAndreas Gruenbacher unsigned long drbd_bm_total_weight(struct drbd_device *device)
779b411b363SPhilipp Reisner {
780b411b363SPhilipp Reisner 	unsigned long s;
781b411b363SPhilipp Reisner 	/* if I don't have a disk, I don't know about out-of-sync status */
782b30ab791SAndreas Gruenbacher 	if (!get_ldev_if_state(device, D_NEGOTIATING))
783b411b363SPhilipp Reisner 		return 0;
784b30ab791SAndreas Gruenbacher 	s = _drbd_bm_total_weight(device);
785b30ab791SAndreas Gruenbacher 	put_ldev(device);
786b411b363SPhilipp Reisner 	return s;
787b411b363SPhilipp Reisner }
788b411b363SPhilipp Reisner 
789b30ab791SAndreas Gruenbacher size_t drbd_bm_words(struct drbd_device *device)
790b411b363SPhilipp Reisner {
791b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
792841ce241SAndreas Gruenbacher 	if (!expect(b))
793841ce241SAndreas Gruenbacher 		return 0;
794841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
795841ce241SAndreas Gruenbacher 		return 0;
796b411b363SPhilipp Reisner 
797b411b363SPhilipp Reisner 	return b->bm_words;
798b411b363SPhilipp Reisner }
799b411b363SPhilipp Reisner 
800b30ab791SAndreas Gruenbacher unsigned long drbd_bm_bits(struct drbd_device *device)
801b411b363SPhilipp Reisner {
802b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
803841ce241SAndreas Gruenbacher 	if (!expect(b))
804841ce241SAndreas Gruenbacher 		return 0;
805b411b363SPhilipp Reisner 
806b411b363SPhilipp Reisner 	return b->bm_bits;
807b411b363SPhilipp Reisner }
808b411b363SPhilipp Reisner 
/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		/* process at most up to the next page boundary
		 * (LWPP: long words per page) */
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			/* OR in the received word; keep bm_set in sync with
			 * the number of newly set bits */
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		/* merged data must reach disk eventually */
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}
859b411b363SPhilipp Reisner 
860b411b363SPhilipp Reisner /* copy number words from the bitmap starting at offset into the buffer.
861b411b363SPhilipp Reisner  * buffer[i] will be little endian unsigned long.
862b411b363SPhilipp Reisner  */
863b30ab791SAndreas Gruenbacher void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
864b411b363SPhilipp Reisner 		     unsigned long *buffer)
865b411b363SPhilipp Reisner {
866b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
867b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
868b411b363SPhilipp Reisner 	size_t end, do_now;
869b411b363SPhilipp Reisner 
870b411b363SPhilipp Reisner 	end = offset + number;
871b411b363SPhilipp Reisner 
872841ce241SAndreas Gruenbacher 	if (!expect(b))
873841ce241SAndreas Gruenbacher 		return;
874841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
875841ce241SAndreas Gruenbacher 		return;
876b411b363SPhilipp Reisner 
877b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
878b411b363SPhilipp Reisner 	if ((offset >= b->bm_words) ||
879b411b363SPhilipp Reisner 	    (end    >  b->bm_words) ||
880b411b363SPhilipp Reisner 	    (number <= 0))
881d0180171SAndreas Gruenbacher 		drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n",
882b411b363SPhilipp Reisner 			(unsigned long)	offset,
883b411b363SPhilipp Reisner 			(unsigned long)	number,
884b411b363SPhilipp Reisner 			(unsigned long) b->bm_words);
885b411b363SPhilipp Reisner 	else {
886b411b363SPhilipp Reisner 		while (offset < end) {
887b411b363SPhilipp Reisner 			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
88819f843aaSLars Ellenberg 			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
889b411b363SPhilipp Reisner 			bm = p_addr + MLPP(offset);
890b411b363SPhilipp Reisner 			offset += do_now;
891b411b363SPhilipp Reisner 			while (do_now--)
89295a0f10cSLars Ellenberg 				*buffer++ = *bm++;
893b411b363SPhilipp Reisner 			bm_unmap(p_addr);
894b411b363SPhilipp Reisner 		}
895b411b363SPhilipp Reisner 	}
896b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
897b411b363SPhilipp Reisner }
898b411b363SPhilipp Reisner 
899b411b363SPhilipp Reisner /* set all bits in the bitmap */
900b30ab791SAndreas Gruenbacher void drbd_bm_set_all(struct drbd_device *device)
901b411b363SPhilipp Reisner {
902b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
903841ce241SAndreas Gruenbacher 	if (!expect(b))
904841ce241SAndreas Gruenbacher 		return;
905841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
906841ce241SAndreas Gruenbacher 		return;
907b411b363SPhilipp Reisner 
908b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
909b411b363SPhilipp Reisner 	bm_memset(b, 0, 0xff, b->bm_words);
910b411b363SPhilipp Reisner 	(void)bm_clear_surplus(b);
911b411b363SPhilipp Reisner 	b->bm_set = b->bm_bits;
912b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
913b411b363SPhilipp Reisner }
914b411b363SPhilipp Reisner 
915b411b363SPhilipp Reisner /* clear all bits in the bitmap */
916b30ab791SAndreas Gruenbacher void drbd_bm_clear_all(struct drbd_device *device)
917b411b363SPhilipp Reisner {
918b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
919841ce241SAndreas Gruenbacher 	if (!expect(b))
920841ce241SAndreas Gruenbacher 		return;
921841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
922841ce241SAndreas Gruenbacher 		return;
923b411b363SPhilipp Reisner 
924b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
925b411b363SPhilipp Reisner 	bm_memset(b, 0, 0, b->bm_words);
926b411b363SPhilipp Reisner 	b->bm_set = 0;
927b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
928b411b363SPhilipp Reisner }
929b411b363SPhilipp Reisner 
93019f843aaSLars Ellenberg struct bm_aio_ctx {
931b30ab791SAndreas Gruenbacher 	struct drbd_device *device;
93219f843aaSLars Ellenberg 	atomic_t in_flight;
9339e58c4daSPhilipp Reisner 	unsigned int done;
93419f843aaSLars Ellenberg 	unsigned flags;
93519f843aaSLars Ellenberg #define BM_AIO_COPY_PAGES	1
93645dfffebSLars Ellenberg #define BM_AIO_WRITE_HINTED	2
937fef45d29SPhilipp Reisner #define BM_WRITE_ALL_PAGES	4
93819f843aaSLars Ellenberg 	int error;
939d1f3779bSPhilipp Reisner 	struct kref kref;
94019f843aaSLars Ellenberg };
94119f843aaSLars Ellenberg 
942d1f3779bSPhilipp Reisner static void bm_aio_ctx_destroy(struct kref *kref)
943d1f3779bSPhilipp Reisner {
944d1f3779bSPhilipp Reisner 	struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);
945d1f3779bSPhilipp Reisner 
946b30ab791SAndreas Gruenbacher 	put_ldev(ctx->device);
947d1f3779bSPhilipp Reisner 	kfree(ctx);
948d1f3779bSPhilipp Reisner }
949d1f3779bSPhilipp Reisner 
/* bio completion handler for bitmap page IO.
 * bv_page may be a copy, or may be the original.
 * Records errors in the shared bm_aio_ctx, releases per-page state,
 * and wakes the submitter once the last bio of the run completes. */
static void bm_async_io_complete(struct bio *bio, int error)
{
	struct bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
	int uptodate = bio_flagged(bio, BIO_UPTODATE);


	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	/* when writing in place (no private copy), the page must not have
	 * been redirtied while the IO was in flight */
	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);

	if (error) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
					error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
	}

	/* allow further IO on this bitmap page */
	bm_page_unlock_io(device, idx);

	/* the page was a private copy taken from the mempool at submit time */
	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);

	bio_put(bio);

	/* last completion of this run: signal the waiter and drop our
	 * reference on the context */
	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&device->misc_wait);
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	}
}
999b411b363SPhilipp Reisner 
/* Submit one bitmap page asynchronously to its location in the on-disk
 * meta data area.  With BM_AIO_COPY_PAGES set, a private copy of the page
 * is written, so the in-core page may be redirtied while IO is in flight.
 * Completion is handled by bm_async_io_complete(). */
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	struct page *page;
	unsigned int len;

	/* target sector: start of the on-disk bitmap area, plus this
	 * page's offset within it */
	sector_t on_disk_sector =
		device->ldev->md.md_offset + device->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(device, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		/* write a private snapshot; stash the page index in it so
		 * the completion handler can find the original */
		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
		copy_highpage(page, b->bm_pages[page_nr]);
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];
	bio->bi_bdev = device->ldev->md_bdev;
	bio->bi_iter.bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api.  Do we want to assert that? */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		/* fault injection: complete the bio with -EIO instead of
		 * submitting it */
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &device->rs_sect_ev);
	}
}
1048b411b363SPhilipp Reisner 
1049b411b363SPhilipp Reisner /*
1050b411b363SPhilipp Reisner  * bm_rw: read/write the whole bitmap from/to its on disk location.
1051b411b363SPhilipp Reisner  */
1052b30ab791SAndreas Gruenbacher static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
1053b411b363SPhilipp Reisner {
1054d1f3779bSPhilipp Reisner 	struct bm_aio_ctx *ctx;
1055b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
10566850c442SLars Ellenberg 	int num_pages, i, count = 0;
1057b411b363SPhilipp Reisner 	unsigned long now;
1058b411b363SPhilipp Reisner 	char ppb[10];
1059b411b363SPhilipp Reisner 	int err = 0;
1060b411b363SPhilipp Reisner 
106119f843aaSLars Ellenberg 	/*
106219f843aaSLars Ellenberg 	 * We are protected against bitmap disappearing/resizing by holding an
106319f843aaSLars Ellenberg 	 * ldev reference (caller must have called get_ldev()).
106419f843aaSLars Ellenberg 	 * For read/write, we are protected against changes to the bitmap by
106519f843aaSLars Ellenberg 	 * the bitmap lock (see drbd_bitmap_io).
106619f843aaSLars Ellenberg 	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
106719f843aaSLars Ellenberg 	 * as we submit copies of pages anyways.
106819f843aaSLars Ellenberg 	 */
1069d1f3779bSPhilipp Reisner 
107022f46ce2SLars Ellenberg 	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
1071d1f3779bSPhilipp Reisner 	if (!ctx)
1072d1f3779bSPhilipp Reisner 		return -ENOMEM;
1073d1f3779bSPhilipp Reisner 
1074d1f3779bSPhilipp Reisner 	*ctx = (struct bm_aio_ctx) {
1075b30ab791SAndreas Gruenbacher 		.device = device,
1076d1f3779bSPhilipp Reisner 		.in_flight = ATOMIC_INIT(1),
10779e58c4daSPhilipp Reisner 		.done = 0,
10780e8488adSLars Ellenberg 		.flags = flags,
1079d1f3779bSPhilipp Reisner 		.error = 0,
1080d1f3779bSPhilipp Reisner 		.kref = { ATOMIC_INIT(2) },
1081d1f3779bSPhilipp Reisner 	};
1082d1f3779bSPhilipp Reisner 
1083b30ab791SAndreas Gruenbacher 	if (!get_ldev_if_state(device, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
1084d0180171SAndreas Gruenbacher 		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
10859e58c4daSPhilipp Reisner 		kfree(ctx);
10869e58c4daSPhilipp Reisner 		return -ENODEV;
10879e58c4daSPhilipp Reisner 	}
1088*8fe39aacSPhilipp Reisner 	/* Here D_ATTACHING is sufficient since drbd_bm_read() is called only from
1089*8fe39aacSPhilipp Reisner 	   drbd_adm_attach(), after device->ldev was assigned. */
10909e58c4daSPhilipp Reisner 
1091d1f3779bSPhilipp Reisner 	if (!ctx->flags)
109220ceb2b2SLars Ellenberg 		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
1093b411b363SPhilipp Reisner 
10946850c442SLars Ellenberg 	num_pages = b->bm_number_of_pages;
1095b411b363SPhilipp Reisner 
1096b411b363SPhilipp Reisner 	now = jiffies;
1097b411b363SPhilipp Reisner 
1098b411b363SPhilipp Reisner 	/* let the layers below us try to merge these bios... */
10996850c442SLars Ellenberg 	for (i = 0; i < num_pages; i++) {
110019f843aaSLars Ellenberg 		/* ignore completely unchanged pages */
110119f843aaSLars Ellenberg 		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
110219f843aaSLars Ellenberg 			break;
110319f843aaSLars Ellenberg 		if (rw & WRITE) {
110445dfffebSLars Ellenberg 			if ((flags & BM_AIO_WRITE_HINTED) &&
110545dfffebSLars Ellenberg 			    !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
110645dfffebSLars Ellenberg 				    &page_private(b->bm_pages[i])))
110745dfffebSLars Ellenberg 				continue;
1108fef45d29SPhilipp Reisner 
1109d1aa4d04SPhilipp Reisner 			if (!(flags & BM_WRITE_ALL_PAGES) &&
1110d1aa4d04SPhilipp Reisner 			    bm_test_page_unchanged(b->bm_pages[i])) {
1111d0180171SAndreas Gruenbacher 				dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
111219f843aaSLars Ellenberg 				continue;
111319f843aaSLars Ellenberg 			}
111419f843aaSLars Ellenberg 			/* during lazy writeout,
111519f843aaSLars Ellenberg 			 * ignore those pages not marked for lazy writeout. */
111619f843aaSLars Ellenberg 			if (lazy_writeout_upper_idx &&
111719f843aaSLars Ellenberg 			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
1118d0180171SAndreas Gruenbacher 				dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
111919f843aaSLars Ellenberg 				continue;
112019f843aaSLars Ellenberg 			}
112119f843aaSLars Ellenberg 		}
1122d1f3779bSPhilipp Reisner 		atomic_inc(&ctx->in_flight);
1123d1f3779bSPhilipp Reisner 		bm_page_io_async(ctx, i, rw);
112419f843aaSLars Ellenberg 		++count;
112519f843aaSLars Ellenberg 		cond_resched();
112619f843aaSLars Ellenberg 	}
1127b411b363SPhilipp Reisner 
1128725a97e4SLars Ellenberg 	/*
1129d1f3779bSPhilipp Reisner 	 * We initialize ctx->in_flight to one to make sure bm_async_io_complete
11309e58c4daSPhilipp Reisner 	 * will not set ctx->done early, and decrement / test it here.  If there
1131725a97e4SLars Ellenberg 	 * are still some bios in flight, we need to wait for them here.
11329e58c4daSPhilipp Reisner 	 * If all IO is done already (or nothing had been submitted), there is
11339e58c4daSPhilipp Reisner 	 * no need to wait.  Still, we need to put the kref associated with the
11349e58c4daSPhilipp Reisner 	 * "in_flight reached zero, all done" event.
1135725a97e4SLars Ellenberg 	 */
1136d1f3779bSPhilipp Reisner 	if (!atomic_dec_and_test(&ctx->in_flight))
1137b30ab791SAndreas Gruenbacher 		wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
11389e58c4daSPhilipp Reisner 	else
11399e58c4daSPhilipp Reisner 		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
1140d1f3779bSPhilipp Reisner 
1141c9d963a4SLars Ellenberg 	/* summary for global bitmap IO */
1142c9d963a4SLars Ellenberg 	if (flags == 0)
1143d0180171SAndreas Gruenbacher 		drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n",
114419f843aaSLars Ellenberg 			 rw == WRITE ? "WRITE" : "READ",
114519f843aaSLars Ellenberg 			 count, jiffies - now);
1146b411b363SPhilipp Reisner 
1147d1f3779bSPhilipp Reisner 	if (ctx->error) {
1148d0180171SAndreas Gruenbacher 		drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
1149b30ab791SAndreas Gruenbacher 		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
1150d1f3779bSPhilipp Reisner 		err = -EIO; /* ctx->error ? */
1151b411b363SPhilipp Reisner 	}
1152b411b363SPhilipp Reisner 
11539e58c4daSPhilipp Reisner 	if (atomic_read(&ctx->in_flight))
115444edfb0dSLars Ellenberg 		err = -EIO; /* Disk timeout/force-detach during IO... */
11559e58c4daSPhilipp Reisner 
1156b411b363SPhilipp Reisner 	now = jiffies;
1157b411b363SPhilipp Reisner 	if (rw == WRITE) {
1158b30ab791SAndreas Gruenbacher 		drbd_md_flush(device);
1159b411b363SPhilipp Reisner 	} else /* rw == READ */ {
116095a0f10cSLars Ellenberg 		b->bm_set = bm_count_bits(b);
1161d0180171SAndreas Gruenbacher 		drbd_info(device, "recounting of set bits took additional %lu jiffies\n",
1162b411b363SPhilipp Reisner 		     jiffies - now);
1163b411b363SPhilipp Reisner 	}
1164b411b363SPhilipp Reisner 	now = b->bm_set;
1165b411b363SPhilipp Reisner 
1166c9d963a4SLars Ellenberg 	if (flags == 0)
1167d0180171SAndreas Gruenbacher 		drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
1168b411b363SPhilipp Reisner 		     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
1169b411b363SPhilipp Reisner 
1170d1f3779bSPhilipp Reisner 	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
1171b411b363SPhilipp Reisner 	return err;
1172b411b363SPhilipp Reisner }
1173b411b363SPhilipp Reisner 
/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @device:	DRBD device.
 *
 * Convenience wrapper: bm_rw() with READ, no flags and no lazy-writeout
 * upper index.  Returns 0 on success, a negative error code on failure.
 */
int drbd_bm_read(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, READ, 0, 0);
}
1182b411b363SPhilipp Reisner 
/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO
 * (bm_rw() skips pages for which bm_test_page_unchanged() holds).
 */
int drbd_bm_write(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, WRITE, 0, 0);
}
1193b411b363SPhilipp Reisner 
/**
 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will write all pages: BM_WRITE_ALL_PAGES disables the
 * "skip unchanged pages" optimization used by drbd_bm_write().
 */
int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, WRITE, BM_WRITE_ALL_PAGES, 0);
}
1204d1aa4d04SPhilipp Reisner 
/**
 * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 * In contrast to drbd_bm_write(), this will copy the bitmap pages
 * to temporary writeout pages (BM_AIO_COPY_PAGES). It is intended to trigger
 * a full write-out while still allowing the bitmap to change, for example
 * if a resync or online verify is aborted due to a failed peer disk, while
 * local IO continues, or pending resync acks are still being processed.
 */
int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, WRITE, BM_AIO_COPY_PAGES, 0);
}
122019f843aaSLars Ellenberg 
/**
 * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
 * @device:	DRBD device.
 *
 * Only pages whose BM_PAGE_HINT_WRITEOUT bit is set (and that actually
 * changed) are submitted; writeout goes through temporary copy pages.
 */
int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(device, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}
122919f843aaSLars Ellenberg 
/**
 * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
 * @device:	DRBD device.
 * @idx:	bitmap page index
 *
 * We don't want to special case on logical_block_size of the backend device,
 * so we submit PAGE_SIZE aligned pieces.
 * Note that on "most" systems, PAGE_SIZE is 4k.
 *
 * In case this becomes an issue on systems with larger PAGE_SIZE,
 * we may want to change this again to write 4k aligned 4k pieces.
 *
 * Returns 0 if the page was clean and nothing was submitted,
 * -ENOMEM / -ENODEV on setup failure, -EIO on forced detach during IO,
 * otherwise the IO completion status.
 */
int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold(local)
{
	struct bm_aio_ctx *ctx;
	int err;

	/* Nothing to do if this page was not touched since its last writeout. */
	if (bm_test_page_unchanged(device->bitmap->bm_pages[idx])) {
		dynamic_drbd_dbg(device, "skipped bm page write for idx %u\n", idx);
		return 0;
	}

	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	/* in_flight starts at 1 so the completion path cannot signal "done"
	 * before submission finished; kref counts this function plus the
	 * completion path. */
	*ctx = (struct bm_aio_ctx) {
		.device = device,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = BM_AIO_COPY_PAGES,
		.error = 0,
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev(device)) {  /* put is in bm_aio_ctx_destroy() */
		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
		kfree(ctx);
		return -ENODEV;
	}

	bm_page_io_async(ctx, idx, WRITE_SYNC);
	wait_until_done_or_force_detached(device, device->ldev, &ctx->done);

	if (ctx->error)
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
		/* that causes us to detach, so the in memory bitmap will be
		 * gone in a moment as well. */

	device->bm_writ_cnt++;
	/* If IO is still in flight here, we were force-detached while waiting. */
	err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	return err;
}
1284b411b363SPhilipp Reisner 
/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 *
 * Walks the bitmap page by page starting at bm_fo, looking for the next
 * set (or, with find_zero_bit, clear) bit.  Returns DRBD_END_OF_BITMAP
 * when no such bit exists at or after bm_fo.
 * Caller must hold the bitmap lock (see the locked wrapper bm_find_next()).
 */
static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo,
	const int find_zero_bit)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;


	if (bm_fo > b->bm_bits) {
		drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));

			/* search only within the currently mapped page */
			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				/* a hit beyond bm_bits lies in the padding of the
				 * last page: report "no bit found" instead */
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			/* nothing in this page, continue with the next one */
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}
1332b411b363SPhilipp Reisner 
1333b30ab791SAndreas Gruenbacher static unsigned long bm_find_next(struct drbd_device *device,
1334b411b363SPhilipp Reisner 	unsigned long bm_fo, const int find_zero_bit)
1335b411b363SPhilipp Reisner {
1336b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
13374b0715f0SLars Ellenberg 	unsigned long i = DRBD_END_OF_BITMAP;
1338b411b363SPhilipp Reisner 
1339841ce241SAndreas Gruenbacher 	if (!expect(b))
1340841ce241SAndreas Gruenbacher 		return i;
1341841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1342841ce241SAndreas Gruenbacher 		return i;
1343b411b363SPhilipp Reisner 
1344b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
134520ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1346b30ab791SAndreas Gruenbacher 		bm_print_lock_info(device);
1347b411b363SPhilipp Reisner 
1348b30ab791SAndreas Gruenbacher 	i = __bm_find_next(device, bm_fo, find_zero_bit);
1349b411b363SPhilipp Reisner 
1350b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
1351b411b363SPhilipp Reisner 	return i;
1352b411b363SPhilipp Reisner }
1353b411b363SPhilipp Reisner 
/* Find the next set bit at or after bm_fo (bit number, NOT sector).
 * Returns DRBD_END_OF_BITMAP if there is none.  Takes the bitmap lock. */
unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(device, bm_fo, 0);
}
1358b411b363SPhilipp Reisner 
#if 0
/* not yet needed for anything. */
/* Like drbd_bm_find_next(), but looks for the next *clear* bit. */
unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(device, bm_fo, 1);
}
#endif
1366b411b363SPhilipp Reisner 
/* Lockless variant of drbd_bm_find_next().
 * does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	return __bm_find_next(device, bm_fo, 0);
}
1374b411b363SPhilipp Reisner 
/* Like _drbd_bm_find_next(), but searches for the next *clear* bit.
 * Caller must hold drbd_bm_lock(). */
unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	return __bm_find_next(device, bm_fo, 1);
}
1380b411b363SPhilipp Reisner 
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
	unsigned long e, int val)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;	/* sentinel: no page mapped yet */
	int c = 0;				/* delta of set bits on current page */
	int changed_total = 0;
	/* out-of-range end is a bug elsewhere; clamp and complain loudly */

	if (e >= b->bm_bits) {
		drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr);
			/* flush the delta of the page we just left: cleared bits
			 * only mark it for lazy writeout, newly set bits mark it
			 * as needing writeout */
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr);
	/* account for the last page touched (if any; c == 0 if none) */
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}
1431b411b363SPhilipp Reisner 
1432b411b363SPhilipp Reisner /* returns number of bits actually changed.
1433b411b363SPhilipp Reisner  * for val != 0, we change 0 -> 1, return code positive
1434b411b363SPhilipp Reisner  * for val == 0, we change 1 -> 0, return code negative
1435b411b363SPhilipp Reisner  * wants bitnr, not sector */
1436b30ab791SAndreas Gruenbacher static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
1437b411b363SPhilipp Reisner 	const unsigned long e, int val)
1438b411b363SPhilipp Reisner {
1439b411b363SPhilipp Reisner 	unsigned long flags;
1440b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
1441b411b363SPhilipp Reisner 	int c = 0;
1442b411b363SPhilipp Reisner 
1443841ce241SAndreas Gruenbacher 	if (!expect(b))
1444841ce241SAndreas Gruenbacher 		return 1;
1445841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1446841ce241SAndreas Gruenbacher 		return 0;
1447b411b363SPhilipp Reisner 
1448b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
144920ceb2b2SLars Ellenberg 	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
1450b30ab791SAndreas Gruenbacher 		bm_print_lock_info(device);
1451b411b363SPhilipp Reisner 
1452b30ab791SAndreas Gruenbacher 	c = __bm_change_bits_to(device, s, e, val);
1453b411b363SPhilipp Reisner 
1454b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1455b411b363SPhilipp Reisner 	return c;
1456b411b363SPhilipp Reisner }
1457b411b363SPhilipp Reisner 
/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	/* bm_change_bits_to() returns a positive count for 0 -> 1 changes */
	return bm_change_bits_to(device, s, e, 1);
}
1463b411b363SPhilipp Reisner 
/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	/* bm_change_bits_to() returns a negative count for 1 -> 0; negate it */
	return -bm_change_bits_to(device, s, e, 0);
}
1469b411b363SPhilipp Reisner 
1470b411b363SPhilipp Reisner /* sets all bits in full words,
1471b411b363SPhilipp Reisner  * from first_word up to, but not including, last_word */
1472b411b363SPhilipp Reisner static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
1473b411b363SPhilipp Reisner 		int page_nr, int first_word, int last_word)
1474b411b363SPhilipp Reisner {
1475b411b363SPhilipp Reisner 	int i;
1476b411b363SPhilipp Reisner 	int bits;
147722d81140SLars Ellenberg 	int changed = 0;
1478cfd8005cSCong Wang 	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
1479b411b363SPhilipp Reisner 	for (i = first_word; i < last_word; i++) {
1480b411b363SPhilipp Reisner 		bits = hweight_long(paddr[i]);
1481b411b363SPhilipp Reisner 		paddr[i] = ~0UL;
148222d81140SLars Ellenberg 		changed += BITS_PER_LONG - bits;
1483b411b363SPhilipp Reisner 	}
1484cfd8005cSCong Wang 	kunmap_atomic(paddr);
148522d81140SLars Ellenberg 	if (changed) {
148622d81140SLars Ellenberg 		/* We only need lazy writeout, the information is still in the
148722d81140SLars Ellenberg 		 * remote bitmap as well, and is reconstructed during the next
148822d81140SLars Ellenberg 		 * bitmap exchange, if lost locally due to a crash. */
148922d81140SLars Ellenberg 		bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
149022d81140SLars Ellenberg 		b->bm_set += changed;
149122d81140SLars Ellenberg 	}
1492b411b363SPhilipp Reisner }
1493b411b363SPhilipp Reisner 
/* Same thing as drbd_bm_set_bits,
 * but more efficient for a large bit range.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	struct drbd_bitmap *b = device->bitmap;
	unsigned long sl = ALIGN(s,BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(device, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	spin_lock_irq(&b->bm_lock);

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(device, s, sl-1, 1);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
		/* drop the spinlock between pages so we may be scheduled;
		 * the caller still holds drbd_bm_lock() (see header comment) */
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}
	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);

	/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
	 * ==> e = 32767, el = 32768, last_page = 2,
	 * and now last_word = 0.
	 * We do not want to touch last_page in this case,
	 * as we did not allocate it, it is not present in bitmap->bm_pages.
	 */
	if (last_word)
		bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(device, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}
1571b411b363SPhilipp Reisner 
/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	int i;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	if (bitnr < b->bm_bits) {
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
		bm_unmap(p_addr);
	} else if (bitnr == b->bm_bits) {
		/* exactly one past the end: the expected "stop" condition,
		 * reported without an error message */
		i = -1;
	} else { /* (bitnr > b->bm_bits) */
		drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
		i = 0;
	}

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return i;
}
1608b411b363SPhilipp Reisner 
/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int page_nr = -1U;	/* sentinel: no page mapped yet */
	int c = 0;

	/* If this is called without a bitmap, that is a bug.  But just to be
	 * robust in case we screwed up elsewhere, in that case pretend there
	 * was one dirty bit in the requested area, so we won't try to do a
	 * local read there (no bitmap probably implies no disk) */
	if (!expect(b))
		return 1;
	if (!expect(b->bm_pages))
		return 1;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
		/* remap only when crossing into a different bitmap page */
		if (page_nr != idx) {
			page_nr = idx;
			if (p_addr)
				bm_unmap(p_addr);
			p_addr = bm_map_pidx(b, idx);
		}
		if (expect(bitnr < b->bm_bits))
			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
		else
			drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
	}
	if (p_addr)
		bm_unmap(p_addr);
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}
1649b411b363SPhilipp Reisner 
1650b411b363SPhilipp Reisner 
/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * but the general usage is that this is only use during a cstate when bits are
 * only cleared, not set, and typically only care for the case when the return
 * value is zero, or we already "locked" this "bitmap extent" by other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 *
 * TODO
 * I think since we use it like a reference count, we should use the real
 * reference count of some bitmap extent element from some lru instead...
 *
 */
int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
{
	struct drbd_bitmap *b = device->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(device);

	/* word range covered by this extent, end clipped to the bitmap size */
	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		/* NOTE(review): only the page containing word s is mapped;
		 * this assumes the words of one extent never span a page
		 * boundary -- confirm against S2W()/page layout. */
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		while (n--)
			count += hweight_long(*bm++);
		bm_unmap(p_addr);
	} else {
		drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}
1697