xref: /openbmc/linux/drivers/block/drbd/drbd_bitmap.c (revision 27ea1d876e16c0ca5ae6335fc85cf4f278f5c98c)
1b411b363SPhilipp Reisner /*
2b411b363SPhilipp Reisner    drbd_bitmap.c
3b411b363SPhilipp Reisner 
4b411b363SPhilipp Reisner    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5b411b363SPhilipp Reisner 
6b411b363SPhilipp Reisner    Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
7b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9b411b363SPhilipp Reisner 
10b411b363SPhilipp Reisner    drbd is free software; you can redistribute it and/or modify
11b411b363SPhilipp Reisner    it under the terms of the GNU General Public License as published by
12b411b363SPhilipp Reisner    the Free Software Foundation; either version 2, or (at your option)
13b411b363SPhilipp Reisner    any later version.
14b411b363SPhilipp Reisner 
15b411b363SPhilipp Reisner    drbd is distributed in the hope that it will be useful,
16b411b363SPhilipp Reisner    but WITHOUT ANY WARRANTY; without even the implied warranty of
17b411b363SPhilipp Reisner    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18b411b363SPhilipp Reisner    GNU General Public License for more details.
19b411b363SPhilipp Reisner 
20b411b363SPhilipp Reisner    You should have received a copy of the GNU General Public License
21b411b363SPhilipp Reisner    along with drbd; see the file COPYING.  If not, write to
22b411b363SPhilipp Reisner    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23b411b363SPhilipp Reisner  */
24b411b363SPhilipp Reisner 
25f88c5d90SLars Ellenberg #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
26f88c5d90SLars Ellenberg 
275fb3bc4dSLars Ellenberg #include <linux/bitmap.h>
28b411b363SPhilipp Reisner #include <linux/vmalloc.h>
29b411b363SPhilipp Reisner #include <linux/string.h>
30b411b363SPhilipp Reisner #include <linux/drbd.h>
315a0e3ad6STejun Heo #include <linux/slab.h>
32dbcbdc43SChristoph Hellwig #include <linux/highmem.h>
33f0ff1357SStephen Rothwell 
34b411b363SPhilipp Reisner #include "drbd_int.h"
35b411b363SPhilipp Reisner 
3695a0f10cSLars Ellenberg 
37b411b363SPhilipp Reisner /* OPAQUE outside this file!
38b411b363SPhilipp Reisner  * interface defined in drbd_int.h
39b411b363SPhilipp Reisner 
40b411b363SPhilipp Reisner  * convention:
41b411b363SPhilipp Reisner  * function name drbd_bm_... => used elsewhere, "public".
42b411b363SPhilipp Reisner  * function name      bm_... => internal to implementation, "private".
434b0715f0SLars Ellenberg  */
44b411b363SPhilipp Reisner 
454b0715f0SLars Ellenberg 
464b0715f0SLars Ellenberg /*
474b0715f0SLars Ellenberg  * LIMITATIONS:
484b0715f0SLars Ellenberg  * We want to support >= peta byte of backend storage, while for now still using
494b0715f0SLars Ellenberg  * a granularity of one bit per 4KiB of storage.
504b0715f0SLars Ellenberg  * 1 << 50		bytes backend storage (1 PiB)
514b0715f0SLars Ellenberg  * 1 << (50 - 12)	bits needed
524b0715f0SLars Ellenberg  *	38 --> we need u64 to index and count bits
534b0715f0SLars Ellenberg  * 1 << (38 - 3)	bitmap bytes needed
544b0715f0SLars Ellenberg  *	35 --> we still need u64 to index and count bytes
554b0715f0SLars Ellenberg  *			(that's 32 GiB of bitmap for 1 PiB storage)
564b0715f0SLars Ellenberg  * 1 << (35 - 2)	32bit longs needed
574b0715f0SLars Ellenberg  *	33 --> we'd even need u64 to index and count 32bit long words.
584b0715f0SLars Ellenberg  * 1 << (35 - 3)	64bit longs needed
594b0715f0SLars Ellenberg  *	32 --> we could get away with a 32bit unsigned int to index and count
604b0715f0SLars Ellenberg  *	64bit long words, but I rather stay with unsigned long for now.
614b0715f0SLars Ellenberg  *	We probably should neither count nor point to bytes or long words
624b0715f0SLars Ellenberg  *	directly, but either by bitnumber, or by page index and offset.
634b0715f0SLars Ellenberg  * 1 << (35 - 12)
644b0715f0SLars Ellenberg  *	22 --> we need that much 4KiB pages of bitmap.
654b0715f0SLars Ellenberg  *	1 << (22 + 3) --> on a 64bit arch,
664b0715f0SLars Ellenberg  *	we need 32 MiB to store the array of page pointers.
674b0715f0SLars Ellenberg  *
684b0715f0SLars Ellenberg  * Because I'm lazy, and because the resulting patch was too large, too ugly
694b0715f0SLars Ellenberg  * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
704b0715f0SLars Ellenberg  * (1 << 32) bits * 4k storage.
714b0715f0SLars Ellenberg  *
724b0715f0SLars Ellenberg 
734b0715f0SLars Ellenberg  * bitmap storage and IO:
744b0715f0SLars Ellenberg  *	Bitmap is stored little endian on disk, and is kept little endian in
754b0715f0SLars Ellenberg  *	core memory. Currently we still hold the full bitmap in core as long
764b0715f0SLars Ellenberg  *	as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
774b0715f0SLars Ellenberg  *	seems excessive.
784b0715f0SLars Ellenberg  *
7924c4830cSBart Van Assche  *	We plan to reduce the amount of in-core bitmap pages by paging them in
804b0715f0SLars Ellenberg  *	and out against their on-disk location as necessary, but need to make
814b0715f0SLars Ellenberg  *	sure we don't cause too much meta data IO, and must not deadlock in
824b0715f0SLars Ellenberg  *	tight memory situations. This needs some more work.
83b411b363SPhilipp Reisner  */
84b411b363SPhilipp Reisner 
85b411b363SPhilipp Reisner /*
86b411b363SPhilipp Reisner  * NOTE
87b411b363SPhilipp Reisner  *  Access to the *bm_pages is protected by bm_lock.
88b411b363SPhilipp Reisner  *  It is safe to read the other members within the lock.
89b411b363SPhilipp Reisner  *
90b411b363SPhilipp Reisner  *  drbd_bm_set_bits is called from bio_endio callbacks,
91b411b363SPhilipp Reisner  *  We may be called with irq already disabled,
92b411b363SPhilipp Reisner  *  so we need spin_lock_irqsave().
93b411b363SPhilipp Reisner  *  And we need the kmap_atomic.
94b411b363SPhilipp Reisner  */
95b411b363SPhilipp Reisner struct drbd_bitmap {
	/* array of pages holding the in-core bitmap; access protected by bm_lock */
96b411b363SPhilipp Reisner 	struct page **bm_pages;
97b411b363SPhilipp Reisner 	spinlock_t bm_lock;
984b0715f0SLars Ellenberg 
99*27ea1d87SLars Ellenberg 	/* exclusively to be used by __al_write_transaction(),
100*27ea1d87SLars Ellenberg 	 * drbd_bm_mark_for_writeout() and
101*27ea1d87SLars Ellenberg 	 * and drbd_bm_write_hinted() -> bm_rw() called from there.
102*27ea1d87SLars Ellenberg 	 */
103*27ea1d87SLars Ellenberg 	unsigned int n_bitmap_hints;
104*27ea1d87SLars Ellenberg 	unsigned int al_bitmap_hints[AL_UPDATES_PER_TRANSACTION];
105*27ea1d87SLars Ellenberg 
1064b0715f0SLars Ellenberg 	/* see LIMITATIONS: above */
1074b0715f0SLars Ellenberg 
108b411b363SPhilipp Reisner 	unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
	/* size of the bitmap in bits, long words, and backing pages */
109b411b363SPhilipp Reisner 	unsigned long bm_bits;
110b411b363SPhilipp Reisner 	size_t   bm_words;
111b411b363SPhilipp Reisner 	size_t   bm_number_of_pages;
	/* capacity (sectors) of the device this bitmap describes */
112b411b363SPhilipp Reisner 	sector_t bm_dev_capacity;
1138a03ae2aSThomas Gleixner 	struct mutex bm_change; /* serializes resize operations */
114b411b363SPhilipp Reisner 
11519f843aaSLars Ellenberg 	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */
116b411b363SPhilipp Reisner 
	/* BM_LOCKED_* bits; set by drbd_bm_lock(), cleared by drbd_bm_unlock() */
11720ceb2b2SLars Ellenberg 	enum bm_flag bm_flags;
118b411b363SPhilipp Reisner 
119b411b363SPhilipp Reisner 	/* debugging aid, in case we are still racy somewhere */
120b411b363SPhilipp Reisner 	char          *bm_why;
121b411b363SPhilipp Reisner 	struct task_struct *bm_task;
122b411b363SPhilipp Reisner };
123b411b363SPhilipp Reisner 
124b411b363SPhilipp Reisner #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
/* Rate-limited complaint that the bitmap is being touched while locked:
 * reports the current task and the reason/holder recorded in bm_why/bm_task. */
125b30ab791SAndreas Gruenbacher static void __bm_print_lock_info(struct drbd_device *device, const char *func)
126b411b363SPhilipp Reisner {
127b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
128b411b363SPhilipp Reisner 	if (!__ratelimit(&drbd_ratelimit_state))
129b411b363SPhilipp Reisner 		return;
130c60b0251SAndreas Gruenbacher 	drbd_err(device, "FIXME %s[%d] in %s, bitmap locked for '%s' by %s[%d]\n",
131c60b0251SAndreas Gruenbacher 		 current->comm, task_pid_nr(current),
132b411b363SPhilipp Reisner 		 func, b->bm_why ?: "?",
133c60b0251SAndreas Gruenbacher 		 b->bm_task->comm, task_pid_nr(b->bm_task));
134b411b363SPhilipp Reisner }
135b411b363SPhilipp Reisner 
/* Take the bitmap change mutex, record @why/@flags for diagnostics.
 * @flags (BM_LOCKED_*) describe which modifications are forbidden while held. */
136b30ab791SAndreas Gruenbacher void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
137b411b363SPhilipp Reisner {
138b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
139b411b363SPhilipp Reisner 	int trylock_failed;
140b411b363SPhilipp Reisner 
141b411b363SPhilipp Reisner 	if (!b) {
142d0180171SAndreas Gruenbacher 		drbd_err(device, "FIXME no bitmap in drbd_bm_lock!?\n");
143b411b363SPhilipp Reisner 		return;
144b411b363SPhilipp Reisner 	}
145b411b363SPhilipp Reisner 
	/* try non-blocking first so we can log the current holder before
	 * actually blocking on the mutex */
1468a03ae2aSThomas Gleixner 	trylock_failed = !mutex_trylock(&b->bm_change);
147b411b363SPhilipp Reisner 
148b411b363SPhilipp Reisner 	if (trylock_failed) {
149c60b0251SAndreas Gruenbacher 		drbd_warn(device, "%s[%d] going to '%s' but bitmap already locked for '%s' by %s[%d]\n",
150c60b0251SAndreas Gruenbacher 			  current->comm, task_pid_nr(current),
151b411b363SPhilipp Reisner 			  why, b->bm_why ?: "?",
152c60b0251SAndreas Gruenbacher 			  b->bm_task->comm, task_pid_nr(b->bm_task));
1538a03ae2aSThomas Gleixner 		mutex_lock(&b->bm_change);
154b411b363SPhilipp Reisner 	}
15520ceb2b2SLars Ellenberg 	if (BM_LOCKED_MASK & b->bm_flags)
156d0180171SAndreas Gruenbacher 		drbd_err(device, "FIXME bitmap already locked in bm_lock\n");
	/* remember what kind of lock this is, for the checks in set/clear paths */
15720ceb2b2SLars Ellenberg 	b->bm_flags |= flags & BM_LOCKED_MASK;
158b411b363SPhilipp Reisner 
159b411b363SPhilipp Reisner 	b->bm_why  = why;
160b411b363SPhilipp Reisner 	b->bm_task = current;
161b411b363SPhilipp Reisner }
162b411b363SPhilipp Reisner 
/* Counterpart of drbd_bm_lock(): clear the BM_LOCKED_* flags and the
 * diagnostic holder info, then release the bitmap change mutex. */
163b30ab791SAndreas Gruenbacher void drbd_bm_unlock(struct drbd_device *device)
164b411b363SPhilipp Reisner {
165b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
166b411b363SPhilipp Reisner 	if (!b) {
167d0180171SAndreas Gruenbacher 		drbd_err(device, "FIXME no bitmap in drbd_bm_unlock!?\n");
168b411b363SPhilipp Reisner 		return;
169b411b363SPhilipp Reisner 	}
170b411b363SPhilipp Reisner 
171b30ab791SAndreas Gruenbacher 	if (!(BM_LOCKED_MASK & device->bitmap->bm_flags))
172d0180171SAndreas Gruenbacher 		drbd_err(device, "FIXME bitmap not locked in bm_unlock\n");
173b411b363SPhilipp Reisner 
17420ceb2b2SLars Ellenberg 	b->bm_flags &= ~BM_LOCKED_MASK;
175b411b363SPhilipp Reisner 	b->bm_why  = NULL;
176b411b363SPhilipp Reisner 	b->bm_task = NULL;
1778a03ae2aSThomas Gleixner 	mutex_unlock(&b->bm_change);
178b411b363SPhilipp Reisner }
179b411b363SPhilipp Reisner 
18019f843aaSLars Ellenberg /* we store some "meta" info about our pages in page->private */
18119f843aaSLars Ellenberg /* at a granularity of 4k storage per bitmap bit:
18219f843aaSLars Ellenberg  * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
18319f843aaSLars Ellenberg  *  1<<38 bits,
18419f843aaSLars Ellenberg  *  1<<23 4k bitmap pages.
18519f843aaSLars Ellenberg  * Use 24 bits as page index, covers 2 peta byte storage
18619f843aaSLars Ellenberg  * at a granularity of 4k per bit.
18719f843aaSLars Ellenberg  * Used to report the failed page idx on io error from the endio handlers.
18819f843aaSLars Ellenberg  */
18919f843aaSLars Ellenberg #define BM_PAGE_IDX_MASK	((1UL<<24)-1)
19019f843aaSLars Ellenberg /* this page is currently read in, or written back */
19119f843aaSLars Ellenberg #define BM_PAGE_IO_LOCK		31
19219f843aaSLars Ellenberg /* if there has been an IO error for this page */
19319f843aaSLars Ellenberg #define BM_PAGE_IO_ERROR	30
19419f843aaSLars Ellenberg /* this is to be able to intelligently skip disk IO,
19519f843aaSLars Ellenberg  * set if bits have been set since last IO. */
19619f843aaSLars Ellenberg #define BM_PAGE_NEED_WRITEOUT	29
19719f843aaSLars Ellenberg /* to mark for lazy writeout once syncer cleared all clearable bits,
19819f843aaSLars Ellenberg  * we if bits have been cleared since last IO. */
19919f843aaSLars Ellenberg #define BM_PAGE_LAZY_WRITEOUT	28
20045dfffebSLars Ellenberg /* pages marked with this "HINT" will be considered for writeout
20145dfffebSLars Ellenberg  * on activity log transactions */
20245dfffebSLars Ellenberg #define BM_PAGE_HINT_WRITEOUT	27
20319f843aaSLars Ellenberg 
20424c4830cSBart Van Assche /* store_page_idx uses non-atomic assignment. It is only used directly after
20519f843aaSLars Ellenberg  * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
20619f843aaSLars Ellenberg  * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
20719f843aaSLars Ellenberg  * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
20819f843aaSLars Ellenberg  * requires it all to be atomic as well. */
/* Store the page's own index into page->private (low 24 bits).
 * Non-atomic on purpose: only valid right after allocation, see above. */
20919f843aaSLars Ellenberg static void bm_store_page_idx(struct page *page, unsigned long idx)
21019f843aaSLars Ellenberg {
21119f843aaSLars Ellenberg 	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
2120c7db279SArne Redlich 	set_page_private(page, idx);
21319f843aaSLars Ellenberg }
21419f843aaSLars Ellenberg 
/* Read back the page index stored by bm_store_page_idx() (low 24 bits
 * of page->private; the high bits hold the BM_PAGE_* flag bits). */
21519f843aaSLars Ellenberg static unsigned long bm_page_to_idx(struct page *page)
21619f843aaSLars Ellenberg {
21719f843aaSLars Ellenberg 	return page_private(page) & BM_PAGE_IDX_MASK;
21819f843aaSLars Ellenberg }
21919f843aaSLars Ellenberg 
22019f843aaSLars Ellenberg /* As is very unlikely that the same page is under IO from more than one
22119f843aaSLars Ellenberg  * context, we can get away with a bit per page and one wait queue per bitmap.
22219f843aaSLars Ellenberg  */
/* Acquire the per-page IO "lock" bit; sleeps on bm_io_wait until the
 * BM_PAGE_IO_LOCK bit in page->private could be won atomically. */
223b30ab791SAndreas Gruenbacher static void bm_page_lock_io(struct drbd_device *device, int page_nr)
22419f843aaSLars Ellenberg {
225b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
22619f843aaSLars Ellenberg 	void *addr = &page_private(b->bm_pages[page_nr]);
22719f843aaSLars Ellenberg 	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
22819f843aaSLars Ellenberg }
22919f843aaSLars Ellenberg 
/* Release the per-page IO lock bit and wake anyone waiting in
 * bm_page_lock_io() on this bitmap's wait queue. */
230b30ab791SAndreas Gruenbacher static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
23119f843aaSLars Ellenberg {
232b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
23319f843aaSLars Ellenberg 	void *addr = &page_private(b->bm_pages[page_nr]);
2344738fa16SLars Ellenberg 	clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
235b30ab791SAndreas Gruenbacher 	wake_up(&device->bitmap->bm_io_wait);
23619f843aaSLars Ellenberg }
23719f843aaSLars Ellenberg 
23819f843aaSLars Ellenberg /* set _before_ submit_io, so it may be reset due to being changed
23919f843aaSLars Ellenberg  * while this page is in flight... will get submitted later again */
/* Mark the page as "clean": clear both the need-writeout and the
 * lazy-writeout flag bits in page->private. */
24019f843aaSLars Ellenberg static void bm_set_page_unchanged(struct page *page)
24119f843aaSLars Ellenberg {
24219f843aaSLars Ellenberg 	/* use cmpxchg? */
24319f843aaSLars Ellenberg 	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
24419f843aaSLars Ellenberg 	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
24519f843aaSLars Ellenberg }
24619f843aaSLars Ellenberg 
/* Flag the page as modified since its last IO, so it gets written out. */
24719f843aaSLars Ellenberg static void bm_set_page_need_writeout(struct page *page)
24819f843aaSLars Ellenberg {
24919f843aaSLars Ellenberg 	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
25019f843aaSLars Ellenberg }
25119f843aaSLars Ellenberg 
/* Drop all activity-log writeout hints collected so far
 * (resets the index into al_bitmap_hints[]). */
252*27ea1d87SLars Ellenberg void drbd_bm_reset_al_hints(struct drbd_device *device)
253*27ea1d87SLars Ellenberg {
254*27ea1d87SLars Ellenberg 	device->bitmap->n_bitmap_hints = 0;
255*27ea1d87SLars Ellenberg }
256*27ea1d87SLars Ellenberg 
25745dfffebSLars Ellenberg /**
25845dfffebSLars Ellenberg  * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
259b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
26045dfffebSLars Ellenberg  * @page_nr:	the bitmap page to mark with the "hint" flag
26145dfffebSLars Ellenberg  *
26245dfffebSLars Ellenberg  * From within an activity log transaction, we mark a few pages with these
26345dfffebSLars Ellenberg  * hints, then call drbd_bm_write_hinted(), which will only write out changed
26445dfffebSLars Ellenberg  * pages which are flagged with this mark.
26545dfffebSLars Ellenberg  */
266b30ab791SAndreas Gruenbacher void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
26745dfffebSLars Ellenberg {
268*27ea1d87SLars Ellenberg 	struct drbd_bitmap *b = device->bitmap;
26945dfffebSLars Ellenberg 	struct page *page;
	/* out-of-range page numbers are logged and ignored, not fatal */
270b30ab791SAndreas Gruenbacher 	if (page_nr >= device->bitmap->bm_number_of_pages) {
271d0180171SAndreas Gruenbacher 		drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
272b30ab791SAndreas Gruenbacher 			 page_nr, (int)device->bitmap->bm_number_of_pages);
27345dfffebSLars Ellenberg 		return;
27445dfffebSLars Ellenberg 	}
275b30ab791SAndreas Gruenbacher 	page = device->bitmap->bm_pages[page_nr];
	/* hint array is sized AL_UPDATES_PER_TRANSACTION; overflow is a bug */
276*27ea1d87SLars Ellenberg 	BUG_ON(b->n_bitmap_hints >= ARRAY_SIZE(b->al_bitmap_hints));
	/* record the page only once: skip if the HINT bit was already set */
277*27ea1d87SLars Ellenberg 	if (!test_and_set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)))
278*27ea1d87SLars Ellenberg 		b->al_bitmap_hints[b->n_bitmap_hints++] = page_nr;
27945dfffebSLars Ellenberg }
28045dfffebSLars Ellenberg 
/* Return true if neither NEED_WRITEOUT nor LAZY_WRITEOUT is set,
 * i.e. the page has not been modified since its last IO. */
28119f843aaSLars Ellenberg static int bm_test_page_unchanged(struct page *page)
28219f843aaSLars Ellenberg {
28319f843aaSLars Ellenberg 	volatile const unsigned long *addr = &page_private(page);
28419f843aaSLars Ellenberg 	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
28519f843aaSLars Ellenberg }
28619f843aaSLars Ellenberg 
/* Record an IO error on this bitmap page (flag bit in page->private). */
28719f843aaSLars Ellenberg static void bm_set_page_io_err(struct page *page)
28819f843aaSLars Ellenberg {
28919f843aaSLars Ellenberg 	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
29019f843aaSLars Ellenberg }
29119f843aaSLars Ellenberg 
/* Clear a previously recorded IO error flag on this bitmap page. */
29219f843aaSLars Ellenberg static void bm_clear_page_io_err(struct page *page)
29319f843aaSLars Ellenberg {
29419f843aaSLars Ellenberg 	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
29519f843aaSLars Ellenberg }
29619f843aaSLars Ellenberg 
/* Mark the page for lazy writeout (bits were cleared since last IO). */
29719f843aaSLars Ellenberg static void bm_set_page_lazy_writeout(struct page *page)
29819f843aaSLars Ellenberg {
29919f843aaSLars Ellenberg 	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
30019f843aaSLars Ellenberg }
30119f843aaSLars Ellenberg 
/* Return non-zero if the page is flagged for lazy writeout. */
30219f843aaSLars Ellenberg static int bm_test_page_lazy_writeout(struct page *page)
30319f843aaSLars Ellenberg {
30419f843aaSLars Ellenberg 	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
30519f843aaSLars Ellenberg }
30619f843aaSLars Ellenberg 
30719f843aaSLars Ellenberg /* on a 32bit box, this would allow for exactly (2<<38) bits. */
/* Convert a long-word number within the bitmap to the index of the
 * backing page holding it. */
30819f843aaSLars Ellenberg static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
30919f843aaSLars Ellenberg {
31019f843aaSLars Ellenberg 	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
31119f843aaSLars Ellenberg 	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
31219f843aaSLars Ellenberg 	BUG_ON(page_nr >= b->bm_number_of_pages);
31319f843aaSLars Ellenberg 	return page_nr;
31419f843aaSLars Ellenberg }
31519f843aaSLars Ellenberg 
/* Convert a bit number within the bitmap to the index of the backing
 * page holding it (u64 bitnr: see the 24-bit page index note above). */
31695a0f10cSLars Ellenberg static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
31795a0f10cSLars Ellenberg {
31895a0f10cSLars Ellenberg 	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
31995a0f10cSLars Ellenberg 	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
32095a0f10cSLars Ellenberg 	BUG_ON(page_nr >= b->bm_number_of_pages);
32195a0f10cSLars Ellenberg 	return page_nr;
32295a0f10cSLars Ellenberg }
32395a0f10cSLars Ellenberg 
/* Map bitmap page @idx via kmap_atomic; pair with __bm_unmap().
 * Caller must not sleep between map and unmap (atomic mapping). */
324589973a7SCong Wang static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
32595a0f10cSLars Ellenberg {
32695a0f10cSLars Ellenberg 	struct page *page = b->bm_pages[idx];
327cfd8005cSCong Wang 	return (unsigned long *) kmap_atomic(page);
32895a0f10cSLars Ellenberg }
32995a0f10cSLars Ellenberg 
/* Public-facing wrapper around __bm_map_pidx(); currently identical. */
33095a0f10cSLars Ellenberg static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
33195a0f10cSLars Ellenberg {
332cfd8005cSCong Wang 	return __bm_map_pidx(b, idx);
33395a0f10cSLars Ellenberg }
33495a0f10cSLars Ellenberg 
/* Undo an atomic mapping obtained via __bm_map_pidx(). */
335cfd8005cSCong Wang static void __bm_unmap(unsigned long *p_addr)
336b411b363SPhilipp Reisner {
337cfd8005cSCong Wang 	kunmap_atomic(p_addr);
338b411b363SPhilipp Reisner };
339b411b363SPhilipp Reisner 
/* Counterpart of bm_map_pidx(); thin wrapper around __bm_unmap(). */
340b411b363SPhilipp Reisner static void bm_unmap(unsigned long *p_addr)
341b411b363SPhilipp Reisner {
342cfd8005cSCong Wang 	return __bm_unmap(p_addr);
343b411b363SPhilipp Reisner }
344b411b363SPhilipp Reisner 
345b411b363SPhilipp Reisner /* long word offset of _bitmap_ sector */
346b411b363SPhilipp Reisner #define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
347b411b363SPhilipp Reisner /* word offset from start of bitmap to word number _in_page_
348b411b363SPhilipp Reisner  * modulo longs per page
349b411b363SPhilipp Reisner #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
35024c4830cSBart Van Assche  hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
351b411b363SPhilipp Reisner  so do it explicitly:
352b411b363SPhilipp Reisner  */
353b411b363SPhilipp Reisner #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
354b411b363SPhilipp Reisner 
355b411b363SPhilipp Reisner /* Long words per page */
356b411b363SPhilipp Reisner #define LWPP (PAGE_SIZE/sizeof(long))
357b411b363SPhilipp Reisner 
358b411b363SPhilipp Reisner /*
359b411b363SPhilipp Reisner  * actually most functions herein should take a struct drbd_bitmap*, not a
360b30ab791SAndreas Gruenbacher  * struct drbd_device*, but for the debug macros I like to have the device around
361b411b363SPhilipp Reisner  * to be able to report device specific.
362b411b363SPhilipp Reisner  */
363b411b363SPhilipp Reisner 
36419f843aaSLars Ellenberg 
/* Free the first @number pages of @pages, NULLing each slot.
 * NULL entries are reported (they indicate a bookkeeping bug) and skipped.
 * A NULL @pages array is tolerated as a no-op. */
365b411b363SPhilipp Reisner static void bm_free_pages(struct page **pages, unsigned long number)
366b411b363SPhilipp Reisner {
367b411b363SPhilipp Reisner 	unsigned long i;
368b411b363SPhilipp Reisner 	if (!pages)
369b411b363SPhilipp Reisner 		return;
370b411b363SPhilipp Reisner 
371b411b363SPhilipp Reisner 	for (i = 0; i < number; i++) {
372b411b363SPhilipp Reisner 		if (!pages[i]) {
373f88c5d90SLars Ellenberg 			pr_alert("bm_free_pages tried to free a NULL pointer; i=%lu n=%lu\n",
374b411b363SPhilipp Reisner 				 i, number);
375b411b363SPhilipp Reisner 			continue;
376b411b363SPhilipp Reisner 		}
377b411b363SPhilipp Reisner 		__free_page(pages[i]);
378b411b363SPhilipp Reisner 		pages[i] = NULL;
379b411b363SPhilipp Reisner 	}
380b411b363SPhilipp Reisner }
381b411b363SPhilipp Reisner 
/* Free memory that may have come from either kmalloc or vmalloc
 * (bm_realloc_pages tries kmalloc first, falls back to vmalloc). */
3821d5cfdb0STetsuo Handa static inline void bm_vk_free(void *ptr)
383b411b363SPhilipp Reisner {
3841d5cfdb0STetsuo Handa 	kvfree(ptr);
385b411b363SPhilipp Reisner }
386b411b363SPhilipp Reisner 
387b411b363SPhilipp Reisner /*
388b411b363SPhilipp Reisner  * "have" and "want" are NUMBER OF PAGES.
 *
 * Grow or shrink the page-pointer array to @want entries, allocating new
 * bitmap pages on grow.  Returns the (possibly new) array, or NULL on
 * allocation failure (in which case the old array is left untouched).
 * On shrink, the surplus pages are NOT freed here — see the comment at
 * the bottom: that must happen outside the spinlock.
389b411b363SPhilipp Reisner  */
390b411b363SPhilipp Reisner static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
391b411b363SPhilipp Reisner {
392b411b363SPhilipp Reisner 	struct page **old_pages = b->bm_pages;
393b411b363SPhilipp Reisner 	struct page **new_pages, *page;
3941d5cfdb0STetsuo Handa 	unsigned int i, bytes;
395b411b363SPhilipp Reisner 	unsigned long have = b->bm_number_of_pages;
396b411b363SPhilipp Reisner 
397b411b363SPhilipp Reisner 	BUG_ON(have == 0 && old_pages != NULL);
398b411b363SPhilipp Reisner 	BUG_ON(have != 0 && old_pages == NULL);
399b411b363SPhilipp Reisner 
	/* nothing to do: current array already has the right size */
400b411b363SPhilipp Reisner 	if (have == want)
401b411b363SPhilipp Reisner 		return old_pages;
402b411b363SPhilipp Reisner 
403b411b363SPhilipp Reisner 	/* Trying kmalloc first, falling back to vmalloc.
4040b143d43SLars Ellenberg 	 * GFP_NOIO, as this is called while drbd IO is "suspended",
4050b143d43SLars Ellenberg 	 * and during resize or attach on diskless Primary,
4060b143d43SLars Ellenberg 	 * we must not block on IO to ourselves.
407bc891c9aSLars Ellenberg 	 * Context is receiver thread or dmsetup. */
408b411b363SPhilipp Reisner 	bytes = sizeof(struct page *)*want;
4098be04b93SJoe Perches 	new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
410b411b363SPhilipp Reisner 	if (!new_pages) {
4110b143d43SLars Ellenberg 		new_pages = __vmalloc(bytes,
4120b143d43SLars Ellenberg 				GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
4130b143d43SLars Ellenberg 				PAGE_KERNEL);
414b411b363SPhilipp Reisner 		if (!new_pages)
415b411b363SPhilipp Reisner 			return NULL;
416b411b363SPhilipp Reisner 	}
417b411b363SPhilipp Reisner 
418b411b363SPhilipp Reisner 	if (want >= have) {
		/* grow: keep existing pages, allocate the additional ones */
419b411b363SPhilipp Reisner 		for (i = 0; i < have; i++)
420b411b363SPhilipp Reisner 			new_pages[i] = old_pages[i];
421b411b363SPhilipp Reisner 		for (; i < want; i++) {
4220b143d43SLars Ellenberg 			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
423b411b363SPhilipp Reisner 			if (!page) {
				/* roll back: free only the pages WE allocated */
424b411b363SPhilipp Reisner 				bm_free_pages(new_pages + have, i - have);
4251d5cfdb0STetsuo Handa 				bm_vk_free(new_pages);
426b411b363SPhilipp Reisner 				return NULL;
427b411b363SPhilipp Reisner 			}
42819f843aaSLars Ellenberg 			/* we want to know which page it is
42919f843aaSLars Ellenberg 			 * from the endio handlers */
43019f843aaSLars Ellenberg 			bm_store_page_idx(page, i);
431b411b363SPhilipp Reisner 			new_pages[i] = page;
432b411b363SPhilipp Reisner 		}
433b411b363SPhilipp Reisner 	} else {
		/* shrink: copy the surviving prefix; surplus pages freed later */
434b411b363SPhilipp Reisner 		for (i = 0; i < want; i++)
435b411b363SPhilipp Reisner 			new_pages[i] = old_pages[i];
436b411b363SPhilipp Reisner 		/* NOT HERE, we are outside the spinlock!
437b411b363SPhilipp Reisner 		bm_free_pages(old_pages + want, have - want);
438b411b363SPhilipp Reisner 		*/
439b411b363SPhilipp Reisner 	}
440b411b363SPhilipp Reisner 
441b411b363SPhilipp Reisner 	return new_pages;
442b411b363SPhilipp Reisner }
443b411b363SPhilipp Reisner 
444b411b363SPhilipp Reisner /*
4457e5fec31SFabian Frederick  * allocates the drbd_bitmap and stores it in device->bitmap.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 * Warns if a bitmap is already present (device->bitmap should be NULL).
446b411b363SPhilipp Reisner  */
447b30ab791SAndreas Gruenbacher int drbd_bm_init(struct drbd_device *device)
448b411b363SPhilipp Reisner {
449b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
450b411b363SPhilipp Reisner 	WARN_ON(b != NULL);
451b411b363SPhilipp Reisner 	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
452b411b363SPhilipp Reisner 	if (!b)
453b411b363SPhilipp Reisner 		return -ENOMEM;
454b411b363SPhilipp Reisner 	spin_lock_init(&b->bm_lock);
4558a03ae2aSThomas Gleixner 	mutex_init(&b->bm_change);
456b411b363SPhilipp Reisner 	init_waitqueue_head(&b->bm_io_wait);
457b411b363SPhilipp Reisner 
458b30ab791SAndreas Gruenbacher 	device->bitmap = b;
459b411b363SPhilipp Reisner 
460b411b363SPhilipp Reisner 	return 0;
461b411b363SPhilipp Reisner }
462b411b363SPhilipp Reisner 
/* Return the device capacity (sectors) this bitmap was sized for,
 * or 0 if no bitmap is attached. */
463b30ab791SAndreas Gruenbacher sector_t drbd_bm_capacity(struct drbd_device *device)
464b411b363SPhilipp Reisner {
465b30ab791SAndreas Gruenbacher 	if (!expect(device->bitmap))
466841ce241SAndreas Gruenbacher 		return 0;
467b30ab791SAndreas Gruenbacher 	return device->bitmap->bm_dev_capacity;
468b411b363SPhilipp Reisner }
469b411b363SPhilipp Reisner 
470b411b363SPhilipp Reisner /* called on driver unload. TODO: call when a device is destroyed.
 * Frees all bitmap pages, the page-pointer array, and the struct itself;
 * leaves device->bitmap = NULL. */
471b411b363SPhilipp Reisner  */
472b30ab791SAndreas Gruenbacher void drbd_bm_cleanup(struct drbd_device *device)
473b411b363SPhilipp Reisner {
474b30ab791SAndreas Gruenbacher 	if (!expect(device->bitmap))
475841ce241SAndreas Gruenbacher 		return;
476b30ab791SAndreas Gruenbacher 	bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
4771d5cfdb0STetsuo Handa 	bm_vk_free(device->bitmap->bm_pages);
478b30ab791SAndreas Gruenbacher 	kfree(device->bitmap);
479b30ab791SAndreas Gruenbacher 	device->bitmap = NULL;
480b411b363SPhilipp Reisner }
481b411b363SPhilipp Reisner 
482b411b363SPhilipp Reisner /*
483b411b363SPhilipp Reisner  * since (b->bm_bits % BITS_PER_LONG) != 0,
484b411b363SPhilipp Reisner  * this masks out the remaining bits.
485b411b363SPhilipp Reisner  * Returns the number of bits cleared.
486b411b363SPhilipp Reisner  */
4872630628bSLars Ellenberg #ifndef BITS_PER_PAGE
48895a0f10cSLars Ellenberg #define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
48995a0f10cSLars Ellenberg #define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
4902630628bSLars Ellenberg #else
4912630628bSLars Ellenberg # if BITS_PER_PAGE != (1UL << (PAGE_SHIFT + 3))
4922630628bSLars Ellenberg #  error "ambiguous BITS_PER_PAGE"
4932630628bSLars Ellenberg # endif
4942630628bSLars Ellenberg #endif
49595a0f10cSLars Ellenberg #define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
/* Clear the unused bits past bm_bits in the last word / padding word
 * (bitmap length is rounded up to long words, and to an even number of
 * 32bit words on 32bit arches).  Returns the number of bits cleared. */
496b411b363SPhilipp Reisner static int bm_clear_surplus(struct drbd_bitmap *b)
497b411b363SPhilipp Reisner {
49895a0f10cSLars Ellenberg 	unsigned long mask;
499b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
50095a0f10cSLars Ellenberg 	int tmp;
50195a0f10cSLars Ellenberg 	int cleared = 0;
502b411b363SPhilipp Reisner 
50395a0f10cSLars Ellenberg 	/* number of bits modulo bits per page */
50495a0f10cSLars Ellenberg 	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
50595a0f10cSLars Ellenberg 	/* mask the used bits of the word containing the last bit */
50695a0f10cSLars Ellenberg 	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
50795a0f10cSLars Ellenberg 	/* bitmap is always stored little endian,
50895a0f10cSLars Ellenberg 	 * on disk and in core memory alike */
50995a0f10cSLars Ellenberg 	mask = cpu_to_lel(mask);
51095a0f10cSLars Ellenberg 
	/* the surplus bits always live in the last bitmap page */
5116850c442SLars Ellenberg 	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
51295a0f10cSLars Ellenberg 	bm = p_addr + (tmp/BITS_PER_LONG);
51395a0f10cSLars Ellenberg 	if (mask) {
51495a0f10cSLars Ellenberg 		/* If mask != 0, we are not exactly aligned, so bm now points
51595a0f10cSLars Ellenberg 		 * to the long containing the last bit.
51695a0f10cSLars Ellenberg 		 * If mask == 0, bm already points to the word immediately
51795a0f10cSLars Ellenberg 		 * after the last (long word aligned) bit. */
518b411b363SPhilipp Reisner 		cleared = hweight_long(*bm & ~mask);
519b411b363SPhilipp Reisner 		*bm &= mask;
52095a0f10cSLars Ellenberg 		bm++;
521b411b363SPhilipp Reisner 	}
522b411b363SPhilipp Reisner 
52395a0f10cSLars Ellenberg 	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
52495a0f10cSLars Ellenberg 		/* on a 32bit arch, we may need to zero out
52595a0f10cSLars Ellenberg 		 * a padding long to align with a 64bit remote */
526b411b363SPhilipp Reisner 		cleared += hweight_long(*bm);
527b411b363SPhilipp Reisner 		*bm = 0;
528b411b363SPhilipp Reisner 	}
529b411b363SPhilipp Reisner 	bm_unmap(p_addr);
530b411b363SPhilipp Reisner 	return cleared;
531b411b363SPhilipp Reisner }
532b411b363SPhilipp Reisner 
/* Mirror of bm_clear_surplus(): SET all unused bits past bm_bits in the
 * last word and the potential 32bit padding long.  No return value. */
533b411b363SPhilipp Reisner static void bm_set_surplus(struct drbd_bitmap *b)
534b411b363SPhilipp Reisner {
53595a0f10cSLars Ellenberg 	unsigned long mask;
536b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
53795a0f10cSLars Ellenberg 	int tmp;
538b411b363SPhilipp Reisner 
53995a0f10cSLars Ellenberg 	/* number of bits modulo bits per page */
54095a0f10cSLars Ellenberg 	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
54195a0f10cSLars Ellenberg 	/* mask the used bits of the word containing the last bit */
54295a0f10cSLars Ellenberg 	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
54395a0f10cSLars Ellenberg 	/* bitmap is always stored little endian,
54495a0f10cSLars Ellenberg 	 * on disk and in core memory alike */
54595a0f10cSLars Ellenberg 	mask = cpu_to_lel(mask);
54695a0f10cSLars Ellenberg 
	/* the surplus bits always live in the last bitmap page */
5476850c442SLars Ellenberg 	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
54895a0f10cSLars Ellenberg 	bm = p_addr + (tmp/BITS_PER_LONG);
54995a0f10cSLars Ellenberg 	if (mask) {
55095a0f10cSLars Ellenberg 		/* If mask != 0, we are not exactly aligned, so bm now points
55195a0f10cSLars Ellenberg 		 * to the long containing the last bit.
55295a0f10cSLars Ellenberg 		 * If mask == 0, bm already points to the word immediately
55395a0f10cSLars Ellenberg 		 * after the last (long word aligned) bit. */
554b411b363SPhilipp Reisner 		*bm |= ~mask;
55595a0f10cSLars Ellenberg 		bm++;
556b411b363SPhilipp Reisner 	}
557b411b363SPhilipp Reisner 
55895a0f10cSLars Ellenberg 	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
55995a0f10cSLars Ellenberg 		/* on a 32bit arch, we may need to zero out
56095a0f10cSLars Ellenberg 		 * a padding long to align with a 64bit remote */
56195a0f10cSLars Ellenberg 		*bm = ~0UL;
562b411b363SPhilipp Reisner 	}
563b411b363SPhilipp Reisner 	bm_unmap(p_addr);
564b411b363SPhilipp Reisner }
565b411b363SPhilipp Reisner 
/* Count the set bits of the whole bitmap.
 * You better not modify the bitmap while this is running,
 * or its results will be stale.
 * NOTE: as a side effect this masks off any surplus bits in the last
 * used word, and zeroes a possible unused 32bit padding long, so the
 * in-core last page is normalized afterwards. */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	/* mask of the valid bits in the word containing the last bit */
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
	int idx, last_word;

	/* all but last page */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx);
		bits += bitmap_weight(p_addr, BITS_PER_PAGE);
		__bm_unmap(p_addr);
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx);
	/* count all full words before the last used word ... */
	bits += bitmap_weight(p_addr, last_word * BITS_PER_LONG);
	/* ... then mask off and count only the valid bits of the last word
	 * (bitmap is stored little endian, hence cpu_to_lel) */
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr);
	return bits;
}
594b411b363SPhilipp Reisner 
/* Fill @len long words of the bitmap, starting at long word @offset,
 * with byte value @c, walking the backing pages one at a time and
 * marking each touched page as needing writeout.
 * offset and len in long words. */
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		pr_alert("bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		/* do at most up to the next page boundary (LWPP = long
		 * words per page), but never beyond end */
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			/* would overrun the mapped page; should be
			 * impossible by construction of do_now */
			pr_alert("BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}
624b411b363SPhilipp Reisner 
625ae8bf312SLars Ellenberg /* For the layout, see comment above drbd_md_set_sector_offsets(). */
626ae8bf312SLars Ellenberg static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
627ae8bf312SLars Ellenberg {
628ae8bf312SLars Ellenberg 	u64 bitmap_sectors;
629ae8bf312SLars Ellenberg 	if (ldev->md.al_offset == 8)
630ae8bf312SLars Ellenberg 		bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
631ae8bf312SLars Ellenberg 	else
632ae8bf312SLars Ellenberg 		bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
633ae8bf312SLars Ellenberg 	return bitmap_sectors << (9 + 3);
634ae8bf312SLars Ellenberg }
635ae8bf312SLars Ellenberg 
/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 *
 * @capacity: new device size in sectors; 0 means "detach", which frees
 *	the bitmap pages entirely.
 * @set_new_bits: when growing, initialize the new area to all bits set
 *	(out of sync) instead of all clear.
 */
int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0;
	bool growing;

	if (!expect(b))
		return -ENOMEM;

	drbd_bm_lock(device, "resize", BM_LOCKED_MASK);

	drbd_info(device, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	if (capacity == 0) {
		/* Free everything: detach the page array under the spin
		 * lock, do the actual freeing outside of it. */
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set   =
		b->bm_bits  =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages);
		goto out;
	}
	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(device)) {
		u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
		put_ldev(device);
		/* refuse to grow beyond what the on-disk meta data area
		 * can hold */
		if (bits > bits_on_disk) {
			drbd_info(device, "bits = %lu\n", bits);
			drbd_info(device, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		/* same number of pages: reuse the existing page array */
		D_ASSERT(device, b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	/* mark the old surplus area as set before it becomes part of the
	 * valid bitmap, so the grown-into region starts out of sync */
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	/* normalize bits beyond the (possibly shrunk) valid area */
	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages);
	/* when shrinking, recount instead of trying to adjust bm_set */
	if (!growing)
		b->bm_set = bm_count_bits(b);
	drbd_info(device, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(device);
	return err;
}
759b411b363SPhilipp Reisner 
760b411b363SPhilipp Reisner /* inherently racy:
761b411b363SPhilipp Reisner  * if not protected by other means, return value may be out of date when
762b411b363SPhilipp Reisner  * leaving this function...
763b411b363SPhilipp Reisner  * we still need to lock it, since it is important that this returns
764b411b363SPhilipp Reisner  * bm_set == 0 precisely.
765b411b363SPhilipp Reisner  *
766b411b363SPhilipp Reisner  * maybe bm_set should be atomic_t ?
767b411b363SPhilipp Reisner  */
768b30ab791SAndreas Gruenbacher unsigned long _drbd_bm_total_weight(struct drbd_device *device)
769b411b363SPhilipp Reisner {
770b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
771b411b363SPhilipp Reisner 	unsigned long s;
772b411b363SPhilipp Reisner 	unsigned long flags;
773b411b363SPhilipp Reisner 
774841ce241SAndreas Gruenbacher 	if (!expect(b))
775841ce241SAndreas Gruenbacher 		return 0;
776841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
777841ce241SAndreas Gruenbacher 		return 0;
778b411b363SPhilipp Reisner 
779b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
780b411b363SPhilipp Reisner 	s = b->bm_set;
781b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
782b411b363SPhilipp Reisner 
783b411b363SPhilipp Reisner 	return s;
784b411b363SPhilipp Reisner }
785b411b363SPhilipp Reisner 
786b30ab791SAndreas Gruenbacher unsigned long drbd_bm_total_weight(struct drbd_device *device)
787b411b363SPhilipp Reisner {
788b411b363SPhilipp Reisner 	unsigned long s;
789b411b363SPhilipp Reisner 	/* if I don't have a disk, I don't know about out-of-sync status */
790b30ab791SAndreas Gruenbacher 	if (!get_ldev_if_state(device, D_NEGOTIATING))
791b411b363SPhilipp Reisner 		return 0;
792b30ab791SAndreas Gruenbacher 	s = _drbd_bm_total_weight(device);
793b30ab791SAndreas Gruenbacher 	put_ldev(device);
794b411b363SPhilipp Reisner 	return s;
795b411b363SPhilipp Reisner }
796b411b363SPhilipp Reisner 
797b30ab791SAndreas Gruenbacher size_t drbd_bm_words(struct drbd_device *device)
798b411b363SPhilipp Reisner {
799b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
800841ce241SAndreas Gruenbacher 	if (!expect(b))
801841ce241SAndreas Gruenbacher 		return 0;
802841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
803841ce241SAndreas Gruenbacher 		return 0;
804b411b363SPhilipp Reisner 
805b411b363SPhilipp Reisner 	return b->bm_words;
806b411b363SPhilipp Reisner }
807b411b363SPhilipp Reisner 
808b30ab791SAndreas Gruenbacher unsigned long drbd_bm_bits(struct drbd_device *device)
809b411b363SPhilipp Reisner {
810b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
811841ce241SAndreas Gruenbacher 	if (!expect(b))
812841ce241SAndreas Gruenbacher 		return 0;
813b411b363SPhilipp Reisner 
814b411b363SPhilipp Reisner 	return b->bm_bits;
815b411b363SPhilipp Reisner }
816b411b363SPhilipp Reisner 
/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 * Merging is by bitwise OR; b->bm_set is adjusted by the number of
 * newly set bits. */
void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		/* at most up to the next page boundary, never beyond end */
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			/* OR in the received word; track how many bits
			 * became newly set to keep bm_set accurate */
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}
867b411b363SPhilipp Reisner 
/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 * On out-of-range parameters, only an error is logged; the buffer is
 * left untouched. */
void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		drbd_err(device, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			/* at most up to the next page boundary */
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}
906b411b363SPhilipp Reisner 
/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	/* 0xff all words, then clear the surplus bits beyond bm_bits so
	 * the set-bit count below is exact */
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}
922b411b363SPhilipp Reisner 
/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	/* no bm_clear_surplus() needed: zeroing all words already leaves
	 * the surplus area clear */
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}
937b411b363SPhilipp Reisner 
/* kref release callback for a bitmap AIO context: unlink it from the
 * device's pending_bitmap_io list under the resource req_lock, drop the
 * ldev reference taken in bm_rw(), and free the context. */
static void drbd_bm_aio_ctx_destroy(struct kref *kref)
{
	struct drbd_bm_aio_ctx *ctx = container_of(kref, struct drbd_bm_aio_ctx, kref);
	unsigned long flags;

	spin_lock_irqsave(&ctx->device->resource->req_lock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&ctx->device->resource->req_lock, flags);
	put_ldev(ctx->device);
	kfree(ctx);
}
949d1f3779bSPhilipp Reisner 
/* bv_page may be a copy, or may be the original */
/* Completion handler for a single bitmap page bio: record any IO error
 * in the shared context, release the per-page IO lock, free the copied
 * page (if BM_AIO_COPY_PAGES), and on the last completion wake up the
 * waiter in bm_rw() and drop the context kref. */
static void drbd_bm_endio(struct bio *bio)
{
	struct drbd_bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);

	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);

	if (bio->bi_error) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = bio->bi_error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
					bio->bi_error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(device, idx);

	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);

	bio_put(bio);

	if (atomic_dec_and_test(&ctx->in_flight)) {
		/* last bio done: signal bm_rw() and drop our context ref */
		ctx->done = 1;
		wake_up(&device->misc_wait);
		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
	}
}
990b411b363SPhilipp Reisner 
/* Build and submit one bio for bitmap page @page_nr, reading or writing
 * according to ctx->flags.  With BM_AIO_COPY_PAGES, a mempool copy of
 * the page is written instead of the live page, so the bitmap may keep
 * changing while the IO is in flight.  Completion is drbd_bm_endio(). */
static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	struct page *page;
	unsigned int len;
	unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;

	/* target sector: start of the on-disk bitmap area plus the
	 * page offset (PAGE_SHIFT-9: bytes per page to sectors) */
	sector_t on_disk_sector =
		device->ldev->md.md_offset + device->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(device, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM);
		copy_highpage(page, b->bm_pages[page_nr]);
		/* remember which bitmap page this copy stands in for,
		 * so the completion handler can find it */
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];
	bio->bi_bdev = device->ldev->md_bdev;
	bio->bi_iter.bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api.  Do we want to assert that? */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = drbd_bm_endio;
	bio_set_op_attrs(bio, op, 0);

	if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		/* fault injection: complete with -EIO instead of submitting */
		bio_io_error(bio);
	} else {
		submit_bio(bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &device->rs_sect_ev);
	}
}
1040b411b363SPhilipp Reisner 
1041b411b363SPhilipp Reisner /*
1042b411b363SPhilipp Reisner  * bm_rw: read/write the whole bitmap from/to its on disk location.
1043b411b363SPhilipp Reisner  */
10444ce49266SLars Ellenberg static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
1045b411b363SPhilipp Reisner {
10464ce49266SLars Ellenberg 	struct drbd_bm_aio_ctx *ctx;
1047b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
1048*27ea1d87SLars Ellenberg 	unsigned int num_pages, i, count = 0;
1049b411b363SPhilipp Reisner 	unsigned long now;
1050b411b363SPhilipp Reisner 	char ppb[10];
1051b411b363SPhilipp Reisner 	int err = 0;
1052b411b363SPhilipp Reisner 
105319f843aaSLars Ellenberg 	/*
105419f843aaSLars Ellenberg 	 * We are protected against bitmap disappearing/resizing by holding an
105519f843aaSLars Ellenberg 	 * ldev reference (caller must have called get_ldev()).
105619f843aaSLars Ellenberg 	 * For read/write, we are protected against changes to the bitmap by
105719f843aaSLars Ellenberg 	 * the bitmap lock (see drbd_bitmap_io).
105819f843aaSLars Ellenberg 	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
105919f843aaSLars Ellenberg 	 * as we submit copies of pages anyways.
106019f843aaSLars Ellenberg 	 */
1061d1f3779bSPhilipp Reisner 
10624ce49266SLars Ellenberg 	ctx = kmalloc(sizeof(struct drbd_bm_aio_ctx), GFP_NOIO);
1063d1f3779bSPhilipp Reisner 	if (!ctx)
1064d1f3779bSPhilipp Reisner 		return -ENOMEM;
1065d1f3779bSPhilipp Reisner 
10664ce49266SLars Ellenberg 	*ctx = (struct drbd_bm_aio_ctx) {
1067b30ab791SAndreas Gruenbacher 		.device = device,
10684ce49266SLars Ellenberg 		.start_jif = jiffies,
1069d1f3779bSPhilipp Reisner 		.in_flight = ATOMIC_INIT(1),
10709e58c4daSPhilipp Reisner 		.done = 0,
10710e8488adSLars Ellenberg 		.flags = flags,
1072d1f3779bSPhilipp Reisner 		.error = 0,
1073d1f3779bSPhilipp Reisner 		.kref = { ATOMIC_INIT(2) },
1074d1f3779bSPhilipp Reisner 	};
1075d1f3779bSPhilipp Reisner 
10764ce49266SLars Ellenberg 	if (!get_ldev_if_state(device, D_ATTACHING)) {  /* put is in drbd_bm_aio_ctx_destroy() */
1077d0180171SAndreas Gruenbacher 		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
10789e58c4daSPhilipp Reisner 		kfree(ctx);
10799e58c4daSPhilipp Reisner 		return -ENODEV;
10809e58c4daSPhilipp Reisner 	}
10818fe39aacSPhilipp Reisner 	/* Here D_ATTACHING is sufficient since drbd_bm_read() is called only from
10828fe39aacSPhilipp Reisner 	   drbd_adm_attach(), after device->ldev was assigned. */
10839e58c4daSPhilipp Reisner 
10844ce49266SLars Ellenberg 	if (0 == (ctx->flags & ~BM_AIO_READ))
108520ceb2b2SLars Ellenberg 		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
1086b411b363SPhilipp Reisner 
10874ce49266SLars Ellenberg 	spin_lock_irq(&device->resource->req_lock);
10884ce49266SLars Ellenberg 	list_add_tail(&ctx->list, &device->pending_bitmap_io);
10894ce49266SLars Ellenberg 	spin_unlock_irq(&device->resource->req_lock);
10904ce49266SLars Ellenberg 
10916850c442SLars Ellenberg 	num_pages = b->bm_number_of_pages;
1092b411b363SPhilipp Reisner 
1093b411b363SPhilipp Reisner 	now = jiffies;
1094b411b363SPhilipp Reisner 
1095b411b363SPhilipp Reisner 	/* let the layers below us try to merge these bios... */
1096*27ea1d87SLars Ellenberg 
1097*27ea1d87SLars Ellenberg 	if (flags & BM_AIO_READ) {
1098*27ea1d87SLars Ellenberg 		for (i = 0; i < num_pages; i++) {
1099*27ea1d87SLars Ellenberg 			atomic_inc(&ctx->in_flight);
1100*27ea1d87SLars Ellenberg 			bm_page_io_async(ctx, i);
1101*27ea1d87SLars Ellenberg 			++count;
1102*27ea1d87SLars Ellenberg 			cond_resched();
1103*27ea1d87SLars Ellenberg 		}
1104*27ea1d87SLars Ellenberg 	} else if (flags & BM_AIO_WRITE_HINTED) {
1105*27ea1d87SLars Ellenberg 		/* ASSERT: BM_AIO_WRITE_ALL_PAGES is not set. */
1106*27ea1d87SLars Ellenberg 		unsigned int hint;
1107*27ea1d87SLars Ellenberg 		for (hint = 0; hint < b->n_bitmap_hints; hint++) {
1108*27ea1d87SLars Ellenberg 			i = b->al_bitmap_hints[hint];
1109*27ea1d87SLars Ellenberg 			if (i >= num_pages) /* == -1U: no hint here. */
1110*27ea1d87SLars Ellenberg 				continue;
1111*27ea1d87SLars Ellenberg 			/* Several AL-extents may point to the same page. */
1112*27ea1d87SLars Ellenberg 			if (!test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
1113*27ea1d87SLars Ellenberg 			    &page_private(b->bm_pages[i])))
1114*27ea1d87SLars Ellenberg 				continue;
1115*27ea1d87SLars Ellenberg 			/* Has it even changed? */
1116*27ea1d87SLars Ellenberg 			if (bm_test_page_unchanged(b->bm_pages[i]))
1117*27ea1d87SLars Ellenberg 				continue;
1118*27ea1d87SLars Ellenberg 			atomic_inc(&ctx->in_flight);
1119*27ea1d87SLars Ellenberg 			bm_page_io_async(ctx, i);
1120*27ea1d87SLars Ellenberg 			++count;
1121*27ea1d87SLars Ellenberg 		}
1122*27ea1d87SLars Ellenberg 	} else {
11236850c442SLars Ellenberg 		for (i = 0; i < num_pages; i++) {
112419f843aaSLars Ellenberg 			/* ignore completely unchanged pages */
112519f843aaSLars Ellenberg 			if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
112619f843aaSLars Ellenberg 				break;
11274ce49266SLars Ellenberg 			if (!(flags & BM_AIO_WRITE_ALL_PAGES) &&
1128d1aa4d04SPhilipp Reisner 			    bm_test_page_unchanged(b->bm_pages[i])) {
1129d0180171SAndreas Gruenbacher 				dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
113019f843aaSLars Ellenberg 				continue;
113119f843aaSLars Ellenberg 			}
113219f843aaSLars Ellenberg 			/* during lazy writeout,
113319f843aaSLars Ellenberg 			 * ignore those pages not marked for lazy writeout. */
113419f843aaSLars Ellenberg 			if (lazy_writeout_upper_idx &&
113519f843aaSLars Ellenberg 			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
1136d0180171SAndreas Gruenbacher 				dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
113719f843aaSLars Ellenberg 				continue;
113819f843aaSLars Ellenberg 			}
1139d1f3779bSPhilipp Reisner 			atomic_inc(&ctx->in_flight);
11404ce49266SLars Ellenberg 			bm_page_io_async(ctx, i);
114119f843aaSLars Ellenberg 			++count;
114219f843aaSLars Ellenberg 			cond_resched();
114319f843aaSLars Ellenberg 		}
1144*27ea1d87SLars Ellenberg 	}
1145b411b363SPhilipp Reisner 
1146725a97e4SLars Ellenberg 	/*
1147ed15b795SAndreas Gruenbacher 	 * We initialize ctx->in_flight to one to make sure drbd_bm_endio
11489e58c4daSPhilipp Reisner 	 * will not set ctx->done early, and decrement / test it here.  If there
1149725a97e4SLars Ellenberg 	 * are still some bios in flight, we need to wait for them here.
11509e58c4daSPhilipp Reisner 	 * If all IO is done already (or nothing had been submitted), there is
11519e58c4daSPhilipp Reisner 	 * no need to wait.  Still, we need to put the kref associated with the
11529e58c4daSPhilipp Reisner 	 * "in_flight reached zero, all done" event.
1153725a97e4SLars Ellenberg 	 */
1154d1f3779bSPhilipp Reisner 	if (!atomic_dec_and_test(&ctx->in_flight))
1155b30ab791SAndreas Gruenbacher 		wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
11569e58c4daSPhilipp Reisner 	else
11574ce49266SLars Ellenberg 		kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
1158d1f3779bSPhilipp Reisner 
1159c9d963a4SLars Ellenberg 	/* summary for global bitmap IO */
116013c2088dSLars Ellenberg 	if (flags == 0) {
116113c2088dSLars Ellenberg 		unsigned int ms = jiffies_to_msecs(jiffies - now);
116213c2088dSLars Ellenberg 		if (ms > 5) {
116313c2088dSLars Ellenberg 			drbd_info(device, "bitmap %s of %u pages took %u ms\n",
11644ce49266SLars Ellenberg 				 (flags & BM_AIO_READ) ? "READ" : "WRITE",
116513c2088dSLars Ellenberg 				 count, ms);
116613c2088dSLars Ellenberg 		}
116713c2088dSLars Ellenberg 	}
1168b411b363SPhilipp Reisner 
1169d1f3779bSPhilipp Reisner 	if (ctx->error) {
1170d0180171SAndreas Gruenbacher 		drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
1171b30ab791SAndreas Gruenbacher 		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
1172d1f3779bSPhilipp Reisner 		err = -EIO; /* ctx->error ? */
1173b411b363SPhilipp Reisner 	}
1174b411b363SPhilipp Reisner 
11759e58c4daSPhilipp Reisner 	if (atomic_read(&ctx->in_flight))
117644edfb0dSLars Ellenberg 		err = -EIO; /* Disk timeout/force-detach during IO... */
11779e58c4daSPhilipp Reisner 
1178b411b363SPhilipp Reisner 	now = jiffies;
11794ce49266SLars Ellenberg 	if (flags & BM_AIO_READ) {
118095a0f10cSLars Ellenberg 		b->bm_set = bm_count_bits(b);
1181d0180171SAndreas Gruenbacher 		drbd_info(device, "recounting of set bits took additional %lu jiffies\n",
1182b411b363SPhilipp Reisner 		     jiffies - now);
1183b411b363SPhilipp Reisner 	}
1184b411b363SPhilipp Reisner 	now = b->bm_set;
1185b411b363SPhilipp Reisner 
11864ce49266SLars Ellenberg 	if ((flags & ~BM_AIO_READ) == 0)
1187d0180171SAndreas Gruenbacher 		drbd_info(device, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
1188b411b363SPhilipp Reisner 		     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
1189b411b363SPhilipp Reisner 
11904ce49266SLars Ellenberg 	kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
1191b411b363SPhilipp Reisner 	return err;
1192b411b363SPhilipp Reisner }
1193b411b363SPhilipp Reisner 
1194b411b363SPhilipp Reisner /**
1195b411b363SPhilipp Reisner  * drbd_bm_read() - Read the whole bitmap from its on disk location.
1196b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
1197b411b363SPhilipp Reisner  */
1198b30ab791SAndreas Gruenbacher int drbd_bm_read(struct drbd_device *device) __must_hold(local)
1199b411b363SPhilipp Reisner {
12004ce49266SLars Ellenberg 	return bm_rw(device, BM_AIO_READ, 0);
1201b411b363SPhilipp Reisner }
1202b411b363SPhilipp Reisner 
1203b411b363SPhilipp Reisner /**
1204b411b363SPhilipp Reisner  * drbd_bm_write() - Write the whole bitmap to its on disk location.
1205b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
120619f843aaSLars Ellenberg  *
120719f843aaSLars Ellenberg  * Will only write pages that have changed since last IO.
1208b411b363SPhilipp Reisner  */
1209b30ab791SAndreas Gruenbacher int drbd_bm_write(struct drbd_device *device) __must_hold(local)
1210b411b363SPhilipp Reisner {
12114ce49266SLars Ellenberg 	return bm_rw(device, 0, 0);
1212b411b363SPhilipp Reisner }
1213b411b363SPhilipp Reisner 
1214b411b363SPhilipp Reisner /**
1215d1aa4d04SPhilipp Reisner  * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
1216b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
1217d1aa4d04SPhilipp Reisner  *
1218d1aa4d04SPhilipp Reisner  * Will write all pages.
1219d1aa4d04SPhilipp Reisner  */
1220b30ab791SAndreas Gruenbacher int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
1221d1aa4d04SPhilipp Reisner {
12224ce49266SLars Ellenberg 	return bm_rw(device, BM_AIO_WRITE_ALL_PAGES, 0);
1223d1aa4d04SPhilipp Reisner }
1224d1aa4d04SPhilipp Reisner 
1225d1aa4d04SPhilipp Reisner /**
1226c7a58db4SLars Ellenberg  * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
1227c7a58db4SLars Ellenberg  * @device:	DRBD device.
1228c7a58db4SLars Ellenberg  * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
1229c7a58db4SLars Ellenberg  */
1230c7a58db4SLars Ellenberg int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local)
1231c7a58db4SLars Ellenberg {
1232c7a58db4SLars Ellenberg 	return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx);
1233c7a58db4SLars Ellenberg }
1234c7a58db4SLars Ellenberg 
1235c7a58db4SLars Ellenberg /**
12360e8488adSLars Ellenberg  * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
1237b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
12380e8488adSLars Ellenberg  *
12390e8488adSLars Ellenberg  * Will only write pages that have changed since last IO.
12400e8488adSLars Ellenberg  * In contrast to drbd_bm_write(), this will copy the bitmap pages
12410e8488adSLars Ellenberg  * to temporary writeout pages. It is intended to trigger a full write-out
12420e8488adSLars Ellenberg  * while still allowing the bitmap to change, for example if a resync or online
12430e8488adSLars Ellenberg  * verify is aborted due to a failed peer disk, while local IO continues, or
12440e8488adSLars Ellenberg  * pending resync acks are still being processed.
12450e8488adSLars Ellenberg  */
1246b30ab791SAndreas Gruenbacher int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
12470e8488adSLars Ellenberg {
12484ce49266SLars Ellenberg 	return bm_rw(device, BM_AIO_COPY_PAGES, 0);
1249b411b363SPhilipp Reisner }
125019f843aaSLars Ellenberg 
1251a220d291SLars Ellenberg /**
125245dfffebSLars Ellenberg  * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
1253b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
125445dfffebSLars Ellenberg  */
1255b30ab791SAndreas Gruenbacher int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local)
125645dfffebSLars Ellenberg {
12574ce49266SLars Ellenberg 	return bm_rw(device, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
125845dfffebSLars Ellenberg }
125919f843aaSLars Ellenberg 
/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 */
/* Scan for the next set (or, with find_zero_bit, clear) bit at or after
 * bm_fo, walking the bitmap one page at a time.  Returns the bit number,
 * or DRBD_END_OF_BITMAP if no such bit exists before b->bm_bits.
 * Caller must hold bm_lock (see the locked wrapper bm_find_next()). */
static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo,
	const int find_zero_bit)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;	/* bit number of the first bit in the current page */
	unsigned i;


	if (bm_fo > b->bm_bits) {
		/* out-of-range start offset: complain, report end-of-bitmap */
		drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));

			/* search only within this one page, starting at the
			 * in-page offset of bm_fo */
			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				/* a hit in the unused tail of the last page
				 * does not count; fall through to
				 * DRBD_END_OF_BITMAP below */
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			/* nothing in this page; continue with the next one */
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}
1307b411b363SPhilipp Reisner 
1308b30ab791SAndreas Gruenbacher static unsigned long bm_find_next(struct drbd_device *device,
1309b411b363SPhilipp Reisner 	unsigned long bm_fo, const int find_zero_bit)
1310b411b363SPhilipp Reisner {
1311b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
13124b0715f0SLars Ellenberg 	unsigned long i = DRBD_END_OF_BITMAP;
1313b411b363SPhilipp Reisner 
1314841ce241SAndreas Gruenbacher 	if (!expect(b))
1315841ce241SAndreas Gruenbacher 		return i;
1316841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1317841ce241SAndreas Gruenbacher 		return i;
1318b411b363SPhilipp Reisner 
1319b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
132020ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1321b30ab791SAndreas Gruenbacher 		bm_print_lock_info(device);
1322b411b363SPhilipp Reisner 
1323b30ab791SAndreas Gruenbacher 	i = __bm_find_next(device, bm_fo, find_zero_bit);
1324b411b363SPhilipp Reisner 
1325b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
1326b411b363SPhilipp Reisner 	return i;
1327b411b363SPhilipp Reisner }
1328b411b363SPhilipp Reisner 
/* Find the first set bit at or after bm_fo; takes the bitmap lock. */
unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	unsigned long bit = bm_find_next(device, bm_fo, 0);

	return bit;
}
1333b411b363SPhilipp Reisner 
#if 0
/* not yet needed for anything. */
/* Locked variant: find the first clear bit at or after bm_fo.
 * Kept disabled until a caller materializes. */
unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(device, bm_fo, 1);
}
#endif
1341b411b363SPhilipp Reisner 
/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	unsigned long bit = __bm_find_next(device, bm_fo, 0);

	return bit;
}
1349b411b363SPhilipp Reisner 
/* Unlocked variant: find the first clear bit at or after bm_fo.
 * You must take drbd_bm_lock() first. */
unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	unsigned long bit = __bm_find_next(device, bm_fo, 1);

	return bit;
}
1355b411b363SPhilipp Reisner 
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
	unsigned long e, int val)
{
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;	/* -1U: no page mapped yet */
	int c = 0;		/* set-bit delta within the current page */
	int changed_total = 0;	/* accumulated delta over all pages */

	if (e >= b->bm_bits) {
		drbd_err(device, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		/* clamp instead of corrupting memory beyond the bitmap */
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr);
			/* Before switching pages, flush the bookkeeping for
			 * the page we just left: cleared bits (c < 0) only
			 * need lazy writeout, newly set bits (c > 0) must be
			 * written out.  On the very first iteration c == 0,
			 * so last_page_nr == -1U is never dereferenced. */
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr);
			last_page_nr = page_nr;
		}
		/* count only actual transitions, not already-set/-clear bits */
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr);
	/* flush bookkeeping for the last page touched */
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}
1406b411b363SPhilipp Reisner 
1407b411b363SPhilipp Reisner /* returns number of bits actually changed.
1408b411b363SPhilipp Reisner  * for val != 0, we change 0 -> 1, return code positive
1409b411b363SPhilipp Reisner  * for val == 0, we change 1 -> 0, return code negative
1410b411b363SPhilipp Reisner  * wants bitnr, not sector */
1411b30ab791SAndreas Gruenbacher static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
1412b411b363SPhilipp Reisner 	const unsigned long e, int val)
1413b411b363SPhilipp Reisner {
1414b411b363SPhilipp Reisner 	unsigned long flags;
1415b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
1416b411b363SPhilipp Reisner 	int c = 0;
1417b411b363SPhilipp Reisner 
1418841ce241SAndreas Gruenbacher 	if (!expect(b))
1419841ce241SAndreas Gruenbacher 		return 1;
1420841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1421841ce241SAndreas Gruenbacher 		return 0;
1422b411b363SPhilipp Reisner 
1423b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
142420ceb2b2SLars Ellenberg 	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
1425b30ab791SAndreas Gruenbacher 		bm_print_lock_info(device);
1426b411b363SPhilipp Reisner 
1427b30ab791SAndreas Gruenbacher 	c = __bm_change_bits_to(device, s, e, val);
1428b411b363SPhilipp Reisner 
1429b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1430b411b363SPhilipp Reisner 	return c;
1431b411b363SPhilipp Reisner }
1432b411b363SPhilipp Reisner 
/* returns number of bits changed 0 -> 1; range is inclusive */
int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	int changed = bm_change_bits_to(device, s, e, 1);

	return changed;
}
1438b411b363SPhilipp Reisner 
/* returns number of bits changed 1 -> 0; range is inclusive.
 * bm_change_bits_to() reports clears as a negative count, so negate. */
int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	int cleared = -bm_change_bits_to(device, s, e, 0);

	return cleared;
}
1444b411b363SPhilipp Reisner 
1445b411b363SPhilipp Reisner /* sets all bits in full words,
1446b411b363SPhilipp Reisner  * from first_word up to, but not including, last_word */
1447b411b363SPhilipp Reisner static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
1448b411b363SPhilipp Reisner 		int page_nr, int first_word, int last_word)
1449b411b363SPhilipp Reisner {
1450b411b363SPhilipp Reisner 	int i;
1451b411b363SPhilipp Reisner 	int bits;
145222d81140SLars Ellenberg 	int changed = 0;
1453cfd8005cSCong Wang 	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
14545fb3bc4dSLars Ellenberg 
14555fb3bc4dSLars Ellenberg 	/* I think it is more cache line friendly to hweight_long then set to ~0UL,
14565fb3bc4dSLars Ellenberg 	 * than to first bitmap_weight() all words, then bitmap_fill() all words */
1457b411b363SPhilipp Reisner 	for (i = first_word; i < last_word; i++) {
1458b411b363SPhilipp Reisner 		bits = hweight_long(paddr[i]);
1459b411b363SPhilipp Reisner 		paddr[i] = ~0UL;
146022d81140SLars Ellenberg 		changed += BITS_PER_LONG - bits;
1461b411b363SPhilipp Reisner 	}
1462cfd8005cSCong Wang 	kunmap_atomic(paddr);
146322d81140SLars Ellenberg 	if (changed) {
146422d81140SLars Ellenberg 		/* We only need lazy writeout, the information is still in the
146522d81140SLars Ellenberg 		 * remote bitmap as well, and is reconstructed during the next
146622d81140SLars Ellenberg 		 * bitmap exchange, if lost locally due to a crash. */
146722d81140SLars Ellenberg 		bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
146822d81140SLars Ellenberg 		b->bm_set += changed;
146922d81140SLars Ellenberg 	}
1470b411b363SPhilipp Reisner }
1471b411b363SPhilipp Reisner 
/* Same thing as drbd_bm_set_bits,
 * but more efficient for a large bit range.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	struct drbd_bitmap *b = device->bitmap;
	unsigned long sl = ALIGN(s,BITS_PER_LONG);	/* s rounded up to a long boundary */
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);	/* e+1 rounded down */
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(device, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	spin_lock_irq(&b->bm_lock);

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(device, s, sl-1, 1);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
		/* drop the spinlock between pages so we can be scheduled;
		 * caller serializes via drbd_bm_lock() */
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}
	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);

	/* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
	 * ==> e = 32767, el = 32768, last_page = 2,
	 * and now last_word = 0.
	 * We do not want to touch last_page in this case,
	 * as we did not allocate it, it is not present in bitmap->bm_pages.
	 */
	if (last_word)
		bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(device, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}
1549b411b363SPhilipp Reisner 
1550b411b363SPhilipp Reisner /* returns bit state
1551b411b363SPhilipp Reisner  * wants bitnr, NOT sector.
1552b411b363SPhilipp Reisner  * inherently racy... area needs to be locked by means of {al,rs}_lru
1553b411b363SPhilipp Reisner  *  1 ... bit set
1554b411b363SPhilipp Reisner  *  0 ... bit not set
1555b411b363SPhilipp Reisner  * -1 ... first out of bounds access, stop testing for bits!
1556b411b363SPhilipp Reisner  */
1557b30ab791SAndreas Gruenbacher int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
1558b411b363SPhilipp Reisner {
1559b411b363SPhilipp Reisner 	unsigned long flags;
1560b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
1561b411b363SPhilipp Reisner 	unsigned long *p_addr;
1562b411b363SPhilipp Reisner 	int i;
1563b411b363SPhilipp Reisner 
1564841ce241SAndreas Gruenbacher 	if (!expect(b))
1565841ce241SAndreas Gruenbacher 		return 0;
1566841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1567841ce241SAndreas Gruenbacher 		return 0;
1568b411b363SPhilipp Reisner 
1569b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
157020ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1571b30ab791SAndreas Gruenbacher 		bm_print_lock_info(device);
1572b411b363SPhilipp Reisner 	if (bitnr < b->bm_bits) {
157319f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
15747e599e6eSLinus Torvalds 		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
1575b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1576b411b363SPhilipp Reisner 	} else if (bitnr == b->bm_bits) {
1577b411b363SPhilipp Reisner 		i = -1;
1578b411b363SPhilipp Reisner 	} else { /* (bitnr > b->bm_bits) */
1579d0180171SAndreas Gruenbacher 		drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
1580b411b363SPhilipp Reisner 		i = 0;
1581b411b363SPhilipp Reisner 	}
1582b411b363SPhilipp Reisner 
1583b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1584b411b363SPhilipp Reisner 	return i;
1585b411b363SPhilipp Reisner }
1586b411b363SPhilipp Reisner 
1587b411b363SPhilipp Reisner /* returns number of bits set in the range [s, e] */
1588b30ab791SAndreas Gruenbacher int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
1589b411b363SPhilipp Reisner {
1590b411b363SPhilipp Reisner 	unsigned long flags;
1591b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
159219f843aaSLars Ellenberg 	unsigned long *p_addr = NULL;
1593b411b363SPhilipp Reisner 	unsigned long bitnr;
159419f843aaSLars Ellenberg 	unsigned int page_nr = -1U;
1595b411b363SPhilipp Reisner 	int c = 0;
1596b411b363SPhilipp Reisner 
1597b411b363SPhilipp Reisner 	/* If this is called without a bitmap, that is a bug.  But just to be
1598b411b363SPhilipp Reisner 	 * robust in case we screwed up elsewhere, in that case pretend there
1599b411b363SPhilipp Reisner 	 * was one dirty bit in the requested area, so we won't try to do a
1600b411b363SPhilipp Reisner 	 * local read there (no bitmap probably implies no disk) */
1601841ce241SAndreas Gruenbacher 	if (!expect(b))
1602841ce241SAndreas Gruenbacher 		return 1;
1603841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1604841ce241SAndreas Gruenbacher 		return 1;
1605b411b363SPhilipp Reisner 
1606b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
160720ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1608b30ab791SAndreas Gruenbacher 		bm_print_lock_info(device);
1609b411b363SPhilipp Reisner 	for (bitnr = s; bitnr <= e; bitnr++) {
161019f843aaSLars Ellenberg 		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
161119f843aaSLars Ellenberg 		if (page_nr != idx) {
161219f843aaSLars Ellenberg 			page_nr = idx;
1613b411b363SPhilipp Reisner 			if (p_addr)
1614b411b363SPhilipp Reisner 				bm_unmap(p_addr);
161519f843aaSLars Ellenberg 			p_addr = bm_map_pidx(b, idx);
1616b411b363SPhilipp Reisner 		}
1617841ce241SAndreas Gruenbacher 		if (expect(bitnr < b->bm_bits))
16187e599e6eSLinus Torvalds 			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
1619841ce241SAndreas Gruenbacher 		else
1620d0180171SAndreas Gruenbacher 			drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
1621b411b363SPhilipp Reisner 	}
1622b411b363SPhilipp Reisner 	if (p_addr)
1623b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1624b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1625b411b363SPhilipp Reisner 	return c;
1626b411b363SPhilipp Reisner }
1627b411b363SPhilipp Reisner 
1628b411b363SPhilipp Reisner 
1629b411b363SPhilipp Reisner /* inherently racy...
1630b411b363SPhilipp Reisner  * return value may be already out-of-date when this function returns.
1631b411b363SPhilipp Reisner  * but the general usage is that this is only use during a cstate when bits are
1632b411b363SPhilipp Reisner  * only cleared, not set, and typically only care for the case when the return
1633b411b363SPhilipp Reisner  * value is zero, or we already "locked" this "bitmap extent" by other means.
1634b411b363SPhilipp Reisner  *
1635b411b363SPhilipp Reisner  * enr is bm-extent number, since we chose to name one sector (512 bytes)
1636b411b363SPhilipp Reisner  * worth of the bitmap a "bitmap extent".
1637b411b363SPhilipp Reisner  *
1638b411b363SPhilipp Reisner  * TODO
1639b411b363SPhilipp Reisner  * I think since we use it like a reference count, we should use the real
1640b411b363SPhilipp Reisner  * reference count of some bitmap extent element from some lru instead...
1641b411b363SPhilipp Reisner  *
1642b411b363SPhilipp Reisner  */
1643b30ab791SAndreas Gruenbacher int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
1644b411b363SPhilipp Reisner {
1645b30ab791SAndreas Gruenbacher 	struct drbd_bitmap *b = device->bitmap;
1646b411b363SPhilipp Reisner 	int count, s, e;
1647b411b363SPhilipp Reisner 	unsigned long flags;
1648b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
1649b411b363SPhilipp Reisner 
1650841ce241SAndreas Gruenbacher 	if (!expect(b))
1651841ce241SAndreas Gruenbacher 		return 0;
1652841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1653841ce241SAndreas Gruenbacher 		return 0;
1654b411b363SPhilipp Reisner 
1655b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
165620ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1657b30ab791SAndreas Gruenbacher 		bm_print_lock_info(device);
1658b411b363SPhilipp Reisner 
1659b411b363SPhilipp Reisner 	s = S2W(enr);
1660b411b363SPhilipp Reisner 	e = min((size_t)S2W(enr+1), b->bm_words);
1661b411b363SPhilipp Reisner 	count = 0;
1662b411b363SPhilipp Reisner 	if (s < b->bm_words) {
1663b411b363SPhilipp Reisner 		int n = e-s;
166419f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
1665b411b363SPhilipp Reisner 		bm = p_addr + MLPP(s);
16665fb3bc4dSLars Ellenberg 		count += bitmap_weight(bm, n * BITS_PER_LONG);
1667b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1668b411b363SPhilipp Reisner 	} else {
1669d0180171SAndreas Gruenbacher 		drbd_err(device, "start offset (%d) too large in drbd_bm_e_weight\n", s);
1670b411b363SPhilipp Reisner 	}
1671b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1672b411b363SPhilipp Reisner 	return count;
1673b411b363SPhilipp Reisner }
1674