xref: /openbmc/linux/drivers/block/drbd/drbd_bitmap.c (revision 22d81140aea85f9ac388fa12768dc502ef00eaae)
1b411b363SPhilipp Reisner /*
2b411b363SPhilipp Reisner    drbd_bitmap.c
3b411b363SPhilipp Reisner 
4b411b363SPhilipp Reisner    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5b411b363SPhilipp Reisner 
6b411b363SPhilipp Reisner    Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
7b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9b411b363SPhilipp Reisner 
10b411b363SPhilipp Reisner    drbd is free software; you can redistribute it and/or modify
11b411b363SPhilipp Reisner    it under the terms of the GNU General Public License as published by
12b411b363SPhilipp Reisner    the Free Software Foundation; either version 2, or (at your option)
13b411b363SPhilipp Reisner    any later version.
14b411b363SPhilipp Reisner 
15b411b363SPhilipp Reisner    drbd is distributed in the hope that it will be useful,
16b411b363SPhilipp Reisner    but WITHOUT ANY WARRANTY; without even the implied warranty of
17b411b363SPhilipp Reisner    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18b411b363SPhilipp Reisner    GNU General Public License for more details.
19b411b363SPhilipp Reisner 
20b411b363SPhilipp Reisner    You should have received a copy of the GNU General Public License
21b411b363SPhilipp Reisner    along with drbd; see the file COPYING.  If not, write to
22b411b363SPhilipp Reisner    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23b411b363SPhilipp Reisner  */
24b411b363SPhilipp Reisner 
25b411b363SPhilipp Reisner #include <linux/bitops.h>
26b411b363SPhilipp Reisner #include <linux/vmalloc.h>
27b411b363SPhilipp Reisner #include <linux/string.h>
28b411b363SPhilipp Reisner #include <linux/drbd.h>
295a0e3ad6STejun Heo #include <linux/slab.h>
30b411b363SPhilipp Reisner #include <asm/kmap_types.h>
31f0ff1357SStephen Rothwell 
32b411b363SPhilipp Reisner #include "drbd_int.h"
33b411b363SPhilipp Reisner 
3495a0f10cSLars Ellenberg 
35b411b363SPhilipp Reisner /* OPAQUE outside this file!
36b411b363SPhilipp Reisner  * interface defined in drbd_int.h
37b411b363SPhilipp Reisner 
38b411b363SPhilipp Reisner  * convention:
39b411b363SPhilipp Reisner  * function name drbd_bm_... => used elsewhere, "public".
40b411b363SPhilipp Reisner  * function name      bm_... => internal to implementation, "private".
414b0715f0SLars Ellenberg  */
42b411b363SPhilipp Reisner 
434b0715f0SLars Ellenberg 
444b0715f0SLars Ellenberg /*
454b0715f0SLars Ellenberg  * LIMITATIONS:
464b0715f0SLars Ellenberg  * We want to support >= peta byte of backend storage, while for now still using
474b0715f0SLars Ellenberg  * a granularity of one bit per 4KiB of storage.
484b0715f0SLars Ellenberg  * 1 << 50		bytes backend storage (1 PiB)
494b0715f0SLars Ellenberg  * 1 << (50 - 12)	bits needed
504b0715f0SLars Ellenberg  *	38 --> we need u64 to index and count bits
514b0715f0SLars Ellenberg  * 1 << (38 - 3)	bitmap bytes needed
524b0715f0SLars Ellenberg  *	35 --> we still need u64 to index and count bytes
534b0715f0SLars Ellenberg  *			(that's 32 GiB of bitmap for 1 PiB storage)
544b0715f0SLars Ellenberg  * 1 << (35 - 2)	32bit longs needed
554b0715f0SLars Ellenberg  *	33 --> we'd even need u64 to index and count 32bit long words.
564b0715f0SLars Ellenberg  * 1 << (35 - 3)	64bit longs needed
574b0715f0SLars Ellenberg  *	32 --> we could get away with a 32bit unsigned int to index and count
584b0715f0SLars Ellenberg  *	64bit long words, but I rather stay with unsigned long for now.
594b0715f0SLars Ellenberg  *	We probably should neither count nor point to bytes or long words
604b0715f0SLars Ellenberg  *	directly, but either by bitnumber, or by page index and offset.
614b0715f0SLars Ellenberg  * 1 << (35 - 12)
624b0715f0SLars Ellenberg  *	22 --> we need that much 4KiB pages of bitmap.
634b0715f0SLars Ellenberg  *	1 << (22 + 3) --> on a 64bit arch,
644b0715f0SLars Ellenberg  *	we need 32 MiB to store the array of page pointers.
654b0715f0SLars Ellenberg  *
664b0715f0SLars Ellenberg  * Because I'm lazy, and because the resulting patch was too large, too ugly
674b0715f0SLars Ellenberg  * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
684b0715f0SLars Ellenberg  * (1 << 32) bits * 4k storage.
694b0715f0SLars Ellenberg  *
704b0715f0SLars Ellenberg 
714b0715f0SLars Ellenberg  * bitmap storage and IO:
724b0715f0SLars Ellenberg  *	Bitmap is stored little endian on disk, and is kept little endian in
734b0715f0SLars Ellenberg  *	core memory. Currently we still hold the full bitmap in core as long
744b0715f0SLars Ellenberg  *	as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
754b0715f0SLars Ellenberg  *	seems excessive.
764b0715f0SLars Ellenberg  *
7724c4830cSBart Van Assche  *	We plan to reduce the amount of in-core bitmap pages by paging them in
784b0715f0SLars Ellenberg  *	and out against their on-disk location as necessary, but need to make
794b0715f0SLars Ellenberg  *	sure we don't cause too much meta data IO, and must not deadlock in
804b0715f0SLars Ellenberg  *	tight memory situations. This needs some more work.
81b411b363SPhilipp Reisner  */
82b411b363SPhilipp Reisner 
83b411b363SPhilipp Reisner /*
84b411b363SPhilipp Reisner  * NOTE
85b411b363SPhilipp Reisner  *  Access to the *bm_pages is protected by bm_lock.
86b411b363SPhilipp Reisner  *  It is safe to read the other members within the lock.
87b411b363SPhilipp Reisner  *
88b411b363SPhilipp Reisner  *  drbd_bm_set_bits is called from bio_endio callbacks,
89b411b363SPhilipp Reisner  *  We may be called with irq already disabled,
90b411b363SPhilipp Reisner  *  so we need spin_lock_irqsave().
91b411b363SPhilipp Reisner  *  And we need the kmap_atomic.
92b411b363SPhilipp Reisner  */
struct drbd_bitmap {
	struct page **bm_pages;	/* array of pages backing the in-core bitmap */
	spinlock_t bm_lock;	/* protects *bm_pages; see NOTE above */

	/* see LIMITATIONS: above */

	unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
	unsigned long bm_bits;	/* total number of bits in the bitmap */
	size_t   bm_words;	/* bitmap size in (unsigned long) words */
	size_t   bm_number_of_pages;	/* number of entries in bm_pages */
	sector_t bm_dev_capacity;	/* device capacity this bitmap was sized for */
	struct mutex bm_change; /* serializes resize operations */

	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */

	enum bm_flag bm_flags;	/* BM_LOCKED_* / BM_P_VMALLOCED state bits */

	/* debugging aid, in case we are still racy somewhere */
	char          *bm_why;	/* reason passed to drbd_bm_lock() */
	struct task_struct *bm_task;	/* task that currently holds bm_change */
};
114b411b363SPhilipp Reisner 
/* Complain (rate-limited) about access to a locked bitmap, naming both
 * the thread attempting the access and the one holding the lock. */
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
	struct drbd_bitmap *b = mdev->bitmap;
	/* rate-limited: this may fire from hot paths */
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
		drbd_task_to_thread_name(mdev->tconn, current),
		func, b->bm_why ?: "?",
		drbd_task_to_thread_name(mdev->tconn, b->bm_task));
}
126b411b363SPhilipp Reisner 
/* Serialize "big" bitmap operations: take bm_change (warning about any
 * contention first), then record which kinds of bitmap access are
 * forbidden while locked (flags & BM_LOCKED_MASK), plus who locked it
 * and why, for the debugging aids above. */
void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int trylock_failed;

	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
		return;
	}

	/* try first, so we can warn about contention before blocking */
	trylock_failed = !mutex_trylock(&b->bm_change);

	if (trylock_failed) {
		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
			 drbd_task_to_thread_name(mdev->tconn, current),
			 why, b->bm_why ?: "?",
			 drbd_task_to_thread_name(mdev->tconn, b->bm_task));
		mutex_lock(&b->bm_change);
	}
	/* nested locking would indicate a bug somewhere up the stack */
	if (BM_LOCKED_MASK & b->bm_flags)
		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
	b->bm_flags |= flags & BM_LOCKED_MASK;

	b->bm_why  = why;
	b->bm_task = current;
}
153b411b363SPhilipp Reisner 
/* Counterpart to drbd_bm_lock(): clear the "locked" flags and the
 * debugging info, then release bm_change. */
void drbd_bm_unlock(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
		return;
	}

	if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");

	b->bm_flags &= ~BM_LOCKED_MASK;
	b->bm_why  = NULL;
	b->bm_task = NULL;
	mutex_unlock(&b->bm_change);
}
170b411b363SPhilipp Reisner 
17119f843aaSLars Ellenberg /* we store some "meta" info about our pages in page->private */
17219f843aaSLars Ellenberg /* at a granularity of 4k storage per bitmap bit:
17319f843aaSLars Ellenberg  * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
17419f843aaSLars Ellenberg  *  1<<38 bits,
17519f843aaSLars Ellenberg  *  1<<23 4k bitmap pages.
17619f843aaSLars Ellenberg  * Use 24 bits as page index, covers 2 peta byte storage
17719f843aaSLars Ellenberg  * at a granularity of 4k per bit.
17819f843aaSLars Ellenberg  * Used to report the failed page idx on io error from the endio handlers.
17919f843aaSLars Ellenberg  */
18019f843aaSLars Ellenberg #define BM_PAGE_IDX_MASK	((1UL<<24)-1)
18119f843aaSLars Ellenberg /* this page is currently read in, or written back */
18219f843aaSLars Ellenberg #define BM_PAGE_IO_LOCK		31
18319f843aaSLars Ellenberg /* if there has been an IO error for this page */
18419f843aaSLars Ellenberg #define BM_PAGE_IO_ERROR	30
18519f843aaSLars Ellenberg /* this is to be able to intelligently skip disk IO,
18619f843aaSLars Ellenberg  * set if bits have been set since last IO. */
18719f843aaSLars Ellenberg #define BM_PAGE_NEED_WRITEOUT	29
18819f843aaSLars Ellenberg /* to mark for lazy writeout once syncer cleared all clearable bits,
18919f843aaSLars Ellenberg  * we if bits have been cleared since last IO. */
18919f843aaSLars Ellenberg  * set if bits have been cleared since last IO. */
19145dfffebSLars Ellenberg /* pages marked with this "HINT" will be considered for writeout
19245dfffebSLars Ellenberg  * on activity log transactions */
19345dfffebSLars Ellenberg #define BM_PAGE_HINT_WRITEOUT	27
19419f843aaSLars Ellenberg 
19524c4830cSBart Van Assche /* store_page_idx uses non-atomic assignment. It is only used directly after
19619f843aaSLars Ellenberg  * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
19719f843aaSLars Ellenberg  * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
19819f843aaSLars Ellenberg  * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
19919f843aaSLars Ellenberg  * requires it all to be atomic as well. */
20019f843aaSLars Ellenberg static void bm_store_page_idx(struct page *page, unsigned long idx)
20119f843aaSLars Ellenberg {
20219f843aaSLars Ellenberg 	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
20319f843aaSLars Ellenberg 	page_private(page) |= idx;
20419f843aaSLars Ellenberg }
20519f843aaSLars Ellenberg 
20619f843aaSLars Ellenberg static unsigned long bm_page_to_idx(struct page *page)
20719f843aaSLars Ellenberg {
20819f843aaSLars Ellenberg 	return page_private(page) & BM_PAGE_IDX_MASK;
20919f843aaSLars Ellenberg }
21019f843aaSLars Ellenberg 
/* As is very unlikely that the same page is under IO from more than one
 * context, we can get away with a bit per page and one wait queue per bitmap.
 */
/* Acquire the per-page IO lock bit, sleeping on bm_io_wait until the
 * holder releases it via bm_page_unlock_io(). */
static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}
22019f843aaSLars Ellenberg 
/* Release the per-page IO lock bit and wake any waiter in
 * bm_page_lock_io(). */
static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	/* clear_bit_unlock: release semantics, pairs with the
	 * test_and_set_bit in bm_page_lock_io() */
	clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
	wake_up(&mdev->bitmap->bm_io_wait);
}
22819f843aaSLars Ellenberg 
/* set _before_ submit_io, so it may be reset due to being changed
 * while this page is in flight... will get submitted later again */
static void bm_set_page_unchanged(struct page *page)
{
	/* use cmpxchg? */
	/* clear both writeout-pending flags: in-core and on-disk content
	 * are considered in sync for this page from now on */
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
23719f843aaSLars Ellenberg 
23819f843aaSLars Ellenberg static void bm_set_page_need_writeout(struct page *page)
23919f843aaSLars Ellenberg {
24019f843aaSLars Ellenberg 	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
24119f843aaSLars Ellenberg }
24219f843aaSLars Ellenberg 
24345dfffebSLars Ellenberg /**
24445dfffebSLars Ellenberg  * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
24545dfffebSLars Ellenberg  * @mdev:	DRBD device.
24645dfffebSLars Ellenberg  * @page_nr:	the bitmap page to mark with the "hint" flag
24745dfffebSLars Ellenberg  *
24845dfffebSLars Ellenberg  * From within an activity log transaction, we mark a few pages with these
24945dfffebSLars Ellenberg  * hints, then call drbd_bm_write_hinted(), which will only write out changed
25045dfffebSLars Ellenberg  * pages which are flagged with this mark.
25145dfffebSLars Ellenberg  */
25245dfffebSLars Ellenberg void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr)
25345dfffebSLars Ellenberg {
25445dfffebSLars Ellenberg 	struct page *page;
25545dfffebSLars Ellenberg 	if (page_nr >= mdev->bitmap->bm_number_of_pages) {
25645dfffebSLars Ellenberg 		dev_warn(DEV, "BAD: page_nr: %u, number_of_pages: %u\n",
25745dfffebSLars Ellenberg 			 page_nr, (int)mdev->bitmap->bm_number_of_pages);
25845dfffebSLars Ellenberg 		return;
25945dfffebSLars Ellenberg 	}
26045dfffebSLars Ellenberg 	page = mdev->bitmap->bm_pages[page_nr];
26145dfffebSLars Ellenberg 	set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
26245dfffebSLars Ellenberg }
26345dfffebSLars Ellenberg 
/* return nonzero if neither NEED_WRITEOUT nor LAZY_WRITEOUT is set,
 * i.e. this page does not need to be written to disk */
static int bm_test_page_unchanged(struct page *page)
{
	/* volatile: force an actual load; the flag word is modified
	 * concurrently by the atomic bitops above */
	volatile const unsigned long *addr = &page_private(page);
	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}
26919f843aaSLars Ellenberg 
/* record an IO error on this page (set from the endio handlers) */
static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
27419f843aaSLars Ellenberg 
/* forget a previously recorded IO error on this page */
static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
27919f843aaSLars Ellenberg 
/* bits were cleared on this page: mark it for lazy writeout */
static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
28419f843aaSLars Ellenberg 
/* return nonzero if this page is marked for lazy writeout */
static int bm_test_page_lazy_writeout(struct page *page)
{
	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
28919f843aaSLars Ellenberg 
/* on a 32bit box, this would allow for exactly (2<<38) bits. */
/* convert a long-word number into the index of the bitmap page
 * containing that word */
static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
29819f843aaSLars Ellenberg 
/* convert a bit number into the index of the bitmap page holding it */
static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
30695a0f10cSLars Ellenberg 
30795a0f10cSLars Ellenberg static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
30895a0f10cSLars Ellenberg {
30995a0f10cSLars Ellenberg 	struct page *page = b->bm_pages[idx];
31095a0f10cSLars Ellenberg 	return (unsigned long *) kmap_atomic(page, km);
31195a0f10cSLars Ellenberg }
31295a0f10cSLars Ellenberg 
/* map a bitmap page using the KM_IRQ1 slot; paired with bm_unmap() */
static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx, KM_IRQ1);
}
31795a0f10cSLars Ellenberg 
318b411b363SPhilipp Reisner static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
319b411b363SPhilipp Reisner {
320b411b363SPhilipp Reisner 	kunmap_atomic(p_addr, km);
321b411b363SPhilipp Reisner };
322b411b363SPhilipp Reisner 
323b411b363SPhilipp Reisner static void bm_unmap(unsigned long *p_addr)
324b411b363SPhilipp Reisner {
325b411b363SPhilipp Reisner 	return __bm_unmap(p_addr, KM_IRQ1);
326b411b363SPhilipp Reisner }
327b411b363SPhilipp Reisner 
328b411b363SPhilipp Reisner /* long word offset of _bitmap_ sector */
329b411b363SPhilipp Reisner #define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
330b411b363SPhilipp Reisner /* word offset from start of bitmap to word number _in_page_
331b411b363SPhilipp Reisner  * modulo longs per page
332b411b363SPhilipp Reisner #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
33324c4830cSBart Van Assche  hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
334b411b363SPhilipp Reisner  so do it explicitly:
335b411b363SPhilipp Reisner  */
336b411b363SPhilipp Reisner #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
337b411b363SPhilipp Reisner 
338b411b363SPhilipp Reisner /* Long words per page */
339b411b363SPhilipp Reisner #define LWPP (PAGE_SIZE/sizeof(long))
340b411b363SPhilipp Reisner 
341b411b363SPhilipp Reisner /*
342b411b363SPhilipp Reisner  * actually most functions herein should take a struct drbd_bitmap*, not a
343b411b363SPhilipp Reisner  * struct drbd_conf*, but for the debug macros I like to have the mdev around
344b411b363SPhilipp Reisner  * to be able to report device specific.
345b411b363SPhilipp Reisner  */
346b411b363SPhilipp Reisner 
34719f843aaSLars Ellenberg 
348b411b363SPhilipp Reisner static void bm_free_pages(struct page **pages, unsigned long number)
349b411b363SPhilipp Reisner {
350b411b363SPhilipp Reisner 	unsigned long i;
351b411b363SPhilipp Reisner 	if (!pages)
352b411b363SPhilipp Reisner 		return;
353b411b363SPhilipp Reisner 
354b411b363SPhilipp Reisner 	for (i = 0; i < number; i++) {
355b411b363SPhilipp Reisner 		if (!pages[i]) {
356b411b363SPhilipp Reisner 			printk(KERN_ALERT "drbd: bm_free_pages tried to free "
357b411b363SPhilipp Reisner 					  "a NULL pointer; i=%lu n=%lu\n",
358b411b363SPhilipp Reisner 					  i, number);
359b411b363SPhilipp Reisner 			continue;
360b411b363SPhilipp Reisner 		}
361b411b363SPhilipp Reisner 		__free_page(pages[i]);
362b411b363SPhilipp Reisner 		pages[i] = NULL;
363b411b363SPhilipp Reisner 	}
364b411b363SPhilipp Reisner }
365b411b363SPhilipp Reisner 
/* free a buffer from bm_realloc_pages(): @v nonzero selects vfree(),
 * otherwise kfree(), matching how it was allocated (BM_P_VMALLOCED) */
static void bm_vk_free(void *ptr, int v)
{
	if (v)
		vfree(ptr);
	else
		kfree(ptr);
}
373b411b363SPhilipp Reisner 
374b411b363SPhilipp Reisner /*
375b411b363SPhilipp Reisner  * "have" and "want" are NUMBER OF PAGES.
376b411b363SPhilipp Reisner  */
377b411b363SPhilipp Reisner static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
378b411b363SPhilipp Reisner {
379b411b363SPhilipp Reisner 	struct page **old_pages = b->bm_pages;
380b411b363SPhilipp Reisner 	struct page **new_pages, *page;
381b411b363SPhilipp Reisner 	unsigned int i, bytes, vmalloced = 0;
382b411b363SPhilipp Reisner 	unsigned long have = b->bm_number_of_pages;
383b411b363SPhilipp Reisner 
384b411b363SPhilipp Reisner 	BUG_ON(have == 0 && old_pages != NULL);
385b411b363SPhilipp Reisner 	BUG_ON(have != 0 && old_pages == NULL);
386b411b363SPhilipp Reisner 
387b411b363SPhilipp Reisner 	if (have == want)
388b411b363SPhilipp Reisner 		return old_pages;
389b411b363SPhilipp Reisner 
390b411b363SPhilipp Reisner 	/* Trying kmalloc first, falling back to vmalloc.
391b411b363SPhilipp Reisner 	 * GFP_KERNEL is ok, as this is done when a lower level disk is
392a209b4aeSAndreas Gruenbacher 	 * "attached" to the drbd.  Context is receiver thread or drbdsetup /
393a209b4aeSAndreas Gruenbacher 	 * netlink process.  As we have no disk yet, we are not in the IO path,
394b411b363SPhilipp Reisner 	 * not even the IO path of the peer. */
395b411b363SPhilipp Reisner 	bytes = sizeof(struct page *)*want;
396b411b363SPhilipp Reisner 	new_pages = kmalloc(bytes, GFP_KERNEL);
397b411b363SPhilipp Reisner 	if (!new_pages) {
398b411b363SPhilipp Reisner 		new_pages = vmalloc(bytes);
399b411b363SPhilipp Reisner 		if (!new_pages)
400b411b363SPhilipp Reisner 			return NULL;
401b411b363SPhilipp Reisner 		vmalloced = 1;
402b411b363SPhilipp Reisner 	}
403b411b363SPhilipp Reisner 
404b411b363SPhilipp Reisner 	memset(new_pages, 0, bytes);
405b411b363SPhilipp Reisner 	if (want >= have) {
406b411b363SPhilipp Reisner 		for (i = 0; i < have; i++)
407b411b363SPhilipp Reisner 			new_pages[i] = old_pages[i];
408b411b363SPhilipp Reisner 		for (; i < want; i++) {
409b411b363SPhilipp Reisner 			page = alloc_page(GFP_HIGHUSER);
410b411b363SPhilipp Reisner 			if (!page) {
411b411b363SPhilipp Reisner 				bm_free_pages(new_pages + have, i - have);
412b411b363SPhilipp Reisner 				bm_vk_free(new_pages, vmalloced);
413b411b363SPhilipp Reisner 				return NULL;
414b411b363SPhilipp Reisner 			}
41519f843aaSLars Ellenberg 			/* we want to know which page it is
41619f843aaSLars Ellenberg 			 * from the endio handlers */
41719f843aaSLars Ellenberg 			bm_store_page_idx(page, i);
418b411b363SPhilipp Reisner 			new_pages[i] = page;
419b411b363SPhilipp Reisner 		}
420b411b363SPhilipp Reisner 	} else {
421b411b363SPhilipp Reisner 		for (i = 0; i < want; i++)
422b411b363SPhilipp Reisner 			new_pages[i] = old_pages[i];
423b411b363SPhilipp Reisner 		/* NOT HERE, we are outside the spinlock!
424b411b363SPhilipp Reisner 		bm_free_pages(old_pages + want, have - want);
425b411b363SPhilipp Reisner 		*/
426b411b363SPhilipp Reisner 	}
427b411b363SPhilipp Reisner 
428b411b363SPhilipp Reisner 	if (vmalloced)
42920ceb2b2SLars Ellenberg 		b->bm_flags |= BM_P_VMALLOCED;
430b411b363SPhilipp Reisner 	else
43120ceb2b2SLars Ellenberg 		b->bm_flags &= ~BM_P_VMALLOCED;
432b411b363SPhilipp Reisner 
433b411b363SPhilipp Reisner 	return new_pages;
434b411b363SPhilipp Reisner }
435b411b363SPhilipp Reisner 
/*
 * called on driver init only. TODO call when a device is created.
 * allocates the drbd_bitmap, and stores it in mdev->bitmap.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
int drbd_bm_init(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	/* a pre-existing bitmap would be leaked by the assignment below */
	WARN_ON(b != NULL);
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	spin_lock_init(&b->bm_lock);
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	mdev->bitmap = b;

	return 0;
}
455b411b363SPhilipp Reisner 
/* return the device capacity the bitmap was sized for,
 * or 0 if there is no bitmap */
sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
	if (!expect(mdev->bitmap))
		return 0;
	return mdev->bitmap->bm_dev_capacity;
}
462b411b363SPhilipp Reisner 
/* called on driver unload. TODO: call when a device is destroyed.
 * Frees all bitmap pages, the page pointer array (vfree or kfree,
 * depending on BM_P_VMALLOCED), and the struct drbd_bitmap itself.
 */
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
	if (!expect(mdev->bitmap))
		return;
	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
	bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
	kfree(mdev->bitmap);
	mdev->bitmap = NULL;
}
474b411b363SPhilipp Reisner 
475b411b363SPhilipp Reisner /*
476b411b363SPhilipp Reisner  * since (b->bm_bits % BITS_PER_LONG) != 0,
477b411b363SPhilipp Reisner  * this masks out the remaining bits.
478b411b363SPhilipp Reisner  * Returns the number of bits cleared.
479b411b363SPhilipp Reisner  */
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	/* all surplus bits live in the last bitmap page */
	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}
519b411b363SPhilipp Reisner 
/* set all bits beyond bm_bits in the last word / padding long;
 * inverse of bm_clear_surplus() above */
static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to set
		 * a padding long to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}
552b411b363SPhilipp Reisner 
/* you better not modify the bitmap while this is running,
 * or its results will be stale */
/* Count all set bits.  NOTE: also sanitizes the last page in place:
 * surplus bits beyond bm_bits in the last word are masked off, and on
 * 32bit the unused padding long is zeroed. */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
	int idx, i, last_word;

	/* all but last page */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx, KM_USER0);
		for (i = 0; i < LWPP; i++)
			bits += hweight_long(p_addr[i]);
		__bm_unmap(p_addr, KM_USER0);
		/* may be a long run for big bitmaps: be preemption friendly */
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx, KM_USER0);
	for (i = 0; i < last_word; i++)
		bits += hweight_long(p_addr[i]);
	/* mask off surplus bits of the last word before counting it */
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr, KM_USER0);
	return bits;
}
583b411b363SPhilipp Reisner 
/* offset and len in long words.*/
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	/* refuse to write past the end of the bitmap */
	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		/* fill at most up to the next page boundary per iteration */
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		/* paranoia check: the chunk must not cross the mapped page */
		if (bm+do_now > p_addr + LWPP) {
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		/* page content changed: schedule it for writeout */
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}
613b411b363SPhilipp Reisner 
/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	if (!expect(b))
		return -ENOMEM;

	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	/* no size change: nothing to do */
	if (capacity == b->bm_dev_capacity)
		goto out;

	/* remember how the old page array was allocated (vmalloc vs kmalloc) */
	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);

	/* capacity == 0 means "going away": release all bitmap memory */
	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set   =
		b->bm_bits  =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	/* one bit per BM_SECT_PER_BIT sectors, rounded up */
	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		/* on-disk bitmap area in bits: one 512-byte sector holds
		 * 512*8 bits, hence the shift by 12 (9 + 3) */
		u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
		put_ldev(mdev);
		if (bits > bits_on_disk) {
			dev_info(DEV, "bits = %lu\n", bits);
			dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		/* same number of pages: reuse the existing page array */
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	/* when growing with set_new_bits, first set the (old) surplus bits,
	 * so the area between old and new end starts out as out-of-sync */
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	/* clear bits beyond the new bm_bits again */
	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	/* shrinking: recount; growing already adjusted bm_set above */
	if (!growing)
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(mdev);
	return err;
}
739b411b363SPhilipp Reisner 
740b411b363SPhilipp Reisner /* inherently racy:
741b411b363SPhilipp Reisner  * if not protected by other means, return value may be out of date when
742b411b363SPhilipp Reisner  * leaving this function...
743b411b363SPhilipp Reisner  * we still need to lock it, since it is important that this returns
744b411b363SPhilipp Reisner  * bm_set == 0 precisely.
745b411b363SPhilipp Reisner  *
746b411b363SPhilipp Reisner  * maybe bm_set should be atomic_t ?
747b411b363SPhilipp Reisner  */
7480778286aSPhilipp Reisner unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
749b411b363SPhilipp Reisner {
750b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
751b411b363SPhilipp Reisner 	unsigned long s;
752b411b363SPhilipp Reisner 	unsigned long flags;
753b411b363SPhilipp Reisner 
754841ce241SAndreas Gruenbacher 	if (!expect(b))
755841ce241SAndreas Gruenbacher 		return 0;
756841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
757841ce241SAndreas Gruenbacher 		return 0;
758b411b363SPhilipp Reisner 
759b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
760b411b363SPhilipp Reisner 	s = b->bm_set;
761b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
762b411b363SPhilipp Reisner 
763b411b363SPhilipp Reisner 	return s;
764b411b363SPhilipp Reisner }
765b411b363SPhilipp Reisner 
766b411b363SPhilipp Reisner unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
767b411b363SPhilipp Reisner {
768b411b363SPhilipp Reisner 	unsigned long s;
769b411b363SPhilipp Reisner 	/* if I don't have a disk, I don't know about out-of-sync status */
770b411b363SPhilipp Reisner 	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
771b411b363SPhilipp Reisner 		return 0;
772b411b363SPhilipp Reisner 	s = _drbd_bm_total_weight(mdev);
773b411b363SPhilipp Reisner 	put_ldev(mdev);
774b411b363SPhilipp Reisner 	return s;
775b411b363SPhilipp Reisner }
776b411b363SPhilipp Reisner 
777b411b363SPhilipp Reisner size_t drbd_bm_words(struct drbd_conf *mdev)
778b411b363SPhilipp Reisner {
779b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
780841ce241SAndreas Gruenbacher 	if (!expect(b))
781841ce241SAndreas Gruenbacher 		return 0;
782841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
783841ce241SAndreas Gruenbacher 		return 0;
784b411b363SPhilipp Reisner 
785b411b363SPhilipp Reisner 	return b->bm_words;
786b411b363SPhilipp Reisner }
787b411b363SPhilipp Reisner 
788b411b363SPhilipp Reisner unsigned long drbd_bm_bits(struct drbd_conf *mdev)
789b411b363SPhilipp Reisner {
790b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
791841ce241SAndreas Gruenbacher 	if (!expect(b))
792841ce241SAndreas Gruenbacher 		return 0;
793b411b363SPhilipp Reisner 
794b411b363SPhilipp Reisner 	return b->bm_bits;
795b411b363SPhilipp Reisner }
796b411b363SPhilipp Reisner 
/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		/* merge at most up to the next page boundary per iteration */
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			/* OR the received word into ours, keeping the cached
			 * set-bit count (bm_set) consistent */
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}
847b411b363SPhilipp Reisner 
848b411b363SPhilipp Reisner /* copy number words from the bitmap starting at offset into the buffer.
849b411b363SPhilipp Reisner  * buffer[i] will be little endian unsigned long.
850b411b363SPhilipp Reisner  */
851b411b363SPhilipp Reisner void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
852b411b363SPhilipp Reisner 		     unsigned long *buffer)
853b411b363SPhilipp Reisner {
854b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
855b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
856b411b363SPhilipp Reisner 	size_t end, do_now;
857b411b363SPhilipp Reisner 
858b411b363SPhilipp Reisner 	end = offset + number;
859b411b363SPhilipp Reisner 
860841ce241SAndreas Gruenbacher 	if (!expect(b))
861841ce241SAndreas Gruenbacher 		return;
862841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
863841ce241SAndreas Gruenbacher 		return;
864b411b363SPhilipp Reisner 
865b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
866b411b363SPhilipp Reisner 	if ((offset >= b->bm_words) ||
867b411b363SPhilipp Reisner 	    (end    >  b->bm_words) ||
868b411b363SPhilipp Reisner 	    (number <= 0))
869b411b363SPhilipp Reisner 		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
870b411b363SPhilipp Reisner 			(unsigned long)	offset,
871b411b363SPhilipp Reisner 			(unsigned long)	number,
872b411b363SPhilipp Reisner 			(unsigned long) b->bm_words);
873b411b363SPhilipp Reisner 	else {
874b411b363SPhilipp Reisner 		while (offset < end) {
875b411b363SPhilipp Reisner 			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
87619f843aaSLars Ellenberg 			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
877b411b363SPhilipp Reisner 			bm = p_addr + MLPP(offset);
878b411b363SPhilipp Reisner 			offset += do_now;
879b411b363SPhilipp Reisner 			while (do_now--)
88095a0f10cSLars Ellenberg 				*buffer++ = *bm++;
881b411b363SPhilipp Reisner 			bm_unmap(p_addr);
882b411b363SPhilipp Reisner 		}
883b411b363SPhilipp Reisner 	}
884b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
885b411b363SPhilipp Reisner }
886b411b363SPhilipp Reisner 
887b411b363SPhilipp Reisner /* set all bits in the bitmap */
888b411b363SPhilipp Reisner void drbd_bm_set_all(struct drbd_conf *mdev)
889b411b363SPhilipp Reisner {
890b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
891841ce241SAndreas Gruenbacher 	if (!expect(b))
892841ce241SAndreas Gruenbacher 		return;
893841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
894841ce241SAndreas Gruenbacher 		return;
895b411b363SPhilipp Reisner 
896b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
897b411b363SPhilipp Reisner 	bm_memset(b, 0, 0xff, b->bm_words);
898b411b363SPhilipp Reisner 	(void)bm_clear_surplus(b);
899b411b363SPhilipp Reisner 	b->bm_set = b->bm_bits;
900b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
901b411b363SPhilipp Reisner }
902b411b363SPhilipp Reisner 
903b411b363SPhilipp Reisner /* clear all bits in the bitmap */
904b411b363SPhilipp Reisner void drbd_bm_clear_all(struct drbd_conf *mdev)
905b411b363SPhilipp Reisner {
906b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
907841ce241SAndreas Gruenbacher 	if (!expect(b))
908841ce241SAndreas Gruenbacher 		return;
909841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
910841ce241SAndreas Gruenbacher 		return;
911b411b363SPhilipp Reisner 
912b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
913b411b363SPhilipp Reisner 	bm_memset(b, 0, 0, b->bm_words);
914b411b363SPhilipp Reisner 	b->bm_set = 0;
915b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
916b411b363SPhilipp Reisner }
917b411b363SPhilipp Reisner 
/* shared context for a batch of asynchronous bitmap page IO;
 * created in bm_rw(), referenced by each bio via bi_private */
struct bm_aio_ctx {
	struct drbd_conf *mdev;
	atomic_t in_flight;	/* submitted bios + 1 (see bm_rw()) */
	unsigned int done;	/* set once in_flight drops to zero */
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
#define BM_AIO_WRITE_HINTED	2
	int error;		/* last non-zero bio completion error */
	struct kref kref;	/* one ref for bm_rw(), one for in-flight IO */
};
92819f843aaSLars Ellenberg 
929cdfda633SPhilipp Reisner static void bm_aio_ctx_destroy(struct kref *kref)
930cdfda633SPhilipp Reisner {
931cdfda633SPhilipp Reisner 	struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);
932cdfda633SPhilipp Reisner 
933cdfda633SPhilipp Reisner 	put_ldev(ctx->mdev);
934cdfda633SPhilipp Reisner 	kfree(ctx);
935cdfda633SPhilipp Reisner }
936cdfda633SPhilipp Reisner 
/* bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
	struct bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	/* the bitmap page index was stored in page_private on submit */
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
	int uptodate = bio_flagged(bio, BIO_UPTODATE);


	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	/* when writing the page in place (no copy), it must not have been
	 * redirtied while the IO was in flight */
	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);

	if (error) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
					error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(mdev, idx);

	/* if we wrote a copy, release it back to the mempool */
	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);

	bio_put(bio);

	/* last completion: signal bm_rw() and drop the IO-side kref */
	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&mdev->misc_wait);
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	}
}
986b411b363SPhilipp Reisner 
/* submit one bitmap page (index page_nr) for async read or write */
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	struct page *page;
	unsigned int len;

	/* on-disk location: start of the bitmap area, plus page_nr pages */
	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(mdev, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		/* write a private copy, so the live page may change freely
		 * while the IO is in flight (lazy writeout) */
		void *src, *dest;
		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
		dest = kmap_atomic(page, KM_USER0);
		src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
		memcpy(dest, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER1);
		kunmap_atomic(dest, KM_USER0);
		/* remember which bitmap page this copy represents, for the
		 * completion handler (bm_page_to_idx) */
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];

	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api.  Do we want to assert that? */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		/* fault injection: complete immediately with -EIO */
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &mdev->rs_sect_ev);
	}
}
1041b411b363SPhilipp Reisner 
/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */
static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct bm_aio_ctx *ctx;
	struct drbd_bitmap *b = mdev->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */

	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	/* in_flight starts at 1 (our submission-side reference);
	 * kref starts at 2: one put when in_flight reaches zero,
	 * one put at the "out" label below */
	*ctx = (struct bm_aio_ctx) {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = flags,
		.error = 0,
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
		err = -ENODEV;
		goto out;
	}

	/* plain (un-flagged) bitmap IO must happen under the bitmap lock */
	if (!ctx->flags)
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++) {
		/* ignore completely unchanged pages */
		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
			break;
		if (rw & WRITE) {
			/* hinted writeout: only pages explicitly marked */
			if ((flags & BM_AIO_WRITE_HINTED) &&
			    !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
				    &page_private(b->bm_pages[i])))
				continue;
			if (bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
		}
		atomic_inc(&ctx->in_flight);
		bm_page_io_async(ctx, i, rw);
		++count;
		cond_resched();
	}

	/*
	 * We initialize ctx->in_flight to one to make sure bm_async_io_complete
	 * will not set ctx->done early, and decrement / test it here.  If there
	 * are still some bios in flight, we need to wait for them here.
	 * If all IO is done already (or nothing had been submitted), there is
	 * no need to wait.  Still, we need to put the kref associated with the
	 * "in_flight reached zero, all done" event.
	 */
	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_disk_failure(mdev, &ctx->done);
	else
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);

	/* summary for global bitmap IO */
	if (flags == 0)
		dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
			 rw == WRITE ? "WRITE" : "READ",
			 count, jiffies - now);

	if (ctx->error) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, true);
		err = -EIO; /* ctx->error ? */
	}

	/* in_flight still non-zero: the disk failed while IO was pending */
	if (atomic_read(&ctx->in_flight))
		err = -EIO; /* Disk failed during IO... */

	now = jiffies;
	if (rw == WRITE) {
		drbd_md_flush(mdev);
	} else /* rw == READ */ {
		/* freshly read bitmap: recount the set bits */
		b->bm_set = bm_count_bits(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	if (flags == 0)
		dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
		     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

out:
	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	return err;
}
1163b411b363SPhilipp Reisner 
/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative error code otherwise (from bm_rw()).
 * Caller must hold a local-disk reference (__must_hold(local)).
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
	/* READ, no special flags, lazy_writeout_upper_idx == 0 (scan all pages) */
	return bm_rw(mdev, READ, 0, 0);
}
1172b411b363SPhilipp Reisner 
/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 * Returns 0 on success, negative error code otherwise (from bm_rw()).
 * Caller must hold a local-disk reference (__must_hold(local)).
 */
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
	/* WRITE, no special flags, lazy_writeout_upper_idx == 0 (scan all pages) */
	return bm_rw(mdev, WRITE, 0, 0);
}
1183b411b363SPhilipp Reisner 
1184b411b363SPhilipp Reisner /**
118519f843aaSLars Ellenberg  * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
1186b411b363SPhilipp Reisner  * @mdev:	DRBD device.
118719f843aaSLars Ellenberg  * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
1188b411b363SPhilipp Reisner  */
118919f843aaSLars Ellenberg int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
1190b411b363SPhilipp Reisner {
119145dfffebSLars Ellenberg 	return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
1192b411b363SPhilipp Reisner }
119319f843aaSLars Ellenberg 
/**
 * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
 * @mdev:	DRBD device.
 *
 * BM_AIO_WRITE_HINTED restricts the writeout to pages carrying the hint
 * mark (checked inside bm_rw()); combined with BM_AIO_COPY_PAGES as in
 * drbd_bm_write_lazy().
 */
int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}
120219f843aaSLars Ellenberg 
/**
 * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
 * @mdev:	DRBD device.
 * @idx:	bitmap page index
 *
 * We don't want to special case on logical_block_size of the backend device,
 * so we submit PAGE_SIZE aligned pieces.
 * Note that on "most" systems, PAGE_SIZE is 4k.
 *
 * In case this becomes an issue on systems with larger PAGE_SIZE,
 * we may want to change this again to write 4k aligned 4k pieces.
 *
 * Returns 0 on success (or if the page was unchanged and the write was
 * skipped), -ENOMEM / -ENODEV / -EIO / ctx->error otherwise.
 */
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
{
	struct bm_aio_ctx *ctx;
	int err;

	/* Skip the IO entirely if this page was not touched since its last
	 * writeout. */
	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
		dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
		return 0;
	}

	ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
	if (!ctx)
		return -ENOMEM;

	/* kref starts at 2: one reference for this function, one for the
	 * "in_flight reached zero" completion side (see bm_async_io_complete).
	 * in_flight starts at 1 and is consumed by the single async IO. */
	*ctx = (struct bm_aio_ctx) {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = BM_AIO_COPY_PAGES,
		.error = 0,
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
		err = -ENODEV;
		goto out;
	}

	bm_page_io_async(ctx, idx, WRITE_SYNC);
	wait_until_done_or_disk_failure(mdev, &ctx->done);

	if (ctx->error)
		drbd_chk_io_error(mdev, 1, true);
		/* that should force detach, so the in memory bitmap will be
		 * gone in a moment as well. */

	mdev->bm_writ_cnt++;
	/* in_flight still non-zero means the disk failed while the IO was
	 * pending (wait_until_done_or_disk_failure returned early). */
	err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
 out:
	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	return err;
}
1258b411b363SPhilipp Reisner 
/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 */
/* Scan for the next (set or zero, per @find_zero_bit) bit at or after
 * @bm_fo, one page at a time, mapping each page with the given kmap type.
 * Returns the bit number, or DRBD_END_OF_BITMAP if none is found (or if
 * @bm_fo was already out of range).  Caller is responsible for locking. */
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
	const int find_zero_bit, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;	/* bit number of the first bit in the current page */
	unsigned i;


	if (bm_fo > b->bm_bits) {
		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);

			/* search only within this page, starting at the
			 * in-page offset of bm_fo */
			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr, km);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				/* a hit in the surplus bits beyond bm_bits
				 * (last page padding) counts as "not found" */
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			/* nothing in this page; continue with the next one */
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}
1306b411b363SPhilipp Reisner 
1307b411b363SPhilipp Reisner static unsigned long bm_find_next(struct drbd_conf *mdev,
1308b411b363SPhilipp Reisner 	unsigned long bm_fo, const int find_zero_bit)
1309b411b363SPhilipp Reisner {
1310b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
13114b0715f0SLars Ellenberg 	unsigned long i = DRBD_END_OF_BITMAP;
1312b411b363SPhilipp Reisner 
1313841ce241SAndreas Gruenbacher 	if (!expect(b))
1314841ce241SAndreas Gruenbacher 		return i;
1315841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1316841ce241SAndreas Gruenbacher 		return i;
1317b411b363SPhilipp Reisner 
1318b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
131920ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1320b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1321b411b363SPhilipp Reisner 
1322b411b363SPhilipp Reisner 	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
1323b411b363SPhilipp Reisner 
1324b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
1325b411b363SPhilipp Reisner 	return i;
1326b411b363SPhilipp Reisner }
1327b411b363SPhilipp Reisner 
/* Find the next set bit at or after @bm_fo; takes the bitmap lock.
 * Returns a bit number (NOT a sector), or DRBD_END_OF_BITMAP. */
unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 0);
}
1332b411b363SPhilipp Reisner 
#if 0
/* not yet needed for anything. */
/* Would find the next clear bit at or after @bm_fo, with locking;
 * kept disabled until a caller exists. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 1);
}
#endif
1340b411b363SPhilipp Reisner 
/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
/* Unlocked variant of drbd_bm_find_next(); uses KM_USER1 since we are
 * not under the irq-disabling spinlock here. */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
}
1348b411b363SPhilipp Reisner 
/* Unlocked find-next-clear-bit; caller must hold drbd_bm_lock()
 * (same contract as _drbd_bm_find_next above). */
unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
}
1354b411b363SPhilipp Reisner 
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	unsigned long e, int val)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;	/* sentinel: no page mapped yet */
	int c = 0;		/* per-page change count (sign encodes direction) */
	int changed_total = 0;

	if (e >= b->bm_bits) {
		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		/* clamp rather than walk off the end of the bitmap */
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			/* crossed a page boundary: flush the dirty state of
			 * the previous page before mapping the next one */
			if (p_addr)
				__bm_unmap(p_addr, KM_IRQ1);
			/* c < 0: only bits cleared -> lazy writeout suffices;
			 * c > 0: bits were set -> page needs writeout */
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
			last_page_nr = page_nr;
		}
		/* non-atomic test-and-set/clear is fine: bitmap lock is held */
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	/* flush dirty state of the last touched page */
	if (p_addr)
		__bm_unmap(p_addr, KM_IRQ1);
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}
1405b411b363SPhilipp Reisner 
1406b411b363SPhilipp Reisner /* returns number of bits actually changed.
1407b411b363SPhilipp Reisner  * for val != 0, we change 0 -> 1, return code positive
1408b411b363SPhilipp Reisner  * for val == 0, we change 1 -> 0, return code negative
1409b411b363SPhilipp Reisner  * wants bitnr, not sector */
1410b4ee79daSPhilipp Reisner static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
1411b411b363SPhilipp Reisner 	const unsigned long e, int val)
1412b411b363SPhilipp Reisner {
1413b411b363SPhilipp Reisner 	unsigned long flags;
1414b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1415b411b363SPhilipp Reisner 	int c = 0;
1416b411b363SPhilipp Reisner 
1417841ce241SAndreas Gruenbacher 	if (!expect(b))
1418841ce241SAndreas Gruenbacher 		return 1;
1419841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1420841ce241SAndreas Gruenbacher 		return 0;
1421b411b363SPhilipp Reisner 
1422b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
142320ceb2b2SLars Ellenberg 	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
1424b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1425b411b363SPhilipp Reisner 
1426829c6087SLars Ellenberg 	c = __bm_change_bits_to(mdev, s, e, val);
1427b411b363SPhilipp Reisner 
1428b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1429b411b363SPhilipp Reisner 	return c;
1430b411b363SPhilipp Reisner }
1431b411b363SPhilipp Reisner 
/* returns number of bits changed 0 -> 1 */
/* Sets bits s..e inclusive, with locking.  For large ranges prefer
 * _drbd_bm_set_bits() (more efficient, but needs drbd_bm_lock()). */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(mdev, s, e, 1);
}
1437b411b363SPhilipp Reisner 
/* returns number of bits changed 1 -> 0 */
/* bm_change_bits_to() returns a negative count for clears; negate so the
 * caller always sees a non-negative "number of bits cleared". */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(mdev, s, e, 0);
}
1443b411b363SPhilipp Reisner 
/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
/* Accounts for newly set bits in b->bm_set and marks the page for lazy
 * writeout if anything changed.  Uses KM_IRQ1 kmap_atomic: caller is
 * expected to hold the irq-disabling bm_lock (see _drbd_bm_set_bits). */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
		int page_nr, int first_word, int last_word)
{
	int i;
	int bits;
	int changed = 0;
	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
	for (i = first_word; i < last_word; i++) {
		/* count bits already set, so we can account only for the
		 * 0 -> 1 transitions (no memset: we must track changes) */
		bits = hweight_long(paddr[i]);
		paddr[i] = ~0UL;
		changed += BITS_PER_LONG - bits;
	}
	kunmap_atomic(paddr, KM_IRQ1);
	if (changed) {
		/* We only need lazy writeout, the information is still in the
		 * remote bitmap as well, and is reconstructed during the next
		 * bitmap exchange, if lost locally due to a crash. */
		bm_set_page_lazy_writeout(b->bm_pages[page_nr]);
		b->bm_set += changed;
	}
}
1467b411b363SPhilipp Reisner 
/* Same thing as drbd_bm_set_bits,
 * but more efficient for a large bit range.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long sl = ALIGN(s,BITS_PER_LONG);	/* s rounded up to a long boundary */
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);	/* e+1 rounded down */
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(mdev, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	spin_lock_irq(&b->bm_lock);

	/* bits filling the current long */
	/* sl == 0 only when s == 0, in which case there is no partial head */
	if (sl)
		__bm_change_bits_to(mdev, s, sl-1, 1);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
		/* drop the lock between pages so we do not hog the CPU with
		 * irqs disabled on huge bitmaps */
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}

	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);
	bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(mdev, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}
1538b411b363SPhilipp Reisner 
1539b411b363SPhilipp Reisner /* returns bit state
1540b411b363SPhilipp Reisner  * wants bitnr, NOT sector.
1541b411b363SPhilipp Reisner  * inherently racy... area needs to be locked by means of {al,rs}_lru
1542b411b363SPhilipp Reisner  *  1 ... bit set
1543b411b363SPhilipp Reisner  *  0 ... bit not set
1544b411b363SPhilipp Reisner  * -1 ... first out of bounds access, stop testing for bits!
1545b411b363SPhilipp Reisner  */
1546b411b363SPhilipp Reisner int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
1547b411b363SPhilipp Reisner {
1548b411b363SPhilipp Reisner 	unsigned long flags;
1549b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1550b411b363SPhilipp Reisner 	unsigned long *p_addr;
1551b411b363SPhilipp Reisner 	int i;
1552b411b363SPhilipp Reisner 
1553841ce241SAndreas Gruenbacher 	if (!expect(b))
1554841ce241SAndreas Gruenbacher 		return 0;
1555841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1556841ce241SAndreas Gruenbacher 		return 0;
1557b411b363SPhilipp Reisner 
1558b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
155920ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1560b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1561b411b363SPhilipp Reisner 	if (bitnr < b->bm_bits) {
156219f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
15637e599e6eSLinus Torvalds 		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
1564b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1565b411b363SPhilipp Reisner 	} else if (bitnr == b->bm_bits) {
1566b411b363SPhilipp Reisner 		i = -1;
1567b411b363SPhilipp Reisner 	} else { /* (bitnr > b->bm_bits) */
1568b411b363SPhilipp Reisner 		dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
1569b411b363SPhilipp Reisner 		i = 0;
1570b411b363SPhilipp Reisner 	}
1571b411b363SPhilipp Reisner 
1572b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1573b411b363SPhilipp Reisner 	return i;
1574b411b363SPhilipp Reisner }
1575b411b363SPhilipp Reisner 
1576b411b363SPhilipp Reisner /* returns number of bits set in the range [s, e] */
1577b411b363SPhilipp Reisner int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1578b411b363SPhilipp Reisner {
1579b411b363SPhilipp Reisner 	unsigned long flags;
1580b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
158119f843aaSLars Ellenberg 	unsigned long *p_addr = NULL;
1582b411b363SPhilipp Reisner 	unsigned long bitnr;
158319f843aaSLars Ellenberg 	unsigned int page_nr = -1U;
1584b411b363SPhilipp Reisner 	int c = 0;
1585b411b363SPhilipp Reisner 
1586b411b363SPhilipp Reisner 	/* If this is called without a bitmap, that is a bug.  But just to be
1587b411b363SPhilipp Reisner 	 * robust in case we screwed up elsewhere, in that case pretend there
1588b411b363SPhilipp Reisner 	 * was one dirty bit in the requested area, so we won't try to do a
1589b411b363SPhilipp Reisner 	 * local read there (no bitmap probably implies no disk) */
1590841ce241SAndreas Gruenbacher 	if (!expect(b))
1591841ce241SAndreas Gruenbacher 		return 1;
1592841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1593841ce241SAndreas Gruenbacher 		return 1;
1594b411b363SPhilipp Reisner 
1595b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
159620ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1597b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1598b411b363SPhilipp Reisner 	for (bitnr = s; bitnr <= e; bitnr++) {
159919f843aaSLars Ellenberg 		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
160019f843aaSLars Ellenberg 		if (page_nr != idx) {
160119f843aaSLars Ellenberg 			page_nr = idx;
1602b411b363SPhilipp Reisner 			if (p_addr)
1603b411b363SPhilipp Reisner 				bm_unmap(p_addr);
160419f843aaSLars Ellenberg 			p_addr = bm_map_pidx(b, idx);
1605b411b363SPhilipp Reisner 		}
1606841ce241SAndreas Gruenbacher 		if (expect(bitnr < b->bm_bits))
16077e599e6eSLinus Torvalds 			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
1608841ce241SAndreas Gruenbacher 		else
1609841ce241SAndreas Gruenbacher 			dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
1610b411b363SPhilipp Reisner 	}
1611b411b363SPhilipp Reisner 	if (p_addr)
1612b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1613b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1614b411b363SPhilipp Reisner 	return c;
1615b411b363SPhilipp Reisner }
1616b411b363SPhilipp Reisner 
1617b411b363SPhilipp Reisner 
1618b411b363SPhilipp Reisner /* inherently racy...
1619b411b363SPhilipp Reisner  * return value may be already out-of-date when this function returns.
1620b411b363SPhilipp Reisner  * but the general usage is that this is only use during a cstate when bits are
1621b411b363SPhilipp Reisner  * only cleared, not set, and typically only care for the case when the return
1622b411b363SPhilipp Reisner  * value is zero, or we already "locked" this "bitmap extent" by other means.
1623b411b363SPhilipp Reisner  *
1624b411b363SPhilipp Reisner  * enr is bm-extent number, since we chose to name one sector (512 bytes)
1625b411b363SPhilipp Reisner  * worth of the bitmap a "bitmap extent".
1626b411b363SPhilipp Reisner  *
1627b411b363SPhilipp Reisner  * TODO
1628b411b363SPhilipp Reisner  * I think since we use it like a reference count, we should use the real
1629b411b363SPhilipp Reisner  * reference count of some bitmap extent element from some lru instead...
1630b411b363SPhilipp Reisner  *
1631b411b363SPhilipp Reisner  */
1632b411b363SPhilipp Reisner int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
1633b411b363SPhilipp Reisner {
1634b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1635b411b363SPhilipp Reisner 	int count, s, e;
1636b411b363SPhilipp Reisner 	unsigned long flags;
1637b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
1638b411b363SPhilipp Reisner 
1639841ce241SAndreas Gruenbacher 	if (!expect(b))
1640841ce241SAndreas Gruenbacher 		return 0;
1641841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1642841ce241SAndreas Gruenbacher 		return 0;
1643b411b363SPhilipp Reisner 
1644b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
164520ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1646b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1647b411b363SPhilipp Reisner 
1648b411b363SPhilipp Reisner 	s = S2W(enr);
1649b411b363SPhilipp Reisner 	e = min((size_t)S2W(enr+1), b->bm_words);
1650b411b363SPhilipp Reisner 	count = 0;
1651b411b363SPhilipp Reisner 	if (s < b->bm_words) {
1652b411b363SPhilipp Reisner 		int n = e-s;
165319f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
1654b411b363SPhilipp Reisner 		bm = p_addr + MLPP(s);
1655b411b363SPhilipp Reisner 		while (n--)
1656b411b363SPhilipp Reisner 			count += hweight_long(*bm++);
1657b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1658b411b363SPhilipp Reisner 	} else {
1659b411b363SPhilipp Reisner 		dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
1660b411b363SPhilipp Reisner 	}
1661b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1662b411b363SPhilipp Reisner 	return count;
1663b411b363SPhilipp Reisner }
1664b411b363SPhilipp Reisner 
/* Set all bits covered by the AL-extent al_enr.
 * Returns number of bits changed. */
/* Whole-word fill, like bm_set_full_words_within_one_page, but for the
 * word range of one activity-log extent, with locking and surplus-bit
 * cleanup at the end of the bitmap. */
unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long weight;	/* bm_set before the fill; reused for the delta */
	unsigned long s, e;	/* word (not bit) range of the extent */
	int count, i, do_now;
	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_SET & b->bm_flags)
		bm_print_lock_info(mdev);
	weight = b->bm_set;

	s = al_enr * BM_WORDS_PER_AL_EXT;
	e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
	/* assert that s and e are on the same page */
	D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
	      ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
	count = 0;
	if (s < b->bm_words) {
		i = do_now = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		/* count previously set bits so we can account only the delta */
		while (i--) {
			count += hweight_long(*bm);
			*bm = -1UL;
			bm++;
		}
		bm_unmap(p_addr);
		b->bm_set += do_now*BITS_PER_LONG - count;
		/* the fill may have set surplus bits past bm_bits in the
		 * last word; clear them again and undo their accounting */
		if (e == b->bm_words)
			b->bm_set -= bm_clear_surplus(b);
	} else {
		dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
	}
	weight = b->bm_set - weight;
	spin_unlock_irq(&b->bm_lock);
	return weight;
}
1710