xref: /openbmc/linux/drivers/block/drbd/drbd_bitmap.c (revision 4738fa16907a933d72bbcae1b8922dc9330fde92)
1b411b363SPhilipp Reisner /*
2b411b363SPhilipp Reisner    drbd_bitmap.c
3b411b363SPhilipp Reisner 
4b411b363SPhilipp Reisner    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5b411b363SPhilipp Reisner 
6b411b363SPhilipp Reisner    Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
7b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9b411b363SPhilipp Reisner 
10b411b363SPhilipp Reisner    drbd is free software; you can redistribute it and/or modify
11b411b363SPhilipp Reisner    it under the terms of the GNU General Public License as published by
12b411b363SPhilipp Reisner    the Free Software Foundation; either version 2, or (at your option)
13b411b363SPhilipp Reisner    any later version.
14b411b363SPhilipp Reisner 
15b411b363SPhilipp Reisner    drbd is distributed in the hope that it will be useful,
16b411b363SPhilipp Reisner    but WITHOUT ANY WARRANTY; without even the implied warranty of
17b411b363SPhilipp Reisner    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18b411b363SPhilipp Reisner    GNU General Public License for more details.
19b411b363SPhilipp Reisner 
20b411b363SPhilipp Reisner    You should have received a copy of the GNU General Public License
21b411b363SPhilipp Reisner    along with drbd; see the file COPYING.  If not, write to
22b411b363SPhilipp Reisner    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23b411b363SPhilipp Reisner  */
24b411b363SPhilipp Reisner 
25b411b363SPhilipp Reisner #include <linux/bitops.h>
26b411b363SPhilipp Reisner #include <linux/vmalloc.h>
27b411b363SPhilipp Reisner #include <linux/string.h>
28b411b363SPhilipp Reisner #include <linux/drbd.h>
295a0e3ad6STejun Heo #include <linux/slab.h>
30b411b363SPhilipp Reisner #include <asm/kmap_types.h>
31f0ff1357SStephen Rothwell 
32b411b363SPhilipp Reisner #include "drbd_int.h"
33b411b363SPhilipp Reisner 
3495a0f10cSLars Ellenberg 
35b411b363SPhilipp Reisner /* OPAQUE outside this file!
36b411b363SPhilipp Reisner  * interface defined in drbd_int.h
37b411b363SPhilipp Reisner 
38b411b363SPhilipp Reisner  * convention:
39b411b363SPhilipp Reisner  * function name drbd_bm_... => used elsewhere, "public".
40b411b363SPhilipp Reisner  * function name      bm_... => internal to implementation, "private".
414b0715f0SLars Ellenberg  */
42b411b363SPhilipp Reisner 
434b0715f0SLars Ellenberg 
444b0715f0SLars Ellenberg /*
454b0715f0SLars Ellenberg  * LIMITATIONS:
464b0715f0SLars Ellenberg  * We want to support >= peta byte of backend storage, while for now still using
474b0715f0SLars Ellenberg  * a granularity of one bit per 4KiB of storage.
484b0715f0SLars Ellenberg  * 1 << 50		bytes backend storage (1 PiB)
494b0715f0SLars Ellenberg  * 1 << (50 - 12)	bits needed
504b0715f0SLars Ellenberg  *	38 --> we need u64 to index and count bits
514b0715f0SLars Ellenberg  * 1 << (38 - 3)	bitmap bytes needed
524b0715f0SLars Ellenberg  *	35 --> we still need u64 to index and count bytes
534b0715f0SLars Ellenberg  *			(that's 32 GiB of bitmap for 1 PiB storage)
544b0715f0SLars Ellenberg  * 1 << (35 - 2)	32bit longs needed
554b0715f0SLars Ellenberg  *	33 --> we'd even need u64 to index and count 32bit long words.
564b0715f0SLars Ellenberg  * 1 << (35 - 3)	64bit longs needed
574b0715f0SLars Ellenberg  *	32 --> we could get away with a 32bit unsigned int to index and count
584b0715f0SLars Ellenberg  *	64bit long words, but I rather stay with unsigned long for now.
594b0715f0SLars Ellenberg  *	We probably should neither count nor point to bytes or long words
604b0715f0SLars Ellenberg  *	directly, but either by bitnumber, or by page index and offset.
614b0715f0SLars Ellenberg  * 1 << (35 - 12)
624b0715f0SLars Ellenberg  *	22 --> we need that much 4KiB pages of bitmap.
634b0715f0SLars Ellenberg  *	1 << (22 + 3) --> on a 64bit arch,
644b0715f0SLars Ellenberg  *	we need 32 MiB to store the array of page pointers.
654b0715f0SLars Ellenberg  *
664b0715f0SLars Ellenberg  * Because I'm lazy, and because the resulting patch was too large, too ugly
674b0715f0SLars Ellenberg  * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
684b0715f0SLars Ellenberg  * (1 << 32) bits * 4k storage.
694b0715f0SLars Ellenberg  *
704b0715f0SLars Ellenberg 
714b0715f0SLars Ellenberg  * bitmap storage and IO:
724b0715f0SLars Ellenberg  *	Bitmap is stored little endian on disk, and is kept little endian in
734b0715f0SLars Ellenberg  *	core memory. Currently we still hold the full bitmap in core as long
744b0715f0SLars Ellenberg  *	as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
754b0715f0SLars Ellenberg  *	seems excessive.
764b0715f0SLars Ellenberg  *
7724c4830cSBart Van Assche  *	We plan to reduce the amount of in-core bitmap pages by paging them in
784b0715f0SLars Ellenberg  *	and out against their on-disk location as necessary, but need to make
794b0715f0SLars Ellenberg  *	sure we don't cause too much meta data IO, and must not deadlock in
804b0715f0SLars Ellenberg  *	tight memory situations. This needs some more work.
81b411b363SPhilipp Reisner  */
82b411b363SPhilipp Reisner 
83b411b363SPhilipp Reisner /*
84b411b363SPhilipp Reisner  * NOTE
85b411b363SPhilipp Reisner  *  Access to the *bm_pages is protected by bm_lock.
86b411b363SPhilipp Reisner  *  It is safe to read the other members within the lock.
87b411b363SPhilipp Reisner  *
88b411b363SPhilipp Reisner  *  drbd_bm_set_bits is called from bio_endio callbacks,
89b411b363SPhilipp Reisner  *  We may be called with irq already disabled,
90b411b363SPhilipp Reisner  *  so we need spin_lock_irqsave().
91b411b363SPhilipp Reisner  *  And we need the kmap_atomic.
92b411b363SPhilipp Reisner  */
struct drbd_bitmap {
	struct page **bm_pages;	/* array of pages carrying the bitmap data */
	spinlock_t bm_lock;	/* protects *bm_pages; other members are safe
				 * to read while holding it (see NOTE above) */

	/* see LIMITATIONS: above */

	unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
	unsigned long bm_bits;      /* total number of bits in the bitmap */
	size_t   bm_words;          /* number of unsigned longs covering bm_bits */
	size_t   bm_number_of_pages; /* number of entries in bm_pages */
	sector_t bm_dev_capacity;   /* device capacity covered, in sectors */
	struct mutex bm_change; /* serializes resize operations */

	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */

	enum bm_flag bm_flags;	/* BM_LOCKED_* and BM_P_VMALLOCED state */

	/* debugging aid, in case we are still racy somewhere */
	char          *bm_why;	/* reason string recorded by drbd_bm_lock() */
	struct task_struct *bm_task; /* task that currently holds bm_change */
};
114b411b363SPhilipp Reisner 
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
/* Complain (ratelimited) that the bitmap is locked, naming the thread and
 * reason recorded by drbd_bm_lock() as well as the calling function. */
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
	struct drbd_bitmap *b = mdev->bitmap;

	/* this fires on suspected locking bugs; do not spam the log */
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
		drbd_task_to_thread_name(mdev->tconn, current),
		func, b->bm_why ?: "?",
		drbd_task_to_thread_name(mdev->tconn, b->bm_task));
}
126b411b363SPhilipp Reisner 
12720ceb2b2SLars Ellenberg void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
128b411b363SPhilipp Reisner {
129b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
130b411b363SPhilipp Reisner 	int trylock_failed;
131b411b363SPhilipp Reisner 
132b411b363SPhilipp Reisner 	if (!b) {
133b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
134b411b363SPhilipp Reisner 		return;
135b411b363SPhilipp Reisner 	}
136b411b363SPhilipp Reisner 
1378a03ae2aSThomas Gleixner 	trylock_failed = !mutex_trylock(&b->bm_change);
138b411b363SPhilipp Reisner 
139b411b363SPhilipp Reisner 	if (trylock_failed) {
140b411b363SPhilipp Reisner 		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
141392c8801SPhilipp Reisner 			 drbd_task_to_thread_name(mdev->tconn, current),
142b411b363SPhilipp Reisner 			 why, b->bm_why ?: "?",
143392c8801SPhilipp Reisner 			 drbd_task_to_thread_name(mdev->tconn, b->bm_task));
1448a03ae2aSThomas Gleixner 		mutex_lock(&b->bm_change);
145b411b363SPhilipp Reisner 	}
14620ceb2b2SLars Ellenberg 	if (BM_LOCKED_MASK & b->bm_flags)
147b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
14820ceb2b2SLars Ellenberg 	b->bm_flags |= flags & BM_LOCKED_MASK;
149b411b363SPhilipp Reisner 
150b411b363SPhilipp Reisner 	b->bm_why  = why;
151b411b363SPhilipp Reisner 	b->bm_task = current;
152b411b363SPhilipp Reisner }
153b411b363SPhilipp Reisner 
154b411b363SPhilipp Reisner void drbd_bm_unlock(struct drbd_conf *mdev)
155b411b363SPhilipp Reisner {
156b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
157b411b363SPhilipp Reisner 	if (!b) {
158b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
159b411b363SPhilipp Reisner 		return;
160b411b363SPhilipp Reisner 	}
161b411b363SPhilipp Reisner 
16220ceb2b2SLars Ellenberg 	if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
163b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");
164b411b363SPhilipp Reisner 
16520ceb2b2SLars Ellenberg 	b->bm_flags &= ~BM_LOCKED_MASK;
166b411b363SPhilipp Reisner 	b->bm_why  = NULL;
167b411b363SPhilipp Reisner 	b->bm_task = NULL;
1688a03ae2aSThomas Gleixner 	mutex_unlock(&b->bm_change);
169b411b363SPhilipp Reisner }
170b411b363SPhilipp Reisner 
17119f843aaSLars Ellenberg /* we store some "meta" info about our pages in page->private */
17219f843aaSLars Ellenberg /* at a granularity of 4k storage per bitmap bit:
17319f843aaSLars Ellenberg  * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
17419f843aaSLars Ellenberg  *  1<<38 bits,
17519f843aaSLars Ellenberg  *  1<<23 4k bitmap pages.
17619f843aaSLars Ellenberg  * Use 24 bits as page index, covers 2 peta byte storage
17719f843aaSLars Ellenberg  * at a granularity of 4k per bit.
17819f843aaSLars Ellenberg  * Used to report the failed page idx on io error from the endio handlers.
17919f843aaSLars Ellenberg  */
18019f843aaSLars Ellenberg #define BM_PAGE_IDX_MASK	((1UL<<24)-1)
18119f843aaSLars Ellenberg /* this page is currently read in, or written back */
18219f843aaSLars Ellenberg #define BM_PAGE_IO_LOCK		31
18319f843aaSLars Ellenberg /* if there has been an IO error for this page */
18419f843aaSLars Ellenberg #define BM_PAGE_IO_ERROR	30
18519f843aaSLars Ellenberg /* this is to be able to intelligently skip disk IO,
18619f843aaSLars Ellenberg  * set if bits have been set since last IO. */
18719f843aaSLars Ellenberg #define BM_PAGE_NEED_WRITEOUT	29
18819f843aaSLars Ellenberg /* to mark for lazy writeout once syncer cleared all clearable bits,
 18919f843aaSLars Ellenberg  * set if bits have been cleared since last IO. */
19019f843aaSLars Ellenberg #define BM_PAGE_LAZY_WRITEOUT	28
19119f843aaSLars Ellenberg 
/* store_page_idx uses non-atomic assignment. It is only used directly after
 * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
 * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
 * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
 * requires it all to be atomic as well. */
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
	/* the index must fit into the low 24 bits (BM_PAGE_IDX_MASK) */
	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
	page_private(page) |= idx;
}
20219f843aaSLars Ellenberg 
/* Retrieve the bm_pages[] index stored by bm_store_page_idx();
 * used by the endio handlers to report which page failed. */
static unsigned long bm_page_to_idx(struct page *page)
{
	return page_private(page) & BM_PAGE_IDX_MASK;
}
20719f843aaSLars Ellenberg 
/* As is very unlikely that the same page is under IO from more than one
 * context, we can get away with a bit per page and one wait queue per bitmap.
 */
static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	/* sleep until we win the race for the BM_PAGE_IO_LOCK bit */
	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}
21719f843aaSLars Ellenberg 
/* Drop the per-page IO lock bit and wake waiters in bm_page_lock_io().
 * clear_bit_unlock() provides the release ordering that pairs with the
 * test_and_set_bit() acquire in bm_page_lock_io(). */
static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
	wake_up(&mdev->bitmap->bm_io_wait);
}
22519f843aaSLars Ellenberg 
/* set _before_ submit_io, so it may be reset due to being changed
 * while this page is in flight... will get submitted later again */
static void bm_set_page_unchanged(struct page *page)
{
	/* use cmpxchg? -- the two clears are individually atomic,
	 * but not atomic as a pair */
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
23419f843aaSLars Ellenberg 
/* Mark this page dirty: bits were set since the last writeout. */
static void bm_set_page_need_writeout(struct page *page)
{
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
23919f843aaSLars Ellenberg 
/* Return non-zero if neither NEED_WRITEOUT nor LAZY_WRITEOUT is set,
 * i.e. this page has no pending changes and its IO may be skipped.
 * volatile: force a fresh read, the flags may change concurrently. */
static int bm_test_page_unchanged(struct page *page)
{
	volatile const unsigned long *addr = &page_private(page);
	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}
24519f843aaSLars Ellenberg 
/* Record that IO on this page failed (set from the endio path). */
static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
25019f843aaSLars Ellenberg 
/* Reset the per-page IO error flag, e.g. after a successful retry. */
static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
25519f843aaSLars Ellenberg 
/* Mark this page for lazy writeout: bits were cleared since the last IO. */
static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
26019f843aaSLars Ellenberg 
/* Return non-zero if this page is marked for lazy writeout. */
static int bm_test_page_lazy_writeout(struct page *page)
{
	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
26519f843aaSLars Ellenberg 
/* on a 32bit box, this would allow for exactly (2<<38) bits. */
/* Map a long-word number within the bitmap to the index of the
 * bm_pages[] page that contains it. */
static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
27419f843aaSLars Ellenberg 
/* Map a bit number within the bitmap to the index of the
 * bm_pages[] page that contains it. */
static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
28295a0f10cSLars Ellenberg 
28395a0f10cSLars Ellenberg static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
28495a0f10cSLars Ellenberg {
28595a0f10cSLars Ellenberg 	struct page *page = b->bm_pages[idx];
28695a0f10cSLars Ellenberg 	return (unsigned long *) kmap_atomic(page, km);
28795a0f10cSLars Ellenberg }
28895a0f10cSLars Ellenberg 
/* Map a bitmap page using the KM_IRQ1 kmap slot; pairs with bm_unmap().
 * NOTE(review): presumably KM_IRQ1 because callers may run with irqs
 * disabled (see the spin_lock_irqsave note at the top of this file). */
static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx, KM_IRQ1);
}
29395a0f10cSLars Ellenberg 
294b411b363SPhilipp Reisner static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
295b411b363SPhilipp Reisner {
296b411b363SPhilipp Reisner 	kunmap_atomic(p_addr, km);
297b411b363SPhilipp Reisner };
298b411b363SPhilipp Reisner 
299b411b363SPhilipp Reisner static void bm_unmap(unsigned long *p_addr)
300b411b363SPhilipp Reisner {
301b411b363SPhilipp Reisner 	return __bm_unmap(p_addr, KM_IRQ1);
302b411b363SPhilipp Reisner }
303b411b363SPhilipp Reisner 
304b411b363SPhilipp Reisner /* long word offset of _bitmap_ sector */
305b411b363SPhilipp Reisner #define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
306b411b363SPhilipp Reisner /* word offset from start of bitmap to word number _in_page_
307b411b363SPhilipp Reisner  * modulo longs per page
308b411b363SPhilipp Reisner #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
30924c4830cSBart Van Assche  hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
310b411b363SPhilipp Reisner  so do it explicitly:
311b411b363SPhilipp Reisner  */
312b411b363SPhilipp Reisner #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
313b411b363SPhilipp Reisner 
314b411b363SPhilipp Reisner /* Long words per page */
315b411b363SPhilipp Reisner #define LWPP (PAGE_SIZE/sizeof(long))
316b411b363SPhilipp Reisner 
317b411b363SPhilipp Reisner /*
318b411b363SPhilipp Reisner  * actually most functions herein should take a struct drbd_bitmap*, not a
319b411b363SPhilipp Reisner  * struct drbd_conf*, but for the debug macros I like to have the mdev around
320b411b363SPhilipp Reisner  * to be able to report device specific.
321b411b363SPhilipp Reisner  */
322b411b363SPhilipp Reisner 
32319f843aaSLars Ellenberg 
324b411b363SPhilipp Reisner static void bm_free_pages(struct page **pages, unsigned long number)
325b411b363SPhilipp Reisner {
326b411b363SPhilipp Reisner 	unsigned long i;
327b411b363SPhilipp Reisner 	if (!pages)
328b411b363SPhilipp Reisner 		return;
329b411b363SPhilipp Reisner 
330b411b363SPhilipp Reisner 	for (i = 0; i < number; i++) {
331b411b363SPhilipp Reisner 		if (!pages[i]) {
332b411b363SPhilipp Reisner 			printk(KERN_ALERT "drbd: bm_free_pages tried to free "
333b411b363SPhilipp Reisner 					  "a NULL pointer; i=%lu n=%lu\n",
334b411b363SPhilipp Reisner 					  i, number);
335b411b363SPhilipp Reisner 			continue;
336b411b363SPhilipp Reisner 		}
337b411b363SPhilipp Reisner 		__free_page(pages[i]);
338b411b363SPhilipp Reisner 		pages[i] = NULL;
339b411b363SPhilipp Reisner 	}
340b411b363SPhilipp Reisner }
341b411b363SPhilipp Reisner 
/* Free a buffer allocated by bm_realloc_pages(): vfree() when it came
 * from vmalloc (v != 0), kfree() otherwise. */
static void bm_vk_free(void *ptr, int v)
{
	if (v) {
		vfree(ptr);
		return;
	}
	kfree(ptr);
}
349b411b363SPhilipp Reisner 
/*
 * "have" and "want" are NUMBER OF PAGES.
 *
 * Grow or shrink the bm_pages pointer array to hold "want" pages.
 * Returns the (possibly new) array, or NULL on allocation failure; on
 * failure the old array and its pages are left untouched.  When shrinking,
 * the surplus pages themselves are NOT freed here (see comment in the else
 * branch) -- only the pointer array is reallocated.
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes, vmalloced = 0;
	unsigned long have = b->bm_number_of_pages;

	/* "have pages" and "have an array" must agree */
	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_KERNEL is ok, as this is done when a lower level disk is
	 * "attached" to the drbd.  Context is receiver thread or cqueue
	 * thread.  As we have no disk yet, we are not in the IO path,
	 * not even the IO path of the peer. */
	bytes = sizeof(struct page *)*want;
	new_pages = kmalloc(bytes, GFP_KERNEL);
	if (!new_pages) {
		new_pages = vmalloc(bytes);
		if (!new_pages)
			return NULL;
		vmalloced = 1;
	}

	memset(new_pages, 0, bytes);
	if (want >= have) {
		/* growing: keep existing pages, allocate the new ones */
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_HIGHUSER);
			if (!page) {
				/* undo the partial allocation; the old
				 * array stays fully valid */
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages, vmalloced);
				return NULL;
			}
			/* we want to know which page it is
			 * from the endio handlers */
			bm_store_page_idx(page, i);
			new_pages[i] = page;
		}
	} else {
		/* shrinking: keep only the first "want" page pointers */
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	/* remember how the array was allocated, for bm_vk_free() later */
	if (vmalloced)
		b->bm_flags |= BM_P_VMALLOCED;
	else
		b->bm_flags &= ~BM_P_VMALLOCED;

	return new_pages;
}
411b411b363SPhilipp Reisner 
412b411b363SPhilipp Reisner /*
413b411b363SPhilipp Reisner  * called on driver init only. TODO call when a device is created.
414b411b363SPhilipp Reisner  * allocates the drbd_bitmap, and stores it in mdev->bitmap.
415b411b363SPhilipp Reisner  */
416b411b363SPhilipp Reisner int drbd_bm_init(struct drbd_conf *mdev)
417b411b363SPhilipp Reisner {
418b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
419b411b363SPhilipp Reisner 	WARN_ON(b != NULL);
420b411b363SPhilipp Reisner 	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
421b411b363SPhilipp Reisner 	if (!b)
422b411b363SPhilipp Reisner 		return -ENOMEM;
423b411b363SPhilipp Reisner 	spin_lock_init(&b->bm_lock);
4248a03ae2aSThomas Gleixner 	mutex_init(&b->bm_change);
425b411b363SPhilipp Reisner 	init_waitqueue_head(&b->bm_io_wait);
426b411b363SPhilipp Reisner 
427b411b363SPhilipp Reisner 	mdev->bitmap = b;
428b411b363SPhilipp Reisner 
429b411b363SPhilipp Reisner 	return 0;
430b411b363SPhilipp Reisner }
431b411b363SPhilipp Reisner 
/* Return the device capacity (in sectors) the bitmap currently covers,
 * or 0 if there is no bitmap (which would be a bug, hence expect()). */
sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
	if (!expect(mdev->bitmap))
		return 0;
	return mdev->bitmap->bm_dev_capacity;
}
438b411b363SPhilipp Reisner 
439b411b363SPhilipp Reisner /* called on driver unload. TODO: call when a device is destroyed.
440b411b363SPhilipp Reisner  */
441b411b363SPhilipp Reisner void drbd_bm_cleanup(struct drbd_conf *mdev)
442b411b363SPhilipp Reisner {
443841ce241SAndreas Gruenbacher 	if (!expect(mdev->bitmap))
444841ce241SAndreas Gruenbacher 		return;
445b411b363SPhilipp Reisner 	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
44620ceb2b2SLars Ellenberg 	bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
447b411b363SPhilipp Reisner 	kfree(mdev->bitmap);
448b411b363SPhilipp Reisner 	mdev->bitmap = NULL;
449b411b363SPhilipp Reisner }
450b411b363SPhilipp Reisner 
/*
 * since (b->bm_bits % BITS_PER_LONG) != 0,
 * this masks out the remaining bits.
 * Returns the number of bits cleared.
 */
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	/* all surplus bits live in the last bitmap page */
	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}
495b411b363SPhilipp Reisner 
/* Counterpart to bm_clear_surplus(): set (instead of clear) all bits
 * beyond bm_bits in the last word, plus the 32bit padding long. */
static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	/* all surplus bits live in the last bitmap page */
	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}
528b411b363SPhilipp Reisner 
5294b0715f0SLars Ellenberg /* you better not modify the bitmap while this is running,
5304b0715f0SLars Ellenberg  * or its results will be stale */
53195a0f10cSLars Ellenberg static unsigned long bm_count_bits(struct drbd_bitmap *b)
532b411b363SPhilipp Reisner {
5334b0715f0SLars Ellenberg 	unsigned long *p_addr;
534b411b363SPhilipp Reisner 	unsigned long bits = 0;
5354b0715f0SLars Ellenberg 	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
5366850c442SLars Ellenberg 	int idx, i, last_word;
5377777a8baSLars Ellenberg 
5384b0715f0SLars Ellenberg 	/* all but last page */
5396850c442SLars Ellenberg 	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
5404b0715f0SLars Ellenberg 		p_addr = __bm_map_pidx(b, idx, KM_USER0);
5414b0715f0SLars Ellenberg 		for (i = 0; i < LWPP; i++)
5424b0715f0SLars Ellenberg 			bits += hweight_long(p_addr[i]);
5437777a8baSLars Ellenberg 		__bm_unmap(p_addr, KM_USER0);
544b411b363SPhilipp Reisner 		cond_resched();
545b411b363SPhilipp Reisner 	}
5464b0715f0SLars Ellenberg 	/* last (or only) page */
5474b0715f0SLars Ellenberg 	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
5484b0715f0SLars Ellenberg 	p_addr = __bm_map_pidx(b, idx, KM_USER0);
5494b0715f0SLars Ellenberg 	for (i = 0; i < last_word; i++)
5504b0715f0SLars Ellenberg 		bits += hweight_long(p_addr[i]);
5514b0715f0SLars Ellenberg 	p_addr[last_word] &= cpu_to_lel(mask);
5524b0715f0SLars Ellenberg 	bits += hweight_long(p_addr[last_word]);
5534b0715f0SLars Ellenberg 	/* 32bit arch, may have an unused padding long */
5544b0715f0SLars Ellenberg 	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
5554b0715f0SLars Ellenberg 		p_addr[last_word+1] = 0;
5564b0715f0SLars Ellenberg 	__bm_unmap(p_addr, KM_USER0);
557b411b363SPhilipp Reisner 	return bits;
558b411b363SPhilipp Reisner }
559b411b363SPhilipp Reisner 
/* offset and len in long words.
 * Fill @len long words, starting at long-word @offset, with byte value @c
 * (memset semantics).  Marks every touched page as needing writeout.
 * Caller is responsible for serializing against other bitmap access. */
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		/* chunk: at most up to the next page boundary (LWPP = long
		 * words per page), so each iteration stays within the one
		 * page mapped below */
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		/* paranoia: chunk must not run past the mapped page */
		if (bm+do_now > p_addr + LWPP) {
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}
589b411b363SPhilipp Reisner 
/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 *
 * @capacity:	  new device size in 512-byte sectors; 0 means "free the bitmap"
 * @set_new_bits: if non-zero, newly added bitmap area is marked out-of-sync
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	if (!expect(b))
		return -ENOMEM;

	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);

	if (capacity == 0) {
		/* detach: tear the whole bitmap down.  Unhook the page array
		 * under the spinlock, free it outside. */
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set   =
		b->bm_bits  =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		/* sectors reserved for the on-disk bitmap, expressed in bits:
		 * sectors * 512 bytes * 8 bits/byte == sectors << 12 */
		u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
		put_ldev(mdev);
		if (bits > bits_on_disk) {
			dev_info(DEV, "bits = %lu\n", bits);
			dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		/* same number of pages: reuse the existing page array */
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	/* when growing with set_new_bits, pre-set the surplus bits of the old
	 * last word so the gap up to the new area counts as out-of-sync too */
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			/* new area: all out-of-sync; account for it */
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	/* recount outside the spinlock; still serialized by drbd_bm_lock above */
	if (!growing)
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(mdev);
	return err;
}
715b411b363SPhilipp Reisner 
716b411b363SPhilipp Reisner /* inherently racy:
717b411b363SPhilipp Reisner  * if not protected by other means, return value may be out of date when
718b411b363SPhilipp Reisner  * leaving this function...
719b411b363SPhilipp Reisner  * we still need to lock it, since it is important that this returns
720b411b363SPhilipp Reisner  * bm_set == 0 precisely.
721b411b363SPhilipp Reisner  *
722b411b363SPhilipp Reisner  * maybe bm_set should be atomic_t ?
723b411b363SPhilipp Reisner  */
7240778286aSPhilipp Reisner unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
725b411b363SPhilipp Reisner {
726b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
727b411b363SPhilipp Reisner 	unsigned long s;
728b411b363SPhilipp Reisner 	unsigned long flags;
729b411b363SPhilipp Reisner 
730841ce241SAndreas Gruenbacher 	if (!expect(b))
731841ce241SAndreas Gruenbacher 		return 0;
732841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
733841ce241SAndreas Gruenbacher 		return 0;
734b411b363SPhilipp Reisner 
735b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
736b411b363SPhilipp Reisner 	s = b->bm_set;
737b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
738b411b363SPhilipp Reisner 
739b411b363SPhilipp Reisner 	return s;
740b411b363SPhilipp Reisner }
741b411b363SPhilipp Reisner 
742b411b363SPhilipp Reisner unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
743b411b363SPhilipp Reisner {
744b411b363SPhilipp Reisner 	unsigned long s;
745b411b363SPhilipp Reisner 	/* if I don't have a disk, I don't know about out-of-sync status */
746b411b363SPhilipp Reisner 	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
747b411b363SPhilipp Reisner 		return 0;
748b411b363SPhilipp Reisner 	s = _drbd_bm_total_weight(mdev);
749b411b363SPhilipp Reisner 	put_ldev(mdev);
750b411b363SPhilipp Reisner 	return s;
751b411b363SPhilipp Reisner }
752b411b363SPhilipp Reisner 
753b411b363SPhilipp Reisner size_t drbd_bm_words(struct drbd_conf *mdev)
754b411b363SPhilipp Reisner {
755b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
756841ce241SAndreas Gruenbacher 	if (!expect(b))
757841ce241SAndreas Gruenbacher 		return 0;
758841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
759841ce241SAndreas Gruenbacher 		return 0;
760b411b363SPhilipp Reisner 
761b411b363SPhilipp Reisner 	return b->bm_words;
762b411b363SPhilipp Reisner }
763b411b363SPhilipp Reisner 
764b411b363SPhilipp Reisner unsigned long drbd_bm_bits(struct drbd_conf *mdev)
765b411b363SPhilipp Reisner {
766b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
767841ce241SAndreas Gruenbacher 	if (!expect(b))
768841ce241SAndreas Gruenbacher 		return 0;
769b411b363SPhilipp Reisner 
770b411b363SPhilipp Reisner 	return b->bm_bits;
771b411b363SPhilipp Reisner }
772b411b363SPhilipp Reisner 
/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 * OR-merges: bits already set in the bitmap stay set; bm_set is adjusted
 * by the number of newly set bits.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		/* chunk at most up to the next page boundary */
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			/* track the population delta per word to keep
			 * bm_set accurate */
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}
823b411b363SPhilipp Reisner 
/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 * (the in-core bitmap is kept little endian, so this is a plain word copy) */
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	/* NOTE(review): number is size_t (unsigned), so "number <= 0" only
	 * catches number == 0 — presumably that is all that was intended */
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			/* chunk at most up to the next page boundary */
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}
862b411b363SPhilipp Reisner 
/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	/* 0xff also sets the surplus bits beyond bm_bits;
	 * clear those again before fixing up the count */
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}
878b411b363SPhilipp Reisner 
/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
		return;

	spin_lock_irq(&b->bm_lock);
	/* all-zero: no surplus fixup needed, surplus bits are zero too */
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}
893b411b363SPhilipp Reisner 
/* Shared context for all bios of one bitmap I/O operation;
 * completed bios report back here via bm_async_io_complete(). */
struct bm_aio_ctx {
	struct drbd_conf *mdev;
	atomic_t in_flight;	/* bios not yet completed; submitter holds an
				 * extra reference until all are submitted */
	struct completion done;	/* completed by the last finishing bio */
	unsigned flags;
#define BM_AIO_COPY_PAGES	1	/* submit copies; originals may be redirtied meanwhile */
	int error;		/* last non-zero bio error code, if any */
};
90219f843aaSLars Ellenberg 
/* bio completion handler for bitmap page I/O submitted by bm_page_io_async().
 * bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
	struct bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	/* bitmap page index; for copies it was stored via bm_store_page_idx() */
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
	int uptodate = bio_flagged(bio, BIO_UPTODATE);


	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	/* when writing the original page (no copy), it must not have been
	 * redirtied while the I/O was in flight */
	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);

	if (error) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
					error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(mdev, idx);

	/* FIXME give back to page pool */
	if (ctx->flags & BM_AIO_COPY_PAGES)
		put_page(bio->bi_io_vec[0].bv_page);

	bio_put(bio);

	/* last bio out wakes the submitter waiting in bm_rw() /
	 * drbd_bm_write_page() */
	if (atomic_dec_and_test(&ctx->in_flight))
		complete(&ctx->done);
}
950b411b363SPhilipp Reisner 
/* Submit one bio for bitmap page @page_nr, direction @rw (READ/WRITE),
 * completion is reported into @ctx by bm_async_io_complete().
 * With BM_AIO_COPY_PAGES, a snapshot copy of the page is written instead of
 * the original, so the original may be redirtied while I/O is in flight. */
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
	/* we are process context. we always get a bio */
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	struct page *page;
	unsigned int len;

	/* on-disk location: bitmap area of the meta data, one PAGE per index */
	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(mdev, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		/* FIXME alloc_page is good enough for now, but actually needs
		 * to use pre-allocated page pool */
		/* NOTE(review): alloc_page() result is not checked for NULL
		 * before kmap_atomic/memcpy — presumably relied upon to not
		 * fail with __GFP_WAIT; verify or handle failure */
		void *src, *dest;
		page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
		dest = kmap_atomic(page, KM_USER0);
		src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
		memcpy(dest, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER1);
		kunmap_atomic(dest, KM_USER0);
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];

	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;
	/* NOTE(review): bio_add_page() return value ignored; with a freshly
	 * allocated single-vec bio and len <= PAGE_SIZE this should not fail */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		/* fault injection: complete the bio with -EIO instead of
		 * submitting it */
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &mdev->rs_sect_ev);
	}
}
1006b411b363SPhilipp Reisner 
/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 *
 * @rw:  READ or WRITE
 * @lazy_writeout_upper_idx:
 *	0: normal read/write of all (changed) pages;
 *	non-zero: lazy writeout of pages [0, upper_idx) that are marked for
 *	it, submitting copies (BM_AIO_COPY_PAGES).
 * Returns 0 on success, -EIO if any page I/O failed.
 */
static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct bm_aio_ctx ctx = {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		.flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
	};
	struct drbd_bitmap *b = mdev->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */
	if (!ctx.flags)
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++) {
		/* ignore completely unchanged pages */
		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
			break;
		if (rw & WRITE) {
			if (bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
		}
		atomic_inc(&ctx.in_flight);
		bm_page_io_async(&ctx, i, rw);
		++count;
		cond_resched();
	}

	/*
	 * We initialize ctx.in_flight to one to make sure bm_async_io_complete
	 * will not complete() early, and decrement / test it here.  If there
	 * are still some bios in flight, we need to wait for them here.
	 */
	if (!atomic_dec_and_test(&ctx.in_flight))
		wait_for_completion(&ctx.done);
	dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
			rw == WRITE ? "WRITE" : "READ",
			count, jiffies - now);

	if (ctx.error) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, true);
		err = -EIO; /* ctx.error ? */
	}

	now = jiffies;
	if (rw == WRITE) {
		/* make sure the bitmap writes hit stable storage */
		drbd_md_flush(mdev);
	} else /* rw == READ */ {
		/* after reading from disk, recount the set bits */
		b->bm_set = bm_count_bits(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
	     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	return err;
}
1095b411b363SPhilipp Reisner 
/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, -EIO if at least one bitmap page failed to read.
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, READ, 0);
}
1104b411b363SPhilipp Reisner 
/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 * Returns 0 on success, -EIO if at least one bitmap page failed to write.
 */
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, 0);
}
1115b411b363SPhilipp Reisner 
/**
 * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
 * @mdev:	DRBD device.
 * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
 *
 * Submits copies of the pages (BM_AIO_COPY_PAGES), so concurrent bitmap
 * modifications during the writeout are harmless.
 */
int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
{
	return bm_rw(mdev, WRITE, upper_idx);
}
112519f843aaSLars Ellenberg 
112619f843aaSLars Ellenberg 
/**
 * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
 * @mdev:	DRBD device.
 * @idx:	bitmap page index
 *
 * We don't want to special case on logical_block_size of the backend device,
 * so we submit PAGE_SIZE aligned pieces.
 * Note that on "most" systems, PAGE_SIZE is 4k.
 *
 * In case this becomes an issue on systems with larger PAGE_SIZE,
 * we may want to change this again to write 4k aligned 4k pieces.
 *
 * Returns 0 on success (including "page unchanged, nothing to do"),
 * otherwise the IO error code observed by the completion handler.
 */
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
{
	/* On-stack single-shot IO context: in_flight starts at 1 for the one
	 * page submitted below; the completion path signals ctx.done. */
	struct bm_aio_ctx ctx = {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		.flags = BM_AIO_COPY_PAGES,
	};

	/* Skip the write if this page was not dirtied since its last writeout. */
	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
		dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
		return 0;
	}

	/* Submit the page, then block until the async completion fires. */
	bm_page_io_async(&ctx, idx, WRITE_SYNC);
	wait_for_completion(&ctx.done);

	if (ctx.error)
		drbd_chk_io_error(mdev, 1, true);
		/* that should force detach, so the in memory bitmap will be
		 * gone in a moment as well. */

	mdev->bm_writ_cnt++;
	return ctx.error;
}
1164b411b363SPhilipp Reisner 
/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 */
/* Scan the bitmap for the next (set or zero, per @find_zero_bit) bit at or
 * after @bm_fo.  Returns the bit number, or DRBD_END_OF_BITMAP if none is
 * found before bm_bits.  @km selects the kmap_atomic slot; caller must hold
 * the appropriate lock for that slot (bm_lock for KM_IRQ1). */
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
	const int find_zero_bit, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;


	if (bm_fo > b->bm_bits) {
		/* starting offset beyond the bitmap is a caller bug; complain
		 * and report "nothing found" */
		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		/* search page by page, since the bitmap pages are not
		 * virtually contiguous */
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);

			if (find_zero_bit)
				i = find_next_zero_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = find_next_bit_le(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr, km);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				/* hit lies in the padding past the last valid
				 * bit: report end-of-bitmap instead */
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			/* nothing in this page; continue with the next one */
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}
1212b411b363SPhilipp Reisner 
1213b411b363SPhilipp Reisner static unsigned long bm_find_next(struct drbd_conf *mdev,
1214b411b363SPhilipp Reisner 	unsigned long bm_fo, const int find_zero_bit)
1215b411b363SPhilipp Reisner {
1216b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
12174b0715f0SLars Ellenberg 	unsigned long i = DRBD_END_OF_BITMAP;
1218b411b363SPhilipp Reisner 
1219841ce241SAndreas Gruenbacher 	if (!expect(b))
1220841ce241SAndreas Gruenbacher 		return i;
1221841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1222841ce241SAndreas Gruenbacher 		return i;
1223b411b363SPhilipp Reisner 
1224b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
122520ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1226b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1227b411b363SPhilipp Reisner 
1228b411b363SPhilipp Reisner 	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
1229b411b363SPhilipp Reisner 
1230b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
1231b411b363SPhilipp Reisner 	return i;
1232b411b363SPhilipp Reisner }
1233b411b363SPhilipp Reisner 
/* Find the next set (out-of-sync) bit at or after bm_fo.
 * Returns DRBD_END_OF_BITMAP if there is none. */
unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 0);
}
1238b411b363SPhilipp Reisner 
#if 0
/* not yet needed for anything.
 * kept (compiled out) as the locked counterpart of drbd_bm_find_next() */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 1);
}
#endif
1246b411b363SPhilipp Reisner 
/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
/* Lockless variant of drbd_bm_find_next(); uses the KM_USER1 kmap slot
 * since it runs in process context with the bitmap locked by other means. */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
}
1254b411b363SPhilipp Reisner 
/* Like _drbd_bm_find_next(), but searches for the next clear bit.
 * Caller must hold drbd_bm_lock(); does not take bm_lock itself. */
unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
}
1260b411b363SPhilipp Reisner 
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	unsigned long e, int val)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	/* sentinel: no page mapped yet; first loop iteration always remaps */
	unsigned int last_page_nr = -1U;
	int c = 0;	/* bits changed on the currently mapped page */
	int changed_total = 0;

	if (e >= b->bm_bits) {
		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			/* crossing a page boundary: unmap the previous page
			 * and record its dirtiness for later writeout.
			 * On the first iteration p_addr is NULL and c == 0,
			 * so nothing happens for the bogus last_page_nr. */
			if (p_addr)
				__bm_unmap(p_addr, KM_IRQ1);
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
			last_page_nr = page_nr;
		}
		if (val)
			c += (0 == __test_and_set_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	/* flush accounting for the last mapped page */
	if (p_addr)
		__bm_unmap(p_addr, KM_IRQ1);
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}
1311b411b363SPhilipp Reisner 
1312b411b363SPhilipp Reisner /* returns number of bits actually changed.
1313b411b363SPhilipp Reisner  * for val != 0, we change 0 -> 1, return code positive
1314b411b363SPhilipp Reisner  * for val == 0, we change 1 -> 0, return code negative
1315b411b363SPhilipp Reisner  * wants bitnr, not sector */
1316b4ee79daSPhilipp Reisner static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
1317b411b363SPhilipp Reisner 	const unsigned long e, int val)
1318b411b363SPhilipp Reisner {
1319b411b363SPhilipp Reisner 	unsigned long flags;
1320b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1321b411b363SPhilipp Reisner 	int c = 0;
1322b411b363SPhilipp Reisner 
1323841ce241SAndreas Gruenbacher 	if (!expect(b))
1324841ce241SAndreas Gruenbacher 		return 1;
1325841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1326841ce241SAndreas Gruenbacher 		return 0;
1327b411b363SPhilipp Reisner 
1328b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
132920ceb2b2SLars Ellenberg 	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
1330b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1331b411b363SPhilipp Reisner 
1332829c6087SLars Ellenberg 	c = __bm_change_bits_to(mdev, s, e, val);
1333b411b363SPhilipp Reisner 
1334b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1335b411b363SPhilipp Reisner 	return c;
1336b411b363SPhilipp Reisner }
1337b411b363SPhilipp Reisner 
/* returns number of bits changed 0 -> 1 */
/* Set bits s..e inclusive; takes bm_lock internally. */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(mdev, s, e, 1);
}
1343b411b363SPhilipp Reisner 
/* returns number of bits changed 1 -> 0 */
/* Clear bits s..e inclusive; bm_change_bits_to() reports clears as a
 * negative count, so negate to return a positive number. */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(mdev, s, e, 0);
}
1349b411b363SPhilipp Reisner 
1350b411b363SPhilipp Reisner /* sets all bits in full words,
1351b411b363SPhilipp Reisner  * from first_word up to, but not including, last_word */
1352b411b363SPhilipp Reisner static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
1353b411b363SPhilipp Reisner 		int page_nr, int first_word, int last_word)
1354b411b363SPhilipp Reisner {
1355b411b363SPhilipp Reisner 	int i;
1356b411b363SPhilipp Reisner 	int bits;
1357829c6087SLars Ellenberg 	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
1358b411b363SPhilipp Reisner 	for (i = first_word; i < last_word; i++) {
1359b411b363SPhilipp Reisner 		bits = hweight_long(paddr[i]);
1360b411b363SPhilipp Reisner 		paddr[i] = ~0UL;
1361b411b363SPhilipp Reisner 		b->bm_set += BITS_PER_LONG - bits;
1362b411b363SPhilipp Reisner 	}
1363829c6087SLars Ellenberg 	kunmap_atomic(paddr, KM_IRQ1);
1364b411b363SPhilipp Reisner }
1365b411b363SPhilipp Reisner 
/* Same thing as drbd_bm_set_bits,
 * but more efficient for a large bit range.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long sl = ALIGN(s,BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(mdev, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	spin_lock_irq(&b->bm_lock);

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(mdev, s, sl-1, 1);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
		/* drop the lock between pages so we don't hog the CPU with
		 * irqs disabled while setting a potentially huge range */
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
		spin_lock_irq(&b->bm_lock);
	}

	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);
	bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(mdev, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}
1436b411b363SPhilipp Reisner 
1437b411b363SPhilipp Reisner /* returns bit state
1438b411b363SPhilipp Reisner  * wants bitnr, NOT sector.
1439b411b363SPhilipp Reisner  * inherently racy... area needs to be locked by means of {al,rs}_lru
1440b411b363SPhilipp Reisner  *  1 ... bit set
1441b411b363SPhilipp Reisner  *  0 ... bit not set
1442b411b363SPhilipp Reisner  * -1 ... first out of bounds access, stop testing for bits!
1443b411b363SPhilipp Reisner  */
1444b411b363SPhilipp Reisner int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
1445b411b363SPhilipp Reisner {
1446b411b363SPhilipp Reisner 	unsigned long flags;
1447b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1448b411b363SPhilipp Reisner 	unsigned long *p_addr;
1449b411b363SPhilipp Reisner 	int i;
1450b411b363SPhilipp Reisner 
1451841ce241SAndreas Gruenbacher 	if (!expect(b))
1452841ce241SAndreas Gruenbacher 		return 0;
1453841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1454841ce241SAndreas Gruenbacher 		return 0;
1455b411b363SPhilipp Reisner 
1456b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
145720ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1458b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1459b411b363SPhilipp Reisner 	if (bitnr < b->bm_bits) {
146019f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
14617e599e6eSLinus Torvalds 		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
1462b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1463b411b363SPhilipp Reisner 	} else if (bitnr == b->bm_bits) {
1464b411b363SPhilipp Reisner 		i = -1;
1465b411b363SPhilipp Reisner 	} else { /* (bitnr > b->bm_bits) */
1466b411b363SPhilipp Reisner 		dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
1467b411b363SPhilipp Reisner 		i = 0;
1468b411b363SPhilipp Reisner 	}
1469b411b363SPhilipp Reisner 
1470b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1471b411b363SPhilipp Reisner 	return i;
1472b411b363SPhilipp Reisner }
1473b411b363SPhilipp Reisner 
1474b411b363SPhilipp Reisner /* returns number of bits set in the range [s, e] */
1475b411b363SPhilipp Reisner int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1476b411b363SPhilipp Reisner {
1477b411b363SPhilipp Reisner 	unsigned long flags;
1478b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
147919f843aaSLars Ellenberg 	unsigned long *p_addr = NULL;
1480b411b363SPhilipp Reisner 	unsigned long bitnr;
148119f843aaSLars Ellenberg 	unsigned int page_nr = -1U;
1482b411b363SPhilipp Reisner 	int c = 0;
1483b411b363SPhilipp Reisner 
1484b411b363SPhilipp Reisner 	/* If this is called without a bitmap, that is a bug.  But just to be
1485b411b363SPhilipp Reisner 	 * robust in case we screwed up elsewhere, in that case pretend there
1486b411b363SPhilipp Reisner 	 * was one dirty bit in the requested area, so we won't try to do a
1487b411b363SPhilipp Reisner 	 * local read there (no bitmap probably implies no disk) */
1488841ce241SAndreas Gruenbacher 	if (!expect(b))
1489841ce241SAndreas Gruenbacher 		return 1;
1490841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1491841ce241SAndreas Gruenbacher 		return 1;
1492b411b363SPhilipp Reisner 
1493b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
149420ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1495b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1496b411b363SPhilipp Reisner 	for (bitnr = s; bitnr <= e; bitnr++) {
149719f843aaSLars Ellenberg 		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
149819f843aaSLars Ellenberg 		if (page_nr != idx) {
149919f843aaSLars Ellenberg 			page_nr = idx;
1500b411b363SPhilipp Reisner 			if (p_addr)
1501b411b363SPhilipp Reisner 				bm_unmap(p_addr);
150219f843aaSLars Ellenberg 			p_addr = bm_map_pidx(b, idx);
1503b411b363SPhilipp Reisner 		}
1504841ce241SAndreas Gruenbacher 		if (expect(bitnr < b->bm_bits))
15057e599e6eSLinus Torvalds 			c += (0 != test_bit_le(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
1506841ce241SAndreas Gruenbacher 		else
1507841ce241SAndreas Gruenbacher 			dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
1508b411b363SPhilipp Reisner 	}
1509b411b363SPhilipp Reisner 	if (p_addr)
1510b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1511b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1512b411b363SPhilipp Reisner 	return c;
1513b411b363SPhilipp Reisner }
1514b411b363SPhilipp Reisner 
1515b411b363SPhilipp Reisner 
1516b411b363SPhilipp Reisner /* inherently racy...
1517b411b363SPhilipp Reisner  * return value may be already out-of-date when this function returns.
1518b411b363SPhilipp Reisner  * but the general usage is that this is only use during a cstate when bits are
1519b411b363SPhilipp Reisner  * only cleared, not set, and typically only care for the case when the return
1520b411b363SPhilipp Reisner  * value is zero, or we already "locked" this "bitmap extent" by other means.
1521b411b363SPhilipp Reisner  *
1522b411b363SPhilipp Reisner  * enr is bm-extent number, since we chose to name one sector (512 bytes)
1523b411b363SPhilipp Reisner  * worth of the bitmap a "bitmap extent".
1524b411b363SPhilipp Reisner  *
1525b411b363SPhilipp Reisner  * TODO
1526b411b363SPhilipp Reisner  * I think since we use it like a reference count, we should use the real
1527b411b363SPhilipp Reisner  * reference count of some bitmap extent element from some lru instead...
1528b411b363SPhilipp Reisner  *
1529b411b363SPhilipp Reisner  */
1530b411b363SPhilipp Reisner int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
1531b411b363SPhilipp Reisner {
1532b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1533b411b363SPhilipp Reisner 	int count, s, e;
1534b411b363SPhilipp Reisner 	unsigned long flags;
1535b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
1536b411b363SPhilipp Reisner 
1537841ce241SAndreas Gruenbacher 	if (!expect(b))
1538841ce241SAndreas Gruenbacher 		return 0;
1539841ce241SAndreas Gruenbacher 	if (!expect(b->bm_pages))
1540841ce241SAndreas Gruenbacher 		return 0;
1541b411b363SPhilipp Reisner 
1542b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
154320ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1544b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1545b411b363SPhilipp Reisner 
1546b411b363SPhilipp Reisner 	s = S2W(enr);
1547b411b363SPhilipp Reisner 	e = min((size_t)S2W(enr+1), b->bm_words);
1548b411b363SPhilipp Reisner 	count = 0;
1549b411b363SPhilipp Reisner 	if (s < b->bm_words) {
1550b411b363SPhilipp Reisner 		int n = e-s;
155119f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
1552b411b363SPhilipp Reisner 		bm = p_addr + MLPP(s);
1553b411b363SPhilipp Reisner 		while (n--)
1554b411b363SPhilipp Reisner 			count += hweight_long(*bm++);
1555b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1556b411b363SPhilipp Reisner 	} else {
1557b411b363SPhilipp Reisner 		dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
1558b411b363SPhilipp Reisner 	}
1559b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1560b411b363SPhilipp Reisner 	return count;
1561b411b363SPhilipp Reisner }
1562b411b363SPhilipp Reisner 
/* Set all bits covered by the AL-extent al_enr.
 * Returns number of bits changed. */
unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long weight;
	unsigned long s, e;
	int count, i, do_now;
	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
		return 0;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_SET & b->bm_flags)
		bm_print_lock_info(mdev);
	/* remember bm_set so we can report the delta at the end */
	weight = b->bm_set;

	/* word range covered by this AL extent, clipped to the bitmap size */
	s = al_enr * BM_WORDS_PER_AL_EXT;
	e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
	/* assert that s and e are on the same page */
	D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
	      ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
	count = 0;
	if (s < b->bm_words) {
		i = do_now = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		/* count the bits that were already set, then saturate the words */
		while (i--) {
			count += hweight_long(*bm);
			*bm = -1UL;
			bm++;
		}
		bm_unmap(p_addr);
		b->bm_set += do_now*BITS_PER_LONG - count;
		/* the last word may extend past bm_bits: undo the accounting
		 * for (and clear) the surplus bits */
		if (e == b->bm_words)
			b->bm_set -= bm_clear_surplus(b);
	} else {
		dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
	}
	weight = b->bm_set - weight;
	spin_unlock_irq(&b->bm_lock);
	return weight;
}
1608