xref: /openbmc/linux/drivers/block/drbd/drbd_bitmap.c (revision f0ff1357ce391265edbf844792da7da9a694f4bd)
1b411b363SPhilipp Reisner /*
2b411b363SPhilipp Reisner    drbd_bitmap.c
3b411b363SPhilipp Reisner 
4b411b363SPhilipp Reisner    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5b411b363SPhilipp Reisner 
6b411b363SPhilipp Reisner    Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
7b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8b411b363SPhilipp Reisner    Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9b411b363SPhilipp Reisner 
10b411b363SPhilipp Reisner    drbd is free software; you can redistribute it and/or modify
11b411b363SPhilipp Reisner    it under the terms of the GNU General Public License as published by
12b411b363SPhilipp Reisner    the Free Software Foundation; either version 2, or (at your option)
13b411b363SPhilipp Reisner    any later version.
14b411b363SPhilipp Reisner 
15b411b363SPhilipp Reisner    drbd is distributed in the hope that it will be useful,
16b411b363SPhilipp Reisner    but WITHOUT ANY WARRANTY; without even the implied warranty of
17b411b363SPhilipp Reisner    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18b411b363SPhilipp Reisner    GNU General Public License for more details.
19b411b363SPhilipp Reisner 
20b411b363SPhilipp Reisner    You should have received a copy of the GNU General Public License
21b411b363SPhilipp Reisner    along with drbd; see the file COPYING.  If not, write to
22b411b363SPhilipp Reisner    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23b411b363SPhilipp Reisner  */
24b411b363SPhilipp Reisner 
25b411b363SPhilipp Reisner #include <linux/bitops.h>
26b411b363SPhilipp Reisner #include <linux/vmalloc.h>
27b411b363SPhilipp Reisner #include <linux/string.h>
28b411b363SPhilipp Reisner #include <linux/drbd.h>
295a0e3ad6STejun Heo #include <linux/slab.h>
30b411b363SPhilipp Reisner #include <asm/kmap_types.h>
31*f0ff1357SStephen Rothwell 
32*f0ff1357SStephen Rothwell #include <asm-generic/bitops/le.h>
33*f0ff1357SStephen Rothwell 
34b411b363SPhilipp Reisner #include "drbd_int.h"
35b411b363SPhilipp Reisner 
3695a0f10cSLars Ellenberg 
37b411b363SPhilipp Reisner /* OPAQUE outside this file!
38b411b363SPhilipp Reisner  * interface defined in drbd_int.h
39b411b363SPhilipp Reisner 
40b411b363SPhilipp Reisner  * convention:
41b411b363SPhilipp Reisner  * function name drbd_bm_... => used elsewhere, "public".
42b411b363SPhilipp Reisner  * function name      bm_... => internal to implementation, "private".
434b0715f0SLars Ellenberg  */
44b411b363SPhilipp Reisner 
454b0715f0SLars Ellenberg 
464b0715f0SLars Ellenberg /*
474b0715f0SLars Ellenberg  * LIMITATIONS:
484b0715f0SLars Ellenberg  * We want to support >= peta byte of backend storage, while for now still using
494b0715f0SLars Ellenberg  * a granularity of one bit per 4KiB of storage.
504b0715f0SLars Ellenberg  * 1 << 50		bytes backend storage (1 PiB)
514b0715f0SLars Ellenberg  * 1 << (50 - 12)	bits needed
524b0715f0SLars Ellenberg  *	38 --> we need u64 to index and count bits
534b0715f0SLars Ellenberg  * 1 << (38 - 3)	bitmap bytes needed
544b0715f0SLars Ellenberg  *	35 --> we still need u64 to index and count bytes
554b0715f0SLars Ellenberg  *			(that's 32 GiB of bitmap for 1 PiB storage)
564b0715f0SLars Ellenberg  * 1 << (35 - 2)	32bit longs needed
574b0715f0SLars Ellenberg  *	33 --> we'd even need u64 to index and count 32bit long words.
584b0715f0SLars Ellenberg  * 1 << (35 - 3)	64bit longs needed
594b0715f0SLars Ellenberg  *	32 --> we could get away with a 32bit unsigned int to index and count
604b0715f0SLars Ellenberg  *	64bit long words, but I rather stay with unsigned long for now.
614b0715f0SLars Ellenberg  *	We probably should neither count nor point to bytes or long words
624b0715f0SLars Ellenberg  *	directly, but either by bitnumber, or by page index and offset.
634b0715f0SLars Ellenberg  * 1 << (35 - 12)
644b0715f0SLars Ellenberg  *	22 --> we need that much 4KiB pages of bitmap.
654b0715f0SLars Ellenberg  *	1 << (22 + 3) --> on a 64bit arch,
664b0715f0SLars Ellenberg  *	we need 32 MiB to store the array of page pointers.
674b0715f0SLars Ellenberg  *
684b0715f0SLars Ellenberg  * Because I'm lazy, and because the resulting patch was too large, too ugly
694b0715f0SLars Ellenberg  * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
704b0715f0SLars Ellenberg  * (1 << 32) bits * 4k storage.
714b0715f0SLars Ellenberg  *
724b0715f0SLars Ellenberg 
734b0715f0SLars Ellenberg  * bitmap storage and IO:
744b0715f0SLars Ellenberg  *	Bitmap is stored little endian on disk, and is kept little endian in
754b0715f0SLars Ellenberg  *	core memory. Currently we still hold the full bitmap in core as long
764b0715f0SLars Ellenberg  *	as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
774b0715f0SLars Ellenberg  *	seems excessive.
784b0715f0SLars Ellenberg  *
794b0715f0SLars Ellenberg  *	We plan to reduce the amount of in-core bitmap pages by paging them in
804b0715f0SLars Ellenberg  *	and out against their on-disk location as necessary, but need to make
814b0715f0SLars Ellenberg  *	sure we don't cause too much meta data IO, and must not deadlock in
824b0715f0SLars Ellenberg  *	tight memory situations. This needs some more work.
83b411b363SPhilipp Reisner  */
84b411b363SPhilipp Reisner 
85b411b363SPhilipp Reisner /*
86b411b363SPhilipp Reisner  * NOTE
87b411b363SPhilipp Reisner  *  Access to the *bm_pages is protected by bm_lock.
88b411b363SPhilipp Reisner  *  It is safe to read the other members within the lock.
89b411b363SPhilipp Reisner  *
90b411b363SPhilipp Reisner  *  drbd_bm_set_bits is called from bio_endio callbacks,
91b411b363SPhilipp Reisner  *  We may be called with irq already disabled,
92b411b363SPhilipp Reisner  *  so we need spin_lock_irqsave().
93b411b363SPhilipp Reisner  *  And we need the kmap_atomic.
94b411b363SPhilipp Reisner  */
struct drbd_bitmap {
	struct page **bm_pages;	/* array of bitmap pages; per-page meta info lives in page_private */
	spinlock_t bm_lock;	/* protects *bm_pages (see NOTE above) */

	/* see LIMITATIONS: above */

	unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
	unsigned long bm_bits;      /* total number of bits in the bitmap */
	size_t   bm_words;          /* number of long words backing bm_bits */
	size_t   bm_number_of_pages; /* number of entries in bm_pages */
	sector_t bm_dev_capacity;   /* device capacity (sectors) this bitmap covers */
	struct mutex bm_change; /* serializes resize operations */

	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */

	enum bm_flag bm_flags;  /* BM_LOCKED_MASK bits, BM_P_VMALLOCED, ... */

	/* debugging aid, in case we are still racy somewhere */
	char          *bm_why;  /* reason string passed to drbd_bm_lock() */
	struct task_struct *bm_task; /* task currently holding bm_change */
};
116b411b363SPhilipp Reisner 
117b4ee79daSPhilipp Reisner static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
118fd76438cSPhilipp Reisner 			       unsigned long e, int val, const enum km_type km);
119fd76438cSPhilipp Reisner 
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
/* Complain (rate-limited) that the bitmap is used while locked:
 * report both who trips over the lock (current) and who holds it (bm_task). */
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
	struct drbd_bitmap *b = mdev->bitmap;
	/* may be hit from hot paths; do not flood the log */
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
	    current == mdev->receiver.task ? "receiver" :
	    current == mdev->asender.task  ? "asender"  :
	    current == mdev->worker.task   ? "worker"   : current->comm,
	    func, b->bm_why ?: "?",
	    b->bm_task == mdev->receiver.task ? "receiver" :
	    b->bm_task == mdev->asender.task  ? "asender"  :
	    b->bm_task == mdev->worker.task   ? "worker"   : "?");
}
135b411b363SPhilipp Reisner 
13620ceb2b2SLars Ellenberg void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
137b411b363SPhilipp Reisner {
138b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
139b411b363SPhilipp Reisner 	int trylock_failed;
140b411b363SPhilipp Reisner 
141b411b363SPhilipp Reisner 	if (!b) {
142b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
143b411b363SPhilipp Reisner 		return;
144b411b363SPhilipp Reisner 	}
145b411b363SPhilipp Reisner 
1468a03ae2aSThomas Gleixner 	trylock_failed = !mutex_trylock(&b->bm_change);
147b411b363SPhilipp Reisner 
148b411b363SPhilipp Reisner 	if (trylock_failed) {
149b411b363SPhilipp Reisner 		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
150b411b363SPhilipp Reisner 		    current == mdev->receiver.task ? "receiver" :
151b411b363SPhilipp Reisner 		    current == mdev->asender.task  ? "asender"  :
152b411b363SPhilipp Reisner 		    current == mdev->worker.task   ? "worker"   : current->comm,
153b411b363SPhilipp Reisner 		    why, b->bm_why ?: "?",
154b411b363SPhilipp Reisner 		    b->bm_task == mdev->receiver.task ? "receiver" :
155b411b363SPhilipp Reisner 		    b->bm_task == mdev->asender.task  ? "asender"  :
156b411b363SPhilipp Reisner 		    b->bm_task == mdev->worker.task   ? "worker"   : "?");
1578a03ae2aSThomas Gleixner 		mutex_lock(&b->bm_change);
158b411b363SPhilipp Reisner 	}
15920ceb2b2SLars Ellenberg 	if (BM_LOCKED_MASK & b->bm_flags)
160b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
16120ceb2b2SLars Ellenberg 	b->bm_flags |= flags & BM_LOCKED_MASK;
162b411b363SPhilipp Reisner 
163b411b363SPhilipp Reisner 	b->bm_why  = why;
164b411b363SPhilipp Reisner 	b->bm_task = current;
165b411b363SPhilipp Reisner }
166b411b363SPhilipp Reisner 
167b411b363SPhilipp Reisner void drbd_bm_unlock(struct drbd_conf *mdev)
168b411b363SPhilipp Reisner {
169b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
170b411b363SPhilipp Reisner 	if (!b) {
171b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
172b411b363SPhilipp Reisner 		return;
173b411b363SPhilipp Reisner 	}
174b411b363SPhilipp Reisner 
17520ceb2b2SLars Ellenberg 	if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
176b411b363SPhilipp Reisner 		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");
177b411b363SPhilipp Reisner 
17820ceb2b2SLars Ellenberg 	b->bm_flags &= ~BM_LOCKED_MASK;
179b411b363SPhilipp Reisner 	b->bm_why  = NULL;
180b411b363SPhilipp Reisner 	b->bm_task = NULL;
1818a03ae2aSThomas Gleixner 	mutex_unlock(&b->bm_change);
182b411b363SPhilipp Reisner }
183b411b363SPhilipp Reisner 
18419f843aaSLars Ellenberg /* we store some "meta" info about our pages in page->private */
18519f843aaSLars Ellenberg /* at a granularity of 4k storage per bitmap bit:
18619f843aaSLars Ellenberg  * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
18719f843aaSLars Ellenberg  *  1<<38 bits,
18819f843aaSLars Ellenberg  *  1<<23 4k bitmap pages.
18919f843aaSLars Ellenberg  * Use 24 bits as page index, covers 2 peta byte storage
19019f843aaSLars Ellenberg  * at a granularity of 4k per bit.
19119f843aaSLars Ellenberg  * Used to report the failed page idx on io error from the endio handlers.
19219f843aaSLars Ellenberg  */
19319f843aaSLars Ellenberg #define BM_PAGE_IDX_MASK	((1UL<<24)-1)
19419f843aaSLars Ellenberg /* this page is currently read in, or written back */
19519f843aaSLars Ellenberg #define BM_PAGE_IO_LOCK		31
19619f843aaSLars Ellenberg /* if there has been an IO error for this page */
19719f843aaSLars Ellenberg #define BM_PAGE_IO_ERROR	30
19819f843aaSLars Ellenberg /* this is to be able to intelligently skip disk IO,
19919f843aaSLars Ellenberg  * set if bits have been set since last IO. */
20019f843aaSLars Ellenberg #define BM_PAGE_NEED_WRITEOUT	29
20119f843aaSLars Ellenberg /* to mark for lazy writeout once syncer cleared all clearable bits,
20219f843aaSLars Ellenberg  * set if bits have been cleared since last IO. */
20319f843aaSLars Ellenberg #define BM_PAGE_LAZY_WRITEOUT	28
20419f843aaSLars Ellenberg 
20519f843aaSLars Ellenberg /* store_page_idx uses non-atomic assignment. It is only used directly after
20619f843aaSLars Ellenberg  * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
20719f843aaSLars Ellenberg  * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
20819f843aaSLars Ellenberg  * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
20919f843aaSLars Ellenberg  * requires it all to be atomic as well. */
21019f843aaSLars Ellenberg static void bm_store_page_idx(struct page *page, unsigned long idx)
21119f843aaSLars Ellenberg {
21219f843aaSLars Ellenberg 	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
21319f843aaSLars Ellenberg 	page_private(page) |= idx;
21419f843aaSLars Ellenberg }
21519f843aaSLars Ellenberg 
21619f843aaSLars Ellenberg static unsigned long bm_page_to_idx(struct page *page)
21719f843aaSLars Ellenberg {
21819f843aaSLars Ellenberg 	return page_private(page) & BM_PAGE_IDX_MASK;
21919f843aaSLars Ellenberg }
22019f843aaSLars Ellenberg 
22119f843aaSLars Ellenberg /* As is very unlikely that the same page is under IO from more than one
22219f843aaSLars Ellenberg  * context, we can get away with a bit per page and one wait queue per bitmap.
22319f843aaSLars Ellenberg  */
22419f843aaSLars Ellenberg static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
22519f843aaSLars Ellenberg {
22619f843aaSLars Ellenberg 	struct drbd_bitmap *b = mdev->bitmap;
22719f843aaSLars Ellenberg 	void *addr = &page_private(b->bm_pages[page_nr]);
22819f843aaSLars Ellenberg 	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
22919f843aaSLars Ellenberg }
23019f843aaSLars Ellenberg 
23119f843aaSLars Ellenberg static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
23219f843aaSLars Ellenberg {
23319f843aaSLars Ellenberg 	struct drbd_bitmap *b = mdev->bitmap;
23419f843aaSLars Ellenberg 	void *addr = &page_private(b->bm_pages[page_nr]);
23519f843aaSLars Ellenberg 	clear_bit(BM_PAGE_IO_LOCK, addr);
23619f843aaSLars Ellenberg 	smp_mb__after_clear_bit();
23719f843aaSLars Ellenberg 	wake_up(&mdev->bitmap->bm_io_wait);
23819f843aaSLars Ellenberg }
23919f843aaSLars Ellenberg 
24019f843aaSLars Ellenberg /* set _before_ submit_io, so it may be reset due to being changed
24119f843aaSLars Ellenberg  * while this page is in flight... will get submitted later again */
24219f843aaSLars Ellenberg static void bm_set_page_unchanged(struct page *page)
24319f843aaSLars Ellenberg {
24419f843aaSLars Ellenberg 	/* use cmpxchg? */
24519f843aaSLars Ellenberg 	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
24619f843aaSLars Ellenberg 	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
24719f843aaSLars Ellenberg }
24819f843aaSLars Ellenberg 
/* Mark the page dirty: bits were set since the last IO,
 * so it must be written to disk again (see BM_PAGE_NEED_WRITEOUT). */
static void bm_set_page_need_writeout(struct page *page)
{
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
25319f843aaSLars Ellenberg 
25419f843aaSLars Ellenberg static int bm_test_page_unchanged(struct page *page)
25519f843aaSLars Ellenberg {
25619f843aaSLars Ellenberg 	volatile const unsigned long *addr = &page_private(page);
25719f843aaSLars Ellenberg 	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
25819f843aaSLars Ellenberg }
25919f843aaSLars Ellenberg 
/* Record an IO error for this page (set by the endio handlers). */
static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
26419f843aaSLars Ellenberg 
/* Clear a previously recorded IO error for this page. */
static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}
26919f843aaSLars Ellenberg 
/* Mark the page for lazy writeout: bits were cleared since last IO,
 * to be flushed once the syncer is done (see BM_PAGE_LAZY_WRITEOUT). */
static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}
27419f843aaSLars Ellenberg 
27519f843aaSLars Ellenberg static int bm_test_page_lazy_writeout(struct page *page)
27619f843aaSLars Ellenberg {
27719f843aaSLars Ellenberg 	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
27819f843aaSLars Ellenberg }
27919f843aaSLars Ellenberg 
28019f843aaSLars Ellenberg /* on a 32bit box, this would allow for exactly (2<<38) bits. */
28119f843aaSLars Ellenberg static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
28219f843aaSLars Ellenberg {
28319f843aaSLars Ellenberg 	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
28419f843aaSLars Ellenberg 	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
28519f843aaSLars Ellenberg 	BUG_ON(page_nr >= b->bm_number_of_pages);
28619f843aaSLars Ellenberg 	return page_nr;
28719f843aaSLars Ellenberg }
28819f843aaSLars Ellenberg 
/* Map a bit number to the index of the bitmap page holding it.
 * Each page holds PAGE_SIZE * 8 bits. */
static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
29695a0f10cSLars Ellenberg 
29795a0f10cSLars Ellenberg static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
29895a0f10cSLars Ellenberg {
29995a0f10cSLars Ellenberg 	struct page *page = b->bm_pages[idx];
30095a0f10cSLars Ellenberg 	return (unsigned long *) kmap_atomic(page, km);
30195a0f10cSLars Ellenberg }
30295a0f10cSLars Ellenberg 
/* Convenience wrapper: map a bitmap page using the KM_IRQ1 slot
 * (pairs with bm_unmap()). */
static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx, KM_IRQ1);
}
30795a0f10cSLars Ellenberg 
308b411b363SPhilipp Reisner static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
309b411b363SPhilipp Reisner {
310b411b363SPhilipp Reisner 	kunmap_atomic(p_addr, km);
311b411b363SPhilipp Reisner };
312b411b363SPhilipp Reisner 
313b411b363SPhilipp Reisner static void bm_unmap(unsigned long *p_addr)
314b411b363SPhilipp Reisner {
315b411b363SPhilipp Reisner 	return __bm_unmap(p_addr, KM_IRQ1);
316b411b363SPhilipp Reisner }
317b411b363SPhilipp Reisner 
318b411b363SPhilipp Reisner /* long word offset of _bitmap_ sector */
319b411b363SPhilipp Reisner #define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
320b411b363SPhilipp Reisner /* word offset from start of bitmap to word number _in_page_
321b411b363SPhilipp Reisner  * modulo longs per page
322b411b363SPhilipp Reisner #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
323b411b363SPhilipp Reisner  hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
324b411b363SPhilipp Reisner  so do it explicitly:
325b411b363SPhilipp Reisner  */
326b411b363SPhilipp Reisner #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
327b411b363SPhilipp Reisner 
328b411b363SPhilipp Reisner /* Long words per page */
329b411b363SPhilipp Reisner #define LWPP (PAGE_SIZE/sizeof(long))
330b411b363SPhilipp Reisner 
331b411b363SPhilipp Reisner /*
332b411b363SPhilipp Reisner  * actually most functions herein should take a struct drbd_bitmap*, not a
333b411b363SPhilipp Reisner  * struct drbd_conf*, but for the debug macros I like to have the mdev around
334b411b363SPhilipp Reisner  * to be able to report device specific.
335b411b363SPhilipp Reisner  */
336b411b363SPhilipp Reisner 
33719f843aaSLars Ellenberg 
338b411b363SPhilipp Reisner static void bm_free_pages(struct page **pages, unsigned long number)
339b411b363SPhilipp Reisner {
340b411b363SPhilipp Reisner 	unsigned long i;
341b411b363SPhilipp Reisner 	if (!pages)
342b411b363SPhilipp Reisner 		return;
343b411b363SPhilipp Reisner 
344b411b363SPhilipp Reisner 	for (i = 0; i < number; i++) {
345b411b363SPhilipp Reisner 		if (!pages[i]) {
346b411b363SPhilipp Reisner 			printk(KERN_ALERT "drbd: bm_free_pages tried to free "
347b411b363SPhilipp Reisner 					  "a NULL pointer; i=%lu n=%lu\n",
348b411b363SPhilipp Reisner 					  i, number);
349b411b363SPhilipp Reisner 			continue;
350b411b363SPhilipp Reisner 		}
351b411b363SPhilipp Reisner 		__free_page(pages[i]);
352b411b363SPhilipp Reisner 		pages[i] = NULL;
353b411b363SPhilipp Reisner 	}
354b411b363SPhilipp Reisner }
355b411b363SPhilipp Reisner 
/* Free memory obtained from bm_realloc_pages: @v says whether it
 * came from vmalloc (non-zero) or kmalloc (zero). */
static void bm_vk_free(void *ptr, int v)
{
	v ? vfree(ptr) : kfree(ptr);
}
363b411b363SPhilipp Reisner 
364b411b363SPhilipp Reisner /*
365b411b363SPhilipp Reisner  * "have" and "want" are NUMBER OF PAGES.
366b411b363SPhilipp Reisner  */
367b411b363SPhilipp Reisner static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
368b411b363SPhilipp Reisner {
369b411b363SPhilipp Reisner 	struct page **old_pages = b->bm_pages;
370b411b363SPhilipp Reisner 	struct page **new_pages, *page;
371b411b363SPhilipp Reisner 	unsigned int i, bytes, vmalloced = 0;
372b411b363SPhilipp Reisner 	unsigned long have = b->bm_number_of_pages;
373b411b363SPhilipp Reisner 
374b411b363SPhilipp Reisner 	BUG_ON(have == 0 && old_pages != NULL);
375b411b363SPhilipp Reisner 	BUG_ON(have != 0 && old_pages == NULL);
376b411b363SPhilipp Reisner 
377b411b363SPhilipp Reisner 	if (have == want)
378b411b363SPhilipp Reisner 		return old_pages;
379b411b363SPhilipp Reisner 
380b411b363SPhilipp Reisner 	/* Trying kmalloc first, falling back to vmalloc.
381b411b363SPhilipp Reisner 	 * GFP_KERNEL is ok, as this is done when a lower level disk is
382b411b363SPhilipp Reisner 	 * "attached" to the drbd.  Context is receiver thread or cqueue
383b411b363SPhilipp Reisner 	 * thread.  As we have no disk yet, we are not in the IO path,
384b411b363SPhilipp Reisner 	 * not even the IO path of the peer. */
385b411b363SPhilipp Reisner 	bytes = sizeof(struct page *)*want;
386b411b363SPhilipp Reisner 	new_pages = kmalloc(bytes, GFP_KERNEL);
387b411b363SPhilipp Reisner 	if (!new_pages) {
388b411b363SPhilipp Reisner 		new_pages = vmalloc(bytes);
389b411b363SPhilipp Reisner 		if (!new_pages)
390b411b363SPhilipp Reisner 			return NULL;
391b411b363SPhilipp Reisner 		vmalloced = 1;
392b411b363SPhilipp Reisner 	}
393b411b363SPhilipp Reisner 
394b411b363SPhilipp Reisner 	memset(new_pages, 0, bytes);
395b411b363SPhilipp Reisner 	if (want >= have) {
396b411b363SPhilipp Reisner 		for (i = 0; i < have; i++)
397b411b363SPhilipp Reisner 			new_pages[i] = old_pages[i];
398b411b363SPhilipp Reisner 		for (; i < want; i++) {
399b411b363SPhilipp Reisner 			page = alloc_page(GFP_HIGHUSER);
400b411b363SPhilipp Reisner 			if (!page) {
401b411b363SPhilipp Reisner 				bm_free_pages(new_pages + have, i - have);
402b411b363SPhilipp Reisner 				bm_vk_free(new_pages, vmalloced);
403b411b363SPhilipp Reisner 				return NULL;
404b411b363SPhilipp Reisner 			}
40519f843aaSLars Ellenberg 			/* we want to know which page it is
40619f843aaSLars Ellenberg 			 * from the endio handlers */
40719f843aaSLars Ellenberg 			bm_store_page_idx(page, i);
408b411b363SPhilipp Reisner 			new_pages[i] = page;
409b411b363SPhilipp Reisner 		}
410b411b363SPhilipp Reisner 	} else {
411b411b363SPhilipp Reisner 		for (i = 0; i < want; i++)
412b411b363SPhilipp Reisner 			new_pages[i] = old_pages[i];
413b411b363SPhilipp Reisner 		/* NOT HERE, we are outside the spinlock!
414b411b363SPhilipp Reisner 		bm_free_pages(old_pages + want, have - want);
415b411b363SPhilipp Reisner 		*/
416b411b363SPhilipp Reisner 	}
417b411b363SPhilipp Reisner 
418b411b363SPhilipp Reisner 	if (vmalloced)
41920ceb2b2SLars Ellenberg 		b->bm_flags |= BM_P_VMALLOCED;
420b411b363SPhilipp Reisner 	else
42120ceb2b2SLars Ellenberg 		b->bm_flags &= ~BM_P_VMALLOCED;
422b411b363SPhilipp Reisner 
423b411b363SPhilipp Reisner 	return new_pages;
424b411b363SPhilipp Reisner }
425b411b363SPhilipp Reisner 
426b411b363SPhilipp Reisner /*
427b411b363SPhilipp Reisner  * called on driver init only. TODO call when a device is created.
428b411b363SPhilipp Reisner  * allocates the drbd_bitmap, and stores it in mdev->bitmap.
429b411b363SPhilipp Reisner  */
430b411b363SPhilipp Reisner int drbd_bm_init(struct drbd_conf *mdev)
431b411b363SPhilipp Reisner {
432b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
433b411b363SPhilipp Reisner 	WARN_ON(b != NULL);
434b411b363SPhilipp Reisner 	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
435b411b363SPhilipp Reisner 	if (!b)
436b411b363SPhilipp Reisner 		return -ENOMEM;
437b411b363SPhilipp Reisner 	spin_lock_init(&b->bm_lock);
4388a03ae2aSThomas Gleixner 	mutex_init(&b->bm_change);
439b411b363SPhilipp Reisner 	init_waitqueue_head(&b->bm_io_wait);
440b411b363SPhilipp Reisner 
441b411b363SPhilipp Reisner 	mdev->bitmap = b;
442b411b363SPhilipp Reisner 
443b411b363SPhilipp Reisner 	return 0;
444b411b363SPhilipp Reisner }
445b411b363SPhilipp Reisner 
/* Return the device capacity (in sectors) this bitmap covers,
 * or 0 (ERR_IF complains) if no bitmap is allocated. */
sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
	ERR_IF(!mdev->bitmap) return 0;
	return mdev->bitmap->bm_dev_capacity;
}
451b411b363SPhilipp Reisner 
452b411b363SPhilipp Reisner /* called on driver unload. TODO: call when a device is destroyed.
453b411b363SPhilipp Reisner  */
454b411b363SPhilipp Reisner void drbd_bm_cleanup(struct drbd_conf *mdev)
455b411b363SPhilipp Reisner {
456b411b363SPhilipp Reisner 	ERR_IF (!mdev->bitmap) return;
457b411b363SPhilipp Reisner 	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
45820ceb2b2SLars Ellenberg 	bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
459b411b363SPhilipp Reisner 	kfree(mdev->bitmap);
460b411b363SPhilipp Reisner 	mdev->bitmap = NULL;
461b411b363SPhilipp Reisner }
462b411b363SPhilipp Reisner 
463b411b363SPhilipp Reisner /*
464b411b363SPhilipp Reisner  * since (b->bm_bits % BITS_PER_LONG) != 0,
465b411b363SPhilipp Reisner  * this masks out the remaining bits.
466b411b363SPhilipp Reisner  * Returns the number of bits cleared.
467b411b363SPhilipp Reisner  */
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
/* Clear all bits beyond bm_bits in the last bitmap page, including (on a
 * 32bit arch) the padding long that keeps us 64bit-aligned with the peer.
 * Returns the number of bits that were set and got cleared. */
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	/* the surplus bits can only live in the last page */
	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}
507b411b363SPhilipp Reisner 
/* Set all bits beyond bm_bits in the last bitmap page, including (on a
 * 32bit arch) the padding long -- the inverse of bm_clear_surplus(). */
static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) -1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	/* the surplus bits can only live in the last page */
	p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1);
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}
540b411b363SPhilipp Reisner 
/* you better not modify the bitmap while this is running,
 * or its results will be stale */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	/* mask of the valid bits in the last used long word.
	 * NOTE(review): if bm_bits is an exact multiple of BITS_PER_LONG,
	 * this mask is 0 and the '&=' on the last word below would wipe a
	 * fully used word -- confirm callers guarantee bm_bits is never
	 * long-aligned, or that the surplus handling makes this benign. */
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1;
	int idx, i, last_word;

	/* all but last page: every long word is fully in use */
	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
		p_addr = __bm_map_pidx(b, idx, KM_USER0);
		for (i = 0; i < LWPP; i++)
			bits += hweight_long(p_addr[i]);
		__bm_unmap(p_addr, KM_USER0);
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx, KM_USER0);
	for (i = 0; i < last_word; i++)
		bits += hweight_long(p_addr[i]);
	/* mask out any bits beyond bm_bits in the last word, then count it */
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr, KM_USER0);
	return bits;
}
571b411b363SPhilipp Reisner 
/* offset and len in long words.*/
/* Fill "len" long words of the bitmap, starting at word "offset", with the
 * byte value "c" (0x00 to clear, 0xff to set), page by page.
 * Marks each touched page as needing writeout.
 * Caller is responsible for locking (all callers hold b->bm_lock or the
 * whole-bitmap lock during resize). */
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		/* at most up to the next page boundary (LWPP = longs per page) */
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			/* should be impossible by construction of do_now above */
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
		} else
			memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}
601b411b363SPhilipp Reisner 
/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 *
 * @capacity is the new device size in 512 byte sectors; @set_new_bits
 * selects whether newly covered storage starts out all out-of-sync (0xff)
 * or all in-sync (0x00).
 * Returns -ENOSPC when the on-disk meta data area is too small for the
 * bitmap that @capacity would require.
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	ERR_IF(!b) return -ENOMEM;

	/* serialize against all other bitmap operations for the duration */
	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out; /* size unchanged: nothing to do */

	opages_vmalloced = (BM_P_VMALLOCED & b->bm_flags);

	if (capacity == 0) {
		/* detach-like shrink to zero: free all pages, zero all
		 * bookkeeping, but keep the bitmap structure itself */
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set   =
		b->bm_bits  =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	/* one bit per BM_SECT_PER_BIT sectors, device size rounded up */
	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		/* sectors reserved for the on-disk bitmap; each 512 byte
		 * sector holds 2^12 bits, hence the << 12 */
		u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
		put_ldev(mdev);
		if (bits > bits_on_disk) {
			dev_info(DEV, "bits = %lu\n", bits);
			dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	/* number of pages needed to hold "words" longs */
	want = ALIGN(words*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		/* same page count: reuse the existing page array as-is */
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	/* when growing with "new bits set", first set the old surplus bits,
	 * so the formerly-padding area of the last old word is consistent */
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			/* newly covered area starts out all out-of-sync */
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);

	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	if (!growing)
		/* shrinking dropped bits; recount instead of adjusting */
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(mdev);
	return err;
}
726b411b363SPhilipp Reisner 
727b411b363SPhilipp Reisner /* inherently racy:
728b411b363SPhilipp Reisner  * if not protected by other means, return value may be out of date when
729b411b363SPhilipp Reisner  * leaving this function...
730b411b363SPhilipp Reisner  * we still need to lock it, since it is important that this returns
731b411b363SPhilipp Reisner  * bm_set == 0 precisely.
732b411b363SPhilipp Reisner  *
733b411b363SPhilipp Reisner  * maybe bm_set should be atomic_t ?
734b411b363SPhilipp Reisner  */
7350778286aSPhilipp Reisner unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
736b411b363SPhilipp Reisner {
737b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
738b411b363SPhilipp Reisner 	unsigned long s;
739b411b363SPhilipp Reisner 	unsigned long flags;
740b411b363SPhilipp Reisner 
741b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
742b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
743b411b363SPhilipp Reisner 
744b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
745b411b363SPhilipp Reisner 	s = b->bm_set;
746b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
747b411b363SPhilipp Reisner 
748b411b363SPhilipp Reisner 	return s;
749b411b363SPhilipp Reisner }
750b411b363SPhilipp Reisner 
751b411b363SPhilipp Reisner unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
752b411b363SPhilipp Reisner {
753b411b363SPhilipp Reisner 	unsigned long s;
754b411b363SPhilipp Reisner 	/* if I don't have a disk, I don't know about out-of-sync status */
755b411b363SPhilipp Reisner 	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
756b411b363SPhilipp Reisner 		return 0;
757b411b363SPhilipp Reisner 	s = _drbd_bm_total_weight(mdev);
758b411b363SPhilipp Reisner 	put_ldev(mdev);
759b411b363SPhilipp Reisner 	return s;
760b411b363SPhilipp Reisner }
761b411b363SPhilipp Reisner 
762b411b363SPhilipp Reisner size_t drbd_bm_words(struct drbd_conf *mdev)
763b411b363SPhilipp Reisner {
764b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
765b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
766b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
767b411b363SPhilipp Reisner 
768b411b363SPhilipp Reisner 	return b->bm_words;
769b411b363SPhilipp Reisner }
770b411b363SPhilipp Reisner 
771b411b363SPhilipp Reisner unsigned long drbd_bm_bits(struct drbd_conf *mdev)
772b411b363SPhilipp Reisner {
773b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
774b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
775b411b363SPhilipp Reisner 
776b411b363SPhilipp Reisner 	return b->bm_bits;
777b411b363SPhilipp Reisner }
778b411b363SPhilipp Reisner 
/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 *
 * OR-merges the received words into the in-core bitmap, keeps the bm_set
 * count up to date incrementally, and marks touched pages for writeout.
 * If the merge reaches the end of the bitmap, surplus bits beyond bm_bits
 * are cleared again.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		/* at most up to the next page boundary */
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			/* account only the newly set bits of each word */
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);
	spin_unlock_irq(&b->bm_lock);
}
827b411b363SPhilipp Reisner 
828b411b363SPhilipp Reisner /* copy number words from the bitmap starting at offset into the buffer.
829b411b363SPhilipp Reisner  * buffer[i] will be little endian unsigned long.
830b411b363SPhilipp Reisner  */
831b411b363SPhilipp Reisner void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
832b411b363SPhilipp Reisner 		     unsigned long *buffer)
833b411b363SPhilipp Reisner {
834b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
835b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
836b411b363SPhilipp Reisner 	size_t end, do_now;
837b411b363SPhilipp Reisner 
838b411b363SPhilipp Reisner 	end = offset + number;
839b411b363SPhilipp Reisner 
840b411b363SPhilipp Reisner 	ERR_IF(!b) return;
841b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return;
842b411b363SPhilipp Reisner 
843b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
844b411b363SPhilipp Reisner 	if ((offset >= b->bm_words) ||
845b411b363SPhilipp Reisner 	    (end    >  b->bm_words) ||
846b411b363SPhilipp Reisner 	    (number <= 0))
847b411b363SPhilipp Reisner 		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
848b411b363SPhilipp Reisner 			(unsigned long)	offset,
849b411b363SPhilipp Reisner 			(unsigned long)	number,
850b411b363SPhilipp Reisner 			(unsigned long) b->bm_words);
851b411b363SPhilipp Reisner 	else {
852b411b363SPhilipp Reisner 		while (offset < end) {
853b411b363SPhilipp Reisner 			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
85419f843aaSLars Ellenberg 			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
855b411b363SPhilipp Reisner 			bm = p_addr + MLPP(offset);
856b411b363SPhilipp Reisner 			offset += do_now;
857b411b363SPhilipp Reisner 			while (do_now--)
85895a0f10cSLars Ellenberg 				*buffer++ = *bm++;
859b411b363SPhilipp Reisner 			bm_unmap(p_addr);
860b411b363SPhilipp Reisner 		}
861b411b363SPhilipp Reisner 	}
862b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
863b411b363SPhilipp Reisner }
864b411b363SPhilipp Reisner 
865b411b363SPhilipp Reisner /* set all bits in the bitmap */
866b411b363SPhilipp Reisner void drbd_bm_set_all(struct drbd_conf *mdev)
867b411b363SPhilipp Reisner {
868b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
869b411b363SPhilipp Reisner 	ERR_IF(!b) return;
870b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return;
871b411b363SPhilipp Reisner 
872b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
873b411b363SPhilipp Reisner 	bm_memset(b, 0, 0xff, b->bm_words);
874b411b363SPhilipp Reisner 	(void)bm_clear_surplus(b);
875b411b363SPhilipp Reisner 	b->bm_set = b->bm_bits;
876b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
877b411b363SPhilipp Reisner }
878b411b363SPhilipp Reisner 
879b411b363SPhilipp Reisner /* clear all bits in the bitmap */
880b411b363SPhilipp Reisner void drbd_bm_clear_all(struct drbd_conf *mdev)
881b411b363SPhilipp Reisner {
882b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
883b411b363SPhilipp Reisner 	ERR_IF(!b) return;
884b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return;
885b411b363SPhilipp Reisner 
886b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
887b411b363SPhilipp Reisner 	bm_memset(b, 0, 0, b->bm_words);
888b411b363SPhilipp Reisner 	b->bm_set = 0;
889b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
890b411b363SPhilipp Reisner }
891b411b363SPhilipp Reisner 
/* context for one batch of asynchronous bitmap page IO;
 * shared between the submitter (bm_rw, drbd_bm_write_page)
 * and the bio completion handler */
struct bm_aio_ctx {
	struct drbd_conf *mdev;
	atomic_t in_flight;	/* pending bios; biased by +1 while submitting, see bm_rw() */
	struct completion done;	/* signaled when in_flight drops to zero */
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
	int error;		/* last non-zero bio error code seen, if any */
};
90019f843aaSLars Ellenberg 
/* bv_page may be a copy, or may be the original */
/* bio completion handler for bitmap page IO submitted via bm_page_io_async().
 * Records errors in the shared bm_aio_ctx, updates per-page status flags,
 * releases the per-page IO lock, and completes the waiter once the last
 * in-flight bio has finished. */
static void bm_async_io_complete(struct bio *bio, int error)
{
	struct bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	/* for copied pages, the index was stashed by bm_store_page_idx() */
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
	int uptodate = bio_flagged(bio, BIO_UPTODATE);


	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?!
	 * do we want to WARN() on this? */
	if (!error && !uptodate)
		error = -EIO;

	/* when writing in place (no copy), the page must not have been
	 * redirtied while the IO was in flight */
	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
	    !bm_test_page_unchanged(b->bm_pages[idx]))
		dev_warn(DEV, "bitmap page idx %u changed during IO!\n", idx);

	if (error) {
		/* ctx error will hold the completed-last non-zero error code,
		 * in case error codes differ. */
		ctx->error = error;
		bm_set_page_io_err(b->bm_pages[idx]);
		/* Not identical to on disk version of it.
		 * Is BM_PAGE_IO_ERROR enough? */
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
					error, idx);
	} else {
		bm_clear_page_io_err(b->bm_pages[idx]);
		dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
	}

	/* allow new IO on this bitmap page */
	bm_page_unlock_io(mdev, idx);

	/* FIXME give back to page pool */
	if (ctx->flags & BM_AIO_COPY_PAGES)
		put_page(bio->bi_io_vec[0].bv_page);

	bio_put(bio);

	/* last completion wakes the waiter in bm_rw()/drbd_bm_write_page() */
	if (atomic_dec_and_test(&ctx->in_flight))
		complete(&ctx->done);
}
948b411b363SPhilipp Reisner 
/* Submit one bitmap page (@page_nr) for async read or write (@rw) against
 * the meta data device.  With BM_AIO_COPY_PAGES set, a snapshot copy of the
 * page is written instead of the live page, so the live page may be
 * redirtied at any time.  Completion is reported via bm_async_io_complete(). */
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
	/* we are process context. we always get a bio */
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	struct drbd_conf *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	struct page *page;
	unsigned int len;

	/* page index -> 512 byte sectors within the on-disk bitmap area */
	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(mdev, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);

	if (ctx->flags & BM_AIO_COPY_PAGES) {
		/* FIXME alloc_page is good enough for now, but actually needs
		 * to use pre-allocated page pool */
		/* NOTE(review): alloc_page() may return NULL, which would
		 * oops in kmap_atomic below -- verify whether callers can
		 * tolerate a failure path here */
		void *src, *dest;
		page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
		dest = kmap_atomic(page, KM_USER0);
		src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
		memcpy(dest, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER1);
		kunmap_atomic(dest, KM_USER0);
		/* remember which bitmap page this copy stands in for */
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];

	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		/* fault injection: fail the bio without touching the disk */
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
	}
}
1001b411b363SPhilipp Reisner 
/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 *
 * @rw: READ or WRITE.
 * @lazy_writeout_upper_idx: 0 for a full read/write of changed pages;
 * non-zero restricts a write to pages [0, upper_idx) that are marked for
 * lazy writeout, and makes the IO use page copies (BM_AIO_COPY_PAGES).
 * Returns 0, or -EIO if any page IO failed.
 */
static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct bm_aio_ctx ctx = {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		.flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
	};
	struct drbd_bitmap *b = mdev->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
	int err = 0;

	/*
	 * We are protected against bitmap disappearing/resizing by holding an
	 * ldev reference (caller must have called get_ldev()).
	 * For read/write, we are protected against changes to the bitmap by
	 * the bitmap lock (see drbd_bitmap_io).
	 * For lazy writeout, we don't care for ongoing changes to the bitmap,
	 * as we submit copies of pages anyways.
	 */
	if (!ctx.flags)
		WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));

	num_pages = b->bm_number_of_pages;

	now = jiffies;

	/* let the layers below us try to merge these bios... */
	for (i = 0; i < num_pages; i++) {
		/* ignore completely unchanged pages */
		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
			break;
		if (rw & WRITE) {
			if (bm_test_page_unchanged(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
				continue;
			}
			/* during lazy writeout,
			 * ignore those pages not marked for lazy writeout. */
			if (lazy_writeout_upper_idx &&
			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
				dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
				continue;
			}
		}
		atomic_inc(&ctx.in_flight);
		bm_page_io_async(&ctx, i, rw);
		++count;
		cond_resched();
	}

	/*
	 * We initialize ctx.in_flight to one to make sure bm_async_io_complete
	 * will not complete() early, and decrement / test it here.  If there
	 * are still some bios in flight, we need to wait for them here.
	 */
	if (!atomic_dec_and_test(&ctx.in_flight))
		wait_for_completion(&ctx.done);
	dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
			rw == WRITE ? "WRITE" : "READ",
			count, jiffies - now);

	if (ctx.error) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, true);
		err = -EIO; /* ctx.error ? */
	}

	now = jiffies;
	if (rw == WRITE) {
		/* make sure the written pages reach stable storage */
		drbd_md_flush(mdev);
	} else /* rw == READ */ {
		/* freshly read in: establish the in-core set-bit count */
		b->bm_set = bm_count_bits(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
		     jiffies - now);
	}
	now = b->bm_set;

	dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
	     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

	return err;
}
1090b411b363SPhilipp Reisner 
/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, -EIO if any page IO failed (see bm_rw()).
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, READ, 0);
}
1099b411b363SPhilipp Reisner 
/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 * Returns 0 on success, -EIO if any page IO failed (see bm_rw()).
 */
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
	return bm_rw(mdev, WRITE, 0);
}
1110b411b363SPhilipp Reisner 
/**
 * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
 * @mdev:	DRBD device.
 * @upper_idx:	0: write all changed pages; +ve: page index to stop scanning for changed pages
 *
 * Pages are written as copies, so the live bitmap may keep changing
 * while the IO is in flight (see bm_rw()).
 */
int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
{
	return bm_rw(mdev, WRITE, upper_idx);
}
112019f843aaSLars Ellenberg 
112119f843aaSLars Ellenberg 
/**
 * drbd_bm_write_page: Writes a PAGE_SIZE aligned piece of bitmap
 * @mdev:	DRBD device.
 * @idx:	bitmap page index
 *
 * We don't want to special case on logical_block_size of the backend device,
 * so we submit PAGE_SIZE aligned pieces.
 * Note that on "most" systems, PAGE_SIZE is 4k.
 *
 * In case this becomes an issue on systems with larger PAGE_SIZE,
 * we may want to change this again to write 4k aligned 4k pieces.
 *
 * Writes synchronously (waits for completion); a copy of the page is
 * submitted, so the live page may be redirtied meanwhile.
 * Returns 0 if the page was unchanged or written OK, else the bio error.
 */
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
{
	struct bm_aio_ctx ctx = {
		.mdev = mdev,
		.in_flight = ATOMIC_INIT(1),
		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		.flags = BM_AIO_COPY_PAGES,
	};

	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
		dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
		return 0;
	}

	/* single bio: in_flight goes 1 -> 0 in the completion handler */
	bm_page_io_async(&ctx, idx, WRITE_SYNC);
	wait_for_completion(&ctx.done);

	if (ctx.error)
		drbd_chk_io_error(mdev, 1, true);
		/* that should force detach, so the in memory bitmap will be
		 * gone in a moment as well. */

	mdev->bm_writ_cnt++;
	return ctx.error;
}
1159b411b363SPhilipp Reisner 
/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * For this to work on 32bit arch with bitnumbers > (1<<32),
 * we'd need to return u64, and get a whole lot of other places
 * fixed where we still use unsigned long.
 *
 * this returns a bit number, NOT a sector!
 */
/* Scan for the next (set, or with @find_zero_bit, clear) bit at or after
 * @bm_fo, one bitmap page at a time.  Returns the bit number, or
 * DRBD_END_OF_BITMAP if there is none before b->bm_bits.
 * Caller must hold the bitmap spinlock; @km selects the kmap_atomic slot. */
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
	const int find_zero_bit, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;	/* bit number of the first bit of the current page */
	unsigned i;


	if (bm_fo > b->bm_bits) {
		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
		bm_fo = DRBD_END_OF_BITMAP;
	} else {
		while (bm_fo < b->bm_bits) {
			/* bit offset of the first bit in the page */
			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);

			/* search within this page only; i is the in-page bit offset */
			if (find_zero_bit)
				i = generic_find_next_zero_le_bit(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
			else
				i = generic_find_next_le_bit(p_addr,
						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);

			__bm_unmap(p_addr, km);
			if (i < PAGE_SIZE*8) {
				bm_fo = bit_offset + i;
				/* a hit in the pad bits past bm_bits does not count;
				 * break falls through to DRBD_END_OF_BITMAP below */
				if (bm_fo >= b->bm_bits)
					break;
				goto found;
			}
			/* nothing in this page, continue with the next one */
			bm_fo = bit_offset + PAGE_SIZE*8;
		}
		bm_fo = DRBD_END_OF_BITMAP;
	}
 found:
	return bm_fo;
}
1207b411b363SPhilipp Reisner 
1208b411b363SPhilipp Reisner static unsigned long bm_find_next(struct drbd_conf *mdev,
1209b411b363SPhilipp Reisner 	unsigned long bm_fo, const int find_zero_bit)
1210b411b363SPhilipp Reisner {
1211b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
12124b0715f0SLars Ellenberg 	unsigned long i = DRBD_END_OF_BITMAP;
1213b411b363SPhilipp Reisner 
1214b411b363SPhilipp Reisner 	ERR_IF(!b) return i;
1215b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return i;
1216b411b363SPhilipp Reisner 
1217b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
121820ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1219b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1220b411b363SPhilipp Reisner 
1221b411b363SPhilipp Reisner 	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
1222b411b363SPhilipp Reisner 
1223b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
1224b411b363SPhilipp Reisner 	return i;
1225b411b363SPhilipp Reisner }
1226b411b363SPhilipp Reisner 
/* Find the first set bit at or after bm_fo.
 * Returns a bit number (NOT a sector), or DRBD_END_OF_BITMAP. */
unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 0);
}
1231b411b363SPhilipp Reisner 
#if 0
/* not yet needed for anything. */
/* Counterpart to drbd_bm_find_next(): first *clear* bit at or after bm_fo. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 1);
}
#endif
1239b411b363SPhilipp Reisner 
/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
/* Find the first set bit at or after bm_fo; lockless variant. */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
}
1247b411b363SPhilipp Reisner 
/* Find the first clear bit at or after bm_fo; lockless variant,
 * you must take drbd_bm_lock() first. */
unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
}
1253b411b363SPhilipp Reisner 
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
	unsigned long e, int val, const enum km_type km)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;	/* -1U: no page mapped yet */
	int c = 0;		/* net bit changes within the currently mapped page */
	int changed_total = 0;

	if (e >= b->bm_bits) {
		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
				s, e, b->bm_bits);
		e = b->bm_bits ? b->bm_bits -1 : 0;
	}
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != last_page_nr) {
			if (p_addr)
				__bm_unmap(p_addr, km);
			/* when leaving a page, record how it changed:
			 * net clears (c < 0) -> lazy writeout flag,
			 * net sets (c > 0) -> need-writeout flag */
			if (c < 0)
				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
			else if (c > 0)
				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
			changed_total += c;
			c = 0;
			p_addr = __bm_map_pidx(b, page_nr, km);
			last_page_nr = page_nr;
		}
		/* count only real transitions: +1 per 0->1, -1 per 1->0 */
		if (val)
			c += (0 == generic___test_and_set_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
		else
			c -= (0 != generic___test_and_clear_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
	}
	if (p_addr)
		__bm_unmap(p_addr, km);
	/* flush the accounting for the last mapped page */
	if (c < 0)
		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
	else if (c > 0)
		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
	changed_total += c;
	b->bm_set += changed_total;
	return changed_total;
}
1304b411b363SPhilipp Reisner 
1305b411b363SPhilipp Reisner /* returns number of bits actually changed.
1306b411b363SPhilipp Reisner  * for val != 0, we change 0 -> 1, return code positive
1307b411b363SPhilipp Reisner  * for val == 0, we change 1 -> 0, return code negative
1308b411b363SPhilipp Reisner  * wants bitnr, not sector */
1309b4ee79daSPhilipp Reisner static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
1310b411b363SPhilipp Reisner 	const unsigned long e, int val)
1311b411b363SPhilipp Reisner {
1312b411b363SPhilipp Reisner 	unsigned long flags;
1313b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1314b411b363SPhilipp Reisner 	int c = 0;
1315b411b363SPhilipp Reisner 
1316b411b363SPhilipp Reisner 	ERR_IF(!b) return 1;
1317b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
1318b411b363SPhilipp Reisner 
1319b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
132020ceb2b2SLars Ellenberg 	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
1321b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1322b411b363SPhilipp Reisner 
1323b411b363SPhilipp Reisner 	c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
1324b411b363SPhilipp Reisner 
1325b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1326b411b363SPhilipp Reisner 	return c;
1327b411b363SPhilipp Reisner }
1328b411b363SPhilipp Reisner 
/* returns number of bits changed 0 -> 1 */
/* Set bits [s, e] _inclusive_; bit numbers, not sectors. */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(mdev, s, e, 1);
}
1334b411b363SPhilipp Reisner 
/* returns number of bits changed 1 -> 0 */
/* Clear bits [s, e] _inclusive_; negate, since bm_change_bits_to()
 * reports clears as a negative count. */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(mdev, s, e, 0);
}
1340b411b363SPhilipp Reisner 
1341b411b363SPhilipp Reisner /* sets all bits in full words,
1342b411b363SPhilipp Reisner  * from first_word up to, but not including, last_word */
1343b411b363SPhilipp Reisner static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
1344b411b363SPhilipp Reisner 		int page_nr, int first_word, int last_word)
1345b411b363SPhilipp Reisner {
1346b411b363SPhilipp Reisner 	int i;
1347b411b363SPhilipp Reisner 	int bits;
1348b411b363SPhilipp Reisner 	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
1349b411b363SPhilipp Reisner 	for (i = first_word; i < last_word; i++) {
1350b411b363SPhilipp Reisner 		bits = hweight_long(paddr[i]);
1351b411b363SPhilipp Reisner 		paddr[i] = ~0UL;
1352b411b363SPhilipp Reisner 		b->bm_set += BITS_PER_LONG - bits;
1353b411b363SPhilipp Reisner 	}
1354b411b363SPhilipp Reisner 	kunmap_atomic(paddr, KM_USER0);
1355b411b363SPhilipp Reisner }
1356b411b363SPhilipp Reisner 
/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
	 * then assign full words up to the last long boundary (el),
	 * then set_bit up to and including the last bit (e).
	 *
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	unsigned long sl = ALIGN(s,BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
	int last_page;
	int page_nr;
	int first_word;
	int last_word;

	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		__bm_change_bits_to(mdev, s, e, 1, KM_USER0);
		return;
	}

	/* difference is large enough that we can trust sl and el */

	/* bits filling the current long */
	/* sl == 0 only for s == 0 (ALIGN rounds up); if s is already aligned,
	 * sl == s, and the range s..sl-1 is empty, so this is a no-op then */
	if (sl)
		__bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);

	/* 3 + PAGE_SHIFT == log2 of the number of bits per page */
	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);

	/* MLPP: modulo longs per page */
	/* LWPP: long words per page */
	first_word = MLPP(sl >> LN2_BPL);
	last_word = LWPP;

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
		cond_resched();
		first_word = 0;
	}

	/* last page (respectively only page, for first page == last page) */
	last_word = MLPP(el >> LN2_BPL);
	bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
	 * if that even was the very last bit,
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(mdev, el, e, 1, KM_USER0);
}
1418b411b363SPhilipp Reisner 
1419b411b363SPhilipp Reisner /* returns bit state
1420b411b363SPhilipp Reisner  * wants bitnr, NOT sector.
1421b411b363SPhilipp Reisner  * inherently racy... area needs to be locked by means of {al,rs}_lru
1422b411b363SPhilipp Reisner  *  1 ... bit set
1423b411b363SPhilipp Reisner  *  0 ... bit not set
1424b411b363SPhilipp Reisner  * -1 ... first out of bounds access, stop testing for bits!
1425b411b363SPhilipp Reisner  */
1426b411b363SPhilipp Reisner int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
1427b411b363SPhilipp Reisner {
1428b411b363SPhilipp Reisner 	unsigned long flags;
1429b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1430b411b363SPhilipp Reisner 	unsigned long *p_addr;
1431b411b363SPhilipp Reisner 	int i;
1432b411b363SPhilipp Reisner 
1433b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
1434b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
1435b411b363SPhilipp Reisner 
1436b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
143720ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1438b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1439b411b363SPhilipp Reisner 	if (bitnr < b->bm_bits) {
144019f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
14414b0715f0SLars Ellenberg 		i = generic_test_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
1442b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1443b411b363SPhilipp Reisner 	} else if (bitnr == b->bm_bits) {
1444b411b363SPhilipp Reisner 		i = -1;
1445b411b363SPhilipp Reisner 	} else { /* (bitnr > b->bm_bits) */
1446b411b363SPhilipp Reisner 		dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
1447b411b363SPhilipp Reisner 		i = 0;
1448b411b363SPhilipp Reisner 	}
1449b411b363SPhilipp Reisner 
1450b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1451b411b363SPhilipp Reisner 	return i;
1452b411b363SPhilipp Reisner }
1453b411b363SPhilipp Reisner 
1454b411b363SPhilipp Reisner /* returns number of bits set in the range [s, e] */
1455b411b363SPhilipp Reisner int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1456b411b363SPhilipp Reisner {
1457b411b363SPhilipp Reisner 	unsigned long flags;
1458b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
145919f843aaSLars Ellenberg 	unsigned long *p_addr = NULL;
1460b411b363SPhilipp Reisner 	unsigned long bitnr;
146119f843aaSLars Ellenberg 	unsigned int page_nr = -1U;
1462b411b363SPhilipp Reisner 	int c = 0;
1463b411b363SPhilipp Reisner 
1464b411b363SPhilipp Reisner 	/* If this is called without a bitmap, that is a bug.  But just to be
1465b411b363SPhilipp Reisner 	 * robust in case we screwed up elsewhere, in that case pretend there
1466b411b363SPhilipp Reisner 	 * was one dirty bit in the requested area, so we won't try to do a
1467b411b363SPhilipp Reisner 	 * local read there (no bitmap probably implies no disk) */
1468b411b363SPhilipp Reisner 	ERR_IF(!b) return 1;
1469b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 1;
1470b411b363SPhilipp Reisner 
1471b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
147220ceb2b2SLars Ellenberg 	if (BM_DONT_TEST & b->bm_flags)
1473b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1474b411b363SPhilipp Reisner 	for (bitnr = s; bitnr <= e; bitnr++) {
147519f843aaSLars Ellenberg 		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
147619f843aaSLars Ellenberg 		if (page_nr != idx) {
147719f843aaSLars Ellenberg 			page_nr = idx;
1478b411b363SPhilipp Reisner 			if (p_addr)
1479b411b363SPhilipp Reisner 				bm_unmap(p_addr);
148019f843aaSLars Ellenberg 			p_addr = bm_map_pidx(b, idx);
1481b411b363SPhilipp Reisner 		}
1482b411b363SPhilipp Reisner 		ERR_IF (bitnr >= b->bm_bits) {
1483b411b363SPhilipp Reisner 			dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
1484b411b363SPhilipp Reisner 		} else {
148595a0f10cSLars Ellenberg 			c += (0 != generic_test_le_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
1486b411b363SPhilipp Reisner 		}
1487b411b363SPhilipp Reisner 	}
1488b411b363SPhilipp Reisner 	if (p_addr)
1489b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1490b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1491b411b363SPhilipp Reisner 	return c;
1492b411b363SPhilipp Reisner }
1493b411b363SPhilipp Reisner 
1494b411b363SPhilipp Reisner 
/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * but the general usage is that this is only use during a cstate when bits are
 * only cleared, not set, and typically only care for the case when the return
 * value is zero, or we already "locked" this "bitmap extent" by other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 *
 * TODO
 * I think since we use it like a reference count, we should use the real
 * reference count of some bitmap extent element from some lru instead...
 *
 */
int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);

	/* word range [s, e) of this extent, clipped at the bitmap end */
	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
	count = 0;
	if (s < b->bm_words) {
		int n = e-s;
		/* only a single page is mapped here; assumes the words of one
		 * extent (512 bytes of bitmap) never cross a page boundary —
		 * NOTE(review): holds for 4k pages, confirm for other layouts */
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		while (n--)
			count += hweight_long(*bm++);
		bm_unmap(p_addr);
	} else {
		dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
	}
	spin_unlock_irqrestore(&b->bm_lock, flags);
	return count;
}
1539b411b363SPhilipp Reisner 
/* Set all bits covered by the AL-extent al_enr.
 * Returns number of bits changed. */
unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long weight;	/* bm_set snapshot, later repurposed as the delta */
	unsigned long s, e;
	int count, i, do_now;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_SET & b->bm_flags)
		bm_print_lock_info(mdev);
	weight = b->bm_set;

	/* word range [s, e) covered by this AL extent, clipped at bitmap end */
	s = al_enr * BM_WORDS_PER_AL_EXT;
	e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
	/* assert that s and e are on the same page */
	D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
	      ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
	count = 0;
	if (s < b->bm_words) {
		i = do_now = e-s;
		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
		bm = p_addr + MLPP(s);
		/* count bits that were already set, then set every word,
		 * so bm_set can be adjusted by the number of new bits only */
		while (i--) {
			count += hweight_long(*bm);
			*bm = -1UL;
			bm++;
		}
		bm_unmap(p_addr);
		b->bm_set += do_now*BITS_PER_LONG - count;
		/* if we touched the last word, undo the surplus pad bits —
		 * presumably those past bm_bits; see bm_clear_surplus() */
		if (e == b->bm_words)
			b->bm_set -= bm_clear_surplus(b);
	} else {
		dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
	}
	weight = b->bm_set - weight;
	spin_unlock_irq(&b->bm_lock);
	return weight;
}
1583