xref: /openbmc/linux/drivers/block/drbd/drbd_bitmap.c (revision 4b0715f09655e76ca24c35a9e25e7c464c2f7346)
/*
   drbd_bitmap.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>
#include "drbd_int.h"


/* OPAQUE outside this file!
 * interface defined in drbd_int.h
 *
 * convention:
 * function name drbd_bm_... => used elsewhere, "public".
 * function name      bm_... => internal to implementation, "private".
 */


/*
 * LIMITATIONS:
 * We want to support >= peta byte of backend storage, while for now still using
 * a granularity of one bit per 4KiB of storage.
 * 1 << 50		bytes backend storage (1 PiB)
 * 1 << (50 - 12)	bits needed
 *	38 --> we need u64 to index and count bits
 * 1 << (38 - 3)	bitmap bytes needed
 *	35 --> we still need u64 to index and count bytes
 *			(that's 32 GiB of bitmap for 1 PiB storage)
 * 1 << (35 - 2)	32bit longs needed
 *	33 --> we'd even need u64 to index and count 32bit long words.
 * 1 << (35 - 3)	64bit longs needed
 *	32 --> we could get away with a 32bit unsigned int to index and count
 *	64bit long words, but I rather stay with unsigned long for now.
 *	We probably should neither count nor point to bytes or long words
 *	directly, but either by bitnumber, or by page index and offset.
 * 1 << (35 - 12)
 *	23 --> we need that many 4KiB pages of bitmap.
 *	1 << (23 + 3) --> on a 64bit arch,
 *	we need 64 MiB to store the array of page pointers.
 *
 * Because I'm lazy, and because the resulting patch was too large, too ugly
 * and still incomplete, on 32bit we still "only" support 16 TiB (minus some),
 * (1 << 32) bits * 4k storage.
 *
 *
 * bitmap storage and IO:
 *	Bitmap is stored little endian on disk, and is kept little endian in
 *	core memory. Currently we still hold the full bitmap in core as long
 *	as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
 *	seems excessive.
 *
 *	We plan to reduce the amount of in-core bitmap pages by paging them in
 *	and out against their on-disk location as necessary, but need to make
 *	sure we don't cause too much meta data IO, and must not deadlock in
 *	tight memory situations. This needs some more work.
 */
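
/* Illustrative sketch, not driver code: the size chain above written out as
 * compile-time constants, assuming 4 KiB pages and 64-bit longs. All
 * EXAMPLE_* names are hypothetical and kept out of the build via #if 0. */
#if 0
#define EXAMPLE_STORAGE_BYTES	(1ULL << 50)			/* 1 PiB backend storage */
#define EXAMPLE_BITS		(EXAMPLE_STORAGE_BYTES >> 12)	/* one bit per 4 KiB: 1 << 38 */
#define EXAMPLE_BITMAP_BYTES	(EXAMPLE_BITS >> 3)		/* 1 << 35, i.e. 32 GiB of bitmap */
#define EXAMPLE_BITMAP_PAGES	(EXAMPLE_BITMAP_BYTES >> 12)	/* 1 << 23 pages of bitmap */
#define EXAMPLE_PTR_ARRAY_BYTES	(EXAMPLE_BITMAP_PAGES << 3)	/* 1 << 26, 64 MiB of page pointers */
#endif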

/*
 * NOTE
 *  Access to the *bm_pages is protected by bm_lock.
 *  It is safe to read the other members within the lock.
 *
 *  drbd_bm_set_bits is called from bio_endio callbacks;
 *  we may be called with irq already disabled,
 *  so we need spin_lock_irqsave().
 *  And we need the kmap_atomic.
 */
struct drbd_bitmap {
	struct page **bm_pages;
	spinlock_t bm_lock;

	/* see LIMITATIONS: above */

	unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
	unsigned long bm_bits;
	size_t   bm_words;
	size_t   bm_number_of_pages;
	sector_t bm_dev_capacity;
	struct mutex bm_change; /* serializes resize operations */

	wait_queue_head_t bm_io_wait; /* used to serialize IO of single pages */

	unsigned long  bm_flags;

	/* debugging aid, in case we are still racy somewhere */
	char          *bm_why;
	struct task_struct *bm_task;
};

/* definition of bits in bm_flags */
#define BM_LOCKED       0
/* #define BM_MD_IO_ERROR  1	unused now. */
#define BM_P_VMALLOCED  2

static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
			       unsigned long e, int val, const enum km_type km);

static int bm_is_locked(struct drbd_bitmap *b)
{
	return test_bit(BM_LOCKED, &b->bm_flags);
}

#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
	    current == mdev->receiver.task ? "receiver" :
	    current == mdev->asender.task  ? "asender"  :
	    current == mdev->worker.task   ? "worker"   : current->comm,
	    func, b->bm_why ?: "?",
	    b->bm_task == mdev->receiver.task ? "receiver" :
	    b->bm_task == mdev->asender.task  ? "asender"  :
	    b->bm_task == mdev->worker.task   ? "worker"   : "?");
}

void drbd_bm_lock(struct drbd_conf *mdev, char *why)
{
	struct drbd_bitmap *b = mdev->bitmap;
	int trylock_failed;

	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
		return;
	}

	trylock_failed = !mutex_trylock(&b->bm_change);

	if (trylock_failed) {
		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
		    current == mdev->receiver.task ? "receiver" :
		    current == mdev->asender.task  ? "asender"  :
		    current == mdev->worker.task   ? "worker"   : current->comm,
		    why, b->bm_why ?: "?",
		    b->bm_task == mdev->receiver.task ? "receiver" :
		    b->bm_task == mdev->asender.task  ? "asender"  :
		    b->bm_task == mdev->worker.task   ? "worker"   : "?");
		mutex_lock(&b->bm_change);
	}
	if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
		dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");

	b->bm_why  = why;
	b->bm_task = current;
}

void drbd_bm_unlock(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
		return;
	}

	if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags))
		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");

	b->bm_why  = NULL;
	b->bm_task = NULL;
	mutex_unlock(&b->bm_change);
}

/* we store some "meta" info about our pages in page->private */
/* at a granularity of 4k storage per bitmap bit:
 * one peta byte storage: 1<<50 byte, 1<<38 * 4k storage blocks
 *  1<<38 bits,
 *  1<<23 4k bitmap pages.
 * Use 24 bits as page index, covers 2 peta byte storage
 * at a granularity of 4k per bit.
 * Used to report the failed page idx on io error from the endio handlers.
 */
#define BM_PAGE_IDX_MASK	((1UL<<24)-1)
/* this page is currently read in, or written back */
#define BM_PAGE_IO_LOCK		31
/* if there has been an IO error for this page */
#define BM_PAGE_IO_ERROR	30
/* this is to be able to intelligently skip disk IO,
 * set if bits have been set since last IO. */
#define BM_PAGE_NEED_WRITEOUT	29
/* to mark for lazy writeout once syncer cleared all clearable bits,
 * set if bits have been cleared since last IO. */
#define BM_PAGE_LAZY_WRITEOUT	28
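
/* Sketch, derived from the definitions above, of how page->private is
 * packed (orientation only, not part of the original source):
 *
 *	bit 31		BM_PAGE_IO_LOCK
 *	bit 30		BM_PAGE_IO_ERROR
 *	bit 29		BM_PAGE_NEED_WRITEOUT
 *	bit 28		BM_PAGE_LAZY_WRITEOUT
 *	bits 23..0	page index (BM_PAGE_IDX_MASK)
 */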

/* store_page_idx uses non-atomic assignment. It is only used directly after
 * allocating the page.  All other bm_set_page_* and bm_clear_page_* need to
 * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
 * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
 * requires it all to be atomic as well. */
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
	BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
	page_private(page) |= idx;
}

static unsigned long bm_page_to_idx(struct page *page)
{
	return page_private(page) & BM_PAGE_IDX_MASK;
}

/* As it is very unlikely that the same page is under IO from more than one
 * context, we can get away with a bit per page and one wait queue per bitmap.
 */
static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}

static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	clear_bit(BM_PAGE_IO_LOCK, addr);
	smp_mb__after_clear_bit();
	wake_up(&mdev->bitmap->bm_io_wait);
}
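
/* Hypothetical usage sketch, not actual driver code; see bm_page_io_async()
 * and bm_async_io_complete() below for the real callers:
 *
 *	bm_page_lock_io(mdev, page_nr);
 *	... submit IO on b->bm_pages[page_nr]; the endio handler unlocks ...
 *	bm_page_unlock_io(mdev, page_nr);
 */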

/* set _before_ submit_io, so it may be reset due to being changed
 * while this page is in flight... will get submitted later again */
static void bm_set_page_unchanged(struct page *page)
{
	/* use cmpxchg? */
	clear_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
	clear_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static void bm_set_page_need_writeout(struct page *page)
{
	set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}

static int bm_test_page_unchanged(struct page *page)
{
	volatile const unsigned long *addr = &page_private(page);
	return (*addr & ((1UL<<BM_PAGE_NEED_WRITEOUT)|(1UL<<BM_PAGE_LAZY_WRITEOUT))) == 0;
}

static void bm_set_page_io_err(struct page *page)
{
	set_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_clear_page_io_err(struct page *page)
{
	clear_bit(BM_PAGE_IO_ERROR, &page_private(page));
}

static void bm_set_page_lazy_writeout(struct page *page)
{
	set_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

static int bm_test_page_lazy_writeout(struct page *page)
{
	return test_bit(BM_PAGE_LAZY_WRITEOUT, &page_private(page));
}

/* on a 32bit box, this would allow for exactly (2<<38) bits. */
static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr)
{
	/* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
	unsigned int page_nr = long_nr >> (PAGE_SHIFT - LN2_BPL + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}

static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
{
	/* page_nr = (bitnr/8) >> PAGE_SHIFT; */
	unsigned int page_nr = bitnr >> (PAGE_SHIFT + 3);
	BUG_ON(page_nr >= b->bm_number_of_pages);
	return page_nr;
}
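
/* Worked example (assuming 4 KiB pages): each page holds 1 << 15 bits, so
 * bits 0..32767 live in page 0, and e.g. bitnr 40000 maps to page index 1
 * (40000 >> 15 == 1). */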

static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
{
	struct page *page = b->bm_pages[idx];
	return (unsigned long *) kmap_atomic(page, km);
}

static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
{
	return __bm_map_pidx(b, idx, KM_IRQ1);
}

static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
{
	kunmap_atomic(p_addr, km);
}

static void bm_unmap(unsigned long *p_addr)
{
	return __bm_unmap(p_addr, KM_IRQ1);
}

/* long word offset of _bitmap_ sector */
#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
/* word offset from start of bitmap to word number _in_page_
 * modulo longs per page
#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
 hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
 so do it explicitly:
 */
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))

/* Long words per page */
#define LWPP (PAGE_SIZE/sizeof(long))
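
/* Worked example (assuming 4 KiB pages and 8-byte longs): LWPP == 512, a
 * power of two, so MLPP(X) == X % 512 == (X & 511); e.g. MLPP(1025) == 1. */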

/*
 * actually most functions herein should take a struct drbd_bitmap*, not a
 * struct drbd_conf*, but for the debug macros I like to have the mdev around
 * to be able to report device specific messages.
 */


static void bm_free_pages(struct page **pages, unsigned long number)
{
	unsigned long i;
	if (!pages)
		return;

	for (i = 0; i < number; i++) {
		if (!pages[i]) {
			printk(KERN_ALERT "drbd: bm_free_pages tried to free "
					  "a NULL pointer; i=%lu n=%lu\n",
					  i, number);
			continue;
		}
		__free_page(pages[i]);
		pages[i] = NULL;
	}
}

static void bm_vk_free(void *ptr, int v)
{
	if (v)
		vfree(ptr);
	else
		kfree(ptr);
}

/*
 * "have" and "want" are NUMBER OF PAGES.
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
	struct page **old_pages = b->bm_pages;
	struct page **new_pages, *page;
	unsigned int i, bytes, vmalloced = 0;
	unsigned long have = b->bm_number_of_pages;

	BUG_ON(have == 0 && old_pages != NULL);
	BUG_ON(have != 0 && old_pages == NULL);

	if (have == want)
		return old_pages;

	/* Trying kmalloc first, falling back to vmalloc.
	 * GFP_KERNEL is ok, as this is done when a lower level disk is
	 * "attached" to the drbd.  Context is receiver thread or cqueue
	 * thread.  As we have no disk yet, we are not in the IO path,
	 * not even the IO path of the peer. */
	bytes = sizeof(struct page *)*want;
	new_pages = kmalloc(bytes, GFP_KERNEL);
	if (!new_pages) {
		new_pages = vmalloc(bytes);
		if (!new_pages)
			return NULL;
		vmalloced = 1;
	}

	memset(new_pages, 0, bytes);
	if (want >= have) {
		for (i = 0; i < have; i++)
			new_pages[i] = old_pages[i];
		for (; i < want; i++) {
			page = alloc_page(GFP_HIGHUSER);
			if (!page) {
				bm_free_pages(new_pages + have, i - have);
				bm_vk_free(new_pages, vmalloced);
				return NULL;
			}
			/* we want to know which page it is
			 * from the endio handlers */
			bm_store_page_idx(page, i);
			new_pages[i] = page;
		}
	} else {
		for (i = 0; i < want; i++)
			new_pages[i] = old_pages[i];
		/* NOT HERE, we are outside the spinlock!
		bm_free_pages(old_pages + want, have - want);
		*/
	}

	if (vmalloced)
		set_bit(BM_P_VMALLOCED, &b->bm_flags);
	else
		clear_bit(BM_P_VMALLOCED, &b->bm_flags);

	return new_pages;
}

/*
 * called on driver init only. TODO call when a device is created.
 * allocates the drbd_bitmap, and stores it in mdev->bitmap.
 */
int drbd_bm_init(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	WARN_ON(b != NULL);
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
		return -ENOMEM;
	spin_lock_init(&b->bm_lock);
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	mdev->bitmap = b;

	return 0;
}

sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
	ERR_IF(!mdev->bitmap) return 0;
	return mdev->bitmap->bm_dev_capacity;
}

/* called on driver unload. TODO: call when a device is destroyed.
 */
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
	ERR_IF (!mdev->bitmap) return;
	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
	bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags));
	kfree(mdev->bitmap);
	mdev->bitmap = NULL;
}

/*
 * since (b->bm_bits % BITS_PER_LONG) != 0,
 * this masks out the remaining bits.
 * Returns the number of bits cleared.
 */
#define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
#define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;
	int cleared = 0;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) - 1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	/* because of the "extra long to catch oob access" we allocate in
	 * drbd_bm_resize, bm_number_of_pages - 1 is not necessarily the page
	 * containing the last _relevant_ bitmap word */
	p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, b->bm_bits - 1));
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		cleared = hweight_long(*bm & ~mask);
		*bm &= mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to zero out
		 * a padding long to align with a 64bit remote */
		cleared += hweight_long(*bm);
		*bm = 0;
	}
	bm_unmap(p_addr);
	return cleared;
}
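
/* Worked example (assuming 64-bit longs): for bm_bits == 100, tmp == 100,
 * mask == cpu_to_lel((1UL << 36) - 1), and bm points at word 1 of the last
 * page; the surplus bits 36..63 of that word are counted and cleared. */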

static void bm_set_surplus(struct drbd_bitmap *b)
{
	unsigned long mask;
	unsigned long *p_addr, *bm;
	int tmp;

	/* number of bits modulo bits per page */
	tmp = (b->bm_bits & BITS_PER_PAGE_MASK);
	/* mask the used bits of the word containing the last bit */
	mask = (1UL << (tmp & BITS_PER_LONG_MASK)) - 1;
	/* bitmap is always stored little endian,
	 * on disk and in core memory alike */
	mask = cpu_to_lel(mask);

	/* because of the "extra long to catch oob access" we allocate in
	 * drbd_bm_resize, bm_number_of_pages - 1 is not necessarily the page
	 * containing the last _relevant_ bitmap word */
	p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, b->bm_bits - 1));
	bm = p_addr + (tmp/BITS_PER_LONG);
	if (mask) {
		/* If mask != 0, we are not exactly aligned, so bm now points
		 * to the long containing the last bit.
		 * If mask == 0, bm already points to the word immediately
		 * after the last (long word aligned) bit. */
		*bm |= ~mask;
		bm++;
	}

	if (BITS_PER_LONG == 32 && ((bm - p_addr) & 1) == 1) {
		/* on a 32bit arch, we may need to set
		 * a padding long to align with a 64bit remote */
		*bm = ~0UL;
	}
	bm_unmap(p_addr);
}

/* you better not modify the bitmap while this is running,
 * or its results will be stale */
static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
	unsigned long *p_addr;
	unsigned long bits = 0;
	unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) - 1;
	int idx, last_page, i, last_word;

	/* because of the "extra long to catch oob access" we allocate in
	 * drbd_bm_resize, bm_number_of_pages - 1 is not necessarily the page
	 * containing the last _relevant_ bitmap word */
	last_page = bm_bit_to_page_idx(b, b->bm_bits-1);

	/* all but last page */
	for (idx = 0; idx < last_page; idx++) {
		p_addr = __bm_map_pidx(b, idx, KM_USER0);
		for (i = 0; i < LWPP; i++)
			bits += hweight_long(p_addr[i]);
		__bm_unmap(p_addr, KM_USER0);
		cond_resched();
	}
	/* last (or only) page */
	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
	p_addr = __bm_map_pidx(b, idx, KM_USER0);
	for (i = 0; i < last_word; i++)
		bits += hweight_long(p_addr[i]);
	p_addr[last_word] &= cpu_to_lel(mask);
	bits += hweight_long(p_addr[last_word]);
	/* 32bit arch, may have an unused padding long */
	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
		p_addr[last_word+1] = 0;
	__bm_unmap(p_addr, KM_USER0);
	return bits;
}

/* offset and len in long words. */
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
	unsigned long *p_addr, *bm;
	unsigned int idx;
	size_t do_now, end;

	end = offset + len;

	if (end > b->bm_words) {
		printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
		return;
	}

	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		if (bm+do_now > p_addr + LWPP) {
			printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
			       p_addr, bm, (int)do_now);
			break; /* breaks to after catch_oob_access_end() only! */
		}
		memset(bm, c, do_now * sizeof(long));
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
		offset += do_now;
	}
}

/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long bits, words, owords, obits, *p_addr, *bm;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
	int err = 0, growing;
	int opages_vmalloced;

	ERR_IF(!b) return -ENOMEM;

	drbd_bm_lock(mdev, "resize");

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);

	if (capacity == b->bm_dev_capacity)
		goto out;

	opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags);

	if (capacity == 0) {
		spin_lock_irq(&b->bm_lock);
		opages = b->bm_pages;
		onpages = b->bm_number_of_pages;
		owords = b->bm_words;
		b->bm_pages = NULL;
		b->bm_number_of_pages =
		b->bm_set   =
		b->bm_bits  =
		b->bm_words =
		b->bm_dev_capacity = 0;
		spin_unlock_irq(&b->bm_lock);
		bm_free_pages(opages, onpages);
		bm_vk_free(opages, opages_vmalloced);
		goto out;
	}
	bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

	/* if we would use
	   words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
	   a 32bit host could present the wrong number of words
	   to a 64bit host.
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		u64 bits_on_disk = ((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12;
		put_ldev(mdev);
		if (bits > bits_on_disk) {
			dev_info(DEV, "bits = %lu\n", bits);
			dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
			err = -ENOSPC;
			goto out;
		}
	}

	/* one extra long to catch off by one errors */
	want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
	have = b->bm_number_of_pages;
	if (want == have) {
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
	}

	if (!npages) {
		err = -ENOMEM;
		goto out;
	}

	spin_lock_irq(&b->bm_lock);
	opages = b->bm_pages;
	owords = b->bm_words;
	obits  = b->bm_bits;

	growing = bits > obits;
	if (opages && growing && set_new_bits)
		bm_set_surplus(b);

	b->bm_pages = npages;
	b->bm_number_of_pages = want;
	b->bm_bits  = bits;
	b->bm_words = words;
	b->bm_dev_capacity = capacity;

	if (growing) {
		if (set_new_bits) {
			bm_memset(b, owords, 0xff, words-owords);
			b->bm_set += bits - obits;
		} else
			bm_memset(b, owords, 0x00, words-owords);
	}

	if (want < have) {
		/* implicit: (opages != NULL) && (opages != npages) */
		bm_free_pages(opages + want, have - want);
	}

	p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, words));
	bm = p_addr + MLPP(words);
	*bm = DRBD_MAGIC;
	bm_unmap(p_addr);

	(void)bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
	if (opages != npages)
		bm_vk_free(opages, opages_vmalloced);
	if (!growing)
		b->bm_set = bm_count_bits(b);
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(mdev);
	return err;
}

/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 *
 * maybe bm_set should be atomic_t ?
 */
unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long s;
	unsigned long flags;

	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	spin_lock_irqsave(&b->bm_lock, flags);
	s = b->bm_set;
	spin_unlock_irqrestore(&b->bm_lock, flags);

	return s;
}

unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync status */
	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(mdev);
	put_ldev(mdev);
	return s;
}

size_t drbd_bm_words(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;
	ERR_IF(!b->bm_pages) return 0;

	return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return 0;

	return b->bm_bits;
}

/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;
	if (number == 0)
		return;
	WARN_ON(offset >= b->bm_words);
	WARN_ON(end    >  b->bm_words);

	spin_lock_irq(&b->bm_lock);
	while (offset < end) {
		do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
		idx = bm_word_to_page_idx(b, offset);
		p_addr = bm_map_pidx(b, idx);
		bm = p_addr + MLPP(offset);
		offset += do_now;
		while (do_now--) {
			bits = hweight_long(*bm);
			word = *bm | *buffer++;
			*bm++ = word;
			b->bm_set += hweight_long(word) - bits;
		}
		bm_unmap(p_addr);
		bm_set_page_need_writeout(b->bm_pages[idx]);
	}
	/* with 32bit <-> 64bit cross-platform connect
	 * this is only correct for current usage,
	 * where we _know_ that we are 64 bit aligned,
	 * and know that this function is used in this way, too...
	 */
	if (end == b->bm_words)
		b->bm_set -= bm_clear_surplus(b);

	spin_unlock_irq(&b->bm_lock);
}

/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

	end = offset + number;

	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	if ((offset >= b->bm_words) ||
	    (end    >  b->bm_words) ||
	    (number <= 0))
		dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
			(unsigned long)	offset,
			(unsigned long)	number,
			(unsigned long) b->bm_words);
	else {
		while (offset < end) {
			do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
			p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset));
			bm = p_addr + MLPP(offset);
			offset += do_now;
			while (do_now--)
				*buffer++ = *bm++;
			bm_unmap(p_addr);
		}
	}
	spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0xff, b->bm_words);
	(void)bm_clear_surplus(b);
	b->bm_set = b->bm_bits;
	spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
	struct drbd_bitmap *b = mdev->bitmap;
	ERR_IF(!b) return;
	ERR_IF(!b->bm_pages) return;

	spin_lock_irq(&b->bm_lock);
	bm_memset(b, 0, 0, b->bm_words);
	b->bm_set = 0;
	spin_unlock_irq(&b->bm_lock);
}

struct bm_aio_ctx {
	struct drbd_conf *mdev;
	atomic_t in_flight;
	wait_queue_head_t io_wait;
	unsigned flags;
#define BM_AIO_COPY_PAGES	1
	int error;
};
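
/* Lifetime sketch, inferred from bm_rw() below and the endio handler that
 * follows: the context lives on the submitter's stack; in_flight is primed
 * with one extra reference so the endio path only wakes io_wait once every
 * bio, plus the submitter's own reference, has been dropped. */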
92419f843aaSLars Ellenberg 
92519f843aaSLars Ellenberg /* bv_page may be a copy, or may be the original */
926b411b363SPhilipp Reisner static void bm_async_io_complete(struct bio *bio, int error)
927b411b363SPhilipp Reisner {
92819f843aaSLars Ellenberg 	struct bm_aio_ctx *ctx = bio->bi_private;
92919f843aaSLars Ellenberg 	struct drbd_conf *mdev = ctx->mdev;
93019f843aaSLars Ellenberg 	struct drbd_bitmap *b = mdev->bitmap;
93119f843aaSLars Ellenberg 	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
932b411b363SPhilipp Reisner 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
933b411b363SPhilipp Reisner 
934b411b363SPhilipp Reisner 
935b411b363SPhilipp Reisner 	/* strange behavior of some lower level drivers...
936b411b363SPhilipp Reisner 	 * fail the request by clearing the uptodate flag,
937b411b363SPhilipp Reisner 	 * but do not return any error?!
938b411b363SPhilipp Reisner 	 * do we want to WARN() on this? */
939b411b363SPhilipp Reisner 	if (!error && !uptodate)
940b411b363SPhilipp Reisner 		error = -EIO;
941b411b363SPhilipp Reisner 
94219f843aaSLars Ellenberg 	if (!bm_test_page_unchanged(b->bm_pages[idx]))
94319f843aaSLars Ellenberg 		dev_info(DEV, "bitmap page idx %u changed during IO!\n", idx);
94419f843aaSLars Ellenberg 
945b411b363SPhilipp Reisner 	if (error) {
94619f843aaSLars Ellenberg 		/* ctx error will hold the completed-last non-zero error code,
94719f843aaSLars Ellenberg 		 * in case error codes differ. */
94819f843aaSLars Ellenberg 		ctx->error = error;
94919f843aaSLars Ellenberg 		bm_set_page_io_err(b->bm_pages[idx]);
95019f843aaSLars Ellenberg 		/* Not identical to on disk version of it.
95119f843aaSLars Ellenberg 		 * Is BM_PAGE_IO_ERROR enough? */
95219f843aaSLars Ellenberg 		if (__ratelimit(&drbd_ratelimit_state))
95319f843aaSLars Ellenberg 			dev_err(DEV, "IO ERROR %d on bitmap page idx %u\n",
95419f843aaSLars Ellenberg 					error, idx);
95519f843aaSLars Ellenberg 	} else {
95619f843aaSLars Ellenberg 		bm_clear_page_io_err(b->bm_pages[idx]);
95719f843aaSLars Ellenberg 		dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
958b411b363SPhilipp Reisner 	}
95919f843aaSLars Ellenberg 
96019f843aaSLars Ellenberg 	bm_page_unlock_io(mdev, idx);
96119f843aaSLars Ellenberg 
96219f843aaSLars Ellenberg 	/* FIXME give back to page pool */
96319f843aaSLars Ellenberg 	if (ctx->flags & BM_AIO_COPY_PAGES)
96419f843aaSLars Ellenberg 		put_page(bio->bi_io_vec[0].bv_page);
965b411b363SPhilipp Reisner 
966b411b363SPhilipp Reisner 	bio_put(bio);
96719f843aaSLars Ellenberg 
96819f843aaSLars Ellenberg 	if (atomic_dec_and_test(&ctx->in_flight))
96919f843aaSLars Ellenberg 		wake_up(&ctx->io_wait);
970b411b363SPhilipp Reisner }
971b411b363SPhilipp Reisner 
97219f843aaSLars Ellenberg static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
973b411b363SPhilipp Reisner {
974b411b363SPhilipp Reisner 	/* we are process context. we always get a bio */
975b411b363SPhilipp Reisner 	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
97619f843aaSLars Ellenberg 	struct drbd_conf *mdev = ctx->mdev;
97719f843aaSLars Ellenberg 	struct drbd_bitmap *b = mdev->bitmap;
97819f843aaSLars Ellenberg 	struct page *page;
979b411b363SPhilipp Reisner 	unsigned int len;
98019f843aaSLars Ellenberg 
981b411b363SPhilipp Reisner 	sector_t on_disk_sector =
982b411b363SPhilipp Reisner 		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
983b411b363SPhilipp Reisner 	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
984b411b363SPhilipp Reisner 
985b411b363SPhilipp Reisner 	/* this might happen with very small
98619f843aaSLars Ellenberg 	 * flexible external meta data device,
98719f843aaSLars Ellenberg 	 * or with PAGE_SIZE > 4k */
988b411b363SPhilipp Reisner 	len = min_t(unsigned int, PAGE_SIZE,
989b411b363SPhilipp Reisner 		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);
990b411b363SPhilipp Reisner 
99119f843aaSLars Ellenberg 	/* serialize IO on this page */
99219f843aaSLars Ellenberg 	bm_page_lock_io(mdev, page_nr);
99319f843aaSLars Ellenberg 	/* before memcpy and submit,
99419f843aaSLars Ellenberg 	 * so it can be redirtied any time */
99519f843aaSLars Ellenberg 	bm_set_page_unchanged(b->bm_pages[page_nr]);
99619f843aaSLars Ellenberg 
99719f843aaSLars Ellenberg 	if (ctx->flags & BM_AIO_COPY_PAGES) {
99819f843aaSLars Ellenberg 		/* FIXME alloc_page is good enough for now, but actually needs
99919f843aaSLars Ellenberg 		 * to use pre-allocated page pool */
100019f843aaSLars Ellenberg 		void *src, *dest;
100119f843aaSLars Ellenberg 		page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
100219f843aaSLars Ellenberg 		dest = kmap_atomic(page, KM_USER0);
100319f843aaSLars Ellenberg 		src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
100419f843aaSLars Ellenberg 		memcpy(dest, src, PAGE_SIZE);
100519f843aaSLars Ellenberg 		kunmap_atomic(src, KM_USER1);
100619f843aaSLars Ellenberg 		kunmap_atomic(dest, KM_USER0);
100719f843aaSLars Ellenberg 		bm_store_page_idx(page, page_nr);
100819f843aaSLars Ellenberg 	} else
100919f843aaSLars Ellenberg 		page = b->bm_pages[page_nr];
101019f843aaSLars Ellenberg 
1011b411b363SPhilipp Reisner 	bio->bi_bdev = mdev->ldev->md_bdev;
1012b411b363SPhilipp Reisner 	bio->bi_sector = on_disk_sector;
101319f843aaSLars Ellenberg 	bio_add_page(bio, page, len, 0);
101419f843aaSLars Ellenberg 	bio->bi_private = ctx;
1015b411b363SPhilipp Reisner 	bio->bi_end_io = bm_async_io_complete;
1016b411b363SPhilipp Reisner 
10170cf9d27eSAndreas Gruenbacher 	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
1018b411b363SPhilipp Reisner 		bio->bi_rw |= rw;
1019b411b363SPhilipp Reisner 		bio_endio(bio, -EIO);
1020b411b363SPhilipp Reisner 	} else {
1021b411b363SPhilipp Reisner 		submit_bio(rw, bio);
1022b411b363SPhilipp Reisner 	}
1023b411b363SPhilipp Reisner }
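
/* Editor's note: an illustrative walk-through of the on_disk_sector
 * arithmetic in bm_page_io_async() above, with made-up offsets.  One
 * bitmap page covers PAGE_SIZE >> 9 sectors of 512 bytes, i.e. 8
 * sectors for 4k pages.  Not driver code; the md_offset and bm_offset
 * values are hypothetical. */
#if 0
static sector_t example_bm_page_sector(unsigned int page_nr)
{
	sector_t md_offset = 1024;	/* hypothetical metadata start */
	sector_t bm_offset = 8;		/* hypothetical bitmap offset  */

	/* with PAGE_SHIFT == 12: page_nr 3 yields 1024 + 8 + 24 == 1056 */
	return md_offset + bm_offset + ((sector_t)page_nr << (PAGE_SHIFT - 9));
}
#endif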
1024b411b363SPhilipp Reisner 
1025b411b363SPhilipp Reisner /*
1026b411b363SPhilipp Reisner  * bm_rw: read/write the whole bitmap from/to its on disk location.
1027b411b363SPhilipp Reisner  */
102819f843aaSLars Ellenberg static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
1029b411b363SPhilipp Reisner {
103019f843aaSLars Ellenberg 	struct bm_aio_ctx ctx =
103119f843aaSLars Ellenberg 		{ .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0 };
1032b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
103319f843aaSLars Ellenberg 	int last_page, i, count = 0;
1034b411b363SPhilipp Reisner 	unsigned long now;
1035b411b363SPhilipp Reisner 	char ppb[10];
1036b411b363SPhilipp Reisner 	int err = 0;
1037b411b363SPhilipp Reisner 
103819f843aaSLars Ellenberg 	/*
103919f843aaSLars Ellenberg 	 * We are protected against bitmap disappearing/resizing by holding an
104019f843aaSLars Ellenberg 	 * ldev reference (caller must have called get_ldev()).
104119f843aaSLars Ellenberg 	 * For read/write, we are protected against changes to the bitmap by
104219f843aaSLars Ellenberg 	 * the bitmap lock (see drbd_bitmap_io).
104319f843aaSLars Ellenberg 	 * For lazy writeout, we don't care about ongoing changes to the bitmap,
104419f843aaSLars Ellenberg 	 * as we submit copies of the pages anyway.
104519f843aaSLars Ellenberg 	 */
104619f843aaSLars Ellenberg 	if (!ctx.flags)
1047b411b363SPhilipp Reisner 		WARN_ON(!bm_is_locked(b));
1048b411b363SPhilipp Reisner 
104919f843aaSLars Ellenberg 	/* because of the "extra long to catch oob access" we allocate in
105019f843aaSLars Ellenberg 	 * drbd_bm_resize, bm_number_of_pages - 1 is not necessarily the page
105119f843aaSLars Ellenberg 	 * containing the last _relevant_ bitmap word */
105219f843aaSLars Ellenberg 	last_page = bm_word_to_page_idx(b, b->bm_words - 1);
1053b411b363SPhilipp Reisner 
1054b411b363SPhilipp Reisner 	now = jiffies;
105519f843aaSLars Ellenberg 	ctx.mdev = mdev;
105619f843aaSLars Ellenberg 	atomic_set(&ctx.in_flight, 1); /* one extra ref */
105719f843aaSLars Ellenberg 	init_waitqueue_head(&ctx.io_wait);
105819f843aaSLars Ellenberg 	ctx.error = 0;
1059b411b363SPhilipp Reisner 
1060b411b363SPhilipp Reisner 	/* let the layers below us try to merge these bios... */
106119f843aaSLars Ellenberg 	for (i = 0; i <= last_page; i++) {
106219f843aaSLars Ellenberg 		/* stop at the lazy writeout upper bound, if one was given */
106319f843aaSLars Ellenberg 		if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
106419f843aaSLars Ellenberg 			break;
106519f843aaSLars Ellenberg 		if (rw & WRITE) {
106619f843aaSLars Ellenberg 			if (bm_test_page_unchanged(b->bm_pages[i])) {
106719f843aaSLars Ellenberg 				dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
106819f843aaSLars Ellenberg 				continue;
106919f843aaSLars Ellenberg 			}
107019f843aaSLars Ellenberg 			/* during lazy writeout,
107119f843aaSLars Ellenberg 			 * ignore those pages not marked for lazy writeout. */
107219f843aaSLars Ellenberg 			if (lazy_writeout_upper_idx &&
107319f843aaSLars Ellenberg 			    !bm_test_page_lazy_writeout(b->bm_pages[i])) {
107419f843aaSLars Ellenberg 				dynamic_dev_dbg(DEV, "skipped bm lazy write for idx %u\n", i);
107519f843aaSLars Ellenberg 				continue;
107619f843aaSLars Ellenberg 			}
107719f843aaSLars Ellenberg 		}
107819f843aaSLars Ellenberg 		atomic_inc(&ctx.in_flight);
107919f843aaSLars Ellenberg 		bm_page_io_async(&ctx, i, rw);
108019f843aaSLars Ellenberg 		++count;
108119f843aaSLars Ellenberg 		cond_resched();
108219f843aaSLars Ellenberg 	}
1083b411b363SPhilipp Reisner 
108419f843aaSLars Ellenberg 	atomic_dec(&ctx.in_flight); /* drop the extra ref */
108519f843aaSLars Ellenberg 	wait_event(ctx.io_wait, atomic_read(&ctx.in_flight) == 0);
108619f843aaSLars Ellenberg 	dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
108719f843aaSLars Ellenberg 			rw == WRITE ? "WRITE" : "READ",
108819f843aaSLars Ellenberg 			count, jiffies - now);
1089b411b363SPhilipp Reisner 
109019f843aaSLars Ellenberg 	if (ctx.error) {
1091b411b363SPhilipp Reisner 		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
109281e84650SAndreas Gruenbacher 		drbd_chk_io_error(mdev, 1, true);
109319f843aaSLars Ellenberg 		err = -EIO; /* or should we rather return ctx.error? */
1094b411b363SPhilipp Reisner 	}
1095b411b363SPhilipp Reisner 
1096b411b363SPhilipp Reisner 	now = jiffies;
1097b411b363SPhilipp Reisner 	if (rw == WRITE) {
1098b411b363SPhilipp Reisner 		drbd_md_flush(mdev);
1099b411b363SPhilipp Reisner 	} else /* rw == READ */ {
110095a0f10cSLars Ellenberg 		b->bm_set = bm_count_bits(b);
1101b411b363SPhilipp Reisner 		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
1102b411b363SPhilipp Reisner 		     jiffies - now);
1103b411b363SPhilipp Reisner 	}
1104b411b363SPhilipp Reisner 	now = b->bm_set;
1105b411b363SPhilipp Reisner 
1106b411b363SPhilipp Reisner 	dev_info(DEV, "%s (%lu bits) marked out-of-sync by on-disk bitmap.\n",
1107b411b363SPhilipp Reisner 	     ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
1108b411b363SPhilipp Reisner 
1109b411b363SPhilipp Reisner 	return err;
1110b411b363SPhilipp Reisner }
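
/* Editor's note: the completion accounting in bm_rw() above follows a
 * common "one extra reference" pattern; a minimal sketch follows (not
 * driver code, submit_all_pages() is a hypothetical stand-in for the
 * submission loop): */
#if 0
	atomic_set(&ctx.in_flight, 1);	/* the submitter holds one ref, so
					 * the count cannot reach zero while
					 * bios are still being submitted */
	submit_all_pages(&ctx);		/* atomic_inc() once per bio;
					 * each completion atomic_dec()s */
	atomic_dec(&ctx.in_flight);	/* drop the submitter's ref */
	wait_event(ctx.io_wait, atomic_read(&ctx.in_flight) == 0);
#endif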
1111b411b363SPhilipp Reisner 
1112b411b363SPhilipp Reisner /**
1113b411b363SPhilipp Reisner  * drbd_bm_read() - Read the whole bitmap from its on disk location.
1114b411b363SPhilipp Reisner  * @mdev:	DRBD device.
1115b411b363SPhilipp Reisner  */
1116b411b363SPhilipp Reisner int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
1117b411b363SPhilipp Reisner {
111819f843aaSLars Ellenberg 	return bm_rw(mdev, READ, 0);
1119b411b363SPhilipp Reisner }
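
/* Editor's note: hedged usage sketch only -- bm_rw() requires the
 * caller to hold an ldev reference, so a typical call site looks like: */
#if 0
	int err = -ENODEV;

	if (get_ldev(mdev)) {
		err = drbd_bm_read(mdev);
		put_ldev(mdev);
	}
#endif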
1120b411b363SPhilipp Reisner 
1121b411b363SPhilipp Reisner /**
1122b411b363SPhilipp Reisner  * drbd_bm_write() - Write the whole bitmap to its on disk location.
1123b411b363SPhilipp Reisner  * @mdev:	DRBD device.
112419f843aaSLars Ellenberg  *
112519f843aaSLars Ellenberg  * Will only write pages that have changed since last IO.
1126b411b363SPhilipp Reisner  */
1127b411b363SPhilipp Reisner int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
1128b411b363SPhilipp Reisner {
112919f843aaSLars Ellenberg 	return bm_rw(mdev, WRITE, 0);
1130b411b363SPhilipp Reisner }
1131b411b363SPhilipp Reisner 
1132b411b363SPhilipp Reisner /**
113319f843aaSLars Ellenberg  * drbd_bm_write_lazy() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
1134b411b363SPhilipp Reisner  * @mdev:	DRBD device.
113519f843aaSLars Ellenberg  * @upper_idx:	0: write all changed pages; positive: page index at which to stop scanning for changed pages
1136b411b363SPhilipp Reisner  */
113719f843aaSLars Ellenberg int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
1138b411b363SPhilipp Reisner {
113919f843aaSLars Ellenberg 	return bm_rw(mdev, WRITE, upper_idx);
1140b411b363SPhilipp Reisner }
114119f843aaSLars Ellenberg 
114219f843aaSLars Ellenberg 
114319f843aaSLars Ellenberg /**
114419f843aaSLars Ellenberg  * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of the bitmap
114519f843aaSLars Ellenberg  * @mdev:	DRBD device.
114619f843aaSLars Ellenberg  * @idx:	bitmap page index
114719f843aaSLars Ellenberg  *
1148*4b0715f0SLars Ellenberg  * We don't want to special case on logical_block_size of the backend device,
1149*4b0715f0SLars Ellenberg  * so we submit PAGE_SIZE aligned pieces.
115019f843aaSLars Ellenberg  * Note that on "most" systems, PAGE_SIZE is 4k.
1151*4b0715f0SLars Ellenberg  *
1152*4b0715f0SLars Ellenberg  * In case this becomes an issue on systems with larger PAGE_SIZE,
1153*4b0715f0SLars Ellenberg  * we may want to change this again to write 4k aligned 4k pieces.
115419f843aaSLars Ellenberg  */
115519f843aaSLars Ellenberg int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
115619f843aaSLars Ellenberg {
115719f843aaSLars Ellenberg 	struct bm_aio_ctx ctx = { .flags = BM_AIO_COPY_PAGES, };
115819f843aaSLars Ellenberg 
115919f843aaSLars Ellenberg 	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
116019f843aaSLars Ellenberg 		dev_info(DEV, "skipped bm page write for idx %u\n", idx);
116119f843aaSLars Ellenberg 		return 0;
116219f843aaSLars Ellenberg 	}
116319f843aaSLars Ellenberg 
116419f843aaSLars Ellenberg 	ctx.mdev = mdev;
116519f843aaSLars Ellenberg 	atomic_set(&ctx.in_flight, 1);
116619f843aaSLars Ellenberg 	init_waitqueue_head(&ctx.io_wait);
116719f843aaSLars Ellenberg 
116819f843aaSLars Ellenberg 	bm_page_io_async(&ctx, idx, WRITE_SYNC);
116919f843aaSLars Ellenberg 	wait_event(ctx.io_wait, atomic_read(&ctx.in_flight) == 0);
117019f843aaSLars Ellenberg 
117119f843aaSLars Ellenberg 	if (ctx.error)
117219f843aaSLars Ellenberg 		drbd_chk_io_error(mdev, 1, true);
117319f843aaSLars Ellenberg 	/* that should force detach, so the in memory bitmap will be
117419f843aaSLars Ellenberg 	 * gone in a moment as well. */
117519f843aaSLars Ellenberg 
1176b411b363SPhilipp Reisner 	mdev->bm_writ_cnt++;
117719f843aaSLars Ellenberg 	return ctx.error;
1178b411b363SPhilipp Reisner }
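
/* Editor's note: worked example (illustrative).  With 4k pages, bitmap
 * page idx 5 holds bits 5*32768 .. 6*32768-1 and is written as one
 * PAGE_SIZE aligned piece starting (5 << (PAGE_SHIFT-9)) == 40 sectors
 * into the on-disk bitmap area, spanning 8 sectors of 512 bytes. */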
1179b411b363SPhilipp Reisner 
1180b411b363SPhilipp Reisner /* NOTE
1181b411b363SPhilipp Reisner  * find_first_bit returns int, we return unsigned long.
1182*4b0715f0SLars Ellenberg  * For this to work on a 32bit arch with bit numbers > (1ULL << 32),
1183*4b0715f0SLars Ellenberg  * we'd need to return u64, and get a whole lot of other places
1184*4b0715f0SLars Ellenberg  * fixed where we still use unsigned long.
1185b411b363SPhilipp Reisner  *
1186b411b363SPhilipp Reisner  * this returns a bit number, NOT a sector!
1187b411b363SPhilipp Reisner  */
1188b411b363SPhilipp Reisner static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
1189b411b363SPhilipp Reisner 	const int find_zero_bit, const enum km_type km)
1190b411b363SPhilipp Reisner {
1191b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1192b411b363SPhilipp Reisner 	unsigned long *p_addr;
1193*4b0715f0SLars Ellenberg 	unsigned long bit_offset;
1194*4b0715f0SLars Ellenberg 	unsigned i;
1195*4b0715f0SLars Ellenberg 
1197b411b363SPhilipp Reisner 	if (bm_fo > b->bm_bits) {
1198b411b363SPhilipp Reisner 		dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
1199*4b0715f0SLars Ellenberg 		bm_fo = DRBD_END_OF_BITMAP;
1200b411b363SPhilipp Reisner 	} else {
1201b411b363SPhilipp Reisner 		while (bm_fo < b->bm_bits) {
120219f843aaSLars Ellenberg 			/* overall bit number of the first bit in this page */
1203*4b0715f0SLars Ellenberg 			bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
120419f843aaSLars Ellenberg 			p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);
1205b411b363SPhilipp Reisner 
1206b411b363SPhilipp Reisner 			if (find_zero_bit)
1207*4b0715f0SLars Ellenberg 				i = generic_find_next_zero_le_bit(p_addr,
1208*4b0715f0SLars Ellenberg 						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
1209b411b363SPhilipp Reisner 			else
1210*4b0715f0SLars Ellenberg 				i = generic_find_next_le_bit(p_addr,
1211*4b0715f0SLars Ellenberg 						PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
1212b411b363SPhilipp Reisner 
1213b411b363SPhilipp Reisner 			__bm_unmap(p_addr, km);
1214b411b363SPhilipp Reisner 			if (i < PAGE_SIZE*8) {
1215*4b0715f0SLars Ellenberg 				bm_fo = bit_offset + i;
1216*4b0715f0SLars Ellenberg 				if (bm_fo >= b->bm_bits)
1217b411b363SPhilipp Reisner 					break;
1218b411b363SPhilipp Reisner 				goto found;
1219b411b363SPhilipp Reisner 			}
1220b411b363SPhilipp Reisner 			bm_fo = bit_offset + PAGE_SIZE*8;
1221b411b363SPhilipp Reisner 		}
1222*4b0715f0SLars Ellenberg 		bm_fo = DRBD_END_OF_BITMAP;
1223b411b363SPhilipp Reisner 	}
1224b411b363SPhilipp Reisner  found:
1225*4b0715f0SLars Ellenberg 	return bm_fo;
1226b411b363SPhilipp Reisner }
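
/* Editor's note: worked example of the index arithmetic above
 * (illustrative, assuming 4k pages: 32768 bits per page, so
 * BITS_PER_PAGE_MASK == 32767).  For bm_fo == 70000:
 *	bm_fo & BITS_PER_PAGE_MASK	==  4464  (offset within the page)
 *	bm_fo & ~BITS_PER_PAGE_MASK	== 65536  (bit number of page start)
 * a hit at in-page bit i thus maps back to global bit 65536 + i. */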
1227b411b363SPhilipp Reisner 
1228b411b363SPhilipp Reisner static unsigned long bm_find_next(struct drbd_conf *mdev,
1229b411b363SPhilipp Reisner 	unsigned long bm_fo, const int find_zero_bit)
1230b411b363SPhilipp Reisner {
1231b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1232*4b0715f0SLars Ellenberg 	unsigned long i = DRBD_END_OF_BITMAP;
1233b411b363SPhilipp Reisner 
1234b411b363SPhilipp Reisner 	ERR_IF(!b) return i;
1235b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return i;
1236b411b363SPhilipp Reisner 
1237b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
1238b411b363SPhilipp Reisner 	if (bm_is_locked(b))
1239b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1240b411b363SPhilipp Reisner 
1241b411b363SPhilipp Reisner 	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
1242b411b363SPhilipp Reisner 
1243b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
1244b411b363SPhilipp Reisner 	return i;
1245b411b363SPhilipp Reisner }
1246b411b363SPhilipp Reisner 
1247b411b363SPhilipp Reisner unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
1248b411b363SPhilipp Reisner {
1249b411b363SPhilipp Reisner 	return bm_find_next(mdev, bm_fo, 0);
1250b411b363SPhilipp Reisner }
1251b411b363SPhilipp Reisner 
1252b411b363SPhilipp Reisner #if 0
1253b411b363SPhilipp Reisner /* not yet needed for anything. */
1254b411b363SPhilipp Reisner unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
1255b411b363SPhilipp Reisner {
1256b411b363SPhilipp Reisner 	return bm_find_next(mdev, bm_fo, 1);
1257b411b363SPhilipp Reisner }
1258b411b363SPhilipp Reisner #endif
1259b411b363SPhilipp Reisner 
1260b411b363SPhilipp Reisner /* does not spin_lock_irqsave.
1261b411b363SPhilipp Reisner  * you must take drbd_bm_lock() first */
1262b411b363SPhilipp Reisner unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
1263b411b363SPhilipp Reisner {
1264b411b363SPhilipp Reisner 	/* WARN_ON(!bm_is_locked(mdev)); */
1265b411b363SPhilipp Reisner 	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
1266b411b363SPhilipp Reisner }
1267b411b363SPhilipp Reisner 
1268b411b363SPhilipp Reisner unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
1269b411b363SPhilipp Reisner {
1270b411b363SPhilipp Reisner 	/* WARN_ON(!bm_is_locked(mdev)); */
1271b411b363SPhilipp Reisner 	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
1272b411b363SPhilipp Reisner }
1273b411b363SPhilipp Reisner 
1274b411b363SPhilipp Reisner /* returns number of bits actually changed.
1275b411b363SPhilipp Reisner  * for val != 0, we change 0 -> 1, return code positive
1276b411b363SPhilipp Reisner  * for val == 0, we change 1 -> 0, return code negative
1277b411b363SPhilipp Reisner  * wants bitnr, not sector.
1278b411b363SPhilipp Reisner  * expected to be called for only a few bits (e - s about BITS_PER_LONG).
1279b411b363SPhilipp Reisner  * Must hold bitmap lock already. */
1280b4ee79daSPhilipp Reisner static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
1281b411b363SPhilipp Reisner 	unsigned long e, int val, const enum km_type km)
1282b411b363SPhilipp Reisner {
1283b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1284b411b363SPhilipp Reisner 	unsigned long *p_addr = NULL;
1285b411b363SPhilipp Reisner 	unsigned long bitnr;
128619f843aaSLars Ellenberg 	unsigned int last_page_nr = -1U;
1287b411b363SPhilipp Reisner 	int c = 0;
128819f843aaSLars Ellenberg 	int changed_total = 0;
1289b411b363SPhilipp Reisner 
1290b411b363SPhilipp Reisner 	if (e >= b->bm_bits) {
1291b411b363SPhilipp Reisner 		dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
1292b411b363SPhilipp Reisner 				s, e, b->bm_bits);
1293b411b363SPhilipp Reisner 		e = b->bm_bits ? b->bm_bits - 1 : 0;
1294b411b363SPhilipp Reisner 	}
1295b411b363SPhilipp Reisner 	for (bitnr = s; bitnr <= e; bitnr++) {
129619f843aaSLars Ellenberg 		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
1297b411b363SPhilipp Reisner 		if (page_nr != last_page_nr) {
1298b411b363SPhilipp Reisner 			if (p_addr)
1299b411b363SPhilipp Reisner 				__bm_unmap(p_addr, km);
130019f843aaSLars Ellenberg 			if (c < 0)
130119f843aaSLars Ellenberg 				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
130219f843aaSLars Ellenberg 			else if (c > 0)
130319f843aaSLars Ellenberg 				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
130419f843aaSLars Ellenberg 			changed_total += c;
130519f843aaSLars Ellenberg 			c = 0;
130619f843aaSLars Ellenberg 			p_addr = __bm_map_pidx(b, page_nr, km);
1307b411b363SPhilipp Reisner 			last_page_nr = page_nr;
1308b411b363SPhilipp Reisner 		}
1309b411b363SPhilipp Reisner 		if (val)
1310*4b0715f0SLars Ellenberg 			c += (0 == generic___test_and_set_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
1311b411b363SPhilipp Reisner 		else
1312*4b0715f0SLars Ellenberg 			c -= (0 != generic___test_and_clear_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr));
1313b411b363SPhilipp Reisner 	}
1314b411b363SPhilipp Reisner 	if (p_addr)
1315b411b363SPhilipp Reisner 		__bm_unmap(p_addr, km);
131619f843aaSLars Ellenberg 	if (c < 0)
131719f843aaSLars Ellenberg 		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
131819f843aaSLars Ellenberg 	else if (c > 0)
131919f843aaSLars Ellenberg 		bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
132019f843aaSLars Ellenberg 	changed_total += c;
132119f843aaSLars Ellenberg 	b->bm_set += changed_total;
132219f843aaSLars Ellenberg 	return changed_total;
1323b411b363SPhilipp Reisner }
1324b411b363SPhilipp Reisner 
1325b411b363SPhilipp Reisner /* returns number of bits actually changed.
1326b411b363SPhilipp Reisner  * for val != 0, we change 0 -> 1, return code positive
1327b411b363SPhilipp Reisner  * for val == 0, we change 1 -> 0, return code negative
1328b411b363SPhilipp Reisner  * wants bitnr, not sector */
1329b4ee79daSPhilipp Reisner static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
1330b411b363SPhilipp Reisner 	const unsigned long e, int val)
1331b411b363SPhilipp Reisner {
1332b411b363SPhilipp Reisner 	unsigned long flags;
1333b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1334b411b363SPhilipp Reisner 	int c = 0;
1335b411b363SPhilipp Reisner 
1336b411b363SPhilipp Reisner 	ERR_IF(!b) return 1;
1337b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
1338b411b363SPhilipp Reisner 
1339b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
1340b411b363SPhilipp Reisner 	if (bm_is_locked(b))
1341b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1342b411b363SPhilipp Reisner 
1343b411b363SPhilipp Reisner 	c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
1344b411b363SPhilipp Reisner 
1345b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1346b411b363SPhilipp Reisner 	return c;
1347b411b363SPhilipp Reisner }
1348b411b363SPhilipp Reisner 
1349b411b363SPhilipp Reisner /* returns number of bits changed 0 -> 1 */
1350b411b363SPhilipp Reisner int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1351b411b363SPhilipp Reisner {
1352b411b363SPhilipp Reisner 	return bm_change_bits_to(mdev, s, e, 1);
1353b411b363SPhilipp Reisner }
1354b411b363SPhilipp Reisner 
1355b411b363SPhilipp Reisner /* returns number of bits changed 1 -> 0 */
1356b411b363SPhilipp Reisner int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1357b411b363SPhilipp Reisner {
1358b411b363SPhilipp Reisner 	return -bm_change_bits_to(mdev, s, e, 0);
1359b411b363SPhilipp Reisner }
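
/* Editor's note: hedged example of the sign convention.  If bits
 * s..s+9 are all clear, drbd_bm_set_bits() returns 10; clearing those
 * same ten bits again, drbd_bm_clear_bits() also returns 10 (it negates
 * the negative count from bm_change_bits_to()).  Bits already in the
 * requested state contribute nothing to either count. */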
1360b411b363SPhilipp Reisner 
1361b411b363SPhilipp Reisner /* sets all bits in full words,
1362b411b363SPhilipp Reisner  * from first_word up to, but not including, last_word */
1363b411b363SPhilipp Reisner static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
1364b411b363SPhilipp Reisner 		int page_nr, int first_word, int last_word)
1365b411b363SPhilipp Reisner {
1366b411b363SPhilipp Reisner 	int i;
1367b411b363SPhilipp Reisner 	int bits;
1368b411b363SPhilipp Reisner 	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
1369b411b363SPhilipp Reisner 	for (i = first_word; i < last_word; i++) {
1370b411b363SPhilipp Reisner 		bits = hweight_long(paddr[i]);
1371b411b363SPhilipp Reisner 		paddr[i] = ~0UL;
1372b411b363SPhilipp Reisner 		b->bm_set += BITS_PER_LONG - bits;
1373b411b363SPhilipp Reisner 	}
1374b411b363SPhilipp Reisner 	kunmap_atomic(paddr, KM_USER0);
1375b411b363SPhilipp Reisner }
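
/* Editor's note: illustrative accounting for the function above.  If a
 * word held 0x00000000000000ffUL, hweight_long() reports 8 bits already
 * set; assigning ~0UL sets all BITS_PER_LONG of them, so bm_set grows
 * by BITS_PER_LONG - 8, i.e. 56 on a 64bit arch. */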
1376b411b363SPhilipp Reisner 
1377b411b363SPhilipp Reisner /* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
1378b411b363SPhilipp Reisner  * You must first drbd_bm_lock().
1379b411b363SPhilipp Reisner  * Can be called to set the whole bitmap in one go.
1380b411b363SPhilipp Reisner  * Sets bits from s to e _inclusive_. */
1381b411b363SPhilipp Reisner void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1382b411b363SPhilipp Reisner {
1383b411b363SPhilipp Reisner 	/* First set_bit from the first bit (s)
1384b411b363SPhilipp Reisner 	 * up to the next long boundary (sl),
1385b411b363SPhilipp Reisner 	 * then assign full words up to the last long boundary (el),
1386b411b363SPhilipp Reisner 	 * then set_bit up to and including the last bit (e).
1387b411b363SPhilipp Reisner 	 *
1388b411b363SPhilipp Reisner 	 * Do not use memset, because we must account for changes,
1389b411b363SPhilipp Reisner 	 * so we need to loop over the words with hweight() anyways.
1390b411b363SPhilipp Reisner 	 */
1391b411b363SPhilipp Reisner 	unsigned long sl = ALIGN(s, BITS_PER_LONG);
1392b411b363SPhilipp Reisner 	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
1393b411b363SPhilipp Reisner 	int first_page;
1394b411b363SPhilipp Reisner 	int last_page;
1395b411b363SPhilipp Reisner 	int page_nr;
1396b411b363SPhilipp Reisner 	int first_word;
1397b411b363SPhilipp Reisner 	int last_word;
1398b411b363SPhilipp Reisner 
1399b411b363SPhilipp Reisner 	if (e - s <= 3*BITS_PER_LONG) {
1400b411b363SPhilipp Reisner 		/* don't bother; el and sl may even be wrong. */
1401b411b363SPhilipp Reisner 		__bm_change_bits_to(mdev, s, e, 1, KM_USER0);
1402b411b363SPhilipp Reisner 		return;
1403b411b363SPhilipp Reisner 	}
1404b411b363SPhilipp Reisner 
1405b411b363SPhilipp Reisner 	/* difference is large enough that we can trust sl and el */
1406b411b363SPhilipp Reisner 
1407b411b363SPhilipp Reisner 	/* bits filling the current long */
1408b411b363SPhilipp Reisner 	if (sl)
1409b411b363SPhilipp Reisner 		__bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);
1410b411b363SPhilipp Reisner 
1411b411b363SPhilipp Reisner 	first_page = sl >> (3 + PAGE_SHIFT);
1412b411b363SPhilipp Reisner 	last_page = el >> (3 + PAGE_SHIFT);
1413b411b363SPhilipp Reisner 
1414b411b363SPhilipp Reisner 	/* MLPP: modulo longs per page */
1415b411b363SPhilipp Reisner 	/* LWPP: long words per page */
1416b411b363SPhilipp Reisner 	first_word = MLPP(sl >> LN2_BPL);
1417b411b363SPhilipp Reisner 	last_word = LWPP;
1418b411b363SPhilipp Reisner 
1419b411b363SPhilipp Reisner 	/* first and full pages, unless first page == last page */
1420b411b363SPhilipp Reisner 	for (page_nr = first_page; page_nr < last_page; page_nr++) {
1421b411b363SPhilipp Reisner 		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
1422b411b363SPhilipp Reisner 		cond_resched();
1423b411b363SPhilipp Reisner 		first_word = 0;
1424b411b363SPhilipp Reisner 	}
1425b411b363SPhilipp Reisner 
1426b411b363SPhilipp Reisner 	/* last page (or the only page, if first page == last page) */
1427b411b363SPhilipp Reisner 	last_word = MLPP(el >> LN2_BPL);
1428b411b363SPhilipp Reisner 	bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
1429b411b363SPhilipp Reisner 
1430b411b363SPhilipp Reisner 	/* possibly trailing bits.
1431b411b363SPhilipp Reisner 	 * example: (e & 63) == 63, el will be e+1.
1432b411b363SPhilipp Reisner 	 * if that even was the very last bit,
1433b411b363SPhilipp Reisner 	 * it would trigger an assert in __bm_change_bits_to()
1434b411b363SPhilipp Reisner 	 */
1435b411b363SPhilipp Reisner 	if (el <= e)
1436b411b363SPhilipp Reisner 		__bm_change_bits_to(mdev, el, e, 1, KM_USER0);
1437b411b363SPhilipp Reisner }
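
/* Editor's note: worked example of the three phases above
 * (illustrative, BITS_PER_LONG == 64).  For s == 70, e == 300:
 *	sl == ALIGN(70, 64)   == 128 -> set bits 70..127 one by one
 *	el == (300+1) & ~63UL == 256 -> words covering bits 128..255
 *					are assigned ~0UL wholesale
 *	el <= e                      -> set trailing bits 256..300 one by one */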
1438b411b363SPhilipp Reisner 
1439b411b363SPhilipp Reisner /* returns bit state
1440b411b363SPhilipp Reisner  * wants bitnr, NOT sector.
1441b411b363SPhilipp Reisner  * inherently racy... area needs to be locked by means of {al,rs}_lru
1442b411b363SPhilipp Reisner  *  1 ... bit set
1443b411b363SPhilipp Reisner  *  0 ... bit not set
1444b411b363SPhilipp Reisner  * -1 ... first out of bounds access, stop testing for bits!
1445b411b363SPhilipp Reisner  */
1446b411b363SPhilipp Reisner int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
1447b411b363SPhilipp Reisner {
1448b411b363SPhilipp Reisner 	unsigned long flags;
1449b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1450b411b363SPhilipp Reisner 	unsigned long *p_addr;
1451b411b363SPhilipp Reisner 	int i;
1452b411b363SPhilipp Reisner 
1453b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
1454b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
1455b411b363SPhilipp Reisner 
1456b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
1457b411b363SPhilipp Reisner 	if (bm_is_locked(b))
1458b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1459b411b363SPhilipp Reisner 	if (bitnr < b->bm_bits) {
146019f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
1461*4b0715f0SLars Ellenberg 		i = generic_test_le_bit(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
1462b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1463b411b363SPhilipp Reisner 	} else if (bitnr == b->bm_bits) {
1464b411b363SPhilipp Reisner 		i = -1;
1465b411b363SPhilipp Reisner 	} else { /* (bitnr > b->bm_bits) */
1466b411b363SPhilipp Reisner 		dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
1467b411b363SPhilipp Reisner 		i = 0;
1468b411b363SPhilipp Reisner 	}
1469b411b363SPhilipp Reisner 
1470b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1471b411b363SPhilipp Reisner 	return i;
1472b411b363SPhilipp Reisner }
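
/* Editor's note: hedged usage sketch -- callers probing consecutive
 * bits stop on the -1 out-of-bounds sentinel (not driver code,
 * handle_bit() is hypothetical): */
#if 0
	for (bitnr = s; ; bitnr++) {
		int v = drbd_bm_test_bit(mdev, bitnr);
		if (v < 0)
			break;		/* first access past bm_bits */
		handle_bit(bitnr, v);	/* hypothetical consumer */
	}
#endif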
1473b411b363SPhilipp Reisner 
1474b411b363SPhilipp Reisner /* returns number of bits set in the range [s, e] */
1475b411b363SPhilipp Reisner int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
1476b411b363SPhilipp Reisner {
1477b411b363SPhilipp Reisner 	unsigned long flags;
1478b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
147919f843aaSLars Ellenberg 	unsigned long *p_addr = NULL;
1480b411b363SPhilipp Reisner 	unsigned long bitnr;
148119f843aaSLars Ellenberg 	unsigned int page_nr = -1U;
1482b411b363SPhilipp Reisner 	int c = 0;
1483b411b363SPhilipp Reisner 
1484b411b363SPhilipp Reisner 	/* If this is called without a bitmap, that is a bug.  But just to be
1485b411b363SPhilipp Reisner 	 * robust in case we screwed up elsewhere, pretend there was one
1486b411b363SPhilipp Reisner 	 * dirty bit in the requested area, so we won't try to do a
1487b411b363SPhilipp Reisner 	 * local read there (no bitmap probably implies no disk) */
1488b411b363SPhilipp Reisner 	ERR_IF(!b) return 1;
1489b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 1;
1490b411b363SPhilipp Reisner 
1491b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
1492b411b363SPhilipp Reisner 	if (bm_is_locked(b))
1493b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1494b411b363SPhilipp Reisner 	for (bitnr = s; bitnr <= e; bitnr++) {
149519f843aaSLars Ellenberg 		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
149619f843aaSLars Ellenberg 		if (page_nr != idx) {
149719f843aaSLars Ellenberg 			page_nr = idx;
1498b411b363SPhilipp Reisner 			if (p_addr)
1499b411b363SPhilipp Reisner 				bm_unmap(p_addr);
150019f843aaSLars Ellenberg 			p_addr = bm_map_pidx(b, idx);
1501b411b363SPhilipp Reisner 		}
1502b411b363SPhilipp Reisner 		ERR_IF (bitnr >= b->bm_bits) {
1503b411b363SPhilipp Reisner 			dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
1504b411b363SPhilipp Reisner 		} else {
150595a0f10cSLars Ellenberg 			c += (0 != generic_test_le_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
1506b411b363SPhilipp Reisner 		}
1507b411b363SPhilipp Reisner 	}
1508b411b363SPhilipp Reisner 	if (p_addr)
1509b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1510b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1511b411b363SPhilipp Reisner 	return c;
1512b411b363SPhilipp Reisner }
1513b411b363SPhilipp Reisner 
1514b411b363SPhilipp Reisner 
1515b411b363SPhilipp Reisner /* inherently racy...
1516b411b363SPhilipp Reisner  * the return value may already be out-of-date by the time this function returns.
1517b411b363SPhilipp Reisner  * but the general usage is that this is only used during a cstate when bits are
1518b411b363SPhilipp Reisner  * only cleared, not set, and we typically only care about the case when the
1519b411b363SPhilipp Reisner  * return value is zero, or we already "locked" this "bitmap extent" by other means.
1520b411b363SPhilipp Reisner  *
1521b411b363SPhilipp Reisner  * enr is bm-extent number, since we chose to name one sector (512 bytes)
1522b411b363SPhilipp Reisner  * worth of the bitmap a "bitmap extent".
1523b411b363SPhilipp Reisner  *
1524b411b363SPhilipp Reisner  * TODO
1525b411b363SPhilipp Reisner  * I think since we use it like a reference count, we should use the real
1526b411b363SPhilipp Reisner  * reference count of some bitmap extent element from some lru instead...
1527b411b363SPhilipp Reisner  *
1528b411b363SPhilipp Reisner  */
1529b411b363SPhilipp Reisner int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
1530b411b363SPhilipp Reisner {
1531b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1532b411b363SPhilipp Reisner 	int count, s, e;
1533b411b363SPhilipp Reisner 	unsigned long flags;
1534b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
1535b411b363SPhilipp Reisner 
1536b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
1537b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
1538b411b363SPhilipp Reisner 
1539b411b363SPhilipp Reisner 	spin_lock_irqsave(&b->bm_lock, flags);
1540b411b363SPhilipp Reisner 	if (bm_is_locked(b))
1541b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1542b411b363SPhilipp Reisner 
1543b411b363SPhilipp Reisner 	s = S2W(enr);
1544b411b363SPhilipp Reisner 	e = min((size_t)S2W(enr+1), b->bm_words);
1545b411b363SPhilipp Reisner 	count = 0;
1546b411b363SPhilipp Reisner 	if (s < b->bm_words) {
1547b411b363SPhilipp Reisner 		int n = e-s;
154819f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
1549b411b363SPhilipp Reisner 		bm = p_addr + MLPP(s);
1550b411b363SPhilipp Reisner 		while (n--)
1551b411b363SPhilipp Reisner 			count += hweight_long(*bm++);
1552b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1553b411b363SPhilipp Reisner 	} else {
1554b411b363SPhilipp Reisner 		dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
1555b411b363SPhilipp Reisner 	}
1556b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&b->bm_lock, flags);
1557b411b363SPhilipp Reisner 	return count;
1558b411b363SPhilipp Reisner }
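
/* Editor's note: illustrative numbers for the extent arithmetic above.
 * One bm-extent is 512 bytes of bitmap (see the comment before this
 * function), i.e. 64 longs on a 64bit arch, so S2W(2) == 128 and
 * drbd_bm_e_weight(mdev, 2) sums hweight_long() over words 128..191
 * (clamped to bm_words). */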
1559b411b363SPhilipp Reisner 
1560*4b0715f0SLars Ellenberg /* Set all bits covered by the AL-extent al_enr.
1561*4b0715f0SLars Ellenberg  * Returns number of bits changed. */
1562b411b363SPhilipp Reisner unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
1563b411b363SPhilipp Reisner {
1564b411b363SPhilipp Reisner 	struct drbd_bitmap *b = mdev->bitmap;
1565b411b363SPhilipp Reisner 	unsigned long *p_addr, *bm;
1566b411b363SPhilipp Reisner 	unsigned long weight;
1567*4b0715f0SLars Ellenberg 	unsigned long s, e;
1568*4b0715f0SLars Ellenberg 	int count, i, do_now;
1569b411b363SPhilipp Reisner 	ERR_IF(!b) return 0;
1570b411b363SPhilipp Reisner 	ERR_IF(!b->bm_pages) return 0;
1571b411b363SPhilipp Reisner 
1572b411b363SPhilipp Reisner 	spin_lock_irq(&b->bm_lock);
1573b411b363SPhilipp Reisner 	if (bm_is_locked(b))
1574b411b363SPhilipp Reisner 		bm_print_lock_info(mdev);
1575b411b363SPhilipp Reisner 	weight = b->bm_set;
1576b411b363SPhilipp Reisner 
1577b411b363SPhilipp Reisner 	s = al_enr * BM_WORDS_PER_AL_EXT;
1578b411b363SPhilipp Reisner 	e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
1579b411b363SPhilipp Reisner 	/* assert that s and e are on the same page */
1580b411b363SPhilipp Reisner 	D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
1581b411b363SPhilipp Reisner 	      ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
1582b411b363SPhilipp Reisner 	count = 0;
1583b411b363SPhilipp Reisner 	if (s < b->bm_words) {
1584b411b363SPhilipp Reisner 		i = do_now = e-s;
158519f843aaSLars Ellenberg 		p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s));
1586b411b363SPhilipp Reisner 		bm = p_addr + MLPP(s);
1587b411b363SPhilipp Reisner 		while (i--) {
1588b411b363SPhilipp Reisner 			count += hweight_long(*bm);
1589b411b363SPhilipp Reisner 			*bm = -1UL;
1590b411b363SPhilipp Reisner 			bm++;
1591b411b363SPhilipp Reisner 		}
1592b411b363SPhilipp Reisner 		bm_unmap(p_addr);
1593b411b363SPhilipp Reisner 		b->bm_set += do_now*BITS_PER_LONG - count;
1594b411b363SPhilipp Reisner 		if (e == b->bm_words)
1595b411b363SPhilipp Reisner 			b->bm_set -= bm_clear_surplus(b);
1596b411b363SPhilipp Reisner 	} else {
1597*4b0715f0SLars Ellenberg 		dev_err(DEV, "start offset (%lu) too large in drbd_bm_ALe_set_all\n", s);
1598b411b363SPhilipp Reisner 	}
1599b411b363SPhilipp Reisner 	weight = b->bm_set - weight;
1600b411b363SPhilipp Reisner 	spin_unlock_irq(&b->bm_lock);
1601b411b363SPhilipp Reisner 	return weight;
1602b411b363SPhilipp Reisner }
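
/* Editor's note: illustrative accounting for the loop above.  If the
 * AL-extent spans do_now == 4 longs of which count == 10 bits were
 * already set, assigning -1UL to all four words leaves a weight delta
 * of 4*BITS_PER_LONG - 10, i.e. 246 newly set bits on a 64bit arch. */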
1603