/* xref: /openbmc/linux/drivers/md/md-bitmap.c (revision 44abfa6a) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after a given percentage of bits is set, rather than just time based (maybe both).
 */

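/*
 * Rough overview (editorial note, not from the original file): the "two-level"
 * scheme keeps an in-memory array of per-chunk counters (struct bitmap_counts)
 * that tracks in-flight and not-yet-synced writes, while the on-disk or
 * in-file bitmap stores only one bit per chunk.  A bit is set before a write
 * to its chunk is issued and is cleared again, lazily, once the counter for
 * that chunk has drained to zero.
 */
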
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
			       unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	WARN_ON_ONCE(page >= bitmap->pages);
	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5.c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked will be set.  In either case, this function will
	 * abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
		/* We don't support hijack for cluster raid */
		if (no_hijack)
			return -ENOMEM;
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		kfree(mappage);
	} else {

		/* no page was in place and we have one, so install it */

		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		kfree(ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */

/* choose a good rdev and read the page from there */
static int read_sb_page(struct mddev *mddev, loff_t offset,
		struct page *page, unsigned long index, int size)
{
	sector_t sector = mddev->bitmap_info.offset + offset +
		index * (PAGE_SIZE / SECTOR_SIZE);
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		u32 iosize = roundup(size, bdev_logical_block_size(rdev->bdev));

		if (!test_bit(In_sync, &rdev->flags) ||
		    test_bit(Faulty, &rdev->flags) ||
		    test_bit(Bitmap_sync, &rdev->flags))
			continue;

		if (sync_page_io(rdev, sector, iosize, page, REQ_OP_READ, true))
			return 0;
	}
	return -EIO;
}
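
/*
 * Editorial note: the sector computed above is relative to the bitmap offset
 * recorded in bitmap_info; with 4 KiB pages and 512 byte sectors each
 * successive bitmap page advances the read position by 8 sectors, and the
 * first suitable rdev (In_sync, not Faulty, not Bitmap_sync) that reads the
 * page successfully wins.
 */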

static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_entry_continue_rcu.
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable device */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static unsigned int optimal_io_size(struct block_device *bdev,
				    unsigned int last_page_size,
				    unsigned int io_size)
{
	if (bdev_io_opt(bdev) > bdev_logical_block_size(bdev))
		return roundup(last_page_size, bdev_io_opt(bdev));
	return io_size;
}

static unsigned int bitmap_io_size(unsigned int io_size, unsigned int opt_size,
				   loff_t start, loff_t boundary)
{
	if (io_size != opt_size &&
	    start + opt_size / SECTOR_SIZE <= boundary)
		return opt_size;
	if (start + io_size / SECTOR_SIZE <= boundary)
		return io_size;

	/* Overflows boundary */
	return 0;
}
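
/*
 * Worked example (editorial note, not from the original source): with 4 KiB
 * pages, a 512 byte logical block size and a 64 KiB optimal I/O size,
 * optimal_io_size() rounds the last partial page up to 64 KiB.
 * bitmap_io_size() then only uses that larger size if it still fits before
 * 'boundary' (expressed in sectors); otherwise it falls back to the plain
 * io_size, and returns 0 if even that would cross the boundary.
 */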

static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
			   unsigned long pg_index, struct page *page)
{
	struct block_device *bdev;
	struct mddev *mddev = bitmap->mddev;
	struct bitmap_storage *store = &bitmap->storage;
	loff_t sboff, offset = mddev->bitmap_info.offset;
	sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
	unsigned int size = PAGE_SIZE;
	unsigned int opt_size = PAGE_SIZE;
	sector_t doff;

	bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
	if (pg_index == store->file_pages - 1) {
		unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);

		if (last_page_size == 0)
			last_page_size = PAGE_SIZE;
		size = roundup(last_page_size, bdev_logical_block_size(bdev));
		opt_size = optimal_io_size(bdev, last_page_size, size);
	}

	sboff = rdev->sb_start + offset;
	doff = rdev->data_offset;

	/* Just make sure we aren't corrupting data or metadata */
	if (mddev->external) {
		/* Bitmap could be anywhere. */
		if (sboff + ps > doff &&
		    sboff < (doff + mddev->dev_sectors + PAGE_SIZE / SECTOR_SIZE))
			return -EINVAL;
	} else if (offset < 0) {
		/* DATA  BITMAP METADATA  */
		size = bitmap_io_size(size, opt_size, offset + ps, 0);
		if (size == 0)
			/* bitmap runs into metadata */
			return -EINVAL;

		if (doff + mddev->dev_sectors > sboff)
			/* data runs into bitmap */
			return -EINVAL;
	} else if (rdev->sb_start < rdev->data_offset) {
		/* METADATA BITMAP DATA */
		size = bitmap_io_size(size, opt_size, sboff + ps, doff);
		if (size == 0)
			/* bitmap runs into data */
			return -EINVAL;
	} else {
		/* DATA METADATA BITMAP - no problems */
	}

	md_super_write(mddev, rdev, sboff + ps, (int) size, page);
	return 0;
}

static void write_sb_page(struct bitmap *bitmap, unsigned long pg_index,
			  struct page *page, bool wait)
{
	struct mddev *mddev = bitmap->mddev;

	do {
		struct md_rdev *rdev = NULL;

		while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
			if (__write_sb_page(rdev, bitmap, pg_index, page) < 0) {
				set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
				return;
			}
		}
	} while (wait && md_super_wait(mddev) < 0);
}
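
/*
 * Editorial note: with 'wait' set, the do/while above relies on
 * md_super_wait() both waiting for the submitted writes to complete and
 * reporting (via a negative return) when they need to be redone, in which
 * case the page is submitted to every active rdev again.
 */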

static void md_bitmap_file_kick(struct bitmap *bitmap);

#ifdef CONFIG_MD_BITMAP_FILE
static void write_file_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh = page_buffers(page);

	while (bh && bh->b_blocknr) {
		atomic_inc(&bitmap->pending_writes);
		set_buffer_locked(bh);
		set_buffer_mapped(bh);
		submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
		bh = bh->b_this_page;
	}

	if (wait)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes) == 0);
}

static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;

	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}

static void free_buffers(struct page *page)
{
	struct buffer_head *bh;

	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	detach_page_private(page);
	put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static int read_file_page(struct file *file, unsigned long index,
		struct bitmap *bitmap, unsigned long count, struct page *page)
{
	int ret = 0;
	struct inode *inode = file_inode(file);
	struct buffer_head *bh;
	sector_t block, blk_cur;
	unsigned long blocksize = i_blocksize(inode);

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	bh = alloc_page_buffers(page, blocksize, false);
	if (!bh) {
		ret = -ENOMEM;
		goto out;
	}
	attach_page_private(page, bh);
	blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		block = blk_cur;

		if (count == 0)
			bh->b_blocknr = 0;
		else {
			ret = bmap(inode, &block);
			if (ret || !block) {
				ret = -EINVAL;
				bh->b_blocknr = 0;
				goto out;
			}

			bh->b_blocknr = block;
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < blocksize)
				count = 0;
			else
				count -= blocksize;

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_READ, bh);
		}
		blk_cur++;
		bh = bh->b_this_page;
	}

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		ret = -EIO;
out:
	if (ret)
		pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
		       (int)PAGE_SIZE,
		       (unsigned long long)index << PAGE_SHIFT,
		       ret);
	return ret;
}
#else /* CONFIG_MD_BITMAP_FILE */
static void write_file_page(struct bitmap *bitmap, struct page *page, int wait)
{
}
static int read_file_page(struct file *file, unsigned long index,
		struct bitmap *bitmap, unsigned long count, struct page *page)
{
	return -EIO;
}
static void free_buffers(struct page *page)
{
	put_page(page);
}
#endif /* CONFIG_MD_BITMAP_FILE */

/*
 * bitmap file superblock operations
 */

/*
 * write out a page to a file
 */
static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index,
			       bool wait)
{
	struct bitmap_storage *store = &bitmap->storage;
	struct page *page = store->filemap[pg_index];

	if (mddev_is_clustered(bitmap->mddev)) {
		pg_index += bitmap->cluster_slot *
			DIV_ROUND_UP(store->bytes, PAGE_SIZE);
	}

	if (store->file)
		write_file_page(bitmap, page, wait);
	else
		write_sb_page(bitmap, pg_index, page, wait);
}

/*
 * md_bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * md_bitmap_daemon_work(), have completed.
 */
static void md_bitmap_wait_writes(struct bitmap *bitmap)
{
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		/* Note that we ignore the return value.  The writes
		 * might have failed, but that would just mean that
		 * some bits which should be cleared haven't been,
		 * which is safe.  The relevant bitmap blocks will
		 * probably get written again, but there is no great
		 * loss if they aren't.
		 */
		md_super_wait(bitmap->mddev);
}


/* update the event counter and sync the superblock to disk */
void md_bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	if (!bitmap->storage.sb_page) /* no superblock */
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	/*
	 * clear BITMAP_WRITE_ERROR bit to protect against the case that
	 * a bitmap write error occurred but the later writes succeeded.
	 */
	sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
	kunmap_atomic(sb);

	if (bitmap->storage.file)
		write_file_page(bitmap, bitmap->storage.sb_page, 1);
	else
		write_sb_page(bitmap, bitmap->storage.sb_index,
			      bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(md_bitmap_update_sb);

/* print out the bitmap file superblock */
void md_bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->storage.sb_page)
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
	pr_debug("       version: %u\n", le32_to_cpu(sb->version));
	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
	pr_debug("        events: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events));
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
	pr_debug("     chunksize: %u B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
	bitmap->storage.sb_index = 0;

	sb = kmap_atomic(bitmap->storage.sb_page);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb);
		pr_warn("bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		pr_debug("Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	set_bit(BITMAP_STALE, &bitmap->flags);
	sb->state = cpu_to_le32(bitmap->flags);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
	bitmap->mddev->bitmap_info.nodes = 0;

	kunmap_atomic(sb);

	return 0;
}

/* read the superblock from the bitmap file and initialize some bitmap fields */
static int md_bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int nodes = 0;
	unsigned long sectors_reserved = 0;
	int err = -EINVAL;
	struct page *sb_page;
	loff_t offset = 0;

	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
		set_bit(BITMAP_STALE, &bitmap->flags);
		err = 0;
		goto out_no_sb;
	}
	/* page 0 is the superblock, read it... */
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
	bitmap->storage.sb_page = sb_page;

re_read:
	/* If cluster_slot is set, the cluster is set up */
	if (bitmap->cluster_slot >= 0) {
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks,
			   (bitmap->mddev->bitmap_info.chunksize >> 9));
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
		offset = bitmap->cluster_slot * (bm_blocks << 3);
		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
			bitmap->cluster_slot, offset);
	}

	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		err = read_file_page(bitmap->storage.file, 0,
				bitmap, bytes, sb_page);
	} else {
		err = read_sb_page(bitmap->mddev, offset, sb_page, 0,
				   sizeof(bitmap_super_t));
	}
	if (err)
		return err;

	err = -EINVAL;
	sb = kmap_atomic(sb_page);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		pr_warn("%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/*
	 * Setup nodes/clustername only if bitmap version is
	 * cluster-compatible
	 */
	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
		nodes = le32_to_cpu(sb->nodes);
		strscpy(bitmap->mddev->bitmap_info.cluster_name,
				sb->cluster_name, 64);
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			pr_warn("%s: bitmap superblock UUID mismatch\n",
				bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
		if (!nodes && (events < bitmap->mddev->events)) {
			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
				bmname(bitmap), events,
				(unsigned long long) bitmap->mddev->events);
			set_bit(BITMAP_STALE, &bitmap->flags);
		}
	}

	/* assign fields using values from superblock */
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	err = 0;

out:
	kunmap_atomic(sb);
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
		/* Assigning chunksize is required for "re_read" */
		bitmap->mddev->bitmap_info.chunksize = chunksize;
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_warn("%s: Could not setup cluster service (%d)\n",
				bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}

out_no_sb:
	if (err == 0) {
		if (test_bit(BITMAP_STALE, &bitmap->flags))
			bitmap->events_cleared = bitmap->mddev->events;
		bitmap->mddev->bitmap_info.chunksize = chunksize;
		bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
		bitmap->mddev->bitmap_info.max_write_behind = write_behind;
		bitmap->mddev->bitmap_info.nodes = nodes;
		if (bitmap->mddev->bitmap_info.space == 0 ||
			bitmap->mddev->bitmap_info.space > sectors_reserved)
			bitmap->mddev->bitmap_info.space = sectors_reserved;
	} else {
		md_bitmap_print_sb(bitmap);
		if (bitmap->cluster_slot < 0)
			md_cluster_stop(bitmap->mddev);
	}
	return err;
}

/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
}
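
/*
 * Worked example (editorial note, not in the original source): when the
 * superblock lives in the file, the chunk bits start right after it.  With
 * the usual 256 byte bitmap_super_t that is a 2048 bit offset, so on a
 * 4 KiB page (32768 bits) chunk 0 is bit 2048 of page 0 and chunk 30720 is
 * the first bit of page 1.
 */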

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
static inline struct page *filemap_get_page(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (file_page_index(store, chunk) >= store->file_pages)
		return NULL;
	return store->filemap[file_page_index(store, chunk)];
}

static int md_bitmap_storage_alloc(struct bitmap_storage *store,
				   unsigned long chunks, int with_super,
				   int slot_number)
{
	int pnum, offset = 0;
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
	offset = slot_number * num_pages;

	store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
				       GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (store->sb_page == NULL)
			return -ENOMEM;
	}

	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
		store->sb_index = offset;
	}

	for ( ; pnum < num_pages; pnum++) {
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}
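
/*
 * Sizing example (editorial note, not in the original source): a 1 TiB array
 * with 64 MiB bitmap chunks has 16384 chunks, i.e. 2048 bytes of bits, plus
 * 256 bytes of superblock when with_super is set -- still well within a
 * single 4 KiB filemap page.
 */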

static void md_bitmap_file_unmap(struct bitmap_storage *store)
{
	struct file *file = store->file;
	struct page *sb_page = store->sb_page;
	struct page **map = store->filemap;
	int pages = store->file_pages;

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(store->filemap_attr);

	if (sb_page)
		free_buffers(sb_page);

	if (file) {
		struct inode *inode = file_inode(file);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void md_bitmap_file_kick(struct bitmap *bitmap)
{
	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
		md_bitmap_update_sb(bitmap);

		if (bitmap->storage.file) {
			pr_warn("%s: kicking failed bitmap file %pD4 from array!\n",
				bmname(bitmap), bitmap->storage.file);

		} else
			pr_warn("%s: disabling internal bitmap due to errors\n",
				bmname(bitmap));
	}
}

enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};

static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
{
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}
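
/*
 * Layout note (editorial): filemap_attr packs four attribute bits per filemap
 * page (three of them used, see the enum above), so the flag for page 'pnum'
 * and attribute 'attr' lives at bit (pnum << 2) + attr of the bitmap
 * allocated in md_bitmap_storage_alloc().
 */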
938935fe098SMike Snitzer /*
939935fe098SMike Snitzer  * bitmap_file_set_bit -- called before performing a write to the md device
940935fe098SMike Snitzer  * to set (and eventually sync) a particular bit in the bitmap file
941935fe098SMike Snitzer  *
942935fe098SMike Snitzer  * we set the bit immediately, then we record the page number so that
943935fe098SMike Snitzer  * when an unplug occurs, we can flush the dirty pages out to disk
944935fe098SMike Snitzer  */
945e64e4018SAndy Shevchenko static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
946935fe098SMike Snitzer {
947935fe098SMike Snitzer 	unsigned long bit;
948935fe098SMike Snitzer 	struct page *page;
949935fe098SMike Snitzer 	void *kaddr;
950935fe098SMike Snitzer 	unsigned long chunk = block >> bitmap->counts.chunkshift;
951935fe098SMike Snitzer 	struct bitmap_storage *store = &bitmap->storage;
952d7038f95SChristoph Hellwig 	unsigned long index = file_page_index(store, chunk);
953935fe098SMike Snitzer 	unsigned long node_offset = 0;
954935fe098SMike Snitzer 
955935fe098SMike Snitzer 	if (mddev_is_clustered(bitmap->mddev))
956935fe098SMike Snitzer 		node_offset = bitmap->cluster_slot * store->file_pages;
957935fe098SMike Snitzer 
958935fe098SMike Snitzer 	page = filemap_get_page(&bitmap->storage, chunk);
959935fe098SMike Snitzer 	if (!page)
960935fe098SMike Snitzer 		return;
961935fe098SMike Snitzer 	bit = file_page_offset(&bitmap->storage, chunk);
962935fe098SMike Snitzer 
963935fe098SMike Snitzer 	/* set the bit */
964935fe098SMike Snitzer 	kaddr = kmap_atomic(page);
965935fe098SMike Snitzer 	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
966935fe098SMike Snitzer 		set_bit(bit, kaddr);
967935fe098SMike Snitzer 	else
968935fe098SMike Snitzer 		set_bit_le(bit, kaddr);
969935fe098SMike Snitzer 	kunmap_atomic(kaddr);
970d7038f95SChristoph Hellwig 	pr_debug("set file bit %lu page %lu\n", bit, index);
971935fe098SMike Snitzer 	/* record page number so it gets flushed to disk when unplug occurs */
972d7038f95SChristoph Hellwig 	set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_DIRTY);
973935fe098SMike Snitzer }
974935fe098SMike Snitzer 
975e64e4018SAndy Shevchenko static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
976935fe098SMike Snitzer {
977935fe098SMike Snitzer 	unsigned long bit;
978935fe098SMike Snitzer 	struct page *page;
979935fe098SMike Snitzer 	void *paddr;
980935fe098SMike Snitzer 	unsigned long chunk = block >> bitmap->counts.chunkshift;
981935fe098SMike Snitzer 	struct bitmap_storage *store = &bitmap->storage;
982d7038f95SChristoph Hellwig 	unsigned long index = file_page_index(store, chunk);
983935fe098SMike Snitzer 	unsigned long node_offset = 0;
984935fe098SMike Snitzer 
985935fe098SMike Snitzer 	if (mddev_is_clustered(bitmap->mddev))
986935fe098SMike Snitzer 		node_offset = bitmap->cluster_slot * store->file_pages;
987935fe098SMike Snitzer 
988935fe098SMike Snitzer 	page = filemap_get_page(&bitmap->storage, chunk);
989935fe098SMike Snitzer 	if (!page)
990935fe098SMike Snitzer 		return;
991935fe098SMike Snitzer 	bit = file_page_offset(&bitmap->storage, chunk);
992935fe098SMike Snitzer 	paddr = kmap_atomic(page);
993935fe098SMike Snitzer 	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
994935fe098SMike Snitzer 		clear_bit(bit, paddr);
995935fe098SMike Snitzer 	else
996935fe098SMike Snitzer 		clear_bit_le(bit, paddr);
997935fe098SMike Snitzer 	kunmap_atomic(paddr);
998d7038f95SChristoph Hellwig 	if (!test_page_attr(bitmap, index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
999d7038f95SChristoph Hellwig 		set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_PENDING);
1000935fe098SMike Snitzer 		bitmap->allclean = 0;
1001935fe098SMike Snitzer 	}
1002935fe098SMike Snitzer }
1003935fe098SMike Snitzer 
1004e64e4018SAndy Shevchenko static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
1005935fe098SMike Snitzer {
1006935fe098SMike Snitzer 	unsigned long bit;
1007935fe098SMike Snitzer 	struct page *page;
1008935fe098SMike Snitzer 	void *paddr;
1009935fe098SMike Snitzer 	unsigned long chunk = block >> bitmap->counts.chunkshift;
1010935fe098SMike Snitzer 	int set = 0;
1011935fe098SMike Snitzer 
1012935fe098SMike Snitzer 	page = filemap_get_page(&bitmap->storage, chunk);
1013935fe098SMike Snitzer 	if (!page)
1014935fe098SMike Snitzer 		return -EINVAL;
1015935fe098SMike Snitzer 	bit = file_page_offset(&bitmap->storage, chunk);
1016935fe098SMike Snitzer 	paddr = kmap_atomic(page);
1017935fe098SMike Snitzer 	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1018935fe098SMike Snitzer 		set = test_bit(bit, paddr);
1019935fe098SMike Snitzer 	else
1020935fe098SMike Snitzer 		set = test_bit_le(bit, paddr);
1021935fe098SMike Snitzer 	kunmap_atomic(paddr);
1022935fe098SMike Snitzer 	return set;
1023935fe098SMike Snitzer }
1024935fe098SMike Snitzer 
1025935fe098SMike Snitzer /* this gets called when the md device is ready to unplug its underlying
1026935fe098SMike Snitzer  * (slave) device queues -- before we let any writes go down, we need to
1027935fe098SMike Snitzer  * sync the dirty pages of the bitmap file to disk */
1028e64e4018SAndy Shevchenko void md_bitmap_unplug(struct bitmap *bitmap)
1029935fe098SMike Snitzer {
1030935fe098SMike Snitzer 	unsigned long i;
1031935fe098SMike Snitzer 	int dirty, need_write;
1032935fe098SMike Snitzer 	int writing = 0;
1033935fe098SMike Snitzer 
10347db922baSYu Kuai 	if (!md_bitmap_enabled(bitmap))
1035935fe098SMike Snitzer 		return;
1036935fe098SMike Snitzer 
1037935fe098SMike Snitzer 	/* look at each page to see if there are any set bits that need to be
1038935fe098SMike Snitzer 	 * flushed out to disk */
1039935fe098SMike Snitzer 	for (i = 0; i < bitmap->storage.file_pages; i++) {
1040935fe098SMike Snitzer 		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
1041935fe098SMike Snitzer 		need_write = test_and_clear_page_attr(bitmap, i,
1042935fe098SMike Snitzer 						      BITMAP_PAGE_NEEDWRITE);
1043935fe098SMike Snitzer 		if (dirty || need_write) {
1044935fe098SMike Snitzer 			if (!writing) {
1045e64e4018SAndy Shevchenko 				md_bitmap_wait_writes(bitmap);
1046935fe098SMike Snitzer 				if (bitmap->mddev->queue)
1047935fe098SMike Snitzer 					blk_add_trace_msg(bitmap->mddev->queue,
1048935fe098SMike Snitzer 							  "md bitmap_unplug");
1049935fe098SMike Snitzer 			}
1050935fe098SMike Snitzer 			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
1051d7038f95SChristoph Hellwig 			filemap_write_page(bitmap, i, false);
1052935fe098SMike Snitzer 			writing = 1;
1053935fe098SMike Snitzer 		}
1054935fe098SMike Snitzer 	}
1055935fe098SMike Snitzer 	if (writing)
1056e64e4018SAndy Shevchenko 		md_bitmap_wait_writes(bitmap);
1057935fe098SMike Snitzer 
1058935fe098SMike Snitzer 	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
1059e64e4018SAndy Shevchenko 		md_bitmap_file_kick(bitmap);
1060935fe098SMike Snitzer }
1061e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_unplug);
1062935fe098SMike Snitzer 
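/*
 * md_bitmap_unplug_async() below runs md_bitmap_unplug() from the
 * md_bitmap_wq workqueue and waits for completion, so the bitmap writeout
 * is issued from workqueue context rather than from the submitting thread
 * (presumably to keep the flush out of contexts where issuing that I/O
 * directly would be unsafe).
 */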
1063a022325aSYu Kuai struct bitmap_unplug_work {
1064a022325aSYu Kuai 	struct work_struct work;
1065a022325aSYu Kuai 	struct bitmap *bitmap;
1066a022325aSYu Kuai 	struct completion *done;
1067a022325aSYu Kuai };
1068a022325aSYu Kuai 
1069a022325aSYu Kuai static void md_bitmap_unplug_fn(struct work_struct *work)
1070a022325aSYu Kuai {
1071a022325aSYu Kuai 	struct bitmap_unplug_work *unplug_work =
1072a022325aSYu Kuai 		container_of(work, struct bitmap_unplug_work, work);
1073a022325aSYu Kuai 
1074a022325aSYu Kuai 	md_bitmap_unplug(unplug_work->bitmap);
1075a022325aSYu Kuai 	complete(unplug_work->done);
1076a022325aSYu Kuai }
1077a022325aSYu Kuai 
1078a022325aSYu Kuai void md_bitmap_unplug_async(struct bitmap *bitmap)
1079a022325aSYu Kuai {
1080a022325aSYu Kuai 	DECLARE_COMPLETION_ONSTACK(done);
1081a022325aSYu Kuai 	struct bitmap_unplug_work unplug_work;
1082a022325aSYu Kuai 
1083a022325aSYu Kuai 	INIT_WORK_ONSTACK(&unplug_work.work, md_bitmap_unplug_fn);
1084a022325aSYu Kuai 	unplug_work.bitmap = bitmap;
1085a022325aSYu Kuai 	unplug_work.done = &done;
1086a022325aSYu Kuai 
1087a022325aSYu Kuai 	queue_work(md_bitmap_wq, &unplug_work.work);
1088a022325aSYu Kuai 	wait_for_completion(&done);
1089a022325aSYu Kuai }
1090a022325aSYu Kuai EXPORT_SYMBOL(md_bitmap_unplug_async);
1091a022325aSYu Kuai 
1092e64e4018SAndy Shevchenko static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
1093844dc669SChristoph Hellwig 
1094844dc669SChristoph Hellwig /*
1095844dc669SChristoph Hellwig  * Initialize the in-memory bitmap from the on-disk bitmap and set up the memory
1096844dc669SChristoph Hellwig  * mapping of the bitmap file.
1097844dc669SChristoph Hellwig  *
1098844dc669SChristoph Hellwig  * Special case: If there's no bitmap file, or if the bitmap file had been
1099844dc669SChristoph Hellwig  * previously kicked from the array, we mark all the bits as 1's in order to
1100844dc669SChristoph Hellwig  * cause a full resync.
1101935fe098SMike Snitzer  *
1102935fe098SMike Snitzer  * We ignore all bits for sectors that end earlier than 'start'.
1103844dc669SChristoph Hellwig  * This is used when reading an out-of-date bitmap.
1104935fe098SMike Snitzer  */
1105e64e4018SAndy Shevchenko static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1106935fe098SMike Snitzer {
1107844dc669SChristoph Hellwig 	bool outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
1108844dc669SChristoph Hellwig 	struct mddev *mddev = bitmap->mddev;
1109844dc669SChristoph Hellwig 	unsigned long chunks = bitmap->counts.chunks;
1110935fe098SMike Snitzer 	struct bitmap_storage *store = &bitmap->storage;
1111844dc669SChristoph Hellwig 	struct file *file = store->file;
1112844dc669SChristoph Hellwig 	unsigned long node_offset = 0;
1113844dc669SChristoph Hellwig 	unsigned long bit_cnt = 0;
1114844dc669SChristoph Hellwig 	unsigned long i;
1115844dc669SChristoph Hellwig 	int ret;
1116935fe098SMike Snitzer 
1117844dc669SChristoph Hellwig 	if (!file && !mddev->bitmap_info.offset) {
1118935fe098SMike Snitzer 		/* No permanent bitmap - fill with '1s'. */
1119935fe098SMike Snitzer 		store->filemap = NULL;
1120935fe098SMike Snitzer 		store->file_pages = 0;
1121935fe098SMike Snitzer 		for (i = 0; i < chunks ; i++) {
1122935fe098SMike Snitzer 			/* no disk bits to read: set each memory bit, with NEEDED for chunks at or beyond 'start' */
1123935fe098SMike Snitzer 			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
1124935fe098SMike Snitzer 				      >= start);
1125e64e4018SAndy Shevchenko 			md_bitmap_set_memory_bits(bitmap,
1126935fe098SMike Snitzer 						  (sector_t)i << bitmap->counts.chunkshift,
1127935fe098SMike Snitzer 						  needed);
1128935fe098SMike Snitzer 		}
1129935fe098SMike Snitzer 		return 0;
1130935fe098SMike Snitzer 	}
1131935fe098SMike Snitzer 
1132935fe098SMike Snitzer 	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
1133935fe098SMike Snitzer 		pr_warn("%s: bitmap file too short %lu < %lu\n",
1134935fe098SMike Snitzer 			bmname(bitmap),
1135935fe098SMike Snitzer 			(unsigned long) i_size_read(file->f_mapping->host),
1136935fe098SMike Snitzer 			store->bytes);
1137844dc669SChristoph Hellwig 		ret = -ENOSPC;
1138935fe098SMike Snitzer 		goto err;
1139935fe098SMike Snitzer 	}
1140935fe098SMike Snitzer 
1141844dc669SChristoph Hellwig 	if (mddev_is_clustered(mddev))
1142935fe098SMike Snitzer 		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));
1143935fe098SMike Snitzer 
1144844dc669SChristoph Hellwig 	for (i = 0; i < store->file_pages; i++) {
1145844dc669SChristoph Hellwig 		struct page *page = store->filemap[i];
1146935fe098SMike Snitzer 		int count;
1147844dc669SChristoph Hellwig 
1148935fe098SMike Snitzer 		/* the last page may contain less than a full page of data */
1149844dc669SChristoph Hellwig 		if (i == store->file_pages - 1)
1150844dc669SChristoph Hellwig 			count = store->bytes - i * PAGE_SIZE;
1151935fe098SMike Snitzer 		else
1152935fe098SMike Snitzer 			count = PAGE_SIZE;
1153935fe098SMike Snitzer 
1154844dc669SChristoph Hellwig 		if (file)
1155844dc669SChristoph Hellwig 			ret = read_file_page(file, i, bitmap, count, page);
1156844dc669SChristoph Hellwig 		else
1157f5f2d5acSChristoph Hellwig 			ret = read_sb_page(mddev, 0, page, i + node_offset,
1158f5f2d5acSChristoph Hellwig 					   count);
1159935fe098SMike Snitzer 		if (ret)
1160935fe098SMike Snitzer 			goto err;
1161844dc669SChristoph Hellwig 	}
1162935fe098SMike Snitzer 
1163935fe098SMike Snitzer 	if (outofdate) {
1164844dc669SChristoph Hellwig 		pr_warn("%s: bitmap file is out of date, doing full recovery\n",
1165844dc669SChristoph Hellwig 			bmname(bitmap));
1166844dc669SChristoph Hellwig 
1167844dc669SChristoph Hellwig 		for (i = 0; i < store->file_pages; i++) {
1168844dc669SChristoph Hellwig 			struct page *page = store->filemap[i];
1169844dc669SChristoph Hellwig 			unsigned long offset = 0;
1170844dc669SChristoph Hellwig 			void *paddr;
1171844dc669SChristoph Hellwig 
1172844dc669SChristoph Hellwig 			if (i == 0 && !mddev->bitmap_info.external)
1173844dc669SChristoph Hellwig 				offset = sizeof(bitmap_super_t);
1174844dc669SChristoph Hellwig 
1175935fe098SMike Snitzer 			/*
1176844dc669SChristoph Hellwig 			 * If the bitmap is out of date, dirty the whole page
1177844dc669SChristoph Hellwig 			 * and write it out
1178935fe098SMike Snitzer 			 */
1179935fe098SMike Snitzer 			paddr = kmap_atomic(page);
1180844dc669SChristoph Hellwig 			memset(paddr + offset, 0xff, PAGE_SIZE - offset);
1181935fe098SMike Snitzer 			kunmap_atomic(paddr);
1182935fe098SMike Snitzer 
1183d7038f95SChristoph Hellwig 			filemap_write_page(bitmap, i, true);
1184844dc669SChristoph Hellwig 			if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) {
1185935fe098SMike Snitzer 				ret = -EIO;
1186935fe098SMike Snitzer 				goto err;
1187935fe098SMike Snitzer 			}
1188935fe098SMike Snitzer 		}
1189844dc669SChristoph Hellwig 	}
1190844dc669SChristoph Hellwig 
1191844dc669SChristoph Hellwig 	for (i = 0; i < chunks; i++) {
1192844dc669SChristoph Hellwig 		struct page *page = filemap_get_page(&bitmap->storage, i);
1193844dc669SChristoph Hellwig 		unsigned long bit = file_page_offset(&bitmap->storage, i);
1194844dc669SChristoph Hellwig 		void *paddr;
1195844dc669SChristoph Hellwig 		bool was_set;
1196844dc669SChristoph Hellwig 
1197935fe098SMike Snitzer 		paddr = kmap_atomic(page);
1198935fe098SMike Snitzer 		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
1199844dc669SChristoph Hellwig 			was_set = test_bit(bit, paddr);
1200935fe098SMike Snitzer 		else
1201844dc669SChristoph Hellwig 			was_set = test_bit_le(bit, paddr);
1202935fe098SMike Snitzer 		kunmap_atomic(paddr);
1203844dc669SChristoph Hellwig 
1204844dc669SChristoph Hellwig 		if (was_set) {
1205935fe098SMike Snitzer 			/* if the disk bit is set, set the memory bit */
1206935fe098SMike Snitzer 			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
1207935fe098SMike Snitzer 				      >= start);
1208e64e4018SAndy Shevchenko 			md_bitmap_set_memory_bits(bitmap,
1209935fe098SMike Snitzer 						  (sector_t)i << bitmap->counts.chunkshift,
1210935fe098SMike Snitzer 						  needed);
1211935fe098SMike Snitzer 			bit_cnt++;
1212935fe098SMike Snitzer 		}
1213935fe098SMike Snitzer 	}
1214935fe098SMike Snitzer 
1215935fe098SMike Snitzer 	pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
1216935fe098SMike Snitzer 		 bmname(bitmap), store->file_pages,
1217935fe098SMike Snitzer 		 bit_cnt, chunks);
1218935fe098SMike Snitzer 
1219935fe098SMike Snitzer 	return 0;
1220935fe098SMike Snitzer 
1221935fe098SMike Snitzer  err:
1222935fe098SMike Snitzer 	pr_warn("%s: bitmap initialisation failed: %d\n",
1223935fe098SMike Snitzer 		bmname(bitmap), ret);
1224935fe098SMike Snitzer 	return ret;
1225935fe098SMike Snitzer }
1226935fe098SMike Snitzer 
1227e64e4018SAndy Shevchenko void md_bitmap_write_all(struct bitmap *bitmap)
1228935fe098SMike Snitzer {
1229935fe098SMike Snitzer 	/* We don't actually write all bitmap blocks here,
1230935fe098SMike Snitzer 	 * just flag them as needing to be written
1231935fe098SMike Snitzer 	 */
1232935fe098SMike Snitzer 	int i;
1233935fe098SMike Snitzer 
1234935fe098SMike Snitzer 	if (!bitmap || !bitmap->storage.filemap)
1235935fe098SMike Snitzer 		return;
1236935fe098SMike Snitzer 	if (bitmap->storage.file)
1237935fe098SMike Snitzer 		/* Only one copy, so nothing needed */
1238935fe098SMike Snitzer 		return;
1239935fe098SMike Snitzer 
1240935fe098SMike Snitzer 	for (i = 0; i < bitmap->storage.file_pages; i++)
1241935fe098SMike Snitzer 		set_page_attr(bitmap, i,
1242935fe098SMike Snitzer 			      BITMAP_PAGE_NEEDWRITE);
1243935fe098SMike Snitzer 	bitmap->allclean = 0;
1244935fe098SMike Snitzer }
1245935fe098SMike Snitzer 
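/*
 * Per-counter-page bookkeeping: md_bitmap_count_page() adjusts the count
 * of in-use counters on the page covering 'offset' (md_bitmap_checkfree()
 * can free a page once that count reaches zero), and
 * md_bitmap_set_pending() flags the page so the daemon rescans it.
 */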
1246e64e4018SAndy Shevchenko static void md_bitmap_count_page(struct bitmap_counts *bitmap,
1247935fe098SMike Snitzer 				 sector_t offset, int inc)
1248935fe098SMike Snitzer {
1249935fe098SMike Snitzer 	sector_t chunk = offset >> bitmap->chunkshift;
1250935fe098SMike Snitzer 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1251935fe098SMike Snitzer 	bitmap->bp[page].count += inc;
1252e64e4018SAndy Shevchenko 	md_bitmap_checkfree(bitmap, page);
1253935fe098SMike Snitzer }
1254935fe098SMike Snitzer 
1255e64e4018SAndy Shevchenko static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
1256935fe098SMike Snitzer {
1257935fe098SMike Snitzer 	sector_t chunk = offset >> bitmap->chunkshift;
1258935fe098SMike Snitzer 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1259935fe098SMike Snitzer 	struct bitmap_page *bp = &bitmap->bp[page];
1260935fe098SMike Snitzer 
1261935fe098SMike Snitzer 	if (!bp->pending)
1262935fe098SMike Snitzer 		bp->pending = 1;
1263935fe098SMike Snitzer }
1264935fe098SMike Snitzer 
1265e64e4018SAndy Shevchenko static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1266935fe098SMike Snitzer 					       sector_t offset, sector_t *blocks,
1267935fe098SMike Snitzer 					       int create);
1268935fe098SMike Snitzer 
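/*
 * Update the md thread's wakeup timeout under rcu_read_lock(), as the
 * thread may be freed concurrently.  Unless 'force' is set, the timeout
 * is only changed if the thread is already waking periodically, i.e. its
 * current timeout is below MAX_SCHEDULE_TIMEOUT.
 */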
12694eeb6535SYu Kuai static void mddev_set_timeout(struct mddev *mddev, unsigned long timeout,
12704eeb6535SYu Kuai 			      bool force)
12714eeb6535SYu Kuai {
127244693154SYu Kuai 	struct md_thread *thread;
127344693154SYu Kuai 
127444693154SYu Kuai 	rcu_read_lock();
127544693154SYu Kuai 	thread = rcu_dereference(mddev->thread);
12764eeb6535SYu Kuai 
12774eeb6535SYu Kuai 	if (!thread)
127844693154SYu Kuai 		goto out;
12794eeb6535SYu Kuai 
12804eeb6535SYu Kuai 	if (force || thread->timeout < MAX_SCHEDULE_TIMEOUT)
12814eeb6535SYu Kuai 		thread->timeout = timeout;
128244693154SYu Kuai 
128344693154SYu Kuai out:
128444693154SYu Kuai 	rcu_read_unlock();
12854eeb6535SYu Kuai }
12864eeb6535SYu Kuai 
1287935fe098SMike Snitzer /*
1288935fe098SMike Snitzer  * bitmap daemon -- periodically wakes up to clean bits and flush pages
1289935fe098SMike Snitzer  *			out to disk
1290935fe098SMike Snitzer  */
1291e64e4018SAndy Shevchenko void md_bitmap_daemon_work(struct mddev *mddev)
1292935fe098SMike Snitzer {
1293935fe098SMike Snitzer 	struct bitmap *bitmap;
1294935fe098SMike Snitzer 	unsigned long j;
1295935fe098SMike Snitzer 	unsigned long nextpage;
1296935fe098SMike Snitzer 	sector_t blocks;
1297935fe098SMike Snitzer 	struct bitmap_counts *counts;
1298935fe098SMike Snitzer 
1299935fe098SMike Snitzer 	/* Use a mutex to guard daemon_work against
1300935fe098SMike Snitzer 	 * bitmap_destroy.
1301935fe098SMike Snitzer 	 */
1302935fe098SMike Snitzer 	mutex_lock(&mddev->bitmap_info.mutex);
1303935fe098SMike Snitzer 	bitmap = mddev->bitmap;
1304935fe098SMike Snitzer 	if (bitmap == NULL) {
1305935fe098SMike Snitzer 		mutex_unlock(&mddev->bitmap_info.mutex);
1306935fe098SMike Snitzer 		return;
1307935fe098SMike Snitzer 	}
1308935fe098SMike Snitzer 	if (time_before(jiffies, bitmap->daemon_lastrun
1309935fe098SMike Snitzer 			+ mddev->bitmap_info.daemon_sleep))
1310935fe098SMike Snitzer 		goto done;
1311935fe098SMike Snitzer 
1312935fe098SMike Snitzer 	bitmap->daemon_lastrun = jiffies;
1313935fe098SMike Snitzer 	if (bitmap->allclean) {
13144eeb6535SYu Kuai 		mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true);
1315935fe098SMike Snitzer 		goto done;
1316935fe098SMike Snitzer 	}
1317935fe098SMike Snitzer 	bitmap->allclean = 1;
1318935fe098SMike Snitzer 
1319935fe098SMike Snitzer 	if (bitmap->mddev->queue)
1320935fe098SMike Snitzer 		blk_add_trace_msg(bitmap->mddev->queue,
1321935fe098SMike Snitzer 				  "md bitmap_daemon_work");
1322935fe098SMike Snitzer 
1323935fe098SMike Snitzer 	/* Any file-page which is PENDING now needs to be written.
1324935fe098SMike Snitzer 	 * So set NEEDWRITE now, then after we make any last-minute changes
1325935fe098SMike Snitzer 	 * we will write it.
1326935fe098SMike Snitzer 	 */
1327935fe098SMike Snitzer 	for (j = 0; j < bitmap->storage.file_pages; j++)
1328935fe098SMike Snitzer 		if (test_and_clear_page_attr(bitmap, j,
1329935fe098SMike Snitzer 					     BITMAP_PAGE_PENDING))
1330935fe098SMike Snitzer 			set_page_attr(bitmap, j,
1331935fe098SMike Snitzer 				      BITMAP_PAGE_NEEDWRITE);
1332935fe098SMike Snitzer 
1333935fe098SMike Snitzer 	if (bitmap->need_sync &&
1334935fe098SMike Snitzer 	    mddev->bitmap_info.external == 0) {
1335935fe098SMike Snitzer 		/* Arrange for superblock update as well as
1336935fe098SMike Snitzer 		 * other changes */
1337935fe098SMike Snitzer 		bitmap_super_t *sb;
1338935fe098SMike Snitzer 		bitmap->need_sync = 0;
1339935fe098SMike Snitzer 		if (bitmap->storage.filemap) {
1340935fe098SMike Snitzer 			sb = kmap_atomic(bitmap->storage.sb_page);
1341935fe098SMike Snitzer 			sb->events_cleared =
1342935fe098SMike Snitzer 				cpu_to_le64(bitmap->events_cleared);
1343935fe098SMike Snitzer 			kunmap_atomic(sb);
1344935fe098SMike Snitzer 			set_page_attr(bitmap, 0,
1345935fe098SMike Snitzer 				      BITMAP_PAGE_NEEDWRITE);
1346935fe098SMike Snitzer 		}
1347935fe098SMike Snitzer 	}
1348935fe098SMike Snitzer 	/* Now look at the bitmap counters and if any are '2' or '1',
1349935fe098SMike Snitzer 	 * decrement and handle accordingly.
1350935fe098SMike Snitzer 	 */
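	/*
	 * Counter decay, as implemented below: a counter above 2 still has
	 * writes in flight and is left alone; 2 decays to 1 and the chunk
	 * is marked pending; 1 decays to 0 and the on-disk bit is cleared.
	 * A chunk thus survives two idle daemon passes before its bit
	 * leaves the on-disk bitmap.
	 */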
1351935fe098SMike Snitzer 	counts = &bitmap->counts;
1352935fe098SMike Snitzer 	spin_lock_irq(&counts->lock);
1353935fe098SMike Snitzer 	nextpage = 0;
1354935fe098SMike Snitzer 	for (j = 0; j < counts->chunks; j++) {
1355935fe098SMike Snitzer 		bitmap_counter_t *bmc;
1356935fe098SMike Snitzer 		sector_t  block = (sector_t)j << counts->chunkshift;
1357935fe098SMike Snitzer 
1358935fe098SMike Snitzer 		if (j == nextpage) {
1359935fe098SMike Snitzer 			nextpage += PAGE_COUNTER_RATIO;
1360935fe098SMike Snitzer 			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
1361935fe098SMike Snitzer 				j |= PAGE_COUNTER_MASK;
1362935fe098SMike Snitzer 				continue;
1363935fe098SMike Snitzer 			}
1364935fe098SMike Snitzer 			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
1365935fe098SMike Snitzer 		}
1366935fe098SMike Snitzer 
1367e64e4018SAndy Shevchenko 		bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
1368935fe098SMike Snitzer 		if (!bmc) {
1369935fe098SMike Snitzer 			j |= PAGE_COUNTER_MASK;
1370935fe098SMike Snitzer 			continue;
1371935fe098SMike Snitzer 		}
1372935fe098SMike Snitzer 		if (*bmc == 1 && !bitmap->need_sync) {
1373935fe098SMike Snitzer 			/* We can clear the bit */
1374935fe098SMike Snitzer 			*bmc = 0;
1375e64e4018SAndy Shevchenko 			md_bitmap_count_page(counts, block, -1);
1376e64e4018SAndy Shevchenko 			md_bitmap_file_clear_bit(bitmap, block);
1377935fe098SMike Snitzer 		} else if (*bmc && *bmc <= 2) {
1378935fe098SMike Snitzer 			*bmc = 1;
1379e64e4018SAndy Shevchenko 			md_bitmap_set_pending(counts, block);
1380935fe098SMike Snitzer 			bitmap->allclean = 0;
1381935fe098SMike Snitzer 		}
1382935fe098SMike Snitzer 	}
1383935fe098SMike Snitzer 	spin_unlock_irq(&counts->lock);
1384935fe098SMike Snitzer 
1385e64e4018SAndy Shevchenko 	md_bitmap_wait_writes(bitmap);
1386935fe098SMike Snitzer 	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
1387935fe098SMike Snitzer 	 * DIRTY pages need to be written by bitmap_unplug so it can wait
1388935fe098SMike Snitzer 	 * for them.
1389935fe098SMike Snitzer 	 * If we find any DIRTY page we stop there and let bitmap_unplug
1390935fe098SMike Snitzer 	 * handle all the rest.  This is important in the case where
1391935fe098SMike Snitzer 	 * the first page holds the superblock and it has been updated.
1392935fe098SMike Snitzer 	 * We mustn't write any other pages before the superblock.
1393935fe098SMike Snitzer 	 */
1394935fe098SMike Snitzer 	for (j = 0;
1395935fe098SMike Snitzer 	     j < bitmap->storage.file_pages
1396935fe098SMike Snitzer 		     && !test_bit(BITMAP_STALE, &bitmap->flags);
1397935fe098SMike Snitzer 	     j++) {
1398935fe098SMike Snitzer 		if (test_page_attr(bitmap, j,
1399935fe098SMike Snitzer 				   BITMAP_PAGE_DIRTY))
1400935fe098SMike Snitzer 			/* bitmap_unplug will handle the rest */
1401935fe098SMike Snitzer 			break;
140255180498SZhiqiang Liu 		if (bitmap->storage.filemap &&
140355180498SZhiqiang Liu 		    test_and_clear_page_attr(bitmap, j,
1404d7038f95SChristoph Hellwig 					     BITMAP_PAGE_NEEDWRITE))
1405d7038f95SChristoph Hellwig 			filemap_write_page(bitmap, j, false);
1406935fe098SMike Snitzer 	}
1407935fe098SMike Snitzer 
1408935fe098SMike Snitzer  done:
1409935fe098SMike Snitzer 	if (bitmap->allclean == 0)
14104eeb6535SYu Kuai 		mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
1411935fe098SMike Snitzer 	mutex_unlock(&mddev->bitmap_info.mutex);
1412935fe098SMike Snitzer }
1413935fe098SMike Snitzer 
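/*
 * Map a device offset to its counter in the two-level scheme: the chunk
 * number selects a counter page (allocated on demand when 'create') plus
 * an offset within it, and '*blocks' reports how many blocks from
 * 'offset' share that counter.  If the page was hijacked because its
 * allocation failed, the bp[] pointer word itself holds two counters,
 * each covering a correspondingly larger range (note the wider 'csize').
 */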
1414e64e4018SAndy Shevchenko static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1415935fe098SMike Snitzer 					       sector_t offset, sector_t *blocks,
1416935fe098SMike Snitzer 					       int create)
1417935fe098SMike Snitzer __releases(bitmap->lock)
1418935fe098SMike Snitzer __acquires(bitmap->lock)
1419935fe098SMike Snitzer {
1420935fe098SMike Snitzer 	/* If 'create', we might release the lock and reclaim it.
1421935fe098SMike Snitzer 	 * The lock must have been taken with interrupts enabled.
1422935fe098SMike Snitzer 	 * If !create, we don't release the lock.
1423935fe098SMike Snitzer 	 */
1424935fe098SMike Snitzer 	sector_t chunk = offset >> bitmap->chunkshift;
1425935fe098SMike Snitzer 	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1426935fe098SMike Snitzer 	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
1427935fe098SMike Snitzer 	sector_t csize;
1428935fe098SMike Snitzer 	int err;
1429935fe098SMike Snitzer 
1430301867b1SLi Nan 	if (page >= bitmap->pages) {
1431301867b1SLi Nan 		/*
1432301867b1SLi Nan 		 * This can happen if bitmap_start_sync goes beyond
1433301867b1SLi Nan 		 * End-of-device while looking for a whole page or
1434301867b1SLi Nan 		 * user set a huge number to sysfs bitmap_set_bits.
1435301867b1SLi Nan 		 */
1436301867b1SLi Nan 		return NULL;
1437301867b1SLi Nan 	}
1438e64e4018SAndy Shevchenko 	err = md_bitmap_checkpage(bitmap, page, create, 0);
1439935fe098SMike Snitzer 
1440935fe098SMike Snitzer 	if (bitmap->bp[page].hijacked ||
1441935fe098SMike Snitzer 	    bitmap->bp[page].map == NULL)
1442935fe098SMike Snitzer 		csize = ((sector_t)1) << (bitmap->chunkshift +
1443d837f727SZhao Heming 					  PAGE_COUNTER_SHIFT);
1444935fe098SMike Snitzer 	else
1445935fe098SMike Snitzer 		csize = ((sector_t)1) << bitmap->chunkshift;
1446935fe098SMike Snitzer 	*blocks = csize - (offset & (csize - 1));
1447935fe098SMike Snitzer 
1448935fe098SMike Snitzer 	if (err < 0)
1449935fe098SMike Snitzer 		return NULL;
1450935fe098SMike Snitzer 
1451935fe098SMike Snitzer 	/* now locked ... */
1452935fe098SMike Snitzer 
1453935fe098SMike Snitzer 	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1454935fe098SMike Snitzer 		/* should we use the first or second counter field
1455935fe098SMike Snitzer 		 * of the hijacked pointer? */
1456935fe098SMike Snitzer 		int hi = (pageoff > PAGE_COUNTER_MASK);
1457935fe098SMike Snitzer 		return  &((bitmap_counter_t *)
1458935fe098SMike Snitzer 			  &bitmap->bp[page].map)[hi];
1459935fe098SMike Snitzer 	} else /* page is allocated */
1460935fe098SMike Snitzer 		return (bitmap_counter_t *)
1461935fe098SMike Snitzer 			&(bitmap->bp[page].map[pageoff]);
1462935fe098SMike Snitzer }
1463935fe098SMike Snitzer 
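/*
 * Account a write covering [offset, offset + sectors): bump the counter
 * of every chunk touched, setting the on-disk bit on the first write to
 * a clean chunk (counter 0 -> 2).  If a counter is saturated at
 * COUNTER_MAX, wait on overflow_wait for md_bitmap_endwrite() to make
 * room.  'behind' writes are additionally counted for write-behind.
 */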
1464e64e4018SAndy Shevchenko int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
1465935fe098SMike Snitzer {
1466935fe098SMike Snitzer 	if (!bitmap)
1467935fe098SMike Snitzer 		return 0;
1468935fe098SMike Snitzer 
1469935fe098SMike Snitzer 	if (behind) {
1470935fe098SMike Snitzer 		int bw;
1471935fe098SMike Snitzer 		atomic_inc(&bitmap->behind_writes);
1472935fe098SMike Snitzer 		bw = atomic_read(&bitmap->behind_writes);
1473935fe098SMike Snitzer 		if (bw > bitmap->behind_writes_used)
1474935fe098SMike Snitzer 			bitmap->behind_writes_used = bw;
1475935fe098SMike Snitzer 
1476935fe098SMike Snitzer 		pr_debug("inc write-behind count %d/%lu\n",
1477935fe098SMike Snitzer 			 bw, bitmap->mddev->bitmap_info.max_write_behind);
1478935fe098SMike Snitzer 	}
1479935fe098SMike Snitzer 
1480935fe098SMike Snitzer 	while (sectors) {
1481935fe098SMike Snitzer 		sector_t blocks;
1482935fe098SMike Snitzer 		bitmap_counter_t *bmc;
1483935fe098SMike Snitzer 
1484935fe098SMike Snitzer 		spin_lock_irq(&bitmap->counts.lock);
1485e64e4018SAndy Shevchenko 		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
1486935fe098SMike Snitzer 		if (!bmc) {
1487935fe098SMike Snitzer 			spin_unlock_irq(&bitmap->counts.lock);
1488935fe098SMike Snitzer 			return 0;
1489935fe098SMike Snitzer 		}
1490935fe098SMike Snitzer 
1491935fe098SMike Snitzer 		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
1492935fe098SMike Snitzer 			DEFINE_WAIT(__wait);
1493935fe098SMike Snitzer 			/* note that it is safe to do the prepare_to_wait
1494935fe098SMike Snitzer 			 * after the test as long as we do it before dropping
1495935fe098SMike Snitzer 			 * the spinlock.
1496935fe098SMike Snitzer 			 */
1497935fe098SMike Snitzer 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
1498935fe098SMike Snitzer 					TASK_UNINTERRUPTIBLE);
1499935fe098SMike Snitzer 			spin_unlock_irq(&bitmap->counts.lock);
1500935fe098SMike Snitzer 			schedule();
1501935fe098SMike Snitzer 			finish_wait(&bitmap->overflow_wait, &__wait);
1502935fe098SMike Snitzer 			continue;
1503935fe098SMike Snitzer 		}
1504935fe098SMike Snitzer 
1505935fe098SMike Snitzer 		switch (*bmc) {
1506935fe098SMike Snitzer 		case 0:
1507e64e4018SAndy Shevchenko 			md_bitmap_file_set_bit(bitmap, offset);
1508e64e4018SAndy Shevchenko 			md_bitmap_count_page(&bitmap->counts, offset, 1);
1509df561f66SGustavo A. R. Silva 			fallthrough;
1510935fe098SMike Snitzer 		case 1:
1511935fe098SMike Snitzer 			*bmc = 2;
1512935fe098SMike Snitzer 		}
1513935fe098SMike Snitzer 
1514935fe098SMike Snitzer 		(*bmc)++;
1515935fe098SMike Snitzer 
1516935fe098SMike Snitzer 		spin_unlock_irq(&bitmap->counts.lock);
1517935fe098SMike Snitzer 
1518935fe098SMike Snitzer 		offset += blocks;
1519935fe098SMike Snitzer 		if (sectors > blocks)
1520935fe098SMike Snitzer 			sectors -= blocks;
1521935fe098SMike Snitzer 		else
1522935fe098SMike Snitzer 			sectors = 0;
1523935fe098SMike Snitzer 	}
1524935fe098SMike Snitzer 	return 0;
1525935fe098SMike Snitzer }
1526e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_startwrite);
1527935fe098SMike Snitzer 
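/*
 * Counterpart of md_bitmap_startwrite(): drop the counter of each chunk
 * in the range.  A successful write may advance events_cleared; a failed
 * one sets NEEDED_MASK so the chunk is resynced later.  Waiters blocked
 * on a saturated counter are woken, and a counter falling to 2 or below
 * makes the chunk a candidate for the daemon to clean.
 */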
1528e64e4018SAndy Shevchenko void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
1529e64e4018SAndy Shevchenko 			unsigned long sectors, int success, int behind)
1530935fe098SMike Snitzer {
1531935fe098SMike Snitzer 	if (!bitmap)
1532935fe098SMike Snitzer 		return;
1533935fe098SMike Snitzer 	if (behind) {
1534935fe098SMike Snitzer 		if (atomic_dec_and_test(&bitmap->behind_writes))
1535935fe098SMike Snitzer 			wake_up(&bitmap->behind_wait);
1536935fe098SMike Snitzer 		pr_debug("dec write-behind count %d/%lu\n",
1537935fe098SMike Snitzer 			 atomic_read(&bitmap->behind_writes),
1538935fe098SMike Snitzer 			 bitmap->mddev->bitmap_info.max_write_behind);
1539935fe098SMike Snitzer 	}
1540935fe098SMike Snitzer 
1541935fe098SMike Snitzer 	while (sectors) {
1542935fe098SMike Snitzer 		sector_t blocks;
1543935fe098SMike Snitzer 		unsigned long flags;
1544935fe098SMike Snitzer 		bitmap_counter_t *bmc;
1545935fe098SMike Snitzer 
1546935fe098SMike Snitzer 		spin_lock_irqsave(&bitmap->counts.lock, flags);
1547e64e4018SAndy Shevchenko 		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
1548935fe098SMike Snitzer 		if (!bmc) {
1549935fe098SMike Snitzer 			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1550935fe098SMike Snitzer 			return;
1551935fe098SMike Snitzer 		}
1552935fe098SMike Snitzer 
1553935fe098SMike Snitzer 		if (success && !bitmap->mddev->degraded &&
1554935fe098SMike Snitzer 		    bitmap->events_cleared < bitmap->mddev->events) {
1555935fe098SMike Snitzer 			bitmap->events_cleared = bitmap->mddev->events;
1556935fe098SMike Snitzer 			bitmap->need_sync = 1;
1557935fe098SMike Snitzer 			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
1558935fe098SMike Snitzer 		}
1559935fe098SMike Snitzer 
1560935fe098SMike Snitzer 		if (!success && !NEEDED(*bmc))
1561935fe098SMike Snitzer 			*bmc |= NEEDED_MASK;
1562935fe098SMike Snitzer 
1563935fe098SMike Snitzer 		if (COUNTER(*bmc) == COUNTER_MAX)
1564935fe098SMike Snitzer 			wake_up(&bitmap->overflow_wait);
1565935fe098SMike Snitzer 
1566935fe098SMike Snitzer 		(*bmc)--;
1567935fe098SMike Snitzer 		if (*bmc <= 2) {
1568e64e4018SAndy Shevchenko 			md_bitmap_set_pending(&bitmap->counts, offset);
1569935fe098SMike Snitzer 			bitmap->allclean = 0;
1570935fe098SMike Snitzer 		}
1571935fe098SMike Snitzer 		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1572935fe098SMike Snitzer 		offset += blocks;
1573935fe098SMike Snitzer 		if (sectors > blocks)
1574935fe098SMike Snitzer 			sectors -= blocks;
1575935fe098SMike Snitzer 		else
1576935fe098SMike Snitzer 			sectors = 0;
1577935fe098SMike Snitzer 	}
1578935fe098SMike Snitzer }
1579e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_endwrite);
1580935fe098SMike Snitzer 
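/*
 * Decide whether the chunk at 'offset' needs resync: report 1 for a
 * chunk that is already RESYNCing or is marked NEEDED.  Unless the array
 * is degraded, NEEDED is converted to RESYNC here so md_bitmap_end_sync()
 * can tell that the range really was being synced.
 */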
1581935fe098SMike Snitzer static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1582935fe098SMike Snitzer 			       int degraded)
1583935fe098SMike Snitzer {
1584935fe098SMike Snitzer 	bitmap_counter_t *bmc;
1585935fe098SMike Snitzer 	int rv;
1586935fe098SMike Snitzer 	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
1587935fe098SMike Snitzer 		*blocks = 1024;
1588935fe098SMike Snitzer 		return 1; /* always resync if no bitmap */
1589935fe098SMike Snitzer 	}
1590935fe098SMike Snitzer 	spin_lock_irq(&bitmap->counts.lock);
1591e64e4018SAndy Shevchenko 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1592935fe098SMike Snitzer 	rv = 0;
1593935fe098SMike Snitzer 	if (bmc) {
1594935fe098SMike Snitzer 		/* locked */
1595935fe098SMike Snitzer 		if (RESYNC(*bmc))
1596935fe098SMike Snitzer 			rv = 1;
1597935fe098SMike Snitzer 		else if (NEEDED(*bmc)) {
1598935fe098SMike Snitzer 			rv = 1;
1599935fe098SMike Snitzer 			if (!degraded) { /* don't set/clear bits if degraded */
1600935fe098SMike Snitzer 				*bmc |= RESYNC_MASK;
1601935fe098SMike Snitzer 				*bmc &= ~NEEDED_MASK;
1602935fe098SMike Snitzer 			}
1603935fe098SMike Snitzer 		}
1604935fe098SMike Snitzer 	}
1605935fe098SMike Snitzer 	spin_unlock_irq(&bitmap->counts.lock);
1606935fe098SMike Snitzer 	return rv;
1607935fe098SMike Snitzer }
1608935fe098SMike Snitzer 
1609e64e4018SAndy Shevchenko int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1610935fe098SMike Snitzer 			 int degraded)
1611935fe098SMike Snitzer {
1612935fe098SMike Snitzer 	/* bitmap_start_sync must always report on multiples of whole
1613935fe098SMike Snitzer 	 * pages, otherwise resync (which is very PAGE_SIZE based) will
1614935fe098SMike Snitzer 	 * get confused.
1615935fe098SMike Snitzer 	 * So call __bitmap_start_sync repeatedly (if needed) until at
1616935fe098SMike Snitzer 	 * least PAGE_SIZE>>9 blocks are covered.
1617935fe098SMike Snitzer 	 * Return the 'or' of the results.
1618935fe098SMike Snitzer 	 */
1619935fe098SMike Snitzer 	int rv = 0;
1620935fe098SMike Snitzer 	sector_t blocks1;
1621935fe098SMike Snitzer 
1622935fe098SMike Snitzer 	*blocks = 0;
1623935fe098SMike Snitzer 	while (*blocks < (PAGE_SIZE>>9)) {
1624935fe098SMike Snitzer 		rv |= __bitmap_start_sync(bitmap, offset,
1625935fe098SMike Snitzer 					  &blocks1, degraded);
1626935fe098SMike Snitzer 		offset += blocks1;
1627935fe098SMike Snitzer 		*blocks += blocks1;
1628935fe098SMike Snitzer 	}
1629935fe098SMike Snitzer 	return rv;
1630935fe098SMike Snitzer }
1631e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_start_sync);
1632935fe098SMike Snitzer 
1633e64e4018SAndy Shevchenko void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
1634935fe098SMike Snitzer {
1635935fe098SMike Snitzer 	bitmap_counter_t *bmc;
1636935fe098SMike Snitzer 	unsigned long flags;
1637935fe098SMike Snitzer 
1638935fe098SMike Snitzer 	if (bitmap == NULL) {
1639935fe098SMike Snitzer 		*blocks = 1024;
1640935fe098SMike Snitzer 		return;
1641935fe098SMike Snitzer 	}
1642935fe098SMike Snitzer 	spin_lock_irqsave(&bitmap->counts.lock, flags);
1643e64e4018SAndy Shevchenko 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1644935fe098SMike Snitzer 	if (bmc == NULL)
1645935fe098SMike Snitzer 		goto unlock;
1646935fe098SMike Snitzer 	/* locked */
1647935fe098SMike Snitzer 	if (RESYNC(*bmc)) {
1648935fe098SMike Snitzer 		*bmc &= ~RESYNC_MASK;
1649935fe098SMike Snitzer 
1650935fe098SMike Snitzer 		if (!NEEDED(*bmc) && aborted)
1651935fe098SMike Snitzer 			*bmc |= NEEDED_MASK;
1652935fe098SMike Snitzer 		else {
1653935fe098SMike Snitzer 			if (*bmc <= 2) {
1654e64e4018SAndy Shevchenko 				md_bitmap_set_pending(&bitmap->counts, offset);
1655935fe098SMike Snitzer 				bitmap->allclean = 0;
1656935fe098SMike Snitzer 			}
1657935fe098SMike Snitzer 		}
1658935fe098SMike Snitzer 	}
1659935fe098SMike Snitzer  unlock:
1660935fe098SMike Snitzer 	spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1661935fe098SMike Snitzer }
1662e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_end_sync);
1663935fe098SMike Snitzer 
1664e64e4018SAndy Shevchenko void md_bitmap_close_sync(struct bitmap *bitmap)
1665935fe098SMike Snitzer {
1666935fe098SMike Snitzer 	/* Sync has finished, and any bitmap chunks that weren't synced
1667935fe098SMike Snitzer 	 * properly have been aborted.  It remains for us to clear the
1668935fe098SMike Snitzer 	 * RESYNC bit wherever it is still set.
1669935fe098SMike Snitzer 	 */
1670935fe098SMike Snitzer 	sector_t sector = 0;
1671935fe098SMike Snitzer 	sector_t blocks;
1672935fe098SMike Snitzer 	if (!bitmap)
1673935fe098SMike Snitzer 		return;
1674935fe098SMike Snitzer 	while (sector < bitmap->mddev->resync_max_sectors) {
1675e64e4018SAndy Shevchenko 		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1676935fe098SMike Snitzer 		sector += blocks;
1677935fe098SMike Snitzer 	}
1678935fe098SMike Snitzer }
1679e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_close_sync);
1680935fe098SMike Snitzer 
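/*
 * Record resync progress, periodically or when 'force'd: wait for
 * in-flight resync I/O to drain, update curr_resync_completed, and clear
 * the RESYNC state of every chunk wholly below 'sector' so that progress
 * up to this point need not be repeated.
 */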
1681e64e4018SAndy Shevchenko void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
1682935fe098SMike Snitzer {
1683935fe098SMike Snitzer 	sector_t s = 0;
1684935fe098SMike Snitzer 	sector_t blocks;
1685935fe098SMike Snitzer 
1686935fe098SMike Snitzer 	if (!bitmap)
1687935fe098SMike Snitzer 		return;
1688935fe098SMike Snitzer 	if (sector == 0) {
1689935fe098SMike Snitzer 		bitmap->last_end_sync = jiffies;
1690935fe098SMike Snitzer 		return;
1691935fe098SMike Snitzer 	}
1692935fe098SMike Snitzer 	if (!force && time_before(jiffies, (bitmap->last_end_sync
1693935fe098SMike Snitzer 				  + bitmap->mddev->bitmap_info.daemon_sleep)))
1694935fe098SMike Snitzer 		return;
1695935fe098SMike Snitzer 	wait_event(bitmap->mddev->recovery_wait,
1696935fe098SMike Snitzer 		   atomic_read(&bitmap->mddev->recovery_active) == 0);
1697935fe098SMike Snitzer 
1698935fe098SMike Snitzer 	bitmap->mddev->curr_resync_completed = sector;
1699935fe098SMike Snitzer 	set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
1700935fe098SMike Snitzer 	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
1701935fe098SMike Snitzer 	s = 0;
1702935fe098SMike Snitzer 	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
1703e64e4018SAndy Shevchenko 		md_bitmap_end_sync(bitmap, s, &blocks, 0);
1704935fe098SMike Snitzer 		s += blocks;
1705935fe098SMike Snitzer 	}
1706935fe098SMike Snitzer 	bitmap->last_end_sync = jiffies;
1707e1a86dbbSJunxiao Bi 	sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
1708935fe098SMike Snitzer }
1709e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_cond_end_sync);
1710935fe098SMike Snitzer 
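/*
 * Fold another cluster node's resync window into our bitmap: the range
 * the remote node has finished ([old_lo, new_lo)) is marked as synced,
 * and the newly opened window ([old_hi, new_hi)) is marked as under
 * resync.
 */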
1711e64e4018SAndy Shevchenko void md_bitmap_sync_with_cluster(struct mddev *mddev,
1712935fe098SMike Snitzer 			      sector_t old_lo, sector_t old_hi,
1713935fe098SMike Snitzer 			      sector_t new_lo, sector_t new_hi)
1714935fe098SMike Snitzer {
1715935fe098SMike Snitzer 	struct bitmap *bitmap = mddev->bitmap;
1716935fe098SMike Snitzer 	sector_t sector, blocks = 0;
1717935fe098SMike Snitzer 
1718935fe098SMike Snitzer 	for (sector = old_lo; sector < new_lo; ) {
1719e64e4018SAndy Shevchenko 		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1720935fe098SMike Snitzer 		sector += blocks;
1721935fe098SMike Snitzer 	}
1722935fe098SMike Snitzer 	WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
1723935fe098SMike Snitzer 
1724935fe098SMike Snitzer 	for (sector = old_hi; sector < new_hi; ) {
1725e64e4018SAndy Shevchenko 		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1726935fe098SMike Snitzer 		sector += blocks;
1727935fe098SMike Snitzer 	}
1728935fe098SMike Snitzer 	WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
1729935fe098SMike Snitzer }
1730e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
1731935fe098SMike Snitzer 
1732e64e4018SAndy Shevchenko static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1733935fe098SMike Snitzer {
1734935fe098SMike Snitzer 	/* For each chunk covered by any of these sectors, set the
1735935fe098SMike Snitzer 	 * counter to 2 and possibly set resync_needed.  The counters
1736935fe098SMike Snitzer 	 * should all be 0 at this point.
1737935fe098SMike Snitzer 	 */
1738935fe098SMike Snitzer 
1739935fe098SMike Snitzer 	sector_t secs;
1740935fe098SMike Snitzer 	bitmap_counter_t *bmc;
1741935fe098SMike Snitzer 	spin_lock_irq(&bitmap->counts.lock);
1742e64e4018SAndy Shevchenko 	bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
1743935fe098SMike Snitzer 	if (!bmc) {
1744935fe098SMike Snitzer 		spin_unlock_irq(&bitmap->counts.lock);
1745935fe098SMike Snitzer 		return;
1746935fe098SMike Snitzer 	}
1747935fe098SMike Snitzer 	if (!*bmc) {
1748935fe098SMike Snitzer 		*bmc = 2;
1749e64e4018SAndy Shevchenko 		md_bitmap_count_page(&bitmap->counts, offset, 1);
1750e64e4018SAndy Shevchenko 		md_bitmap_set_pending(&bitmap->counts, offset);
1751935fe098SMike Snitzer 		bitmap->allclean = 0;
1752935fe098SMike Snitzer 	}
1753935fe098SMike Snitzer 	if (needed)
1754935fe098SMike Snitzer 		*bmc |= NEEDED_MASK;
1755935fe098SMike Snitzer 	spin_unlock_irq(&bitmap->counts.lock);
1756935fe098SMike Snitzer }
1757935fe098SMike Snitzer 
1758935fe098SMike Snitzer /* dirty the memory and file bits for bitmap chunks "s" to "e" */
1759e64e4018SAndy Shevchenko void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
1760935fe098SMike Snitzer {
1761935fe098SMike Snitzer 	unsigned long chunk;
1762935fe098SMike Snitzer 
1763935fe098SMike Snitzer 	for (chunk = s; chunk <= e; chunk++) {
1764935fe098SMike Snitzer 		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
1765e64e4018SAndy Shevchenko 		md_bitmap_set_memory_bits(bitmap, sec, 1);
1766e64e4018SAndy Shevchenko 		md_bitmap_file_set_bit(bitmap, sec);
1767935fe098SMike Snitzer 		if (sec < bitmap->mddev->recovery_cp)
1768935fe098SMike Snitzer 			/* We are asserting that the array is dirty,
1769935fe098SMike Snitzer 			 * so move the recovery_cp address back so
1770935fe098SMike Snitzer 			 * that it is obvious that it is dirty
1771935fe098SMike Snitzer 			 */
1772935fe098SMike Snitzer 			bitmap->mddev->recovery_cp = sec;
1773935fe098SMike Snitzer 	}
1774935fe098SMike Snitzer }
1775935fe098SMike Snitzer 
1776935fe098SMike Snitzer /*
1777935fe098SMike Snitzer  * flush out any pending updates
1778935fe098SMike Snitzer  */
1779e64e4018SAndy Shevchenko void md_bitmap_flush(struct mddev *mddev)
1780935fe098SMike Snitzer {
1781935fe098SMike Snitzer 	struct bitmap *bitmap = mddev->bitmap;
1782935fe098SMike Snitzer 	long sleep;
1783935fe098SMike Snitzer 
1784935fe098SMike Snitzer 	if (!bitmap) /* there was no bitmap */
1785935fe098SMike Snitzer 		return;
1786935fe098SMike Snitzer 
1787935fe098SMike Snitzer 	/* run the daemon_work three times to ensure that everything
1788935fe098SMike Snitzer 	 * which can be flushed has been flushed
1789935fe098SMike Snitzer 	 */
1790935fe098SMike Snitzer 	sleep = mddev->bitmap_info.daemon_sleep * 2;
1791935fe098SMike Snitzer 	bitmap->daemon_lastrun -= sleep;
1792e64e4018SAndy Shevchenko 	md_bitmap_daemon_work(mddev);
1793935fe098SMike Snitzer 	bitmap->daemon_lastrun -= sleep;
1794e64e4018SAndy Shevchenko 	md_bitmap_daemon_work(mddev);
1795935fe098SMike Snitzer 	bitmap->daemon_lastrun -= sleep;
1796e64e4018SAndy Shevchenko 	md_bitmap_daemon_work(mddev);
1797404a8ef5SSudhakar Panneerselvam 	if (mddev->bitmap_info.external)
1798404a8ef5SSudhakar Panneerselvam 		md_super_wait(mddev);
1799e64e4018SAndy Shevchenko 	md_bitmap_update_sb(bitmap);
1800935fe098SMike Snitzer }
1801935fe098SMike Snitzer 
1802935fe098SMike Snitzer /*
1803935fe098SMike Snitzer  * free memory that was allocated
1804935fe098SMike Snitzer  */
1805e64e4018SAndy Shevchenko void md_bitmap_free(struct bitmap *bitmap)
1806935fe098SMike Snitzer {
1807935fe098SMike Snitzer 	unsigned long k, pages;
1808935fe098SMike Snitzer 	struct bitmap_page *bp;
1809935fe098SMike Snitzer 
1810935fe098SMike Snitzer 	if (!bitmap) /* there was no bitmap */
1811935fe098SMike Snitzer 		return;
1812935fe098SMike Snitzer 
1813935fe098SMike Snitzer 	if (bitmap->sysfs_can_clear)
1814935fe098SMike Snitzer 		sysfs_put(bitmap->sysfs_can_clear);
1815935fe098SMike Snitzer 
1816935fe098SMike Snitzer 	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
1817935fe098SMike Snitzer 		bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
1818935fe098SMike Snitzer 		md_cluster_stop(bitmap->mddev);
1819935fe098SMike Snitzer 
1820935fe098SMike Snitzer 	/* Shouldn't be needed - but just in case.... */
1821935fe098SMike Snitzer 	wait_event(bitmap->write_wait,
1822935fe098SMike Snitzer 		   atomic_read(&bitmap->pending_writes) == 0);
1823935fe098SMike Snitzer 
1824935fe098SMike Snitzer 	/* release the bitmap file  */
1825e64e4018SAndy Shevchenko 	md_bitmap_file_unmap(&bitmap->storage);
1826935fe098SMike Snitzer 
1827935fe098SMike Snitzer 	bp = bitmap->counts.bp;
1828935fe098SMike Snitzer 	pages = bitmap->counts.pages;
1829935fe098SMike Snitzer 
1830935fe098SMike Snitzer 	/* free all allocated memory */
1831935fe098SMike Snitzer 
1832935fe098SMike Snitzer 	if (bp) /* deallocate the page memory */
1833935fe098SMike Snitzer 		for (k = 0; k < pages; k++)
1834935fe098SMike Snitzer 			if (bp[k].map && !bp[k].hijacked)
1835935fe098SMike Snitzer 				kfree(bp[k].map);
1836935fe098SMike Snitzer 	kfree(bp);
1837935fe098SMike Snitzer 	kfree(bitmap);
1838935fe098SMike Snitzer }
1839e64e4018SAndy Shevchenko EXPORT_SYMBOL(md_bitmap_free);
1840935fe098SMike Snitzer 
1841e64e4018SAndy Shevchenko void md_bitmap_wait_behind_writes(struct mddev *mddev)
1842935fe098SMike Snitzer {
1843935fe098SMike Snitzer 	struct bitmap *bitmap = mddev->bitmap;
1844935fe098SMike Snitzer 
1845935fe098SMike Snitzer 	/* wait for behind writes to complete */
1846935fe098SMike Snitzer 	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
1847935fe098SMike Snitzer 		pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
1848935fe098SMike Snitzer 			 mdname(mddev));
1849935fe098SMike Snitzer 		/* need to kick something here to make sure I/O goes? */
1850935fe098SMike Snitzer 		wait_event(bitmap->behind_wait,
1851935fe098SMike Snitzer 			   atomic_read(&bitmap->behind_writes) == 0);
1852935fe098SMike Snitzer 	}
1853935fe098SMike Snitzer }
1854935fe098SMike Snitzer 
1855e64e4018SAndy Shevchenko void md_bitmap_destroy(struct mddev *mddev)
1856935fe098SMike Snitzer {
1857935fe098SMike Snitzer 	struct bitmap *bitmap = mddev->bitmap;
1858935fe098SMike Snitzer 
1859935fe098SMike Snitzer 	if (!bitmap) /* there was no bitmap */
1860935fe098SMike Snitzer 		return;
1861935fe098SMike Snitzer 
1862e64e4018SAndy Shevchenko 	md_bitmap_wait_behind_writes(mddev);
186369b00b5bSGuoqing Jiang 	if (!mddev->serialize_policy)
186469b00b5bSGuoqing Jiang 		mddev_destroy_serial_pool(mddev, NULL, true);
1865935fe098SMike Snitzer 
1866935fe098SMike Snitzer 	mutex_lock(&mddev->bitmap_info.mutex);
1867935fe098SMike Snitzer 	spin_lock(&mddev->lock);
1868935fe098SMike Snitzer 	mddev->bitmap = NULL; /* disconnect from the md device */
1869935fe098SMike Snitzer 	spin_unlock(&mddev->lock);
1870935fe098SMike Snitzer 	mutex_unlock(&mddev->bitmap_info.mutex);
18714eeb6535SYu Kuai 	mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true);
1872935fe098SMike Snitzer 
1873e64e4018SAndy Shevchenko 	md_bitmap_free(bitmap);
1874935fe098SMike Snitzer }
1875935fe098SMike Snitzer 
1876935fe098SMike Snitzer /*
1877935fe098SMike Snitzer  * initialize the bitmap structure
1878935fe098SMike Snitzer  * if this returns an error, bitmap_destroy must be called to clean up
1879935fe098SMike Snitzer  * once mddev->bitmap is set
1880935fe098SMike Snitzer  */
1881e64e4018SAndy Shevchenko struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
1882935fe098SMike Snitzer {
1883935fe098SMike Snitzer 	struct bitmap *bitmap;
1884935fe098SMike Snitzer 	sector_t blocks = mddev->resync_max_sectors;
1885935fe098SMike Snitzer 	struct file *file = mddev->bitmap_info.file;
1886935fe098SMike Snitzer 	int err;
1887935fe098SMike Snitzer 	struct kernfs_node *bm = NULL;
1888935fe098SMike Snitzer 
1889935fe098SMike Snitzer 	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
1890935fe098SMike Snitzer 
1891935fe098SMike Snitzer 	BUG_ON(file && mddev->bitmap_info.offset);
1892935fe098SMike Snitzer 
1893230b55faSNeilBrown 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1894230b55faSNeilBrown 		pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
1895230b55faSNeilBrown 			  mdname(mddev));
1896230b55faSNeilBrown 		return ERR_PTR(-EBUSY);
1897230b55faSNeilBrown 	}
1898230b55faSNeilBrown 
1899935fe098SMike Snitzer 	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
1900935fe098SMike Snitzer 	if (!bitmap)
1901935fe098SMike Snitzer 		return ERR_PTR(-ENOMEM);
1902935fe098SMike Snitzer 
1903935fe098SMike Snitzer 	spin_lock_init(&bitmap->counts.lock);
1904935fe098SMike Snitzer 	atomic_set(&bitmap->pending_writes, 0);
1905935fe098SMike Snitzer 	init_waitqueue_head(&bitmap->write_wait);
1906935fe098SMike Snitzer 	init_waitqueue_head(&bitmap->overflow_wait);
1907935fe098SMike Snitzer 	init_waitqueue_head(&bitmap->behind_wait);
1908935fe098SMike Snitzer 
1909935fe098SMike Snitzer 	bitmap->mddev = mddev;
1910935fe098SMike Snitzer 	bitmap->cluster_slot = slot;
1911935fe098SMike Snitzer 
1912935fe098SMike Snitzer 	if (mddev->kobj.sd)
1913935fe098SMike Snitzer 		bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
1914935fe098SMike Snitzer 	if (bm) {
1915935fe098SMike Snitzer 		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
1916935fe098SMike Snitzer 		sysfs_put(bm);
1917935fe098SMike Snitzer 	} else
1918935fe098SMike Snitzer 		bitmap->sysfs_can_clear = NULL;
1919935fe098SMike Snitzer 
1920935fe098SMike Snitzer 	bitmap->storage.file = file;
1921935fe098SMike Snitzer 	if (file) {
1922935fe098SMike Snitzer 		get_file(file);
1923935fe098SMike Snitzer 		/* As future accesses to this file will use bmap,
1924935fe098SMike Snitzer 		 * and bypass the page cache, we must sync the file
1925935fe098SMike Snitzer 		 * first.
1926935fe098SMike Snitzer 		 */
1927935fe098SMike Snitzer 		vfs_fsync(file, 1);
1928935fe098SMike Snitzer 	}
1929935fe098SMike Snitzer 	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
1930935fe098SMike Snitzer 	if (!mddev->bitmap_info.external) {
1931935fe098SMike Snitzer 		/*
1932935fe098SMike Snitzer 		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
1933935fe098SMike Snitzer 		 * instructing us to create a new on-disk bitmap instance.
1934935fe098SMike Snitzer 		 */
1935935fe098SMike Snitzer 		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
1936e64e4018SAndy Shevchenko 			err = md_bitmap_new_disk_sb(bitmap);
1937935fe098SMike Snitzer 		else
1938e64e4018SAndy Shevchenko 			err = md_bitmap_read_sb(bitmap);
1939935fe098SMike Snitzer 	} else {
1940935fe098SMike Snitzer 		err = 0;
1941935fe098SMike Snitzer 		if (mddev->bitmap_info.chunksize == 0 ||
1942935fe098SMike Snitzer 		    mddev->bitmap_info.daemon_sleep == 0)
1943935fe098SMike Snitzer 			/* chunksize and time_base need to be
1944935fe098SMike Snitzer 			 * set first. */
1945935fe098SMike Snitzer 			err = -EINVAL;
1946935fe098SMike Snitzer 	}
1947935fe098SMike Snitzer 	if (err)
1948935fe098SMike Snitzer 		goto error;
1949935fe098SMike Snitzer 
1950935fe098SMike Snitzer 	bitmap->daemon_lastrun = jiffies;
1951e64e4018SAndy Shevchenko 	err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
1952935fe098SMike Snitzer 	if (err)
1953935fe098SMike Snitzer 		goto error;
1954935fe098SMike Snitzer 
1955935fe098SMike Snitzer 	pr_debug("created bitmap (%lu pages) for device %s\n",
1956935fe098SMike Snitzer 		 bitmap->counts.pages, bmname(bitmap));
1957935fe098SMike Snitzer 
1958935fe098SMike Snitzer 	err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
1959935fe098SMike Snitzer 	if (err)
1960935fe098SMike Snitzer 		goto error;
1961935fe098SMike Snitzer 
1962935fe098SMike Snitzer 	return bitmap;
1963935fe098SMike Snitzer  error:
1964e64e4018SAndy Shevchenko 	md_bitmap_free(bitmap);
1965935fe098SMike Snitzer 	return ERR_PTR(err);
1966935fe098SMike Snitzer }
1967935fe098SMike Snitzer 
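/*
 * Bring an already-created bitmap into service: set up serial pools for
 * write-behind, clear any stale sync state, then read the on-disk bits.
 * When the array is not degraded (or events match), 'start' is advanced
 * to recovery_cp so stale dirty bits need not force a recovery.  Finally
 * kick recovery and restart the bitmap daemon.
 */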
1968e64e4018SAndy Shevchenko int md_bitmap_load(struct mddev *mddev)
1969935fe098SMike Snitzer {
1970935fe098SMike Snitzer 	int err = 0;
1971935fe098SMike Snitzer 	sector_t start = 0;
1972935fe098SMike Snitzer 	sector_t sector = 0;
1973935fe098SMike Snitzer 	struct bitmap *bitmap = mddev->bitmap;
1974617b194aSGuoqing Jiang 	struct md_rdev *rdev;
1975935fe098SMike Snitzer 
1976935fe098SMike Snitzer 	if (!bitmap)
1977935fe098SMike Snitzer 		goto out;
1978935fe098SMike Snitzer 
1979617b194aSGuoqing Jiang 	rdev_for_each(rdev, mddev)
1980404659cfSGuoqing Jiang 		mddev_create_serial_pool(mddev, rdev, true);
1981617b194aSGuoqing Jiang 
1982935fe098SMike Snitzer 	if (mddev_is_clustered(mddev))
1983935fe098SMike Snitzer 		md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
1984935fe098SMike Snitzer 
1985935fe098SMike Snitzer 	/* Clear out old bitmap info first:  Either there is none, or we
1986935fe098SMike Snitzer 	 * are resuming after someone else has possibly changed things,
1987935fe098SMike Snitzer 	 * so we should forget old cached info.
1988935fe098SMike Snitzer 	 * All chunks should be clean, but some might need_sync.
1989935fe098SMike Snitzer 	 */
1990935fe098SMike Snitzer 	while (sector < mddev->resync_max_sectors) {
1991935fe098SMike Snitzer 		sector_t blocks;
1992e64e4018SAndy Shevchenko 		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1993935fe098SMike Snitzer 		sector += blocks;
1994935fe098SMike Snitzer 	}
1995e64e4018SAndy Shevchenko 	md_bitmap_close_sync(bitmap);
1996935fe098SMike Snitzer 
1997935fe098SMike Snitzer 	if (mddev->degraded == 0
1998935fe098SMike Snitzer 	    || bitmap->events_cleared == mddev->events)
1999935fe098SMike Snitzer 		/* no need to keep dirty bits to optimise a
2000935fe098SMike Snitzer 		 * re-add of a missing device */
2001935fe098SMike Snitzer 		start = mddev->recovery_cp;
2002935fe098SMike Snitzer 
2003935fe098SMike Snitzer 	mutex_lock(&mddev->bitmap_info.mutex);
2004e64e4018SAndy Shevchenko 	err = md_bitmap_init_from_disk(bitmap, start);
2005935fe098SMike Snitzer 	mutex_unlock(&mddev->bitmap_info.mutex);
2006935fe098SMike Snitzer 
2007935fe098SMike Snitzer 	if (err)
2008935fe098SMike Snitzer 		goto out;
2009935fe098SMike Snitzer 	clear_bit(BITMAP_STALE, &bitmap->flags);
2010935fe098SMike Snitzer 
2011935fe098SMike Snitzer 	/* Kick recovery in case any bits were set */
2012935fe098SMike Snitzer 	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
2013935fe098SMike Snitzer 
20144eeb6535SYu Kuai 	mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
2015935fe098SMike Snitzer 	md_wakeup_thread(mddev->thread);
2016935fe098SMike Snitzer 
2017e64e4018SAndy Shevchenko 	md_bitmap_update_sb(bitmap);
2018935fe098SMike Snitzer 
2019935fe098SMike Snitzer 	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
2020935fe098SMike Snitzer 		err = -EIO;
2021935fe098SMike Snitzer out:
2022935fe098SMike Snitzer 	return err;
2023935fe098SMike Snitzer }
2024e64e4018SAndy Shevchenko EXPORT_SYMBOL_GPL(md_bitmap_load);
2025935fe098SMike Snitzer 
20261383b347SZhao Heming /* caller need to free returned bitmap with md_bitmap_free() */
2027935fe098SMike Snitzer struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
2028935fe098SMike Snitzer {
2029935fe098SMike Snitzer 	int rv = 0;
2030935fe098SMike Snitzer 	struct bitmap *bitmap;
2031935fe098SMike Snitzer 
2032e64e4018SAndy Shevchenko 	bitmap = md_bitmap_create(mddev, slot);
2033935fe098SMike Snitzer 	if (IS_ERR(bitmap)) {
2034935fe098SMike Snitzer 		rv = PTR_ERR(bitmap);
2035935fe098SMike Snitzer 		return ERR_PTR(rv);
2036935fe098SMike Snitzer 	}
2037935fe098SMike Snitzer 
2038e64e4018SAndy Shevchenko 	rv = md_bitmap_init_from_disk(bitmap, 0);
2039935fe098SMike Snitzer 	if (rv) {
2040e64e4018SAndy Shevchenko 		md_bitmap_free(bitmap);
2041935fe098SMike Snitzer 		return ERR_PTR(rv);
2042935fe098SMike Snitzer 	}
2043935fe098SMike Snitzer 
2044935fe098SMike Snitzer 	return bitmap;
2045935fe098SMike Snitzer }
2046935fe098SMike Snitzer EXPORT_SYMBOL(get_bitmap_from_slot);
2047935fe098SMike Snitzer 
2048935fe098SMike Snitzer /* Loads the bitmap associated with slot and copies the resync information
2049935fe098SMike Snitzer  * to our bitmap
2050935fe098SMike Snitzer  */
2051e64e4018SAndy Shevchenko int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
2052935fe098SMike Snitzer 		sector_t *low, sector_t *high, bool clear_bits)
2053935fe098SMike Snitzer {
2054935fe098SMike Snitzer 	int rv = 0, i, j;
2055935fe098SMike Snitzer 	sector_t block, lo = 0, hi = 0;
2056935fe098SMike Snitzer 	struct bitmap_counts *counts;
2057935fe098SMike Snitzer 	struct bitmap *bitmap;
2058935fe098SMike Snitzer 
2059935fe098SMike Snitzer 	bitmap = get_bitmap_from_slot(mddev, slot);
2060935fe098SMike Snitzer 	if (IS_ERR(bitmap)) {
2061935fe098SMike Snitzer 		pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
2062935fe098SMike Snitzer 		return -1;
2063935fe098SMike Snitzer 	}
2064935fe098SMike Snitzer 
2065935fe098SMike Snitzer 	counts = &bitmap->counts;
2066935fe098SMike Snitzer 	for (j = 0; j < counts->chunks; j++) {
2067935fe098SMike Snitzer 		block = (sector_t)j << counts->chunkshift;
2068e64e4018SAndy Shevchenko 		if (md_bitmap_file_test_bit(bitmap, block)) {
2069935fe098SMike Snitzer 			if (!lo)
2070935fe098SMike Snitzer 				lo = block;
2071935fe098SMike Snitzer 			hi = block;
2072e64e4018SAndy Shevchenko 			md_bitmap_file_clear_bit(bitmap, block);
2073e64e4018SAndy Shevchenko 			md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
2074e64e4018SAndy Shevchenko 			md_bitmap_file_set_bit(mddev->bitmap, block);
2075935fe098SMike Snitzer 		}
2076935fe098SMike Snitzer 	}
2077935fe098SMike Snitzer 
2078935fe098SMike Snitzer 	if (clear_bits) {
2079e64e4018SAndy Shevchenko 		md_bitmap_update_sb(bitmap);
2080935fe098SMike Snitzer 		/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
2081935fe098SMike Snitzer 		 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
2082935fe098SMike Snitzer 		for (i = 0; i < bitmap->storage.file_pages; i++)
2083935fe098SMike Snitzer 			if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
2084935fe098SMike Snitzer 				set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
2085e64e4018SAndy Shevchenko 		md_bitmap_unplug(bitmap);
2086935fe098SMike Snitzer 	}
2087e64e4018SAndy Shevchenko 	md_bitmap_unplug(mddev->bitmap);
2088935fe098SMike Snitzer 	*low = lo;
2089935fe098SMike Snitzer 	*high = hi;
20901383b347SZhao Heming 	md_bitmap_free(bitmap);
2091935fe098SMike Snitzer 
2092935fe098SMike Snitzer 	return rv;
2093935fe098SMike Snitzer }
2094e64e4018SAndy Shevchenko EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
2095935fe098SMike Snitzer 
2096935fe098SMike Snitzer 
2097e64e4018SAndy Shevchenko void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
2098935fe098SMike Snitzer {
2099935fe098SMike Snitzer 	unsigned long chunk_kb;
2100935fe098SMike Snitzer 	struct bitmap_counts *counts;
2101935fe098SMike Snitzer 
2102935fe098SMike Snitzer 	if (!bitmap)
2103935fe098SMike Snitzer 		return;
2104935fe098SMike Snitzer 
2105935fe098SMike Snitzer 	counts = &bitmap->counts;
2106935fe098SMike Snitzer 
2107935fe098SMike Snitzer 	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
2108935fe098SMike Snitzer 	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
2109935fe098SMike Snitzer 		   "%lu%s chunk",
2110935fe098SMike Snitzer 		   counts->pages - counts->missing_pages,
2111935fe098SMike Snitzer 		   counts->pages,
2112935fe098SMike Snitzer 		   (counts->pages - counts->missing_pages)
2113935fe098SMike Snitzer 		   << (PAGE_SHIFT - 10),
2114935fe098SMike Snitzer 		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
2115935fe098SMike Snitzer 		   chunk_kb ? "KB" : "B");
2116935fe098SMike Snitzer 	if (bitmap->storage.file) {
2117935fe098SMike Snitzer 		seq_printf(seq, ", file: ");
2118935fe098SMike Snitzer 		seq_file_path(seq, bitmap->storage.file, " \t\n");
2119935fe098SMike Snitzer 	}
2120935fe098SMike Snitzer 
2121935fe098SMike Snitzer 	seq_printf(seq, "\n");
2122935fe098SMike Snitzer }
2123935fe098SMike Snitzer 
2124e64e4018SAndy Shevchenko int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2125935fe098SMike Snitzer 		  int chunksize, int init)
2126935fe098SMike Snitzer {
2127935fe098SMike Snitzer 	/* If 'chunksize' is 0, choose an appropriate chunk size.
2128935fe098SMike Snitzer 	 * Then possibly allocate new storage space.
2129935fe098SMike Snitzer 	 * Then quiesce, copy bits, replace the bitmap, and re-start.
2130935fe098SMike Snitzer 	 *
2131935fe098SMike Snitzer 	 * This function is called both to set up the initial bitmap
2132935fe098SMike Snitzer 	 * and to resize the bitmap while the array is active.
2133935fe098SMike Snitzer 	 * When called because the array is being resized, chunksize
2134935fe098SMike Snitzer 	 * is zero and we must choose a suitable chunk size ourselves;
2135935fe098SMike Snitzer 	 * otherwise we use the chunk size we were given.
2136935fe098SMike Snitzer 	 */
2137935fe098SMike Snitzer 	struct bitmap_storage store;
2138935fe098SMike Snitzer 	struct bitmap_counts old_counts;
2139935fe098SMike Snitzer 	unsigned long chunks;
2140935fe098SMike Snitzer 	sector_t block;
2141935fe098SMike Snitzer 	sector_t old_blocks, new_blocks;
2142935fe098SMike Snitzer 	int chunkshift;
2143935fe098SMike Snitzer 	int ret = 0;
2144935fe098SMike Snitzer 	long pages;
2145935fe098SMike Snitzer 	struct bitmap_page *new_bp;
2146935fe098SMike Snitzer 
2147935fe098SMike Snitzer 	if (bitmap->storage.file && !init) {
2148935fe098SMike Snitzer 		pr_info("md: cannot resize file-based bitmap\n");
2149935fe098SMike Snitzer 		return -EINVAL;
2150935fe098SMike Snitzer 	}
2151935fe098SMike Snitzer 
2152935fe098SMike Snitzer 	if (chunksize == 0) {
2153935fe098SMike Snitzer 		/* If there is enough space, leave the chunk size unchanged,
2154935fe098SMike Snitzer 		 * else increase it by a factor of two until there is enough space.
2155935fe098SMike Snitzer 		 */
2156935fe098SMike Snitzer 		long bytes;
2157935fe098SMike Snitzer 		long space = bitmap->mddev->bitmap_info.space;
2158935fe098SMike Snitzer 
2159935fe098SMike Snitzer 		if (space == 0) {
2160935fe098SMike Snitzer 			/* We don't know how much space there is, so limit
2161935fe098SMike Snitzer 			 * it to the current size, in sectors.
2162935fe098SMike Snitzer 			 */
2163935fe098SMike Snitzer 			bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
2164935fe098SMike Snitzer 			if (!bitmap->mddev->bitmap_info.external)
2165935fe098SMike Snitzer 				bytes += sizeof(bitmap_super_t);
2166935fe098SMike Snitzer 			space = DIV_ROUND_UP(bytes, 512);
2167935fe098SMike Snitzer 			bitmap->mddev->bitmap_info.space = space;
2168935fe098SMike Snitzer 		}
2169935fe098SMike Snitzer 		chunkshift = bitmap->counts.chunkshift;
2170935fe098SMike Snitzer 		chunkshift--;
2171935fe098SMike Snitzer 		do {
2172935fe098SMike Snitzer 			/* 'chunkshift' is shift from block size to chunk size */
2173935fe098SMike Snitzer 			chunkshift++;
2174935fe098SMike Snitzer 			chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2175935fe098SMike Snitzer 			bytes = DIV_ROUND_UP(chunks, 8);
2176935fe098SMike Snitzer 			if (!bitmap->mddev->bitmap_info.external)
2177935fe098SMike Snitzer 				bytes += sizeof(bitmap_super_t);
217845552111SFlorian-Ewald Mueller 		} while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
217945552111SFlorian-Ewald Mueller 			(BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
2180935fe098SMike Snitzer 	} else
2181935fe098SMike Snitzer 		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
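
	/*
	 * Worked example (values illustrative): with a 64MB chunk,
	 * chunksize == 1 << 26, so ffz(~chunksize) == 26 and
	 * chunkshift == 26 - BITMAP_BLOCK_SHIFT == 17, i.e. one chunk
	 * covers 1 << 17 512-byte blocks.  On the chunksize == 0 path
	 * above, the loop instead keeps doubling the chunk until the
	 * bitmap (one bit per chunk, plus the superblock if internal)
	 * fits in 'space' sectors, i.e. until bytes <= space << 9.
	 */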
2182935fe098SMike Snitzer 
2183935fe098SMike Snitzer 	chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2184935fe098SMike Snitzer 	memset(&store, 0, sizeof(store));
2185935fe098SMike Snitzer 	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
2186e64e4018SAndy Shevchenko 		ret = md_bitmap_storage_alloc(&store, chunks,
2187935fe098SMike Snitzer 					      !bitmap->mddev->bitmap_info.external,
2188935fe098SMike Snitzer 					      mddev_is_clustered(bitmap->mddev)
2189935fe098SMike Snitzer 					      ? bitmap->cluster_slot : 0);
2190935fe098SMike Snitzer 	if (ret) {
2191e64e4018SAndy Shevchenko 		md_bitmap_file_unmap(&store);
2192935fe098SMike Snitzer 		goto err;
2193935fe098SMike Snitzer 	}
2194935fe098SMike Snitzer 
2195935fe098SMike Snitzer 	pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
2196935fe098SMike Snitzer 
21976396bb22SKees Cook 	new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
2198935fe098SMike Snitzer 	ret = -ENOMEM;
2199935fe098SMike Snitzer 	if (!new_bp) {
2200e64e4018SAndy Shevchenko 		md_bitmap_file_unmap(&store);
2201935fe098SMike Snitzer 		goto err;
2202935fe098SMike Snitzer 	}
2203935fe098SMike Snitzer 
2204935fe098SMike Snitzer 	if (!init)
2205935fe098SMike Snitzer 		bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
2206935fe098SMike Snitzer 
2207935fe098SMike Snitzer 	store.file = bitmap->storage.file;
2208935fe098SMike Snitzer 	bitmap->storage.file = NULL;
2209935fe098SMike Snitzer 
2210935fe098SMike Snitzer 	if (store.sb_page && bitmap->storage.sb_page)
2211935fe098SMike Snitzer 		memcpy(page_address(store.sb_page),
2212935fe098SMike Snitzer 		       page_address(bitmap->storage.sb_page),
2213935fe098SMike Snitzer 		       sizeof(bitmap_super_t));
2214fadcbd29SGuoqing Jiang 	spin_lock_irq(&bitmap->counts.lock);
2215e64e4018SAndy Shevchenko 	md_bitmap_file_unmap(&bitmap->storage);
2216935fe098SMike Snitzer 	bitmap->storage = store;
2217935fe098SMike Snitzer 
2218935fe098SMike Snitzer 	old_counts = bitmap->counts;
2219935fe098SMike Snitzer 	bitmap->counts.bp = new_bp;
2220935fe098SMike Snitzer 	bitmap->counts.pages = pages;
2221935fe098SMike Snitzer 	bitmap->counts.missing_pages = pages;
2222935fe098SMike Snitzer 	bitmap->counts.chunkshift = chunkshift;
2223935fe098SMike Snitzer 	bitmap->counts.chunks = chunks;
222445552111SFlorian-Ewald Mueller 	bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
2225935fe098SMike Snitzer 						     BITMAP_BLOCK_SHIFT);
2226935fe098SMike Snitzer 
2227935fe098SMike Snitzer 	blocks = min(old_counts.chunks << old_counts.chunkshift,
2228935fe098SMike Snitzer 		     chunks << chunkshift);
2229935fe098SMike Snitzer 
2230935fe098SMike Snitzer 	/* For clustered raid, the bitmap pages must be pre-allocated */
2231935fe098SMike Snitzer 	if (mddev_is_clustered(bitmap->mddev)) {
2232935fe098SMike Snitzer 		unsigned long page;
2233935fe098SMike Snitzer 		for (page = 0; page < pages; page++) {
2234e64e4018SAndy Shevchenko 			ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
2235935fe098SMike Snitzer 			if (ret) {
2236935fe098SMike Snitzer 				unsigned long k;
2237935fe098SMike Snitzer 
2238935fe098SMike Snitzer 				/* deallocate the page memory */
2239935fe098SMike Snitzer 				for (k = 0; k < page; k++) {
2240935fe098SMike Snitzer 					kfree(new_bp[k].map);
2241935fe098SMike Snitzer 				}
22420868b99cSZdenek Kabelac 				kfree(new_bp);
2243935fe098SMike Snitzer 
2244935fe098SMike Snitzer 				/* restore some fields from old_counts */
2245935fe098SMike Snitzer 				bitmap->counts.bp = old_counts.bp;
2246935fe098SMike Snitzer 				bitmap->counts.pages = old_counts.pages;
2247935fe098SMike Snitzer 				bitmap->counts.missing_pages = old_counts.pages;
2248935fe098SMike Snitzer 				bitmap->counts.chunkshift = old_counts.chunkshift;
2249935fe098SMike Snitzer 				bitmap->counts.chunks = old_counts.chunks;
225045552111SFlorian-Ewald Mueller 				bitmap->mddev->bitmap_info.chunksize =
225145552111SFlorian-Ewald Mueller 					1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
2252935fe098SMike Snitzer 				blocks = old_counts.chunks << old_counts.chunkshift;
2253935fe098SMike Snitzer 				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
2254935fe098SMike Snitzer 				break;
2255935fe098SMike Snitzer 			} else
2256935fe098SMike Snitzer 				bitmap->counts.bp[page].count += 1;
2257935fe098SMike Snitzer 		}
2258935fe098SMike Snitzer 	}
2259935fe098SMike Snitzer 
2260935fe098SMike Snitzer 	for (block = 0; block < blocks; ) {
2261935fe098SMike Snitzer 		bitmap_counter_t *bmc_old, *bmc_new;
2262935fe098SMike Snitzer 		int set;
2263935fe098SMike Snitzer 
2264e64e4018SAndy Shevchenko 		bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
2265935fe098SMike Snitzer 		set = bmc_old && NEEDED(*bmc_old);
2266935fe098SMike Snitzer 
2267935fe098SMike Snitzer 		if (set) {
2268e64e4018SAndy Shevchenko 			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
22693bd548e5SLi Zhong 			if (bmc_new) {
2270935fe098SMike Snitzer 				if (*bmc_new == 0) {
2271935fe098SMike Snitzer 					/* need to set on-disk bits too. */
2272935fe098SMike Snitzer 					sector_t end = block + new_blocks;
2273935fe098SMike Snitzer 					sector_t start = block >> chunkshift;
22743bd548e5SLi Zhong 
2275935fe098SMike Snitzer 					start <<= chunkshift;
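					/*
					 * new_blocks normally stays inside
					 * one chunk, but on a hijacked
					 * counter page a single counter can
					 * cover several chunks, so walk
					 * chunk by chunk and set each
					 * chunk's on-disk bit.
					 */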
2276935fe098SMike Snitzer 					while (start < end) {
2277e64e4018SAndy Shevchenko 						md_bitmap_file_set_bit(bitmap, start);
2278935fe098SMike Snitzer 						start += 1 << chunkshift;
2279935fe098SMike Snitzer 					}
2280935fe098SMike Snitzer 					*bmc_new = 2;
2281e64e4018SAndy Shevchenko 					md_bitmap_count_page(&bitmap->counts, block, 1);
2282e64e4018SAndy Shevchenko 					md_bitmap_set_pending(&bitmap->counts, block);
2283935fe098SMike Snitzer 				}
2284935fe098SMike Snitzer 				*bmc_new |= NEEDED_MASK;
22853bd548e5SLi Zhong 			}
2286935fe098SMike Snitzer 			if (new_blocks < old_blocks)
2287935fe098SMike Snitzer 				old_blocks = new_blocks;
2288935fe098SMike Snitzer 		}
2289935fe098SMike Snitzer 		block += old_blocks;
2290935fe098SMike Snitzer 	}
2291935fe098SMike Snitzer 
22920868b99cSZdenek Kabelac 	if (bitmap->counts.bp != old_counts.bp) {
22930868b99cSZdenek Kabelac 		unsigned long k;
22940868b99cSZdenek Kabelac 		for (k = 0; k < old_counts.pages; k++)
22950868b99cSZdenek Kabelac 			if (!old_counts.bp[k].hijacked)
22960868b99cSZdenek Kabelac 				kfree(old_counts.bp[k].map);
22970868b99cSZdenek Kabelac 		kfree(old_counts.bp);
22980868b99cSZdenek Kabelac 	}
22990868b99cSZdenek Kabelac 
2300935fe098SMike Snitzer 	if (!init) {
2301935fe098SMike Snitzer 		int i;
2302935fe098SMike Snitzer 		while (block < (chunks << chunkshift)) {
2303935fe098SMike Snitzer 			bitmap_counter_t *bmc;
2304e64e4018SAndy Shevchenko 			bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2305935fe098SMike Snitzer 			if (bmc) {
2306935fe098SMike Snitzer 				/* new space.  It needs to be resynced, so
2307935fe098SMike Snitzer 				 * we set NEEDED_MASK.
2308935fe098SMike Snitzer 				 */
2309935fe098SMike Snitzer 				if (*bmc == 0) {
2310935fe098SMike Snitzer 					*bmc = NEEDED_MASK | 2;
2311e64e4018SAndy Shevchenko 					md_bitmap_count_page(&bitmap->counts, block, 1);
2312e64e4018SAndy Shevchenko 					md_bitmap_set_pending(&bitmap->counts, block);
2313935fe098SMike Snitzer 				}
2314935fe098SMike Snitzer 			}
2315935fe098SMike Snitzer 			block += new_blocks;
2316935fe098SMike Snitzer 		}
2317935fe098SMike Snitzer 		for (i = 0; i < bitmap->storage.file_pages; i++)
2318935fe098SMike Snitzer 			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
2319935fe098SMike Snitzer 	}
2320935fe098SMike Snitzer 	spin_unlock_irq(&bitmap->counts.lock);
2321935fe098SMike Snitzer 
2322935fe098SMike Snitzer 	if (!init) {
2323e64e4018SAndy Shevchenko 		md_bitmap_unplug(bitmap);
2324935fe098SMike Snitzer 		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
2325935fe098SMike Snitzer 	}
2326935fe098SMike Snitzer 	ret = 0;
2327935fe098SMike Snitzer err:
2328935fe098SMike Snitzer 	return ret;
2329935fe098SMike Snitzer }
2330e64e4018SAndy Shevchenko EXPORT_SYMBOL_GPL(md_bitmap_resize);
2331935fe098SMike Snitzer 
2332935fe098SMike Snitzer static ssize_t
2333935fe098SMike Snitzer location_show(struct mddev *mddev, char *page)
2334935fe098SMike Snitzer {
2335935fe098SMike Snitzer 	ssize_t len;
2336935fe098SMike Snitzer 	if (mddev->bitmap_info.file)
2337935fe098SMike Snitzer 		len = sprintf(page, "file");
2338935fe098SMike Snitzer 	else if (mddev->bitmap_info.offset)
2339935fe098SMike Snitzer 		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
2340935fe098SMike Snitzer 	else
2341935fe098SMike Snitzer 		len = sprintf(page, "none");
2342935fe098SMike Snitzer 	len += sprintf(page+len, "\n");
2343935fe098SMike Snitzer 	return len;
2344935fe098SMike Snitzer }
2345935fe098SMike Snitzer 
2346935fe098SMike Snitzer static ssize_t
2347935fe098SMike Snitzer location_store(struct mddev *mddev, const char *buf, size_t len)
2348935fe098SMike Snitzer {
2349935fe098SMike Snitzer 	int rv;
2350935fe098SMike Snitzer 
2351935fe098SMike Snitzer 	rv = mddev_lock(mddev);
2352935fe098SMike Snitzer 	if (rv)
2353935fe098SMike Snitzer 		return rv;
2354935fe098SMike Snitzer 	if (mddev->pers) {
2355935fe098SMike Snitzer 		if (!mddev->pers->quiesce) {
2356935fe098SMike Snitzer 			rv = -EBUSY;
2357935fe098SMike Snitzer 			goto out;
2358935fe098SMike Snitzer 		}
2359935fe098SMike Snitzer 		if (mddev->recovery || mddev->sync_thread) {
2360935fe098SMike Snitzer 			rv = -EBUSY;
2361935fe098SMike Snitzer 			goto out;
2362935fe098SMike Snitzer 		}
2363935fe098SMike Snitzer 	}
2364935fe098SMike Snitzer 
2365935fe098SMike Snitzer 	if (mddev->bitmap || mddev->bitmap_info.file ||
2366935fe098SMike Snitzer 	    mddev->bitmap_info.offset) {
2367935fe098SMike Snitzer 		/* bitmap already configured.  Only option is to clear it */
2368935fe098SMike Snitzer 		if (strncmp(buf, "none", 4) != 0) {
2369935fe098SMike Snitzer 			rv = -EBUSY;
2370935fe098SMike Snitzer 			goto out;
2371935fe098SMike Snitzer 		}
2372935fe098SMike Snitzer 		if (mddev->pers) {
2373f8f83d8fSJack Wang 			mddev_suspend(mddev);
2374e64e4018SAndy Shevchenko 			md_bitmap_destroy(mddev);
2375f8f83d8fSJack Wang 			mddev_resume(mddev);
2376935fe098SMike Snitzer 		}
2377935fe098SMike Snitzer 		mddev->bitmap_info.offset = 0;
2378935fe098SMike Snitzer 		if (mddev->bitmap_info.file) {
2379935fe098SMike Snitzer 			struct file *f = mddev->bitmap_info.file;
2380935fe098SMike Snitzer 			mddev->bitmap_info.file = NULL;
2381935fe098SMike Snitzer 			fput(f);
2382935fe098SMike Snitzer 		}
2383935fe098SMike Snitzer 	} else {
2384935fe098SMike Snitzer 		/* No bitmap, OK to set a location */
2385935fe098SMike Snitzer 		long long offset;
2386935fe098SMike Snitzer 		if (strncmp(buf, "none", 4) == 0)
2387935fe098SMike Snitzer 			/* nothing to be done */;
2388935fe098SMike Snitzer 		else if (strncmp(buf, "file:", 5) == 0) {
2389935fe098SMike Snitzer 			/* Not supported yet */
2390935fe098SMike Snitzer 			rv = -EINVAL;
2391935fe098SMike Snitzer 			goto out;
2392935fe098SMike Snitzer 		} else {
2393935fe098SMike Snitzer 			if (buf[0] == '+')
2394935fe098SMike Snitzer 				rv = kstrtoll(buf+1, 10, &offset);
2395935fe098SMike Snitzer 			else
2396935fe098SMike Snitzer 				rv = kstrtoll(buf, 10, &offset);
2397935fe098SMike Snitzer 			if (rv)
2398935fe098SMike Snitzer 				goto out;
2399935fe098SMike Snitzer 			if (offset == 0) {
2400935fe098SMike Snitzer 				rv = -EINVAL;
2401935fe098SMike Snitzer 				goto out;
2402935fe098SMike Snitzer 			}
2403935fe098SMike Snitzer 			if (mddev->bitmap_info.external == 0 &&
2404935fe098SMike Snitzer 			    mddev->major_version == 0 &&
2405935fe098SMike Snitzer 			    offset != mddev->bitmap_info.default_offset) {
2406935fe098SMike Snitzer 				rv = -EINVAL;
2407935fe098SMike Snitzer 				goto out;
2408935fe098SMike Snitzer 			}
2409935fe098SMike Snitzer 			mddev->bitmap_info.offset = offset;
2410935fe098SMike Snitzer 			if (mddev->pers) {
2411935fe098SMike Snitzer 				struct bitmap *bitmap;
2412e64e4018SAndy Shevchenko 				bitmap = md_bitmap_create(mddev, -1);
2413f8f83d8fSJack Wang 				mddev_suspend(mddev);
2414935fe098SMike Snitzer 				if (IS_ERR(bitmap))
2415935fe098SMike Snitzer 					rv = PTR_ERR(bitmap);
2416935fe098SMike Snitzer 				else {
2417935fe098SMike Snitzer 					mddev->bitmap = bitmap;
2418e64e4018SAndy Shevchenko 					rv = md_bitmap_load(mddev);
2419935fe098SMike Snitzer 					if (rv)
2420935fe098SMike Snitzer 						mddev->bitmap_info.offset = 0;
2421935fe098SMike Snitzer 				}
2422935fe098SMike Snitzer 				if (rv) {
2423e64e4018SAndy Shevchenko 					md_bitmap_destroy(mddev);
2424f8f83d8fSJack Wang 					mddev_resume(mddev);
2425935fe098SMike Snitzer 					goto out;
2426935fe098SMike Snitzer 				}
2427f8f83d8fSJack Wang 				mddev_resume(mddev);
2428935fe098SMike Snitzer 			}
2429935fe098SMike Snitzer 		}
2430935fe098SMike Snitzer 	}
2431935fe098SMike Snitzer 	if (!mddev->external) {
2432935fe098SMike Snitzer 		/* Ensure new bitmap info is stored in
2433935fe098SMike Snitzer 		 * metadata promptly.
2434935fe098SMike Snitzer 		 */
2435935fe098SMike Snitzer 		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2436935fe098SMike Snitzer 		md_wakeup_thread(mddev->thread);
2437935fe098SMike Snitzer 	}
2438935fe098SMike Snitzer 	rv = 0;
2439935fe098SMike Snitzer out:
2440935fe098SMike Snitzer 	mddev_unlock(mddev);
2441935fe098SMike Snitzer 	if (rv)
2442935fe098SMike Snitzer 		return rv;
2443935fe098SMike Snitzer 	return len;
2444935fe098SMike Snitzer }
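
/*
 * Illustrative usage from user space (device name and offset are
 * examples, not defaults); "file:" is rejected above, so file-backed
 * bitmaps are set up via the SET_BITMAP_FILE ioctl instead:
 *
 *   echo +8 > /sys/block/md0/md/bitmap/location	# internal bitmap
 *   echo none > /sys/block/md0/md/bitmap/location	# remove it again
 */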
2445935fe098SMike Snitzer 
2446935fe098SMike Snitzer static struct md_sysfs_entry bitmap_location =
2447935fe098SMike Snitzer __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
2448935fe098SMike Snitzer 
2449935fe098SMike Snitzer /* 'bitmap/space' is the space available at 'location' for the
2450935fe098SMike Snitzer  * bitmap.  This allows the kernel to know when it is safe to
2451935fe098SMike Snitzer  * resize the bitmap to match a resized array.
2452935fe098SMike Snitzer  */
2453935fe098SMike Snitzer static ssize_t
2454935fe098SMike Snitzer space_show(struct mddev *mddev, char *page)
2455935fe098SMike Snitzer {
2456935fe098SMike Snitzer 	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
2457935fe098SMike Snitzer }
2458935fe098SMike Snitzer 
2459935fe098SMike Snitzer static ssize_t
2460935fe098SMike Snitzer space_store(struct mddev *mddev, const char *buf, size_t len)
2461935fe098SMike Snitzer {
2462935fe098SMike Snitzer 	unsigned long sectors;
2463935fe098SMike Snitzer 	int rv;
2464935fe098SMike Snitzer 
2465935fe098SMike Snitzer 	rv = kstrtoul(buf, 10, &sectors);
2466935fe098SMike Snitzer 	if (rv)
2467935fe098SMike Snitzer 		return rv;
2468935fe098SMike Snitzer 
2469935fe098SMike Snitzer 	if (sectors == 0)
2470935fe098SMike Snitzer 		return -EINVAL;
2471935fe098SMike Snitzer 
2472935fe098SMike Snitzer 	if (mddev->bitmap &&
2473935fe098SMike Snitzer 	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
2474935fe098SMike Snitzer 		return -EFBIG; /* Bitmap is too big for this small space */
2475935fe098SMike Snitzer 
2476935fe098SMike Snitzer 	/* could make sure it isn't too big, but that isn't really
2477935fe098SMike Snitzer 	 * needed - user-space should be careful.
2478935fe098SMike Snitzer 	 */
2479935fe098SMike Snitzer 	mddev->bitmap_info.space = sectors;
2480935fe098SMike Snitzer 	return len;
2481935fe098SMike Snitzer }
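
/*
 * Example (sector count is illustrative): reserve room before growing
 * the array, so a later bitmap resize cannot overrun other metadata:
 *
 *   echo 4096 > /sys/block/md0/md/bitmap/space	# 4096 sectors == 2MB
 *
 * Only values smaller than the current bitmap are rejected (-EFBIG);
 * an over-large value is deliberately left to user-space judgement.
 */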
2482935fe098SMike Snitzer 
2483935fe098SMike Snitzer static struct md_sysfs_entry bitmap_space =
2484935fe098SMike Snitzer __ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
2485935fe098SMike Snitzer 
2486935fe098SMike Snitzer static ssize_t
2487935fe098SMike Snitzer timeout_show(struct mddev *mddev, char *page)
2488935fe098SMike Snitzer {
2489935fe098SMike Snitzer 	ssize_t len;
2490935fe098SMike Snitzer 	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
2491935fe098SMike Snitzer 	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
2492935fe098SMike Snitzer 
2493935fe098SMike Snitzer 	len = sprintf(page, "%lu", secs);
2494935fe098SMike Snitzer 	if (jifs)
2495935fe098SMike Snitzer 		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
2496935fe098SMike Snitzer 	len += sprintf(page+len, "\n");
2497935fe098SMike Snitzer 	return len;
2498935fe098SMike Snitzer }
2499935fe098SMike Snitzer 
2500935fe098SMike Snitzer static ssize_t
2501935fe098SMike Snitzer timeout_store(struct mddev *mddev, const char *buf, size_t len)
2502935fe098SMike Snitzer {
2503935fe098SMike Snitzer 	/* timeout can be set at any time */
2504935fe098SMike Snitzer 	unsigned long timeout;
2505935fe098SMike Snitzer 	int rv = strict_strtoul_scaled(buf, &timeout, 4);
2506935fe098SMike Snitzer 	if (rv)
2507935fe098SMike Snitzer 		return rv;
2508935fe098SMike Snitzer 
2509935fe098SMike Snitzer 	/* just to make sure we don't overflow... */
2510935fe098SMike Snitzer 	if (timeout >= LONG_MAX / HZ)
2511935fe098SMike Snitzer 		return -EINVAL;
2512935fe098SMike Snitzer 
2513935fe098SMike Snitzer 	timeout = timeout * HZ / 10000;
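	/*
	 * strict_strtoul_scaled() returned a fixed-point value with four
	 * decimal places, e.g. "5.5" parses to 55000, and
	 * 55000 * HZ / 10000 == 5.5 * HZ jiffies.
	 */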
2514935fe098SMike Snitzer 
2515935fe098SMike Snitzer 	if (timeout >= MAX_SCHEDULE_TIMEOUT)
2516935fe098SMike Snitzer 		timeout = MAX_SCHEDULE_TIMEOUT-1;
2517935fe098SMike Snitzer 	if (timeout < 1)
2518935fe098SMike Snitzer 		timeout = 1;
2519c333673aSYu Kuai 
25204eeb6535SYu Kuai 	mddev->bitmap_info.daemon_sleep = timeout;
25214eeb6535SYu Kuai 	mddev_set_timeout(mddev, timeout, false);
2522935fe098SMike Snitzer 	md_wakeup_thread(mddev->thread);
25234eeb6535SYu Kuai 
2524935fe098SMike Snitzer 	return len;
2525935fe098SMike Snitzer }
2526935fe098SMike Snitzer 
2527935fe098SMike Snitzer static struct md_sysfs_entry bitmap_timeout =
2528935fe098SMike Snitzer __ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
2529935fe098SMike Snitzer 
2530935fe098SMike Snitzer static ssize_t
2531935fe098SMike Snitzer backlog_show(struct mddev *mddev, char *page)
2532935fe098SMike Snitzer {
2533935fe098SMike Snitzer 	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
2534935fe098SMike Snitzer }
2535935fe098SMike Snitzer 
2536935fe098SMike Snitzer static ssize_t
2537935fe098SMike Snitzer backlog_store(struct mddev *mddev, const char *buf, size_t len)
2538935fe098SMike Snitzer {
2539935fe098SMike Snitzer 	unsigned long backlog;
254010c92fcaSGuoqing Jiang 	unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
25418c13ab11SGuoqing Jiang 	struct md_rdev *rdev;
25428c13ab11SGuoqing Jiang 	bool has_write_mostly = false;
2543935fe098SMike Snitzer 	int rv = kstrtoul(buf, 10, &backlog);
2544935fe098SMike Snitzer 	if (rv)
2545935fe098SMike Snitzer 		return rv;
2546935fe098SMike Snitzer 	if (backlog > COUNTER_MAX)
2547935fe098SMike Snitzer 		return -EINVAL;
25488c13ab11SGuoqing Jiang 
2549*44abfa6aSYu Kuai 	rv = mddev_lock(mddev);
2550*44abfa6aSYu Kuai 	if (rv)
2551*44abfa6aSYu Kuai 		return rv;
2552*44abfa6aSYu Kuai 
25538c13ab11SGuoqing Jiang 	/*
25548c13ab11SGuoqing Jiang 	 * Without a write-mostly device, it doesn't make sense to set
25558c13ab11SGuoqing Jiang 	 * a max_write_behind backlog.
25568c13ab11SGuoqing Jiang 	 */
25578c13ab11SGuoqing Jiang 	rdev_for_each(rdev, mddev) {
25588c13ab11SGuoqing Jiang 		if (test_bit(WriteMostly, &rdev->flags)) {
25598c13ab11SGuoqing Jiang 			has_write_mostly = true;
25608c13ab11SGuoqing Jiang 			break;
25618c13ab11SGuoqing Jiang 		}
25628c13ab11SGuoqing Jiang 	}
25638c13ab11SGuoqing Jiang 	if (!has_write_mostly) {
25648c13ab11SGuoqing Jiang 		pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n",
25658c13ab11SGuoqing Jiang 				    mdname(mddev));
2566*44abfa6aSYu Kuai 		mddev_unlock(mddev);
25678c13ab11SGuoqing Jiang 		return -EINVAL;
25688c13ab11SGuoqing Jiang 	}
25698c13ab11SGuoqing Jiang 
2570935fe098SMike Snitzer 	mddev->bitmap_info.max_write_behind = backlog;
2571404659cfSGuoqing Jiang 	if (!backlog && mddev->serial_info_pool) {
2572404659cfSGuoqing Jiang 		/* serial_info_pool is not needed if backlog is zero */
257369b00b5bSGuoqing Jiang 		if (!mddev->serialize_policy)
257469b00b5bSGuoqing Jiang 			mddev_destroy_serial_pool(mddev, NULL, false);
2575404659cfSGuoqing Jiang 	} else if (backlog && !mddev->serial_info_pool) {
2576404659cfSGuoqing Jiang 		/* serial_info_pool is needed since backlog is not zero */
257710c92fcaSGuoqing Jiang 		rdev_for_each(rdev, mddev)
2578404659cfSGuoqing Jiang 			mddev_create_serial_pool(mddev, rdev, false);
257910c92fcaSGuoqing Jiang 	}
258010c92fcaSGuoqing Jiang 	if (old_mwb != backlog)
258110c92fcaSGuoqing Jiang 		md_bitmap_update_sb(mddev->bitmap);
2582*44abfa6aSYu Kuai 
2583*44abfa6aSYu Kuai 	mddev_unlock(mddev);
2584935fe098SMike Snitzer 	return len;
2585935fe098SMike Snitzer }
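
/*
 * Illustrative setup (device names are examples): write-behind only
 * makes sense with a write-mostly member, so mark one first or the
 * store above fails with -EINVAL:
 *
 *   echo writemostly > /sys/block/md0/md/dev-sdb/state
 *   echo 256 > /sys/block/md0/md/bitmap/backlog
 */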
2586935fe098SMike Snitzer 
2587935fe098SMike Snitzer static struct md_sysfs_entry bitmap_backlog =
2588935fe098SMike Snitzer __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
2589935fe098SMike Snitzer 
2590935fe098SMike Snitzer static ssize_t
2591935fe098SMike Snitzer chunksize_show(struct mddev *mddev, char *page)
2592935fe098SMike Snitzer {
2593935fe098SMike Snitzer 	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
2594935fe098SMike Snitzer }
2595935fe098SMike Snitzer 
2596935fe098SMike Snitzer static ssize_t
2597935fe098SMike Snitzer chunksize_store(struct mddev *mddev, const char *buf, size_t len)
2598935fe098SMike Snitzer {
2599935fe098SMike Snitzer 	/* Can only be changed when no bitmap is active */
2600935fe098SMike Snitzer 	int rv;
2601935fe098SMike Snitzer 	unsigned long csize;
2602935fe098SMike Snitzer 	if (mddev->bitmap)
2603935fe098SMike Snitzer 		return -EBUSY;
2604935fe098SMike Snitzer 	rv = kstrtoul(buf, 10, &csize);
2605935fe098SMike Snitzer 	if (rv)
2606935fe098SMike Snitzer 		return rv;
2607935fe098SMike Snitzer 	if (csize < 512 ||
2608935fe098SMike Snitzer 	    !is_power_of_2(csize))
2609935fe098SMike Snitzer 		return -EINVAL;
261045552111SFlorian-Ewald Mueller 	if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
261145552111SFlorian-Ewald Mueller 		sizeof(((bitmap_super_t *)0)->chunksize))))
261245552111SFlorian-Ewald Mueller 		return -EOVERFLOW;
2613935fe098SMike Snitzer 	mddev->bitmap_info.chunksize = csize;
2614935fe098SMike Snitzer 	return len;
2615935fe098SMike Snitzer }
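
/*
 * Example (values illustrative): the chunk size is given in bytes and
 * must be a power of two of at least 512, set before a bitmap exists:
 *
 *   echo 67108864 > /sys/block/md0/md/bitmap/chunksize	# 64MB chunks
 *   echo 1000000 > /sys/block/md0/md/bitmap/chunksize	# -EINVAL
 */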
2616935fe098SMike Snitzer 
2617935fe098SMike Snitzer static struct md_sysfs_entry bitmap_chunksize =
2618935fe098SMike Snitzer __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
2619935fe098SMike Snitzer 
2620935fe098SMike Snitzer static ssize_t metadata_show(struct mddev *mddev, char *page)
2621935fe098SMike Snitzer {
2622935fe098SMike Snitzer 	if (mddev_is_clustered(mddev))
2623935fe098SMike Snitzer 		return sprintf(page, "clustered\n");
2624935fe098SMike Snitzer 	return sprintf(page, "%s\n", (mddev->bitmap_info.external
2625935fe098SMike Snitzer 				      ? "external" : "internal"));
2626935fe098SMike Snitzer }
2627935fe098SMike Snitzer 
2628935fe098SMike Snitzer static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
2629935fe098SMike Snitzer {
2630935fe098SMike Snitzer 	if (mddev->bitmap ||
2631935fe098SMike Snitzer 	    mddev->bitmap_info.file ||
2632935fe098SMike Snitzer 	    mddev->bitmap_info.offset)
2633935fe098SMike Snitzer 		return -EBUSY;
2634935fe098SMike Snitzer 	if (strncmp(buf, "external", 8) == 0)
2635935fe098SMike Snitzer 		mddev->bitmap_info.external = 1;
2636935fe098SMike Snitzer 	else if ((strncmp(buf, "internal", 8) == 0) ||
2637935fe098SMike Snitzer 			(strncmp(buf, "clustered", 9) == 0))
2638935fe098SMike Snitzer 		mddev->bitmap_info.external = 0;
2639935fe098SMike Snitzer 	else
2640935fe098SMike Snitzer 		return -EINVAL;
2641935fe098SMike Snitzer 	return len;
2642935fe098SMike Snitzer }
2643935fe098SMike Snitzer 
2644935fe098SMike Snitzer static struct md_sysfs_entry bitmap_metadata =
2645935fe098SMike Snitzer __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
2646935fe098SMike Snitzer 
2647935fe098SMike Snitzer static ssize_t can_clear_show(struct mddev *mddev, char *page)
2648935fe098SMike Snitzer {
2649935fe098SMike Snitzer 	int len;
2650935fe098SMike Snitzer 	spin_lock(&mddev->lock);
2651935fe098SMike Snitzer 	if (mddev->bitmap)
2652935fe098SMike Snitzer 		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
2653935fe098SMike Snitzer 					     "false" : "true"));
2654935fe098SMike Snitzer 	else
2655935fe098SMike Snitzer 		len = sprintf(page, "\n");
2656935fe098SMike Snitzer 	spin_unlock(&mddev->lock);
2657935fe098SMike Snitzer 	return len;
2658935fe098SMike Snitzer }
2659935fe098SMike Snitzer 
2660935fe098SMike Snitzer static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
2661935fe098SMike Snitzer {
2662935fe098SMike Snitzer 	if (mddev->bitmap == NULL)
2663935fe098SMike Snitzer 		return -ENOENT;
2664935fe098SMike Snitzer 	if (strncmp(buf, "false", 5) == 0)
2665935fe098SMike Snitzer 		mddev->bitmap->need_sync = 1;
2666935fe098SMike Snitzer 	else if (strncmp(buf, "true", 4) == 0) {
2667935fe098SMike Snitzer 		if (mddev->degraded)
2668935fe098SMike Snitzer 			return -EBUSY;
2669935fe098SMike Snitzer 		mddev->bitmap->need_sync = 0;
2670935fe098SMike Snitzer 	} else
2671935fe098SMike Snitzer 		return -EINVAL;
2672935fe098SMike Snitzer 	return len;
2673935fe098SMike Snitzer }
2674935fe098SMike Snitzer 
2675935fe098SMike Snitzer static struct md_sysfs_entry bitmap_can_clear =
2676935fe098SMike Snitzer __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
2677935fe098SMike Snitzer 
2678935fe098SMike Snitzer static ssize_t
2679935fe098SMike Snitzer behind_writes_used_show(struct mddev *mddev, char *page)
2680935fe098SMike Snitzer {
2681935fe098SMike Snitzer 	ssize_t ret;
2682935fe098SMike Snitzer 	spin_lock(&mddev->lock);
2683935fe098SMike Snitzer 	if (mddev->bitmap == NULL)
2684935fe098SMike Snitzer 		ret = sprintf(page, "0\n");
2685935fe098SMike Snitzer 	else
2686935fe098SMike Snitzer 		ret = sprintf(page, "%lu\n",
2687935fe098SMike Snitzer 			      mddev->bitmap->behind_writes_used);
2688935fe098SMike Snitzer 	spin_unlock(&mddev->lock);
2689935fe098SMike Snitzer 	return ret;
2690935fe098SMike Snitzer }
2691935fe098SMike Snitzer 
2692935fe098SMike Snitzer static ssize_t
2693935fe098SMike Snitzer behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
2694935fe098SMike Snitzer {
2695935fe098SMike Snitzer 	if (mddev->bitmap)
2696935fe098SMike Snitzer 		mddev->bitmap->behind_writes_used = 0;
2697935fe098SMike Snitzer 	return len;
2698935fe098SMike Snitzer }
2699935fe098SMike Snitzer 
2700935fe098SMike Snitzer static struct md_sysfs_entry max_backlog_used =
2701935fe098SMike Snitzer __ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
2702935fe098SMike Snitzer        behind_writes_used_show, behind_writes_used_reset);
2703935fe098SMike Snitzer 
2704935fe098SMike Snitzer static struct attribute *md_bitmap_attrs[] = {
2705935fe098SMike Snitzer 	&bitmap_location.attr,
2706935fe098SMike Snitzer 	&bitmap_space.attr,
2707935fe098SMike Snitzer 	&bitmap_timeout.attr,
2708935fe098SMike Snitzer 	&bitmap_backlog.attr,
2709935fe098SMike Snitzer 	&bitmap_chunksize.attr,
2710935fe098SMike Snitzer 	&bitmap_metadata.attr,
2711935fe098SMike Snitzer 	&bitmap_can_clear.attr,
2712935fe098SMike Snitzer 	&max_backlog_used.attr,
2713935fe098SMike Snitzer 	NULL
2714935fe098SMike Snitzer };
2715c32dc040SRikard Falkeborn const struct attribute_group md_bitmap_group = {
2716935fe098SMike Snitzer 	.name = "bitmap",
2717935fe098SMike Snitzer 	.attrs = md_bitmap_attrs,
2718935fe098SMike Snitzer };
2719