xref: /openbmc/linux/drivers/block/drbd/drbd_worker.c (revision 20ee6390)
1b411b363SPhilipp Reisner /*
2b411b363SPhilipp Reisner    drbd_worker.c
3b411b363SPhilipp Reisner 
4b411b363SPhilipp Reisner    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5b411b363SPhilipp Reisner 
6b411b363SPhilipp Reisner    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7b411b363SPhilipp Reisner    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8b411b363SPhilipp Reisner    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9b411b363SPhilipp Reisner 
10b411b363SPhilipp Reisner    drbd is free software; you can redistribute it and/or modify
11b411b363SPhilipp Reisner    it under the terms of the GNU General Public License as published by
12b411b363SPhilipp Reisner    the Free Software Foundation; either version 2, or (at your option)
13b411b363SPhilipp Reisner    any later version.
14b411b363SPhilipp Reisner 
15b411b363SPhilipp Reisner    drbd is distributed in the hope that it will be useful,
16b411b363SPhilipp Reisner    but WITHOUT ANY WARRANTY; without even the implied warranty of
17b411b363SPhilipp Reisner    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18b411b363SPhilipp Reisner    GNU General Public License for more details.
19b411b363SPhilipp Reisner 
20b411b363SPhilipp Reisner    You should have received a copy of the GNU General Public License
21b411b363SPhilipp Reisner    along with drbd; see the file COPYING.  If not, write to
22b411b363SPhilipp Reisner    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23b411b363SPhilipp Reisner 
24b411b363SPhilipp Reisner  */
25b411b363SPhilipp Reisner 
26b411b363SPhilipp Reisner #include <linux/module.h>
27b411b363SPhilipp Reisner #include <linux/drbd.h>
28b411b363SPhilipp Reisner #include <linux/sched.h>
29b411b363SPhilipp Reisner #include <linux/wait.h>
30b411b363SPhilipp Reisner #include <linux/mm.h>
31b411b363SPhilipp Reisner #include <linux/memcontrol.h>
32b411b363SPhilipp Reisner #include <linux/mm_inline.h>
33b411b363SPhilipp Reisner #include <linux/slab.h>
34b411b363SPhilipp Reisner #include <linux/random.h>
35b411b363SPhilipp Reisner #include <linux/string.h>
36b411b363SPhilipp Reisner #include <linux/scatterlist.h>
37b411b363SPhilipp Reisner 
38b411b363SPhilipp Reisner #include "drbd_int.h"
39b411b363SPhilipp Reisner #include "drbd_req.h"
40b411b363SPhilipp Reisner 
41b411b363SPhilipp Reisner static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
429d77a5feSPhilipp Reisner static int w_make_resync_request(struct drbd_conf *mdev,
439d77a5feSPhilipp Reisner 				 struct drbd_work *w, int cancel);
44b411b363SPhilipp Reisner 
45b411b363SPhilipp Reisner 
46b411b363SPhilipp Reisner 
47b411b363SPhilipp Reisner /* defined here:
48b411b363SPhilipp Reisner    drbd_md_io_complete
4945bb912bSLars Ellenberg    drbd_endio_sec
50b411b363SPhilipp Reisner    drbd_endio_pri
51b411b363SPhilipp Reisner 
52b411b363SPhilipp Reisner  * more endio handlers:
53b411b363SPhilipp Reisner    atodb_endio in drbd_actlog.c
54b411b363SPhilipp Reisner    drbd_bm_async_io_complete in drbd_bitmap.c
55b411b363SPhilipp Reisner 
56b411b363SPhilipp Reisner  * For all these callbacks, note the following:
57b411b363SPhilipp Reisner  * The callbacks will be called in irq context by the IDE drivers,
58b411b363SPhilipp Reisner  * and in Softirqs/Tasklets/BH context by the SCSI drivers.
59b411b363SPhilipp Reisner  * Try to get the locking right :)
60b411b363SPhilipp Reisner  *
61b411b363SPhilipp Reisner  */
62b411b363SPhilipp Reisner 
63b411b363SPhilipp Reisner 
64b411b363SPhilipp Reisner /* About the global_state_lock
65b411b363SPhilipp Reisner    Each state transition on a device holds a read lock. In case we have
66b411b363SPhilipp Reisner    to evaluate the sync-after dependencies, we grab a write lock, because
67b411b363SPhilipp Reisner    we need stable states on all devices for that.  */
68b411b363SPhilipp Reisner rwlock_t global_state_lock;
69b411b363SPhilipp Reisner 
70b411b363SPhilipp Reisner /* used for synchronous meta data and bitmap IO
71b411b363SPhilipp Reisner  * submitted by drbd_md_sync_page_io()
72b411b363SPhilipp Reisner  */
73b411b363SPhilipp Reisner void drbd_md_io_complete(struct bio *bio, int error)
74b411b363SPhilipp Reisner {
75b411b363SPhilipp Reisner 	struct drbd_md_io *md_io;
76b411b363SPhilipp Reisner 
77b411b363SPhilipp Reisner 	md_io = (struct drbd_md_io *)bio->bi_private;
78b411b363SPhilipp Reisner 	md_io->error = error;
79b411b363SPhilipp Reisner 
80b411b363SPhilipp Reisner 	complete(&md_io->event);
81b411b363SPhilipp Reisner }
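/*
 * Illustrative sketch (not part of the driver): drbd_md_io_complete() above
 * pairs with a synchronous submit-and-wait sequence in the caller.  Only
 * drbd_md_io_complete() and struct drbd_md_io are taken from this file; the
 * surrounding lines are a hypothetical outline of what a caller such as
 * drbd_md_sync_page_io() is assumed to do:
 *
 *	struct drbd_md_io md_io;
 *
 *	init_completion(&md_io.event);
 *	md_io.error = 0;
 *	bio->bi_private = &md_io;
 *	bio->bi_end_io = drbd_md_io_complete;
 *	submit_bio(rw, bio);
 *	wait_for_completion(&md_io.event);
 *	ok = !md_io.error;
 *
 * wait_for_completion() sleeps until the (possibly irq-context) callback
 * calls complete(&md_io->event); the result is handed back via md_io.error.
 */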
82b411b363SPhilipp Reisner 
83b411b363SPhilipp Reisner /* reads on behalf of the partner,
84b411b363SPhilipp Reisner  * "submitted" by the receiver
85b411b363SPhilipp Reisner  */
8645bb912bSLars Ellenberg void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
87b411b363SPhilipp Reisner {
88b411b363SPhilipp Reisner 	unsigned long flags = 0;
8945bb912bSLars Ellenberg 	struct drbd_conf *mdev = e->mdev;
90b411b363SPhilipp Reisner 
91b411b363SPhilipp Reisner 	D_ASSERT(e->block_id != ID_VACANT);
92b411b363SPhilipp Reisner 
93b411b363SPhilipp Reisner 	spin_lock_irqsave(&mdev->req_lock, flags);
94b411b363SPhilipp Reisner 	mdev->read_cnt += e->size >> 9;
95b411b363SPhilipp Reisner 	list_del(&e->w.list);
96b411b363SPhilipp Reisner 	if (list_empty(&mdev->read_ee))
97b411b363SPhilipp Reisner 		wake_up(&mdev->ee_wait);
9845bb912bSLars Ellenberg 	if (test_bit(__EE_WAS_ERROR, &e->flags))
9981e84650SAndreas Gruenbacher 		__drbd_chk_io_error(mdev, false);
100b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&mdev->req_lock, flags);
101b411b363SPhilipp Reisner 
102b411b363SPhilipp Reisner 	drbd_queue_work(&mdev->data.work, &e->w);
103b411b363SPhilipp Reisner 	put_ldev(mdev);
104b411b363SPhilipp Reisner }
105b411b363SPhilipp Reisner 
106b411b363SPhilipp Reisner /* writes on behalf of the partner, or resync writes,
10745bb912bSLars Ellenberg  * "submitted" by the receiver, final stage.  */
10845bb912bSLars Ellenberg static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
109b411b363SPhilipp Reisner {
110b411b363SPhilipp Reisner 	unsigned long flags = 0;
11145bb912bSLars Ellenberg 	struct drbd_conf *mdev = e->mdev;
112b411b363SPhilipp Reisner 	sector_t e_sector;
113b411b363SPhilipp Reisner 	int do_wake;
114b411b363SPhilipp Reisner 	int is_syncer_req;
115b411b363SPhilipp Reisner 	int do_al_complete_io;
116b411b363SPhilipp Reisner 
117b411b363SPhilipp Reisner 	D_ASSERT(e->block_id != ID_VACANT);
118b411b363SPhilipp Reisner 
119b411b363SPhilipp Reisner 	/* after we moved e to done_ee,
120b411b363SPhilipp Reisner 	 * we may no longer access it,
121b411b363SPhilipp Reisner 	 * it may be freed/reused already!
122b411b363SPhilipp Reisner 	 * (as soon as we release the req_lock) */
123b411b363SPhilipp Reisner 	e_sector = e->sector;
124b411b363SPhilipp Reisner 	do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
12545bb912bSLars Ellenberg 	is_syncer_req = is_syncer_block_id(e->block_id);
126b411b363SPhilipp Reisner 
12745bb912bSLars Ellenberg 	spin_lock_irqsave(&mdev->req_lock, flags);
12845bb912bSLars Ellenberg 	mdev->writ_cnt += e->size >> 9;
129b411b363SPhilipp Reisner 	list_del(&e->w.list); /* has been on active_ee or sync_ee */
130b411b363SPhilipp Reisner 	list_add_tail(&e->w.list, &mdev->done_ee);
131b411b363SPhilipp Reisner 
132b411b363SPhilipp Reisner 	/* No hlist_del_init(&e->colision) here, we did not send the Ack yet,
133b411b363SPhilipp Reisner 	 * neither did we wake possibly waiting conflicting requests.
134b411b363SPhilipp Reisner 	 * done from "drbd_process_done_ee" within the appropriate w.cb
135b411b363SPhilipp Reisner 	 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
136b411b363SPhilipp Reisner 
137b411b363SPhilipp Reisner 	do_wake = is_syncer_req
138b411b363SPhilipp Reisner 		? list_empty(&mdev->sync_ee)
139b411b363SPhilipp Reisner 		: list_empty(&mdev->active_ee);
140b411b363SPhilipp Reisner 
14145bb912bSLars Ellenberg 	if (test_bit(__EE_WAS_ERROR, &e->flags))
14281e84650SAndreas Gruenbacher 		__drbd_chk_io_error(mdev, false);
143b411b363SPhilipp Reisner 	spin_unlock_irqrestore(&mdev->req_lock, flags);
144b411b363SPhilipp Reisner 
145b411b363SPhilipp Reisner 	if (is_syncer_req)
146b411b363SPhilipp Reisner 		drbd_rs_complete_io(mdev, e_sector);
147b411b363SPhilipp Reisner 
148b411b363SPhilipp Reisner 	if (do_wake)
149b411b363SPhilipp Reisner 		wake_up(&mdev->ee_wait);
150b411b363SPhilipp Reisner 
151b411b363SPhilipp Reisner 	if (do_al_complete_io)
152b411b363SPhilipp Reisner 		drbd_al_complete_io(mdev, e_sector);
153b411b363SPhilipp Reisner 
154b411b363SPhilipp Reisner 	wake_asender(mdev);
155b411b363SPhilipp Reisner 	put_ldev(mdev);
15645bb912bSLars Ellenberg }
157b411b363SPhilipp Reisner 
15845bb912bSLars Ellenberg /* writes on behalf of the partner, or resync writes,
15945bb912bSLars Ellenberg  * "submitted" by the receiver.
16045bb912bSLars Ellenberg  */
16145bb912bSLars Ellenberg void drbd_endio_sec(struct bio *bio, int error)
16245bb912bSLars Ellenberg {
16345bb912bSLars Ellenberg 	struct drbd_epoch_entry *e = bio->bi_private;
16445bb912bSLars Ellenberg 	struct drbd_conf *mdev = e->mdev;
16545bb912bSLars Ellenberg 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
16645bb912bSLars Ellenberg 	int is_write = bio_data_dir(bio) == WRITE;
16745bb912bSLars Ellenberg 
16807194272SLars Ellenberg 	if (error && __ratelimit(&drbd_ratelimit_state))
16945bb912bSLars Ellenberg 		dev_warn(DEV, "%s: error=%d s=%llus\n",
17045bb912bSLars Ellenberg 				is_write ? "write" : "read", error,
17145bb912bSLars Ellenberg 				(unsigned long long)e->sector);
17245bb912bSLars Ellenberg 	if (!error && !uptodate) {
17307194272SLars Ellenberg 		if (__ratelimit(&drbd_ratelimit_state))
17445bb912bSLars Ellenberg 			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
17545bb912bSLars Ellenberg 					is_write ? "write" : "read",
17645bb912bSLars Ellenberg 					(unsigned long long)e->sector);
17745bb912bSLars Ellenberg 		/* strange behavior of some lower level drivers...
17845bb912bSLars Ellenberg 		 * fail the request by clearing the uptodate flag,
17945bb912bSLars Ellenberg 		 * but do not return any error?! */
18045bb912bSLars Ellenberg 		error = -EIO;
18145bb912bSLars Ellenberg 	}
18245bb912bSLars Ellenberg 
18345bb912bSLars Ellenberg 	if (error)
18445bb912bSLars Ellenberg 		set_bit(__EE_WAS_ERROR, &e->flags);
18545bb912bSLars Ellenberg 
18645bb912bSLars Ellenberg 	bio_put(bio); /* no need for the bio anymore */
18745bb912bSLars Ellenberg 	if (atomic_dec_and_test(&e->pending_bios)) {
18845bb912bSLars Ellenberg 		if (is_write)
18945bb912bSLars Ellenberg 			drbd_endio_write_sec_final(e);
19045bb912bSLars Ellenberg 		else
19145bb912bSLars Ellenberg 			drbd_endio_read_sec_final(e);
19245bb912bSLars Ellenberg 	}
193b411b363SPhilipp Reisner }
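/*
 * Illustrative note (not part of the driver): one epoch entry may be backed
 * by several bios (see the multi-bio remark in w_make_resync_request below).
 * The submit path, drbd_submit_ee(), is assumed to account for all of them
 * up front, roughly:
 *
 *	atomic_set(&e->pending_bios, n);
 *	for each of the n prepared bios:
 *		bio->bi_private = e;
 *		bio->bi_end_io = drbd_endio_sec;
 *		submit_bio(rw, bio);
 *
 * so that atomic_dec_and_test() above runs the *_sec_final() handler exactly
 * once, after the last backing bio has completed.
 */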
194b411b363SPhilipp Reisner 
195b411b363SPhilipp Reisner /* read, readA or write requests on R_PRIMARY coming from drbd_make_request
196b411b363SPhilipp Reisner  */
197b411b363SPhilipp Reisner void drbd_endio_pri(struct bio *bio, int error)
198b411b363SPhilipp Reisner {
199a115413dSLars Ellenberg 	unsigned long flags;
200b411b363SPhilipp Reisner 	struct drbd_request *req = bio->bi_private;
201b411b363SPhilipp Reisner 	struct drbd_conf *mdev = req->mdev;
202a115413dSLars Ellenberg 	struct bio_and_error m;
203b411b363SPhilipp Reisner 	enum drbd_req_event what;
204b411b363SPhilipp Reisner 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
205b411b363SPhilipp Reisner 
206b411b363SPhilipp Reisner 	if (!error && !uptodate) {
207b411b363SPhilipp Reisner 		dev_warn(DEV, "p %s: setting error to -EIO\n",
208b411b363SPhilipp Reisner 			 bio_data_dir(bio) == WRITE ? "write" : "read");
209b411b363SPhilipp Reisner 		/* strange behavior of some lower level drivers...
210b411b363SPhilipp Reisner 		 * fail the request by clearing the uptodate flag,
211b411b363SPhilipp Reisner 		 * but do not return any error?! */
212b411b363SPhilipp Reisner 		error = -EIO;
213b411b363SPhilipp Reisner 	}
214b411b363SPhilipp Reisner 
215b411b363SPhilipp Reisner 	/* to avoid recursion in __req_mod */
216b411b363SPhilipp Reisner 	if (unlikely(error)) {
217b411b363SPhilipp Reisner 		what = (bio_data_dir(bio) == WRITE)
218b411b363SPhilipp Reisner 			? write_completed_with_error
2195c3c7e64SLars Ellenberg 			: (bio_rw(bio) == READ)
220b411b363SPhilipp Reisner 			  ? read_completed_with_error
221b411b363SPhilipp Reisner 			  : read_ahead_completed_with_error;
222b411b363SPhilipp Reisner 	} else
223b411b363SPhilipp Reisner 		what = completed_ok;
224b411b363SPhilipp Reisner 
225b411b363SPhilipp Reisner 	bio_put(req->private_bio);
226b411b363SPhilipp Reisner 	req->private_bio = ERR_PTR(error);
227b411b363SPhilipp Reisner 
228a115413dSLars Ellenberg 	/* not req_mod(), we need irqsave here! */
229a115413dSLars Ellenberg 	spin_lock_irqsave(&mdev->req_lock, flags);
230a115413dSLars Ellenberg 	__req_mod(req, what, &m);
231a115413dSLars Ellenberg 	spin_unlock_irqrestore(&mdev->req_lock, flags);
232a115413dSLars Ellenberg 
233a115413dSLars Ellenberg 	if (m.bio)
234a115413dSLars Ellenberg 		complete_master_bio(mdev, &m);
235b411b363SPhilipp Reisner }
236b411b363SPhilipp Reisner 
237b411b363SPhilipp Reisner int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
238b411b363SPhilipp Reisner {
239b411b363SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
240b411b363SPhilipp Reisner 
241b411b363SPhilipp Reisner 	/* We should not detach for read io-error,
242b411b363SPhilipp Reisner 	 * but try to WRITE the P_DATA_REPLY to the failed location,
243b411b363SPhilipp Reisner 	 * to give the disk the chance to relocate that block */
244b411b363SPhilipp Reisner 
245b411b363SPhilipp Reisner 	spin_lock_irq(&mdev->req_lock);
246d255e5ffSLars Ellenberg 	if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
247d255e5ffSLars Ellenberg 		_req_mod(req, read_retry_remote_canceled);
248b411b363SPhilipp Reisner 		spin_unlock_irq(&mdev->req_lock);
249b411b363SPhilipp Reisner 		return 1;
250b411b363SPhilipp Reisner 	}
251b411b363SPhilipp Reisner 	spin_unlock_irq(&mdev->req_lock);
252b411b363SPhilipp Reisner 
253b411b363SPhilipp Reisner 	return w_send_read_req(mdev, w, 0);
254b411b363SPhilipp Reisner }
255b411b363SPhilipp Reisner 
25645bb912bSLars Ellenberg void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
25745bb912bSLars Ellenberg {
25845bb912bSLars Ellenberg 	struct hash_desc desc;
25945bb912bSLars Ellenberg 	struct scatterlist sg;
26045bb912bSLars Ellenberg 	struct page *page = e->pages;
26145bb912bSLars Ellenberg 	struct page *tmp;
26245bb912bSLars Ellenberg 	unsigned len;
26345bb912bSLars Ellenberg 
26445bb912bSLars Ellenberg 	desc.tfm = tfm;
26545bb912bSLars Ellenberg 	desc.flags = 0;
26645bb912bSLars Ellenberg 
26745bb912bSLars Ellenberg 	sg_init_table(&sg, 1);
26845bb912bSLars Ellenberg 	crypto_hash_init(&desc);
26945bb912bSLars Ellenberg 
27045bb912bSLars Ellenberg 	while ((tmp = page_chain_next(page))) {
27145bb912bSLars Ellenberg 		/* all but the last page will be fully used */
27245bb912bSLars Ellenberg 		sg_set_page(&sg, page, PAGE_SIZE, 0);
27345bb912bSLars Ellenberg 		crypto_hash_update(&desc, &sg, sg.length);
27445bb912bSLars Ellenberg 		page = tmp;
27545bb912bSLars Ellenberg 	}
27645bb912bSLars Ellenberg 	/* and now the last, possibly only partially used page */
27745bb912bSLars Ellenberg 	len = e->size & (PAGE_SIZE - 1);
27845bb912bSLars Ellenberg 	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
27945bb912bSLars Ellenberg 	crypto_hash_update(&desc, &sg, sg.length);
28045bb912bSLars Ellenberg 	crypto_hash_final(&desc, digest);
28145bb912bSLars Ellenberg }
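/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): with
 * e->size == 9216, the chain at e->pages holds three pages.  The loop above
 * hashes the first two in full (4096 bytes each); for the last page,
 * e->size & (PAGE_SIZE - 1) yields 1024, so only the used 1024 bytes are fed
 * to the digest.  If e->size were an exact multiple of PAGE_SIZE, the
 * remainder would be 0 and the "len ?: PAGE_SIZE" fallback hashes the last
 * page in full.
 */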
28245bb912bSLars Ellenberg 
28345bb912bSLars Ellenberg void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
284b411b363SPhilipp Reisner {
285b411b363SPhilipp Reisner 	struct hash_desc desc;
286b411b363SPhilipp Reisner 	struct scatterlist sg;
287b411b363SPhilipp Reisner 	struct bio_vec *bvec;
288b411b363SPhilipp Reisner 	int i;
289b411b363SPhilipp Reisner 
290b411b363SPhilipp Reisner 	desc.tfm = tfm;
291b411b363SPhilipp Reisner 	desc.flags = 0;
292b411b363SPhilipp Reisner 
293b411b363SPhilipp Reisner 	sg_init_table(&sg, 1);
294b411b363SPhilipp Reisner 	crypto_hash_init(&desc);
295b411b363SPhilipp Reisner 
296b411b363SPhilipp Reisner 	__bio_for_each_segment(bvec, bio, i, 0) {
297b411b363SPhilipp Reisner 		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
298b411b363SPhilipp Reisner 		crypto_hash_update(&desc, &sg, sg.length);
299b411b363SPhilipp Reisner 	}
300b411b363SPhilipp Reisner 	crypto_hash_final(&desc, digest);
301b411b363SPhilipp Reisner }
302b411b363SPhilipp Reisner 
303b411b363SPhilipp Reisner static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
304b411b363SPhilipp Reisner {
305b411b363SPhilipp Reisner 	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
306b411b363SPhilipp Reisner 	int digest_size;
307b411b363SPhilipp Reisner 	void *digest;
308b411b363SPhilipp Reisner 	int ok;
309b411b363SPhilipp Reisner 
310b411b363SPhilipp Reisner 	D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);
311b411b363SPhilipp Reisner 
312b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
313b411b363SPhilipp Reisner 		drbd_free_ee(mdev, e);
314b411b363SPhilipp Reisner 		return 1;
315b411b363SPhilipp Reisner 	}
316b411b363SPhilipp Reisner 
31745bb912bSLars Ellenberg 	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
318b411b363SPhilipp Reisner 		digest_size = crypto_hash_digestsize(mdev->csums_tfm);
319b411b363SPhilipp Reisner 		digest = kmalloc(digest_size, GFP_NOIO);
320b411b363SPhilipp Reisner 		if (digest) {
32145bb912bSLars Ellenberg 			drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
322b411b363SPhilipp Reisner 
323b411b363SPhilipp Reisner 			inc_rs_pending(mdev);
324b411b363SPhilipp Reisner 			ok = drbd_send_drequest_csum(mdev,
325b411b363SPhilipp Reisner 						     e->sector,
326b411b363SPhilipp Reisner 						     e->size,
327b411b363SPhilipp Reisner 						     digest,
328b411b363SPhilipp Reisner 						     digest_size,
329b411b363SPhilipp Reisner 						     P_CSUM_RS_REQUEST);
330b411b363SPhilipp Reisner 			kfree(digest);
331b411b363SPhilipp Reisner 		} else {
332b411b363SPhilipp Reisner 			dev_err(DEV, "kmalloc() of digest failed.\n");
333b411b363SPhilipp Reisner 			ok = 0;
334b411b363SPhilipp Reisner 		}
335b411b363SPhilipp Reisner 	} else
336b411b363SPhilipp Reisner 		ok = 1;
337b411b363SPhilipp Reisner 
338b411b363SPhilipp Reisner 	drbd_free_ee(mdev, e);
339b411b363SPhilipp Reisner 
340b411b363SPhilipp Reisner 	if (unlikely(!ok))
341b411b363SPhilipp Reisner 		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
342b411b363SPhilipp Reisner 	return ok;
343b411b363SPhilipp Reisner }
344b411b363SPhilipp Reisner 
345b411b363SPhilipp Reisner #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
346b411b363SPhilipp Reisner 
347b411b363SPhilipp Reisner static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
348b411b363SPhilipp Reisner {
349b411b363SPhilipp Reisner 	struct drbd_epoch_entry *e;
350b411b363SPhilipp Reisner 
351b411b363SPhilipp Reisner 	if (!get_ldev(mdev))
35280a40e43SLars Ellenberg 		return -EIO;
353b411b363SPhilipp Reisner 
354e3555d85SPhilipp Reisner 	if (drbd_rs_should_slow_down(mdev, sector))
3550f0601f4SLars Ellenberg 		goto defer;
3560f0601f4SLars Ellenberg 
357b411b363SPhilipp Reisner 	/* GFP_TRY, because if there is no memory available right now, this may
358b411b363SPhilipp Reisner 	 * be rescheduled for later. It is "only" background resync, after all. */
359b411b363SPhilipp Reisner 	e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
36045bb912bSLars Ellenberg 	if (!e)
36180a40e43SLars Ellenberg 		goto defer;
362b411b363SPhilipp Reisner 
36380a40e43SLars Ellenberg 	e->w.cb = w_e_send_csum;
364b411b363SPhilipp Reisner 	spin_lock_irq(&mdev->req_lock);
365b411b363SPhilipp Reisner 	list_add(&e->w.list, &mdev->read_ee);
366b411b363SPhilipp Reisner 	spin_unlock_irq(&mdev->req_lock);
367b411b363SPhilipp Reisner 
3680f0601f4SLars Ellenberg 	atomic_add(size >> 9, &mdev->rs_sect_ev);
36945bb912bSLars Ellenberg 	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
37080a40e43SLars Ellenberg 		return 0;
37145bb912bSLars Ellenberg 
37222cc37a9SLars Ellenberg 	/* drbd_submit_ee currently fails for one reason only:
37322cc37a9SLars Ellenberg 	 * not being able to allocate enough bios.
37422cc37a9SLars Ellenberg 	 * Is dropping the connection going to help? */
37522cc37a9SLars Ellenberg 	spin_lock_irq(&mdev->req_lock);
37622cc37a9SLars Ellenberg 	list_del(&e->w.list);
37722cc37a9SLars Ellenberg 	spin_unlock_irq(&mdev->req_lock);
37822cc37a9SLars Ellenberg 
37945bb912bSLars Ellenberg 	drbd_free_ee(mdev, e);
38080a40e43SLars Ellenberg defer:
38145bb912bSLars Ellenberg 	put_ldev(mdev);
38280a40e43SLars Ellenberg 	return -EAGAIN;
383b411b363SPhilipp Reisner }
384b411b363SPhilipp Reisner 
385794abb75SPhilipp Reisner int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
386794abb75SPhilipp Reisner {
387794abb75SPhilipp Reisner 	switch (mdev->state.conn) {
388794abb75SPhilipp Reisner 	case C_VERIFY_S:
389794abb75SPhilipp Reisner 		w_make_ov_request(mdev, w, cancel);
390794abb75SPhilipp Reisner 		break;
391794abb75SPhilipp Reisner 	case C_SYNC_TARGET:
392794abb75SPhilipp Reisner 		w_make_resync_request(mdev, w, cancel);
393794abb75SPhilipp Reisner 		break;
394794abb75SPhilipp Reisner 	}
395794abb75SPhilipp Reisner 
396794abb75SPhilipp Reisner 	return 1;
397794abb75SPhilipp Reisner }
398794abb75SPhilipp Reisner 
399b411b363SPhilipp Reisner void resync_timer_fn(unsigned long data)
400b411b363SPhilipp Reisner {
401b411b363SPhilipp Reisner 	struct drbd_conf *mdev = (struct drbd_conf *) data;
402b411b363SPhilipp Reisner 
403794abb75SPhilipp Reisner 	if (list_empty(&mdev->resync_work.list))
404b411b363SPhilipp Reisner 		drbd_queue_work(&mdev->data.work, &mdev->resync_work);
405b411b363SPhilipp Reisner }
406b411b363SPhilipp Reisner 
407778f271dSPhilipp Reisner static void fifo_set(struct fifo_buffer *fb, int value)
408778f271dSPhilipp Reisner {
409778f271dSPhilipp Reisner 	int i;
410778f271dSPhilipp Reisner 
411778f271dSPhilipp Reisner 	for (i = 0; i < fb->size; i++)
412f10f2623SPhilipp Reisner 		fb->values[i] = value;
413778f271dSPhilipp Reisner }
414778f271dSPhilipp Reisner 
415778f271dSPhilipp Reisner static int fifo_push(struct fifo_buffer *fb, int value)
416778f271dSPhilipp Reisner {
417778f271dSPhilipp Reisner 	int ov;
418778f271dSPhilipp Reisner 
419778f271dSPhilipp Reisner 	ov = fb->values[fb->head_index];
420778f271dSPhilipp Reisner 	fb->values[fb->head_index++] = value;
421778f271dSPhilipp Reisner 
422778f271dSPhilipp Reisner 	if (fb->head_index >= fb->size)
423778f271dSPhilipp Reisner 		fb->head_index = 0;
424778f271dSPhilipp Reisner 
425778f271dSPhilipp Reisner 	return ov;
426778f271dSPhilipp Reisner }
427778f271dSPhilipp Reisner 
428778f271dSPhilipp Reisner static void fifo_add_val(struct fifo_buffer *fb, int value)
429778f271dSPhilipp Reisner {
430778f271dSPhilipp Reisner 	int i;
431778f271dSPhilipp Reisner 
432778f271dSPhilipp Reisner 	for (i = 0; i < fb->size; i++)
433778f271dSPhilipp Reisner 		fb->values[i] += value;
434778f271dSPhilipp Reisner }
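/*
 * Illustrative example (not part of the driver): rs_plan_s is used as a
 * fixed-size ring of per-step corrections.  fifo_push() appends a value at
 * head_index and returns the value that had been planned for the current
 * step; fifo_add_val() spreads a correction over all remaining steps.  With
 * a hypothetical 3-slot buffer that starts out as {0, 0, 0}:
 *
 *	fifo_add_val(fb, 4);        values become {4, 4, 4}
 *	curr = fifo_push(fb, 0);    returns 4, values become {0, 4, 4}
 *	curr = fifo_push(fb, 0);    returns 4, values become {0, 0, 4}
 *
 * i.e. each invocation consumes the oldest planned correction and leaves a
 * fresh (zero) slot for future planning.
 */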
435778f271dSPhilipp Reisner 
4369d77a5feSPhilipp Reisner static int drbd_rs_controller(struct drbd_conf *mdev)
437778f271dSPhilipp Reisner {
438778f271dSPhilipp Reisner 	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
439778f271dSPhilipp Reisner 	unsigned int want;     /* The number of sectors we want in the proxy */
440778f271dSPhilipp Reisner 	int req_sect; /* Number of sectors to request in this turn */
441778f271dSPhilipp Reisner 	int correction; /* Number of sectors more we need in the proxy*/
442778f271dSPhilipp Reisner 	int cps; /* correction per invocation of drbd_rs_controller() */
443778f271dSPhilipp Reisner 	int steps; /* Number of time steps to plan ahead */
444778f271dSPhilipp Reisner 	int curr_corr;
445778f271dSPhilipp Reisner 	int max_sect;
446778f271dSPhilipp Reisner 
447778f271dSPhilipp Reisner 	sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
448778f271dSPhilipp Reisner 	mdev->rs_in_flight -= sect_in;
449778f271dSPhilipp Reisner 
450778f271dSPhilipp Reisner 	spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
451778f271dSPhilipp Reisner 
452778f271dSPhilipp Reisner 	steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
453778f271dSPhilipp Reisner 
454778f271dSPhilipp Reisner 	if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
455778f271dSPhilipp Reisner 		want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
456778f271dSPhilipp Reisner 	} else { /* normal path */
457778f271dSPhilipp Reisner 		want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
458778f271dSPhilipp Reisner 			sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
459778f271dSPhilipp Reisner 	}
460778f271dSPhilipp Reisner 
461778f271dSPhilipp Reisner 	correction = want - mdev->rs_in_flight - mdev->rs_planed;
462778f271dSPhilipp Reisner 
463778f271dSPhilipp Reisner 	/* Plan ahead */
464778f271dSPhilipp Reisner 	cps = correction / steps;
465778f271dSPhilipp Reisner 	fifo_add_val(&mdev->rs_plan_s, cps);
466778f271dSPhilipp Reisner 	mdev->rs_planed += cps * steps;
467778f271dSPhilipp Reisner 
468778f271dSPhilipp Reisner 	/* What we do in this step */
469778f271dSPhilipp Reisner 	curr_corr = fifo_push(&mdev->rs_plan_s, 0);
470778f271dSPhilipp Reisner 	spin_unlock(&mdev->peer_seq_lock);
471778f271dSPhilipp Reisner 	mdev->rs_planed -= curr_corr;
472778f271dSPhilipp Reisner 
473778f271dSPhilipp Reisner 	req_sect = sect_in + curr_corr;
474778f271dSPhilipp Reisner 	if (req_sect < 0)
475778f271dSPhilipp Reisner 		req_sect = 0;
476778f271dSPhilipp Reisner 
477778f271dSPhilipp Reisner 	max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
478778f271dSPhilipp Reisner 	if (req_sect > max_sect)
479778f271dSPhilipp Reisner 		req_sect = max_sect;
480778f271dSPhilipp Reisner 
481778f271dSPhilipp Reisner 	/*
482778f271dSPhilipp Reisner 	dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
483778f271dSPhilipp Reisner 		 sect_in, mdev->rs_in_flight, want, correction,
484778f271dSPhilipp Reisner 		 steps, cps, mdev->rs_planed, curr_corr, req_sect);
485778f271dSPhilipp Reisner 	*/
486778f271dSPhilipp Reisner 
487778f271dSPhilipp Reisner 	return req_sect;
488778f271dSPhilipp Reisner }
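/*
 * Worked example of one controller step (all numbers hypothetical): assume
 * steps == 20, an empty plan (rs_planed == 0, all fifo slots 0),
 * rs_in_flight == 10000 sectors after sect_in == 2000 has been subtracted,
 * and c_fill_target == 50000 sectors.  Then
 *
 *	want       = 50000
 *	correction = 50000 - 10000 - 0 = 40000
 *	cps        = 40000 / 20        = 2000   (added to all 20 slots)
 *	curr_corr  = fifo_push(...)    = 2000   (the slot for this step)
 *	req_sect   = sect_in + curr_corr = 4000 sectors,
 *
 * clamped to max_sect as derived from c_max_rate; rs_planed is left at
 * 38000 for the following steps.  With c_fill_target == 0, "want" instead
 * tracks the recent incoming rate scaled by c_delay_target.
 */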
489778f271dSPhilipp Reisner 
4909d77a5feSPhilipp Reisner static int drbd_rs_number_requests(struct drbd_conf *mdev)
491e65f440dSLars Ellenberg {
492e65f440dSLars Ellenberg 	int number;
493e65f440dSLars Ellenberg 	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
494e65f440dSLars Ellenberg 		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
495e65f440dSLars Ellenberg 		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
496e65f440dSLars Ellenberg 	} else {
497e65f440dSLars Ellenberg 		mdev->c_sync_rate = mdev->sync_conf.rate;
498e65f440dSLars Ellenberg 		number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
499e65f440dSLars Ellenberg 	}
500e65f440dSLars Ellenberg 
501e65f440dSLars Ellenberg 	/* ignore the amount of pending requests, the resync controller should
502e65f440dSLars Ellenberg 	 * throttle down to incoming reply rate soon enough anyways. */
503e65f440dSLars Ellenberg 	return number;
504e65f440dSLars Ellenberg }
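/*
 * Unit check (illustrative, assuming BM_BLOCK_SIZE == 4096 and
 * SLEEP_TIME == HZ/10, i.e. one 100ms step): "number" is the count of 4 KiB
 * resync requests to issue in this step.  In the fixed-rate branch, a
 * configured rate of e.g. 400 KiB/s gives
 *
 *	number = (HZ/10) * 400 / (4 * HZ) = 10
 *
 * i.e. ten 4 KiB requests per 100ms == 400 KiB/s.  In the controller branch,
 * drbd_rs_controller() returns sectors, and the shift by
 * (BM_BLOCK_SHIFT - 9) == 3 converts 512-byte sectors into 4 KiB requests;
 * c_sync_rate is then back-computed from it, presumably for status
 * reporting.
 */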
505e65f440dSLars Ellenberg 
5069d77a5feSPhilipp Reisner static int w_make_resync_request(struct drbd_conf *mdev,
507b411b363SPhilipp Reisner 				 struct drbd_work *w, int cancel)
508b411b363SPhilipp Reisner {
509b411b363SPhilipp Reisner 	unsigned long bit;
510b411b363SPhilipp Reisner 	sector_t sector;
511b411b363SPhilipp Reisner 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
5121816a2b4SLars Ellenberg 	int max_bio_size;
513e65f440dSLars Ellenberg 	int number, rollback_i, size;
514b411b363SPhilipp Reisner 	int align, queued, sndbuf;
5150f0601f4SLars Ellenberg 	int i = 0;
516b411b363SPhilipp Reisner 
517b411b363SPhilipp Reisner 	if (unlikely(cancel))
518b411b363SPhilipp Reisner 		return 1;
519b411b363SPhilipp Reisner 
520af85e8e8SLars Ellenberg 	if (mdev->rs_total == 0) {
521af85e8e8SLars Ellenberg 		/* empty resync? */
522af85e8e8SLars Ellenberg 		drbd_resync_finished(mdev);
523af85e8e8SLars Ellenberg 		return 1;
524af85e8e8SLars Ellenberg 	}
525af85e8e8SLars Ellenberg 
526b411b363SPhilipp Reisner 	if (!get_ldev(mdev)) {
527b411b363SPhilipp Reisner 		/* Since we only need to access mdev->rsync, a
528b411b363SPhilipp Reisner 		   get_ldev_if_state(mdev, D_FAILED) would be sufficient, but
529b411b363SPhilipp Reisner 		   continuing resync with a broken disk makes no sense at
530b411b363SPhilipp Reisner 		   all */
531b411b363SPhilipp Reisner 		dev_err(DEV, "Disk broke down during resync!\n");
532b411b363SPhilipp Reisner 		return 1;
533b411b363SPhilipp Reisner 	}
534b411b363SPhilipp Reisner 
535bb3d000cSLars Ellenberg 	/* starting with drbd 8.3.8, we can handle multi-bio EEs,
536bb3d000cSLars Ellenberg 	 * if it should be necessary */
5371816a2b4SLars Ellenberg 	max_bio_size =
5381816a2b4SLars Ellenberg 		mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
5391816a2b4SLars Ellenberg 		mdev->agreed_pro_version < 95 ?	DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
540bb3d000cSLars Ellenberg 
541e65f440dSLars Ellenberg 	number = drbd_rs_number_requests(mdev);
542e65f440dSLars Ellenberg 	if (number == 0)
5430f0601f4SLars Ellenberg 		goto requeue;
544b411b363SPhilipp Reisner 
545b411b363SPhilipp Reisner 	for (i = 0; i < number; i++) {
546b411b363SPhilipp Reisner 		/* Stop generating RS requests, when half of the send buffer is filled */
547b411b363SPhilipp Reisner 		mutex_lock(&mdev->data.mutex);
548b411b363SPhilipp Reisner 		if (mdev->data.socket) {
549b411b363SPhilipp Reisner 			queued = mdev->data.socket->sk->sk_wmem_queued;
550b411b363SPhilipp Reisner 			sndbuf = mdev->data.socket->sk->sk_sndbuf;
551b411b363SPhilipp Reisner 		} else {
552b411b363SPhilipp Reisner 			queued = 1;
553b411b363SPhilipp Reisner 			sndbuf = 0;
554b411b363SPhilipp Reisner 		}
555b411b363SPhilipp Reisner 		mutex_unlock(&mdev->data.mutex);
556b411b363SPhilipp Reisner 		if (queued > sndbuf / 2)
557b411b363SPhilipp Reisner 			goto requeue;
558b411b363SPhilipp Reisner 
559b411b363SPhilipp Reisner next_sector:
560b411b363SPhilipp Reisner 		size = BM_BLOCK_SIZE;
561b411b363SPhilipp Reisner 		bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
562b411b363SPhilipp Reisner 
5634b0715f0SLars Ellenberg 		if (bit == DRBD_END_OF_BITMAP) {
564b411b363SPhilipp Reisner 			mdev->bm_resync_fo = drbd_bm_bits(mdev);
565b411b363SPhilipp Reisner 			put_ldev(mdev);
566b411b363SPhilipp Reisner 			return 1;
567b411b363SPhilipp Reisner 		}
568b411b363SPhilipp Reisner 
569b411b363SPhilipp Reisner 		sector = BM_BIT_TO_SECT(bit);
570b411b363SPhilipp Reisner 
571e3555d85SPhilipp Reisner 		if (drbd_rs_should_slow_down(mdev, sector) ||
572e3555d85SPhilipp Reisner 		    drbd_try_rs_begin_io(mdev, sector)) {
573b411b363SPhilipp Reisner 			mdev->bm_resync_fo = bit;
574b411b363SPhilipp Reisner 			goto requeue;
575b411b363SPhilipp Reisner 		}
576b411b363SPhilipp Reisner 		mdev->bm_resync_fo = bit + 1;
577b411b363SPhilipp Reisner 
578b411b363SPhilipp Reisner 		if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
579b411b363SPhilipp Reisner 			drbd_rs_complete_io(mdev, sector);
580b411b363SPhilipp Reisner 			goto next_sector;
581b411b363SPhilipp Reisner 		}
582b411b363SPhilipp Reisner 
5831816a2b4SLars Ellenberg #if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
584b411b363SPhilipp Reisner 		/* try to find some adjacent bits.
585b411b363SPhilipp Reisner 		 * we stop if we have already the maximum req size.
586b411b363SPhilipp Reisner 		 *
587b411b363SPhilipp Reisner 		 * Additionally always align bigger requests, in order to
588b411b363SPhilipp Reisner 		 * be prepared for all stripe sizes of software RAIDs.
589b411b363SPhilipp Reisner 		 */
590b411b363SPhilipp Reisner 		align = 1;
591d207450cSPhilipp Reisner 		rollback_i = i;
592b411b363SPhilipp Reisner 		for (;;) {
5931816a2b4SLars Ellenberg 			if (size + BM_BLOCK_SIZE > max_bio_size)
594b411b363SPhilipp Reisner 				break;
595b411b363SPhilipp Reisner 
596b411b363SPhilipp Reisner 			/* Be always aligned */
597b411b363SPhilipp Reisner 			if (sector & ((1<<(align+3))-1))
598b411b363SPhilipp Reisner 				break;
599b411b363SPhilipp Reisner 
600b411b363SPhilipp Reisner 			/* do not cross extent boundaries */
601b411b363SPhilipp Reisner 			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
602b411b363SPhilipp Reisner 				break;
603b411b363SPhilipp Reisner 			/* now, is it actually dirty, after all?
604b411b363SPhilipp Reisner 			 * caution, drbd_bm_test_bit is tri-state for some
605b411b363SPhilipp Reisner 			 * obscure reason; ( b == 0 ) would get the out-of-band
606b411b363SPhilipp Reisner 			 * only accidentally right because of the "oddly sized"
607b411b363SPhilipp Reisner 			 * adjustment below */
608b411b363SPhilipp Reisner 			if (drbd_bm_test_bit(mdev, bit+1) != 1)
609b411b363SPhilipp Reisner 				break;
610b411b363SPhilipp Reisner 			bit++;
611b411b363SPhilipp Reisner 			size += BM_BLOCK_SIZE;
612b411b363SPhilipp Reisner 			if ((BM_BLOCK_SIZE << align) <= size)
613b411b363SPhilipp Reisner 				align++;
614b411b363SPhilipp Reisner 			i++;
615b411b363SPhilipp Reisner 		}
616b411b363SPhilipp Reisner 		/* if we merged some,
617b411b363SPhilipp Reisner 		 * reset the offset to start the next drbd_bm_find_next from */
618b411b363SPhilipp Reisner 		if (size > BM_BLOCK_SIZE)
619b411b363SPhilipp Reisner 			mdev->bm_resync_fo = bit + 1;
620b411b363SPhilipp Reisner #endif
621b411b363SPhilipp Reisner 
622b411b363SPhilipp Reisner 		/* adjust very last sectors, in case we are oddly sized */
623b411b363SPhilipp Reisner 		if (sector + (size>>9) > capacity)
624b411b363SPhilipp Reisner 			size = (capacity-sector)<<9;
625b411b363SPhilipp Reisner 		if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
626b411b363SPhilipp Reisner 			switch (read_for_csum(mdev, sector, size)) {
62780a40e43SLars Ellenberg 			case -EIO: /* Disk failure */
628b411b363SPhilipp Reisner 				put_ldev(mdev);
629b411b363SPhilipp Reisner 				return 0;
63080a40e43SLars Ellenberg 			case -EAGAIN: /* allocation failed, or ldev busy */
631b411b363SPhilipp Reisner 				drbd_rs_complete_io(mdev, sector);
632b411b363SPhilipp Reisner 				mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
633d207450cSPhilipp Reisner 				i = rollback_i;
634b411b363SPhilipp Reisner 				goto requeue;
63580a40e43SLars Ellenberg 			case 0:
63680a40e43SLars Ellenberg 				/* everything ok */
63780a40e43SLars Ellenberg 				break;
63880a40e43SLars Ellenberg 			default:
63980a40e43SLars Ellenberg 				BUG();
640b411b363SPhilipp Reisner 			}
641b411b363SPhilipp Reisner 		} else {
642b411b363SPhilipp Reisner 			inc_rs_pending(mdev);
643b411b363SPhilipp Reisner 			if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
644b411b363SPhilipp Reisner 					       sector, size, ID_SYNCER)) {
645b411b363SPhilipp Reisner 				dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
646b411b363SPhilipp Reisner 				dec_rs_pending(mdev);
647b411b363SPhilipp Reisner 				put_ldev(mdev);
648b411b363SPhilipp Reisner 				return 0;
649b411b363SPhilipp Reisner 			}
650b411b363SPhilipp Reisner 		}
651b411b363SPhilipp Reisner 	}
652b411b363SPhilipp Reisner 
653b411b363SPhilipp Reisner 	if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
654b411b363SPhilipp Reisner 		/* last syncer _request_ was sent,
655b411b363SPhilipp Reisner 		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
656b411b363SPhilipp Reisner 		 * next sync group will resume), as soon as we receive the last
657b411b363SPhilipp Reisner 		 * resync data block, and the last bit is cleared.
658b411b363SPhilipp Reisner 		 * until then resync "work" is "inactive" ...
659b411b363SPhilipp Reisner 		 */
660b411b363SPhilipp Reisner 		put_ldev(mdev);
661b411b363SPhilipp Reisner 		return 1;
662b411b363SPhilipp Reisner 	}
663b411b363SPhilipp Reisner 
664b411b363SPhilipp Reisner  requeue:
665778f271dSPhilipp Reisner 	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
666b411b363SPhilipp Reisner 	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
667b411b363SPhilipp Reisner 	put_ldev(mdev);
668b411b363SPhilipp Reisner 	return 1;
669b411b363SPhilipp Reisner }
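/*
 * Illustrative example of the merging loop above, assuming
 * BM_BLOCK_SIZE == 4 KiB (8 sectors): the request only keeps growing while
 * (sector & ((1 << (align+3)) - 1)) == 0 holds for the current align, i.e.
 * while its start sector stays suitably aligned.  Growing past 4 KiB needs
 * an 8 KiB-aligned start (align == 1), growing past 8 KiB needs a
 * 16 KiB-aligned start (align == 2), and so on.  A request starting on a
 * 16 KiB boundary that is not also a 32 KiB boundary therefore tops out at
 * 16 KiB: once size reaches 16 KiB, align becomes 3 and the 32 KiB
 * alignment test fails.  The net effect is that each merged request fits
 * inside a single naturally aligned power-of-two chunk, which keeps resync
 * reads from straddling typical RAID stripe boundaries.
 */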
670b411b363SPhilipp Reisner 
671b411b363SPhilipp Reisner static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
672b411b363SPhilipp Reisner {
673b411b363SPhilipp Reisner 	int number, i, size;
674b411b363SPhilipp Reisner 	sector_t sector;
675b411b363SPhilipp Reisner 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
676b411b363SPhilipp Reisner 
677b411b363SPhilipp Reisner 	if (unlikely(cancel))
678b411b363SPhilipp Reisner 		return 1;
679b411b363SPhilipp Reisner 
6802649f080SLars Ellenberg 	number = drbd_rs_number_requests(mdev);
681b411b363SPhilipp Reisner 
682b411b363SPhilipp Reisner 	sector = mdev->ov_position;
683b411b363SPhilipp Reisner 	for (i = 0; i < number; i++) {
684b411b363SPhilipp Reisner 		if (sector >= capacity) {
685b411b363SPhilipp Reisner 			return 1;
686b411b363SPhilipp Reisner 		}
687b411b363SPhilipp Reisner 
688b411b363SPhilipp Reisner 		size = BM_BLOCK_SIZE;
689b411b363SPhilipp Reisner 
690e3555d85SPhilipp Reisner 		if (drbd_rs_should_slow_down(mdev, sector) ||
691e3555d85SPhilipp Reisner 		    drbd_try_rs_begin_io(mdev, sector)) {
692b411b363SPhilipp Reisner 			mdev->ov_position = sector;
693b411b363SPhilipp Reisner 			goto requeue;
694b411b363SPhilipp Reisner 		}
695b411b363SPhilipp Reisner 
696b411b363SPhilipp Reisner 		if (sector + (size>>9) > capacity)
697b411b363SPhilipp Reisner 			size = (capacity-sector)<<9;
698b411b363SPhilipp Reisner 
699b411b363SPhilipp Reisner 		inc_rs_pending(mdev);
700b411b363SPhilipp Reisner 		if (!drbd_send_ov_request(mdev, sector, size)) {
701b411b363SPhilipp Reisner 			dec_rs_pending(mdev);
702b411b363SPhilipp Reisner 			return 0;
703b411b363SPhilipp Reisner 		}
704b411b363SPhilipp Reisner 		sector += BM_SECT_PER_BIT;
705b411b363SPhilipp Reisner 	}
706b411b363SPhilipp Reisner 	mdev->ov_position = sector;
707b411b363SPhilipp Reisner 
708b411b363SPhilipp Reisner  requeue:
7092649f080SLars Ellenberg 	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
710b411b363SPhilipp Reisner 	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
711b411b363SPhilipp Reisner 	return 1;
712b411b363SPhilipp Reisner }
713b411b363SPhilipp Reisner 
714c4752ef1SPhilipp Reisner 
715370a43e7SPhilipp Reisner void start_resync_timer_fn(unsigned long data)
716370a43e7SPhilipp Reisner {
717370a43e7SPhilipp Reisner 	struct drbd_conf *mdev = (struct drbd_conf *) data;
718370a43e7SPhilipp Reisner 
719370a43e7SPhilipp Reisner 	drbd_queue_work(&mdev->data.work, &mdev->start_resync_work);
720370a43e7SPhilipp Reisner }
721370a43e7SPhilipp Reisner 
722c4752ef1SPhilipp Reisner int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
723c4752ef1SPhilipp Reisner {
724370a43e7SPhilipp Reisner 	if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
725370a43e7SPhilipp Reisner 		dev_warn(DEV, "w_start_resync later...\n");
726370a43e7SPhilipp Reisner 		mdev->start_resync_timer.expires = jiffies + HZ/10;
727370a43e7SPhilipp Reisner 		add_timer(&mdev->start_resync_timer);
728370a43e7SPhilipp Reisner 		return 1;
729370a43e7SPhilipp Reisner 	}
730c4752ef1SPhilipp Reisner 
731370a43e7SPhilipp Reisner 	drbd_start_resync(mdev, C_SYNC_SOURCE);
732370a43e7SPhilipp Reisner 	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
733c4752ef1SPhilipp Reisner 	return 1;
734c4752ef1SPhilipp Reisner }
735c4752ef1SPhilipp Reisner 
736b411b363SPhilipp Reisner int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
737b411b363SPhilipp Reisner {
738b411b363SPhilipp Reisner 	kfree(w);
739b411b363SPhilipp Reisner 	ov_oos_print(mdev);
740b411b363SPhilipp Reisner 	drbd_resync_finished(mdev);
741b411b363SPhilipp Reisner 
742b411b363SPhilipp Reisner 	return 1;
743b411b363SPhilipp Reisner }
744b411b363SPhilipp Reisner 
745b411b363SPhilipp Reisner static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
746b411b363SPhilipp Reisner {
747b411b363SPhilipp Reisner 	kfree(w);
748b411b363SPhilipp Reisner 
749b411b363SPhilipp Reisner 	drbd_resync_finished(mdev);
750b411b363SPhilipp Reisner 
751b411b363SPhilipp Reisner 	return 1;
752b411b363SPhilipp Reisner }
753b411b363SPhilipp Reisner 
754af85e8e8SLars Ellenberg static void ping_peer(struct drbd_conf *mdev)
755af85e8e8SLars Ellenberg {
756af85e8e8SLars Ellenberg 	clear_bit(GOT_PING_ACK, &mdev->flags);
757af85e8e8SLars Ellenberg 	request_ping(mdev);
758af85e8e8SLars Ellenberg 	wait_event(mdev->misc_wait,
759af85e8e8SLars Ellenberg 		   test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
760af85e8e8SLars Ellenberg }
761af85e8e8SLars Ellenberg 
762b411b363SPhilipp Reisner int drbd_resync_finished(struct drbd_conf *mdev)
763b411b363SPhilipp Reisner {
764b411b363SPhilipp Reisner 	unsigned long db, dt, dbdt;
765b411b363SPhilipp Reisner 	unsigned long n_oos;
766b411b363SPhilipp Reisner 	union drbd_state os, ns;
767b411b363SPhilipp Reisner 	struct drbd_work *w;
768b411b363SPhilipp Reisner 	char *khelper_cmd = NULL;
76926525618SLars Ellenberg 	int verify_done = 0;
770b411b363SPhilipp Reisner 
771b411b363SPhilipp Reisner 	/* Remove all elements from the resync LRU. Since future actions
772b411b363SPhilipp Reisner 	 * might set bits in the (main) bitmap, then the entries in the
773b411b363SPhilipp Reisner 	 * resync LRU would be wrong. */
774b411b363SPhilipp Reisner 	if (drbd_rs_del_all(mdev)) {
775b411b363SPhilipp Reisner 		/* In case this is not possible now, most probably because
776b411b363SPhilipp Reisner 		 * there are P_RS_DATA_REPLY Packets lingering on the worker's
777b411b363SPhilipp Reisner 		 * queue (or even the read operations for those packets
778b411b363SPhilipp Reisner 		 * is not finished by now).   Retry in 100ms. */
779b411b363SPhilipp Reisner 
78020ee6390SPhilipp Reisner 		schedule_timeout_interruptible(HZ / 10);
781b411b363SPhilipp Reisner 		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
782b411b363SPhilipp Reisner 		if (w) {
783b411b363SPhilipp Reisner 			w->cb = w_resync_finished;
784b411b363SPhilipp Reisner 			drbd_queue_work(&mdev->data.work, w);
785b411b363SPhilipp Reisner 			return 1;
786b411b363SPhilipp Reisner 		}
787b411b363SPhilipp Reisner 		dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
788b411b363SPhilipp Reisner 	}
789b411b363SPhilipp Reisner 
790b411b363SPhilipp Reisner 	dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
791b411b363SPhilipp Reisner 	if (dt <= 0)
792b411b363SPhilipp Reisner 		dt = 1;
793b411b363SPhilipp Reisner 	db = mdev->rs_total;
794b411b363SPhilipp Reisner 	dbdt = Bit2KB(db/dt);
795b411b363SPhilipp Reisner 	mdev->rs_paused /= HZ;
796b411b363SPhilipp Reisner 
797b411b363SPhilipp Reisner 	if (!get_ldev(mdev))
798b411b363SPhilipp Reisner 		goto out;
799b411b363SPhilipp Reisner 
800af85e8e8SLars Ellenberg 	ping_peer(mdev);
801af85e8e8SLars Ellenberg 
802b411b363SPhilipp Reisner 	spin_lock_irq(&mdev->req_lock);
803b411b363SPhilipp Reisner 	os = mdev->state;
804b411b363SPhilipp Reisner 
80526525618SLars Ellenberg 	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
80626525618SLars Ellenberg 
807b411b363SPhilipp Reisner 	/* This protects us against multiple calls (that can happen in the presence
808b411b363SPhilipp Reisner 	   of application IO), and against connectivity loss just before we arrive here. */
809b411b363SPhilipp Reisner 	if (os.conn <= C_CONNECTED)
810b411b363SPhilipp Reisner 		goto out_unlock;
811b411b363SPhilipp Reisner 
812b411b363SPhilipp Reisner 	ns = os;
813b411b363SPhilipp Reisner 	ns.conn = C_CONNECTED;
814b411b363SPhilipp Reisner 
815b411b363SPhilipp Reisner 	dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
81626525618SLars Ellenberg 	     verify_done ? "Online verify " : "Resync",
817b411b363SPhilipp Reisner 	     dt + mdev->rs_paused, mdev->rs_paused, dbdt);
818b411b363SPhilipp Reisner 
819b411b363SPhilipp Reisner 	n_oos = drbd_bm_total_weight(mdev);
820b411b363SPhilipp Reisner 
821b411b363SPhilipp Reisner 	if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
822b411b363SPhilipp Reisner 		if (n_oos) {
823b411b363SPhilipp Reisner 			dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
824b411b363SPhilipp Reisner 			      n_oos, Bit2KB(1));
825b411b363SPhilipp Reisner 			khelper_cmd = "out-of-sync";
826b411b363SPhilipp Reisner 		}
827b411b363SPhilipp Reisner 	} else {
828b411b363SPhilipp Reisner 		D_ASSERT((n_oos - mdev->rs_failed) == 0);
829b411b363SPhilipp Reisner 
830b411b363SPhilipp Reisner 		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
831b411b363SPhilipp Reisner 			khelper_cmd = "after-resync-target";
832b411b363SPhilipp Reisner 
833b411b363SPhilipp Reisner 		if (mdev->csums_tfm && mdev->rs_total) {
834b411b363SPhilipp Reisner 			const unsigned long s = mdev->rs_same_csum;
835b411b363SPhilipp Reisner 			const unsigned long t = mdev->rs_total;
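			/* integer percentage of blocks with equal checksums;
			 * for huge totals, divide t first so that s*100
			 * cannot overflow (assumed rationale) */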
836b411b363SPhilipp Reisner 			const int ratio =
837b411b363SPhilipp Reisner 				(t == 0)     ? 0 :
838b411b363SPhilipp Reisner 			(t < 100000) ? ((s*100)/t) : (s/(t/100));
839b411b363SPhilipp Reisner 			dev_info(DEV, "%u %% had equal check sums, eliminated: %luK; "
840b411b363SPhilipp Reisner 			     "transferred %luK total %luK\n",
841b411b363SPhilipp Reisner 			     ratio,
842b411b363SPhilipp Reisner 			     Bit2KB(mdev->rs_same_csum),
843b411b363SPhilipp Reisner 			     Bit2KB(mdev->rs_total - mdev->rs_same_csum),
844b411b363SPhilipp Reisner 			     Bit2KB(mdev->rs_total));
845b411b363SPhilipp Reisner 		}
846b411b363SPhilipp Reisner 	}
847b411b363SPhilipp Reisner 
848b411b363SPhilipp Reisner 	if (mdev->rs_failed) {
849b411b363SPhilipp Reisner 		dev_info(DEV, "            %lu failed blocks\n", mdev->rs_failed);
850b411b363SPhilipp Reisner 
851b411b363SPhilipp Reisner 		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
852b411b363SPhilipp Reisner 			ns.disk = D_INCONSISTENT;
853b411b363SPhilipp Reisner 			ns.pdsk = D_UP_TO_DATE;
854b411b363SPhilipp Reisner 		} else {
855b411b363SPhilipp Reisner 			ns.disk = D_UP_TO_DATE;
856b411b363SPhilipp Reisner 			ns.pdsk = D_INCONSISTENT;
857b411b363SPhilipp Reisner 		}
858b411b363SPhilipp Reisner 	} else {
859b411b363SPhilipp Reisner 		ns.disk = D_UP_TO_DATE;
860b411b363SPhilipp Reisner 		ns.pdsk = D_UP_TO_DATE;
861b411b363SPhilipp Reisner 
862b411b363SPhilipp Reisner 		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
863b411b363SPhilipp Reisner 			if (mdev->p_uuid) {
864b411b363SPhilipp Reisner 				int i;
865b411b363SPhilipp Reisner 				for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
866b411b363SPhilipp Reisner 					_drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
867b411b363SPhilipp Reisner 				drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
868b411b363SPhilipp Reisner 				_drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
869b411b363SPhilipp Reisner 			} else {
870b411b363SPhilipp Reisner 				dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
871b411b363SPhilipp Reisner 			}
872b411b363SPhilipp Reisner 		}
873b411b363SPhilipp Reisner 
874b411b363SPhilipp Reisner 		drbd_uuid_set_bm(mdev, 0UL);
875b411b363SPhilipp Reisner 
876b411b363SPhilipp Reisner 		if (mdev->p_uuid) {
877b411b363SPhilipp Reisner 			/* Now the two UUID sets are equal, update what we
878b411b363SPhilipp Reisner 			 * know of the peer. */
879b411b363SPhilipp Reisner 			int i;
880b411b363SPhilipp Reisner 			for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
881b411b363SPhilipp Reisner 				mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
882b411b363SPhilipp Reisner 		}
883b411b363SPhilipp Reisner 	}
884b411b363SPhilipp Reisner 
885b411b363SPhilipp Reisner 	_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
886b411b363SPhilipp Reisner out_unlock:
887b411b363SPhilipp Reisner 	spin_unlock_irq(&mdev->req_lock);
888b411b363SPhilipp Reisner 	put_ldev(mdev);
889b411b363SPhilipp Reisner out:
890b411b363SPhilipp Reisner 	mdev->rs_total  = 0;
891b411b363SPhilipp Reisner 	mdev->rs_failed = 0;
892b411b363SPhilipp Reisner 	mdev->rs_paused = 0;
89326525618SLars Ellenberg 	if (verify_done)
894b411b363SPhilipp Reisner 		mdev->ov_start_sector = 0;
895b411b363SPhilipp Reisner 
89613d42685SLars Ellenberg 	drbd_md_sync(mdev);
89713d42685SLars Ellenberg 
898b411b363SPhilipp Reisner 	if (khelper_cmd)
899b411b363SPhilipp Reisner 		drbd_khelper(mdev, khelper_cmd);
900b411b363SPhilipp Reisner 
901b411b363SPhilipp Reisner 	return 1;
902b411b363SPhilipp Reisner }
903b411b363SPhilipp Reisner 
904b411b363SPhilipp Reisner /* helper */
905b411b363SPhilipp Reisner static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
906b411b363SPhilipp Reisner {
90745bb912bSLars Ellenberg 	if (drbd_ee_has_active_page(e)) {
908b411b363SPhilipp Reisner 		/* This might happen if sendpage() has not finished */
90978db8928SLars Ellenberg 		int i = (e->size + PAGE_SIZE -1) >> PAGE_SHIFT;
910435f0740SLars Ellenberg 		atomic_add(i, &mdev->pp_in_use_by_net);
911435f0740SLars Ellenberg 		atomic_sub(i, &mdev->pp_in_use);
912b411b363SPhilipp Reisner 		spin_lock_irq(&mdev->req_lock);
913b411b363SPhilipp Reisner 		list_add_tail(&e->w.list, &mdev->net_ee);
914b411b363SPhilipp Reisner 		spin_unlock_irq(&mdev->req_lock);
915435f0740SLars Ellenberg 		wake_up(&drbd_pp_wait);
916b411b363SPhilipp Reisner 	} else
917b411b363SPhilipp Reisner 		drbd_free_ee(mdev, e);
918b411b363SPhilipp Reisner }
919b411b363SPhilipp Reisner 
920b411b363SPhilipp Reisner /**
921b411b363SPhilipp Reisner  * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
922b411b363SPhilipp Reisner  * @mdev:	DRBD device.
923b411b363SPhilipp Reisner  * @w:		work object.
924b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyways
925b411b363SPhilipp Reisner  */
926b411b363SPhilipp Reisner int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
927b411b363SPhilipp Reisner {
928b411b363SPhilipp Reisner 	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
929b411b363SPhilipp Reisner 	int ok;
930b411b363SPhilipp Reisner 
931b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
932b411b363SPhilipp Reisner 		drbd_free_ee(mdev, e);
933b411b363SPhilipp Reisner 		dec_unacked(mdev);
934b411b363SPhilipp Reisner 		return 1;
935b411b363SPhilipp Reisner 	}
936b411b363SPhilipp Reisner 
93745bb912bSLars Ellenberg 	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
938b411b363SPhilipp Reisner 		ok = drbd_send_block(mdev, P_DATA_REPLY, e);
939b411b363SPhilipp Reisner 	} else {
940b411b363SPhilipp Reisner 		if (__ratelimit(&drbd_ratelimit_state))
941b411b363SPhilipp Reisner 			dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
942b411b363SPhilipp Reisner 			    (unsigned long long)e->sector);
943b411b363SPhilipp Reisner 
944b411b363SPhilipp Reisner 		ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
945b411b363SPhilipp Reisner 	}
946b411b363SPhilipp Reisner 
947b411b363SPhilipp Reisner 	dec_unacked(mdev);
948b411b363SPhilipp Reisner 
949b411b363SPhilipp Reisner 	move_to_net_ee_or_free(mdev, e);
950b411b363SPhilipp Reisner 
951b411b363SPhilipp Reisner 	if (unlikely(!ok))
952b411b363SPhilipp Reisner 		dev_err(DEV, "drbd_send_block() failed\n");
953b411b363SPhilipp Reisner 	return ok;
954b411b363SPhilipp Reisner }
955b411b363SPhilipp Reisner 
956b411b363SPhilipp Reisner /**
957b411b363SPhilipp Reisner  * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUESTRS
958b411b363SPhilipp Reisner  * @mdev:	DRBD device.
959b411b363SPhilipp Reisner  * @w:		work object.
960b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyways
961b411b363SPhilipp Reisner  */
962b411b363SPhilipp Reisner int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
963b411b363SPhilipp Reisner {
964b411b363SPhilipp Reisner 	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
965b411b363SPhilipp Reisner 	int ok;
966b411b363SPhilipp Reisner 
967b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
968b411b363SPhilipp Reisner 		drbd_free_ee(mdev, e);
969b411b363SPhilipp Reisner 		dec_unacked(mdev);
970b411b363SPhilipp Reisner 		return 1;
971b411b363SPhilipp Reisner 	}
972b411b363SPhilipp Reisner 
973b411b363SPhilipp Reisner 	if (get_ldev_if_state(mdev, D_FAILED)) {
974b411b363SPhilipp Reisner 		drbd_rs_complete_io(mdev, e->sector);
975b411b363SPhilipp Reisner 		put_ldev(mdev);
976b411b363SPhilipp Reisner 	}
977b411b363SPhilipp Reisner 
978d612d309SPhilipp Reisner 	if (mdev->state.conn == C_AHEAD) {
979d612d309SPhilipp Reisner 		ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
980d612d309SPhilipp Reisner 	} else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
981b411b363SPhilipp Reisner 		if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
982b411b363SPhilipp Reisner 			inc_rs_pending(mdev);
983b411b363SPhilipp Reisner 			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
984b411b363SPhilipp Reisner 		} else {
985b411b363SPhilipp Reisner 			if (__ratelimit(&drbd_ratelimit_state))
986b411b363SPhilipp Reisner 				dev_err(DEV, "Not sending RSDataReply, "
987b411b363SPhilipp Reisner 				    "partner DISKLESS!\n");
988b411b363SPhilipp Reisner 			ok = 1;
989b411b363SPhilipp Reisner 		}
990b411b363SPhilipp Reisner 	} else {
991b411b363SPhilipp Reisner 		if (__ratelimit(&drbd_ratelimit_state))
992b411b363SPhilipp Reisner 			dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
993b411b363SPhilipp Reisner 			    (unsigned long long)e->sector);
994b411b363SPhilipp Reisner 
995b411b363SPhilipp Reisner 		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
996b411b363SPhilipp Reisner 
997b411b363SPhilipp Reisner 		/* update resync data with failure */
998b411b363SPhilipp Reisner 		drbd_rs_failed_io(mdev, e->sector, e->size);
999b411b363SPhilipp Reisner 	}
1000b411b363SPhilipp Reisner 
1001b411b363SPhilipp Reisner 	dec_unacked(mdev);
1002b411b363SPhilipp Reisner 
1003b411b363SPhilipp Reisner 	move_to_net_ee_or_free(mdev, e);
1004b411b363SPhilipp Reisner 
1005b411b363SPhilipp Reisner 	if (unlikely(!ok))
1006b411b363SPhilipp Reisner 		dev_err(DEV, "drbd_send_block() failed\n");
1007b411b363SPhilipp Reisner 	return ok;
1008b411b363SPhilipp Reisner }
1009b411b363SPhilipp Reisner 
1010b411b363SPhilipp Reisner int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1011b411b363SPhilipp Reisner {
1012b411b363SPhilipp Reisner 	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1013b411b363SPhilipp Reisner 	struct digest_info *di;
1014b411b363SPhilipp Reisner 	int digest_size;
1015b411b363SPhilipp Reisner 	void *digest = NULL;
1016b411b363SPhilipp Reisner 	int ok, eq = 0;
1017b411b363SPhilipp Reisner 
1018b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1019b411b363SPhilipp Reisner 		drbd_free_ee(mdev, e);
1020b411b363SPhilipp Reisner 		dec_unacked(mdev);
1021b411b363SPhilipp Reisner 		return 1;
1022b411b363SPhilipp Reisner 	}
1023b411b363SPhilipp Reisner 
10241d53f09eSLars Ellenberg 	if (get_ldev(mdev)) {
1025b411b363SPhilipp Reisner 		drbd_rs_complete_io(mdev, e->sector);
10261d53f09eSLars Ellenberg 		put_ldev(mdev);
10271d53f09eSLars Ellenberg 	}
1028b411b363SPhilipp Reisner 
102985719573SPhilipp Reisner 	di = e->digest;
1030b411b363SPhilipp Reisner 
103145bb912bSLars Ellenberg 	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1032b411b363SPhilipp Reisner 		/* quick hack to try to avoid a race against reconfiguration.
1033b411b363SPhilipp Reisner 		 * a real fix would be much more involved,
1034b411b363SPhilipp Reisner 		 * introducing more locking mechanisms */
1035b411b363SPhilipp Reisner 		if (mdev->csums_tfm) {
1036b411b363SPhilipp Reisner 			digest_size = crypto_hash_digestsize(mdev->csums_tfm);
1037b411b363SPhilipp Reisner 			D_ASSERT(digest_size == di->digest_size);
1038b411b363SPhilipp Reisner 			digest = kmalloc(digest_size, GFP_NOIO);
1039b411b363SPhilipp Reisner 		}
1040b411b363SPhilipp Reisner 		if (digest) {
104145bb912bSLars Ellenberg 			drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
1042b411b363SPhilipp Reisner 			eq = !memcmp(digest, di->digest, digest_size);
1043b411b363SPhilipp Reisner 			kfree(digest);
1044b411b363SPhilipp Reisner 		}
1045b411b363SPhilipp Reisner 
1046b411b363SPhilipp Reisner 		if (eq) {
1047b411b363SPhilipp Reisner 			drbd_set_in_sync(mdev, e->sector, e->size);
1048676396d5SLars Ellenberg 			/* rs_same_csums unit is BM_BLOCK_SIZE */
1049676396d5SLars Ellenberg 			mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
1050b411b363SPhilipp Reisner 			ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
1051b411b363SPhilipp Reisner 		} else {
1052b411b363SPhilipp Reisner 			inc_rs_pending(mdev);
1053204bba99SPhilipp Reisner 			e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
1054204bba99SPhilipp Reisner 			e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
1055204bba99SPhilipp Reisner 			kfree(di);
1056b411b363SPhilipp Reisner 			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
1057b411b363SPhilipp Reisner 		}
1058b411b363SPhilipp Reisner 	} else {
1059b411b363SPhilipp Reisner 		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
1060b411b363SPhilipp Reisner 		if (__ratelimit(&drbd_ratelimit_state))
1061b411b363SPhilipp Reisner 			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
1062b411b363SPhilipp Reisner 	}
1063b411b363SPhilipp Reisner 
1064b411b363SPhilipp Reisner 	dec_unacked(mdev);
1065b411b363SPhilipp Reisner 	move_to_net_ee_or_free(mdev, e);
1066b411b363SPhilipp Reisner 
1067b411b363SPhilipp Reisner 	if (unlikely(!ok))
1068b411b363SPhilipp Reisner 		dev_err(DEV, "drbd_send_block/ack() failed\n");
1069b411b363SPhilipp Reisner 	return ok;
1070b411b363SPhilipp Reisner }
1071b411b363SPhilipp Reisner 
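/**
 * w_e_end_ov_req() - Worker callback to answer an online verify request
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyway
 *
 * Computes a digest over the local block using the verify transform and
 * sends it to the peer in a P_OV_REPLY packet.
 */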
1072b411b363SPhilipp Reisner int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1073b411b363SPhilipp Reisner {
1074b411b363SPhilipp Reisner 	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1075b411b363SPhilipp Reisner 	int digest_size;
1076b411b363SPhilipp Reisner 	void *digest;
1077b411b363SPhilipp Reisner 	int ok = 1;
1078b411b363SPhilipp Reisner 
1079b411b363SPhilipp Reisner 	if (unlikely(cancel))
1080b411b363SPhilipp Reisner 		goto out;
1081b411b363SPhilipp Reisner 
108245bb912bSLars Ellenberg 	if (unlikely((e->flags & EE_WAS_ERROR) != 0))
1083b411b363SPhilipp Reisner 		goto out;
1084b411b363SPhilipp Reisner 
1085b411b363SPhilipp Reisner 	digest_size = crypto_hash_digestsize(mdev->verify_tfm);
1086b411b363SPhilipp Reisner 	/* FIXME if this allocation fails, online verify will not terminate! */
1087b411b363SPhilipp Reisner 	digest = kmalloc(digest_size, GFP_NOIO);
1088b411b363SPhilipp Reisner 	if (digest) {
108945bb912bSLars Ellenberg 		drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
1090b411b363SPhilipp Reisner 		inc_rs_pending(mdev);
1091b411b363SPhilipp Reisner 		ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
1092b411b363SPhilipp Reisner 					     digest, digest_size, P_OV_REPLY);
1093b411b363SPhilipp Reisner 		if (!ok)
1094b411b363SPhilipp Reisner 			dec_rs_pending(mdev);
1095b411b363SPhilipp Reisner 		kfree(digest);
1096b411b363SPhilipp Reisner 	}
1097b411b363SPhilipp Reisner 
1098b411b363SPhilipp Reisner out:
1099b411b363SPhilipp Reisner 	drbd_free_ee(mdev, e);
1100b411b363SPhilipp Reisner 
1101b411b363SPhilipp Reisner 	dec_unacked(mdev);
1102b411b363SPhilipp Reisner 
1103b411b363SPhilipp Reisner 	return ok;
1104b411b363SPhilipp Reisner }
1105b411b363SPhilipp Reisner 
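/* Record a block found out of sync during online verify: adjacent sectors are
 * merged into one contiguous range (ov_last_oos_start/ov_last_oos_size, in
 * 512 byte sectors), and the range is marked out of sync in the bitmap. */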
1106b411b363SPhilipp Reisner void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
1107b411b363SPhilipp Reisner {
1108b411b363SPhilipp Reisner 	if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
1109b411b363SPhilipp Reisner 		mdev->ov_last_oos_size += size>>9;
1110b411b363SPhilipp Reisner 	} else {
1111b411b363SPhilipp Reisner 		mdev->ov_last_oos_start = sector;
1112b411b363SPhilipp Reisner 		mdev->ov_last_oos_size = size>>9;
1113b411b363SPhilipp Reisner 	}
1114b411b363SPhilipp Reisner 	drbd_set_out_of_sync(mdev, sector, size);
1115b411b363SPhilipp Reisner }
1116b411b363SPhilipp Reisner 
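/**
 * w_e_end_ov_reply() - Worker callback to check an online verify reply against local data
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyway
 *
 * Recomputes the digest over the local block and compares it with the digest
 * received from the peer.  A mismatch is recorded via drbd_ov_oos_found(),
 * the result is acknowledged with P_OV_RESULT, and once ov_left reaches zero
 * the verify run is concluded through drbd_resync_finished().
 */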
1117b411b363SPhilipp Reisner int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1118b411b363SPhilipp Reisner {
1119b411b363SPhilipp Reisner 	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1120b411b363SPhilipp Reisner 	struct digest_info *di;
1121b411b363SPhilipp Reisner 	int digest_size;
1122b411b363SPhilipp Reisner 	void *digest;
1123b411b363SPhilipp Reisner 	int ok, eq = 0;
1124b411b363SPhilipp Reisner 
1125b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1126b411b363SPhilipp Reisner 		drbd_free_ee(mdev, e);
1127b411b363SPhilipp Reisner 		dec_unacked(mdev);
1128b411b363SPhilipp Reisner 		return 1;
1129b411b363SPhilipp Reisner 	}
1130b411b363SPhilipp Reisner 
1131b411b363SPhilipp Reisner 	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
1132b411b363SPhilipp Reisner 	 * the resync lru has been cleaned up already */
11331d53f09eSLars Ellenberg 	if (get_ldev(mdev)) {
1134b411b363SPhilipp Reisner 		drbd_rs_complete_io(mdev, e->sector);
11351d53f09eSLars Ellenberg 		put_ldev(mdev);
11361d53f09eSLars Ellenberg 	}
1137b411b363SPhilipp Reisner 
113885719573SPhilipp Reisner 	di = e->digest;
1139b411b363SPhilipp Reisner 
114045bb912bSLars Ellenberg 	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1141b411b363SPhilipp Reisner 		digest_size = crypto_hash_digestsize(mdev->verify_tfm);
1142b411b363SPhilipp Reisner 		digest = kmalloc(digest_size, GFP_NOIO);
1143b411b363SPhilipp Reisner 		if (digest) {
114445bb912bSLars Ellenberg 			drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
1145b411b363SPhilipp Reisner 
1146b411b363SPhilipp Reisner 			D_ASSERT(digest_size == di->digest_size);
1147b411b363SPhilipp Reisner 			eq = !memcmp(digest, di->digest, digest_size);
1148b411b363SPhilipp Reisner 			kfree(digest);
1149b411b363SPhilipp Reisner 		}
1150b411b363SPhilipp Reisner 	} else {
1151b411b363SPhilipp Reisner 		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
1152b411b363SPhilipp Reisner 		if (__ratelimit(&drbd_ratelimit_state))
1153b411b363SPhilipp Reisner 			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
1154b411b363SPhilipp Reisner 	}
1155b411b363SPhilipp Reisner 
1156b411b363SPhilipp Reisner 	dec_unacked(mdev);
1157b411b363SPhilipp Reisner 	if (!eq)
1158b411b363SPhilipp Reisner 		drbd_ov_oos_found(mdev, e->sector, e->size);
1159b411b363SPhilipp Reisner 	else
1160b411b363SPhilipp Reisner 		ov_oos_print(mdev);
1161b411b363SPhilipp Reisner 
1162b411b363SPhilipp Reisner 	ok = drbd_send_ack_ex(mdev, P_OV_RESULT, e->sector, e->size,
1163b411b363SPhilipp Reisner 			      eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
1164b411b363SPhilipp Reisner 
1165b411b363SPhilipp Reisner 	drbd_free_ee(mdev, e);
1166b411b363SPhilipp Reisner 
1167ea5442afSLars Ellenberg 	--mdev->ov_left;
1168ea5442afSLars Ellenberg 
1169ea5442afSLars Ellenberg 	/* let's advance progress step marks only for every other megabyte */
1170ea5442afSLars Ellenberg 	if ((mdev->ov_left & 0x200) == 0x200)
1171ea5442afSLars Ellenberg 		drbd_advance_rs_marks(mdev, mdev->ov_left);
1172ea5442afSLars Ellenberg 
1173ea5442afSLars Ellenberg 	if (mdev->ov_left == 0) {
1174b411b363SPhilipp Reisner 		ov_oos_print(mdev);
1175b411b363SPhilipp Reisner 		drbd_resync_finished(mdev);
1176b411b363SPhilipp Reisner 	}
1177b411b363SPhilipp Reisner 
1178b411b363SPhilipp Reisner 	return ok;
1179b411b363SPhilipp Reisner }
1180b411b363SPhilipp Reisner 
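/* Barrier work item: completing b->done lets a waiter know that all work
 * queued before this item has been processed. */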
1181b411b363SPhilipp Reisner int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1182b411b363SPhilipp Reisner {
1183b411b363SPhilipp Reisner 	struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
1184b411b363SPhilipp Reisner 	complete(&b->done);
1185b411b363SPhilipp Reisner 	return 1;
1186b411b363SPhilipp Reisner }
1187b411b363SPhilipp Reisner 
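/**
 * w_send_barrier() - Worker callback to send a P_BARRIER packet for the current epoch
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyway
 */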
1188b411b363SPhilipp Reisner int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1189b411b363SPhilipp Reisner {
1190b411b363SPhilipp Reisner 	struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
1191b411b363SPhilipp Reisner 	struct p_barrier *p = &mdev->data.sbuf.barrier;
1192b411b363SPhilipp Reisner 	int ok = 1;
1193b411b363SPhilipp Reisner 
1194b411b363SPhilipp Reisner 	/* really avoid racing with tl_clear.  w.cb may have been referenced
1195b411b363SPhilipp Reisner 	 * just before it was reassigned and re-queued, so double check that.
1196b411b363SPhilipp Reisner 	 * actually, this race was harmless, since we only try to send the
1197b411b363SPhilipp Reisner 	 * barrier packet here, and otherwise do nothing with the object.
1198b411b363SPhilipp Reisner 	 * but compare with the head of w_clear_epoch */
1199b411b363SPhilipp Reisner 	spin_lock_irq(&mdev->req_lock);
1200b411b363SPhilipp Reisner 	if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
1201b411b363SPhilipp Reisner 		cancel = 1;
1202b411b363SPhilipp Reisner 	spin_unlock_irq(&mdev->req_lock);
1203b411b363SPhilipp Reisner 	if (cancel)
1204b411b363SPhilipp Reisner 		return 1;
1205b411b363SPhilipp Reisner 
1206b411b363SPhilipp Reisner 	if (!drbd_get_data_sock(mdev))
1207b411b363SPhilipp Reisner 		return 0;
1208b411b363SPhilipp Reisner 	p->barrier = b->br_number;
1209b411b363SPhilipp Reisner 	/* inc_ap_pending was done where this was queued.
1210b411b363SPhilipp Reisner 	 * dec_ap_pending will be done in got_BarrierAck
1211b411b363SPhilipp Reisner 	 * or (on connection loss) in w_clear_epoch.  */
1212b411b363SPhilipp Reisner 	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
12130b70a13dSPhilipp Reisner 				(struct p_header80 *)p, sizeof(*p), 0);
1214b411b363SPhilipp Reisner 	drbd_put_data_sock(mdev);
1215b411b363SPhilipp Reisner 
1216b411b363SPhilipp Reisner 	return ok;
1217b411b363SPhilipp Reisner }
1218b411b363SPhilipp Reisner 
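/* Worker callback to send a P_UNPLUG_REMOTE hint to the peer. */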
1219b411b363SPhilipp Reisner int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1220b411b363SPhilipp Reisner {
1221b411b363SPhilipp Reisner 	if (cancel)
1222b411b363SPhilipp Reisner 		return 1;
1223b411b363SPhilipp Reisner 	return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
1224b411b363SPhilipp Reisner }
1225b411b363SPhilipp Reisner 
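/**
 * w_send_oos() - Worker callback to send an out-of-sync notification for a write request
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyway
 */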
122673a01a18SPhilipp Reisner int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
122773a01a18SPhilipp Reisner {
122873a01a18SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
122973a01a18SPhilipp Reisner 	int ok;
123073a01a18SPhilipp Reisner 
123173a01a18SPhilipp Reisner 	if (unlikely(cancel)) {
123273a01a18SPhilipp Reisner 		req_mod(req, send_canceled);
123373a01a18SPhilipp Reisner 		return 1;
123473a01a18SPhilipp Reisner 	}
123573a01a18SPhilipp Reisner 
123673a01a18SPhilipp Reisner 	ok = drbd_send_oos(mdev, req);
123773a01a18SPhilipp Reisner 	req_mod(req, oos_handed_to_network);
123873a01a18SPhilipp Reisner 
123973a01a18SPhilipp Reisner 	return ok;
124073a01a18SPhilipp Reisner }
124173a01a18SPhilipp Reisner 
1242b411b363SPhilipp Reisner /**
1243b411b363SPhilipp Reisner  * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
1244b411b363SPhilipp Reisner  * @mdev:	DRBD device.
1245b411b363SPhilipp Reisner  * @w:		work object.
1246b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyway
1247b411b363SPhilipp Reisner  */
1248b411b363SPhilipp Reisner int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1249b411b363SPhilipp Reisner {
1250b411b363SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
1251b411b363SPhilipp Reisner 	int ok;
1252b411b363SPhilipp Reisner 
1253b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1254b411b363SPhilipp Reisner 		req_mod(req, send_canceled);
1255b411b363SPhilipp Reisner 		return 1;
1256b411b363SPhilipp Reisner 	}
1257b411b363SPhilipp Reisner 
1258b411b363SPhilipp Reisner 	ok = drbd_send_dblock(mdev, req);
1259b411b363SPhilipp Reisner 	req_mod(req, ok ? handed_over_to_network : send_failed);
1260b411b363SPhilipp Reisner 
1261b411b363SPhilipp Reisner 	return ok;
1262b411b363SPhilipp Reisner }
1263b411b363SPhilipp Reisner 
1264b411b363SPhilipp Reisner /**
1265b411b363SPhilipp Reisner  * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
1266b411b363SPhilipp Reisner  * @mdev:	DRBD device.
1267b411b363SPhilipp Reisner  * @w:		work object.
1268b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyway
1269b411b363SPhilipp Reisner  */
1270b411b363SPhilipp Reisner int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1271b411b363SPhilipp Reisner {
1272b411b363SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
1273b411b363SPhilipp Reisner 	int ok;
1274b411b363SPhilipp Reisner 
1275b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1276b411b363SPhilipp Reisner 		req_mod(req, send_canceled);
1277b411b363SPhilipp Reisner 		return 1;
1278b411b363SPhilipp Reisner 	}
1279b411b363SPhilipp Reisner 
1280b411b363SPhilipp Reisner 	ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
1281b411b363SPhilipp Reisner 				(unsigned long)req);
1282b411b363SPhilipp Reisner 
1283b411b363SPhilipp Reisner 	if (!ok) {
1284b411b363SPhilipp Reisner 		/* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
1285b411b363SPhilipp Reisner 		 * so this is probably redundant */
1286b411b363SPhilipp Reisner 		if (mdev->state.conn >= C_CONNECTED)
1287b411b363SPhilipp Reisner 			drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
1288b411b363SPhilipp Reisner 	}
1289b411b363SPhilipp Reisner 	req_mod(req, ok ? handed_over_to_network : send_failed);
1290b411b363SPhilipp Reisner 
1291b411b363SPhilipp Reisner 	return ok;
1292b411b363SPhilipp Reisner }
1293b411b363SPhilipp Reisner 
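/* Resubmit the request's bio to the local backing device; only used when
 * unfreezing suspended IO.  Writes that are already accounted for in the
 * activity log get their AL extent re-activated first. */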
1294265be2d0SPhilipp Reisner int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1295265be2d0SPhilipp Reisner {
1296265be2d0SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
1297265be2d0SPhilipp Reisner 
12980778286aSPhilipp Reisner 	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
1299265be2d0SPhilipp Reisner 		drbd_al_begin_io(mdev, req->sector);
1300265be2d0SPhilipp Reisner 	/* Calling drbd_al_begin_io() out of the worker might deadlock
1301265be2d0SPhilipp Reisner 	   in theory. In practice it cannot deadlock, since this is
1302265be2d0SPhilipp Reisner 	   only used when unfreezing IOs. All the extents of the requests
1303265be2d0SPhilipp Reisner 	   that made it into the TL are already active. */
1304265be2d0SPhilipp Reisner 
1305265be2d0SPhilipp Reisner 	drbd_req_make_private_bio(req, req->master_bio);
1306265be2d0SPhilipp Reisner 	req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
1307265be2d0SPhilipp Reisner 	generic_make_request(req->private_bio);
1308265be2d0SPhilipp Reisner 
1309265be2d0SPhilipp Reisner 	return 1;
1310265be2d0SPhilipp Reisner }
1311265be2d0SPhilipp Reisner 
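/* Walk the sync-after dependency chain (sync_conf.after) starting at @mdev.
 * Returns 0 if any device we depend on is currently resyncing or has its
 * resync paused, 1 if this device may resync now. */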
1312b411b363SPhilipp Reisner static int _drbd_may_sync_now(struct drbd_conf *mdev)
1313b411b363SPhilipp Reisner {
1314b411b363SPhilipp Reisner 	struct drbd_conf *odev = mdev;
1315b411b363SPhilipp Reisner 
1316b411b363SPhilipp Reisner 	while (1) {
1317b411b363SPhilipp Reisner 		if (odev->sync_conf.after == -1)
1318b411b363SPhilipp Reisner 			return 1;
1319b411b363SPhilipp Reisner 		odev = minor_to_mdev(odev->sync_conf.after);
1320b411b363SPhilipp Reisner 		ERR_IF(!odev) return 1;
1321b411b363SPhilipp Reisner 		if ((odev->state.conn >= C_SYNC_SOURCE &&
1322b411b363SPhilipp Reisner 		     odev->state.conn <= C_PAUSED_SYNC_T) ||
1323b411b363SPhilipp Reisner 		    odev->state.aftr_isp || odev->state.peer_isp ||
1324b411b363SPhilipp Reisner 		    odev->state.user_isp)
1325b411b363SPhilipp Reisner 			return 0;
1326b411b363SPhilipp Reisner 	}
1327b411b363SPhilipp Reisner }
1328b411b363SPhilipp Reisner 
1329b411b363SPhilipp Reisner /**
1330b411b363SPhilipp Reisner  * _drbd_pause_after() - Pause resync on all devices that may not resync now
1331b411b363SPhilipp Reisner  * @mdev:	DRBD device.
1332b411b363SPhilipp Reisner  *
1333b411b363SPhilipp Reisner  * Called from process context only (admin command and after_state_ch).
1334b411b363SPhilipp Reisner  */
1335b411b363SPhilipp Reisner static int _drbd_pause_after(struct drbd_conf *mdev)
1336b411b363SPhilipp Reisner {
1337b411b363SPhilipp Reisner 	struct drbd_conf *odev;
1338b411b363SPhilipp Reisner 	int i, rv = 0;
1339b411b363SPhilipp Reisner 
1340b411b363SPhilipp Reisner 	for (i = 0; i < minor_count; i++) {
1341b411b363SPhilipp Reisner 		odev = minor_to_mdev(i);
1342b411b363SPhilipp Reisner 		if (!odev)
1343b411b363SPhilipp Reisner 			continue;
1344b411b363SPhilipp Reisner 		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1345b411b363SPhilipp Reisner 			continue;
1346b411b363SPhilipp Reisner 		if (!_drbd_may_sync_now(odev))
1347b411b363SPhilipp Reisner 			rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
1348b411b363SPhilipp Reisner 			       != SS_NOTHING_TO_DO);
1349b411b363SPhilipp Reisner 	}
1350b411b363SPhilipp Reisner 
1351b411b363SPhilipp Reisner 	return rv;
1352b411b363SPhilipp Reisner }
1353b411b363SPhilipp Reisner 
1354b411b363SPhilipp Reisner /**
1355b411b363SPhilipp Reisner  * _drbd_resume_next() - Resume resync on all devices that may resync now
1356b411b363SPhilipp Reisner  * @mdev:	DRBD device.
1357b411b363SPhilipp Reisner  *
1358b411b363SPhilipp Reisner  * Called from process context only (admin command and worker).
1359b411b363SPhilipp Reisner  */
1360b411b363SPhilipp Reisner static int _drbd_resume_next(struct drbd_conf *mdev)
1361b411b363SPhilipp Reisner {
1362b411b363SPhilipp Reisner 	struct drbd_conf *odev;
1363b411b363SPhilipp Reisner 	int i, rv = 0;
1364b411b363SPhilipp Reisner 
1365b411b363SPhilipp Reisner 	for (i = 0; i < minor_count; i++) {
1366b411b363SPhilipp Reisner 		odev = minor_to_mdev(i);
1367b411b363SPhilipp Reisner 		if (!odev)
1368b411b363SPhilipp Reisner 			continue;
1369b411b363SPhilipp Reisner 		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1370b411b363SPhilipp Reisner 			continue;
1371b411b363SPhilipp Reisner 		if (odev->state.aftr_isp) {
1372b411b363SPhilipp Reisner 			if (_drbd_may_sync_now(odev))
1373b411b363SPhilipp Reisner 				rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
1374b411b363SPhilipp Reisner 							CS_HARD, NULL)
1375b411b363SPhilipp Reisner 				       != SS_NOTHING_TO_DO) ;
1376b411b363SPhilipp Reisner 		}
1377b411b363SPhilipp Reisner 	}
1378b411b363SPhilipp Reisner 	return rv;
1379b411b363SPhilipp Reisner }
1380b411b363SPhilipp Reisner 
1381b411b363SPhilipp Reisner void resume_next_sg(struct drbd_conf *mdev)
1382b411b363SPhilipp Reisner {
1383b411b363SPhilipp Reisner 	write_lock_irq(&global_state_lock);
1384b411b363SPhilipp Reisner 	_drbd_resume_next(mdev);
1385b411b363SPhilipp Reisner 	write_unlock_irq(&global_state_lock);
1386b411b363SPhilipp Reisner }
1387b411b363SPhilipp Reisner 
1388b411b363SPhilipp Reisner void suspend_other_sg(struct drbd_conf *mdev)
1389b411b363SPhilipp Reisner {
1390b411b363SPhilipp Reisner 	write_lock_irq(&global_state_lock);
1391b411b363SPhilipp Reisner 	_drbd_pause_after(mdev);
1392b411b363SPhilipp Reisner 	write_unlock_irq(&global_state_lock);
1393b411b363SPhilipp Reisner }
1394b411b363SPhilipp Reisner 
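/* Validate a new sync-after dependency (@o_minor) for @mdev: reject minors
 * that do not exist as well as dependency chains that would lead back to
 * @mdev itself, i.e. cycles. */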
1395b411b363SPhilipp Reisner static int sync_after_error(struct drbd_conf *mdev, int o_minor)
1396b411b363SPhilipp Reisner {
1397b411b363SPhilipp Reisner 	struct drbd_conf *odev;
1398b411b363SPhilipp Reisner 
1399b411b363SPhilipp Reisner 	if (o_minor == -1)
1400b411b363SPhilipp Reisner 		return NO_ERROR;
1401b411b363SPhilipp Reisner 	if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
1402b411b363SPhilipp Reisner 		return ERR_SYNC_AFTER;
1403b411b363SPhilipp Reisner 
1404b411b363SPhilipp Reisner 	/* check for loops */
1405b411b363SPhilipp Reisner 	odev = minor_to_mdev(o_minor);
1406b411b363SPhilipp Reisner 	while (1) {
1407b411b363SPhilipp Reisner 		if (odev == mdev)
1408b411b363SPhilipp Reisner 			return ERR_SYNC_AFTER_CYCLE;
1409b411b363SPhilipp Reisner 
1410b411b363SPhilipp Reisner 		/* dependency chain ends here, no cycles. */
1411b411b363SPhilipp Reisner 		if (odev->sync_conf.after == -1)
1412b411b363SPhilipp Reisner 			return NO_ERROR;
1413b411b363SPhilipp Reisner 
1414b411b363SPhilipp Reisner 		/* follow the dependency chain */
1415b411b363SPhilipp Reisner 		odev = minor_to_mdev(odev->sync_conf.after);
1416b411b363SPhilipp Reisner 	}
1417b411b363SPhilipp Reisner }
1418b411b363SPhilipp Reisner 
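/**
 * drbd_alter_sa() - Change the sync-after dependency of a device
 * @mdev:	DRBD device.
 * @na:		Minor number of the new sync-after device, or -1 for none.
 *
 * Under the global state lock, validates the new dependency and then pauses
 * and resumes resyncs until the aftr_isp flags of all devices have settled.
 */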
1419b411b363SPhilipp Reisner int drbd_alter_sa(struct drbd_conf *mdev, int na)
1420b411b363SPhilipp Reisner {
1421b411b363SPhilipp Reisner 	int changes;
1422b411b363SPhilipp Reisner 	int retcode;
1423b411b363SPhilipp Reisner 
1424b411b363SPhilipp Reisner 	write_lock_irq(&global_state_lock);
1425b411b363SPhilipp Reisner 	retcode = sync_after_error(mdev, na);
1426b411b363SPhilipp Reisner 	if (retcode == NO_ERROR) {
1427b411b363SPhilipp Reisner 		mdev->sync_conf.after = na;
1428b411b363SPhilipp Reisner 		do {
1429b411b363SPhilipp Reisner 			changes  = _drbd_pause_after(mdev);
1430b411b363SPhilipp Reisner 			changes |= _drbd_resume_next(mdev);
1431b411b363SPhilipp Reisner 		} while (changes);
1432b411b363SPhilipp Reisner 	}
1433b411b363SPhilipp Reisner 	write_unlock_irq(&global_state_lock);
1434b411b363SPhilipp Reisner 	return retcode;
1435b411b363SPhilipp Reisner }
1436b411b363SPhilipp Reisner 
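/* Reset the resync rate controller: clear the sector-in/event counters and
 * the plan FIFO, so a new resync run starts with clean control loop state. */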
14379bd28d3cSLars Ellenberg void drbd_rs_controller_reset(struct drbd_conf *mdev)
14389bd28d3cSLars Ellenberg {
14399bd28d3cSLars Ellenberg 	atomic_set(&mdev->rs_sect_in, 0);
14409bd28d3cSLars Ellenberg 	atomic_set(&mdev->rs_sect_ev, 0);
14419bd28d3cSLars Ellenberg 	mdev->rs_in_flight = 0;
14429bd28d3cSLars Ellenberg 	mdev->rs_planed = 0;
14439bd28d3cSLars Ellenberg 	spin_lock(&mdev->peer_seq_lock);
14449bd28d3cSLars Ellenberg 	fifo_set(&mdev->rs_plan_s, 0);
14459bd28d3cSLars Ellenberg 	spin_unlock(&mdev->peer_seq_lock);
14469bd28d3cSLars Ellenberg }
14479bd28d3cSLars Ellenberg 
1448b411b363SPhilipp Reisner /**
1449b411b363SPhilipp Reisner  * drbd_start_resync() - Start the resync process
1450b411b363SPhilipp Reisner  * @mdev:	DRBD device.
1451b411b363SPhilipp Reisner  * @side:	Either C_SYNC_SOURCE or C_SYNC_TARGET
1452b411b363SPhilipp Reisner  *
1453b411b363SPhilipp Reisner  * This function might bring you directly into one of the
1454b411b363SPhilipp Reisner  * C_PAUSED_SYNC_* states.
1455b411b363SPhilipp Reisner  */
1456b411b363SPhilipp Reisner void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1457b411b363SPhilipp Reisner {
1458b411b363SPhilipp Reisner 	union drbd_state ns;
1459b411b363SPhilipp Reisner 	int r;
1460b411b363SPhilipp Reisner 
1461c4752ef1SPhilipp Reisner 	if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
1462b411b363SPhilipp Reisner 		dev_err(DEV, "Resync already running!\n");
1463b411b363SPhilipp Reisner 		return;
1464b411b363SPhilipp Reisner 	}
1465b411b363SPhilipp Reisner 
146659817f4fSPhilipp Reisner 	if (mdev->state.conn < C_AHEAD) {
1467b411b363SPhilipp Reisner 		/* In case a previous resync run was aborted by an IO error/detach on the peer. */
1468b411b363SPhilipp Reisner 		drbd_rs_cancel_all(mdev);
146959817f4fSPhilipp Reisner 		/* This should be done when we abort the resync. We definitely do not
147059817f4fSPhilipp Reisner 		   want to have this for connections going back and forth between
147159817f4fSPhilipp Reisner 		   Ahead/Behind and SyncSource/SyncTarget */
147259817f4fSPhilipp Reisner 	}
1473b411b363SPhilipp Reisner 
1474b411b363SPhilipp Reisner 	if (side == C_SYNC_TARGET) {
1475b411b363SPhilipp Reisner 		/* Since application IO was locked out during C_WF_BITMAP_T and
1476b411b363SPhilipp Reisner 		   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
1477b411b363SPhilipp Reisner 		   we check whether we may make the data inconsistent. */
1478b411b363SPhilipp Reisner 		r = drbd_khelper(mdev, "before-resync-target");
1479b411b363SPhilipp Reisner 		r = (r >> 8) & 0xff;
1480b411b363SPhilipp Reisner 		if (r > 0) {
1481b411b363SPhilipp Reisner 			dev_info(DEV, "before-resync-target handler returned %d, "
1482b411b363SPhilipp Reisner 			     "dropping connection.\n", r);
1483b411b363SPhilipp Reisner 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
1484b411b363SPhilipp Reisner 			return;
1485b411b363SPhilipp Reisner 		}
148609b9e797SPhilipp Reisner 	} else /* C_SYNC_SOURCE */ {
148709b9e797SPhilipp Reisner 		r = drbd_khelper(mdev, "before-resync-source");
148809b9e797SPhilipp Reisner 		r = (r >> 8) & 0xff;
148909b9e797SPhilipp Reisner 		if (r > 0) {
149009b9e797SPhilipp Reisner 			if (r == 3) {
149109b9e797SPhilipp Reisner 				dev_info(DEV, "before-resync-source handler returned %d, "
149209b9e797SPhilipp Reisner 					 "ignoring. Old userland tools?\n", r);
149309b9e797SPhilipp Reisner 			} else {
149409b9e797SPhilipp Reisner 				dev_info(DEV, "before-resync-source handler returned %d, "
149509b9e797SPhilipp Reisner 					 "dropping connection.\n", r);
149609b9e797SPhilipp Reisner 				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
149709b9e797SPhilipp Reisner 				return;
149809b9e797SPhilipp Reisner 			}
149909b9e797SPhilipp Reisner 		}
1500b411b363SPhilipp Reisner 	}
1501b411b363SPhilipp Reisner 
1502b411b363SPhilipp Reisner 	drbd_state_lock(mdev);
1503b411b363SPhilipp Reisner 
1504b411b363SPhilipp Reisner 	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
1505b411b363SPhilipp Reisner 		drbd_state_unlock(mdev);
1506b411b363SPhilipp Reisner 		return;
1507b411b363SPhilipp Reisner 	}
1508b411b363SPhilipp Reisner 
1509b411b363SPhilipp Reisner 	write_lock_irq(&global_state_lock);
1510b411b363SPhilipp Reisner 	ns = mdev->state;
1511b411b363SPhilipp Reisner 
1512b411b363SPhilipp Reisner 	ns.aftr_isp = !_drbd_may_sync_now(mdev);
1513b411b363SPhilipp Reisner 
1514b411b363SPhilipp Reisner 	ns.conn = side;
1515b411b363SPhilipp Reisner 
1516b411b363SPhilipp Reisner 	if (side == C_SYNC_TARGET)
1517b411b363SPhilipp Reisner 		ns.disk = D_INCONSISTENT;
1518b411b363SPhilipp Reisner 	else /* side == C_SYNC_SOURCE */
1519b411b363SPhilipp Reisner 		ns.pdsk = D_INCONSISTENT;
1520b411b363SPhilipp Reisner 
1521b411b363SPhilipp Reisner 	r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1522b411b363SPhilipp Reisner 	ns = mdev->state;
1523b411b363SPhilipp Reisner 
1524b411b363SPhilipp Reisner 	if (ns.conn < C_CONNECTED)
1525b411b363SPhilipp Reisner 		r = SS_UNKNOWN_ERROR;
1526b411b363SPhilipp Reisner 
1527b411b363SPhilipp Reisner 	if (r == SS_SUCCESS) {
15281d7734a0SLars Ellenberg 		unsigned long tw = drbd_bm_total_weight(mdev);
15291d7734a0SLars Ellenberg 		unsigned long now = jiffies;
15301d7734a0SLars Ellenberg 		int i;
15311d7734a0SLars Ellenberg 
1532b411b363SPhilipp Reisner 		mdev->rs_failed    = 0;
1533b411b363SPhilipp Reisner 		mdev->rs_paused    = 0;
1534b411b363SPhilipp Reisner 		mdev->rs_same_csum = 0;
15350f0601f4SLars Ellenberg 		mdev->rs_last_events = 0;
15360f0601f4SLars Ellenberg 		mdev->rs_last_sect_ev = 0;
15371d7734a0SLars Ellenberg 		mdev->rs_total     = tw;
15381d7734a0SLars Ellenberg 		mdev->rs_start     = now;
15391d7734a0SLars Ellenberg 		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
15401d7734a0SLars Ellenberg 			mdev->rs_mark_left[i] = tw;
15411d7734a0SLars Ellenberg 			mdev->rs_mark_time[i] = now;
15421d7734a0SLars Ellenberg 		}
1543b411b363SPhilipp Reisner 		_drbd_pause_after(mdev);
1544b411b363SPhilipp Reisner 	}
1545b411b363SPhilipp Reisner 	write_unlock_irq(&global_state_lock);
15465a22db89SLars Ellenberg 
15475a22db89SLars Ellenberg 	if (side == C_SYNC_TARGET)
15485a22db89SLars Ellenberg 		mdev->bm_resync_fo = 0;
15495a22db89SLars Ellenberg 
15505a22db89SLars Ellenberg 	/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
15515a22db89SLars Ellenberg 	 * with w_send_oos, or the sync target will get confused as to
15525a22db89SLars Ellenberg 	 * how many bits to resync.  We cannot do that always, because for an
15535a22db89SLars Ellenberg 	 * empty resync and protocol < 95, we need to do it here, as we call
15545a22db89SLars Ellenberg 	 * drbd_resync_finished from here in that case.
15555a22db89SLars Ellenberg 	 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
15565a22db89SLars Ellenberg 	 * and from after_state_ch otherwise. */
15575a22db89SLars Ellenberg 	if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96)
15585a22db89SLars Ellenberg 		drbd_gen_and_send_sync_uuid(mdev);
1559b411b363SPhilipp Reisner 
1560b411b363SPhilipp Reisner 	if (r == SS_SUCCESS) {
1561b411b363SPhilipp Reisner 		dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
1562b411b363SPhilipp Reisner 		     drbd_conn_str(ns.conn),
1563b411b363SPhilipp Reisner 		     (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
1564b411b363SPhilipp Reisner 		     (unsigned long) mdev->rs_total);
1565b411b363SPhilipp Reisner 
1566af85e8e8SLars Ellenberg 		if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
1567af85e8e8SLars Ellenberg 			/* This still has a race (about when exactly the peers
1568af85e8e8SLars Ellenberg 			 * detect connection loss) that can lead to a full sync
1569af85e8e8SLars Ellenberg 			 * on next handshake. In 8.3.9 we fixed this with explicit
1570af85e8e8SLars Ellenberg 			 * resync-finished notifications, but the fix
1571af85e8e8SLars Ellenberg 			 * introduces a protocol change.  Sleeping for some
1572af85e8e8SLars Ellenberg 			 * time longer than the ping interval + timeout on the
1573af85e8e8SLars Ellenberg 			 * SyncSource, to give the SyncTarget the chance to
1574af85e8e8SLars Ellenberg 			 * detect connection loss, then waiting for a ping
1575af85e8e8SLars Ellenberg 			 * response (implicit in drbd_resync_finished) reduces
1576af85e8e8SLars Ellenberg 			 * the race considerably, but does not solve it. */
1577af85e8e8SLars Ellenberg 			if (side == C_SYNC_SOURCE)
1578af85e8e8SLars Ellenberg 				schedule_timeout_interruptible(
1579af85e8e8SLars Ellenberg 					mdev->net_conf->ping_int * HZ +
1580af85e8e8SLars Ellenberg 					mdev->net_conf->ping_timeo*HZ/9);
1581b411b363SPhilipp Reisner 			drbd_resync_finished(mdev);
1582b411b363SPhilipp Reisner 		}
1583b411b363SPhilipp Reisner 
15849bd28d3cSLars Ellenberg 		drbd_rs_controller_reset(mdev);
1585b411b363SPhilipp Reisner 		/* ns.conn may already be != mdev->state.conn,
1586b411b363SPhilipp Reisner 		 * we may have been paused in between, or become paused until
1587b411b363SPhilipp Reisner 		 * the timer triggers.
1588b411b363SPhilipp Reisner 		 * No matter, that is handled in resync_timer_fn() */
1589b411b363SPhilipp Reisner 		if (ns.conn == C_SYNC_TARGET)
1590b411b363SPhilipp Reisner 			mod_timer(&mdev->resync_timer, jiffies);
1591b411b363SPhilipp Reisner 
1592b411b363SPhilipp Reisner 		drbd_md_sync(mdev);
1593b411b363SPhilipp Reisner 	}
15945a22db89SLars Ellenberg 	put_ldev(mdev);
1595d0c3f60fSPhilipp Reisner 	drbd_state_unlock(mdev);
1596b411b363SPhilipp Reisner }
1597b411b363SPhilipp Reisner 
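/**
 * drbd_worker() - Main loop of the per-device worker thread
 * @thi:	DRBD thread object.
 *
 * Dequeues work items from mdev->data.work and runs their callbacks,
 * (un)corking the data socket around waits.  If a callback fails while we
 * are connected, the connection is forced into C_NETWORK_FAILURE.  When the
 * thread is asked to stop, the remaining queue is drained with cancel set,
 * the receiver thread is stopped and the device is cleaned up.
 */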
1598b411b363SPhilipp Reisner int drbd_worker(struct drbd_thread *thi)
1599b411b363SPhilipp Reisner {
1600b411b363SPhilipp Reisner 	struct drbd_conf *mdev = thi->mdev;
1601b411b363SPhilipp Reisner 	struct drbd_work *w = NULL;
1602b411b363SPhilipp Reisner 	LIST_HEAD(work_list);
1603b411b363SPhilipp Reisner 	int intr = 0, i;
1604b411b363SPhilipp Reisner 
1605b411b363SPhilipp Reisner 	sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));
1606b411b363SPhilipp Reisner 
1607b411b363SPhilipp Reisner 	while (get_t_state(thi) == Running) {
1608b411b363SPhilipp Reisner 		drbd_thread_current_set_cpu(mdev);
1609b411b363SPhilipp Reisner 
1610b411b363SPhilipp Reisner 		if (down_trylock(&mdev->data.work.s)) {
1611b411b363SPhilipp Reisner 			mutex_lock(&mdev->data.mutex);
1612b411b363SPhilipp Reisner 			if (mdev->data.socket && !mdev->net_conf->no_cork)
1613b411b363SPhilipp Reisner 				drbd_tcp_uncork(mdev->data.socket);
1614b411b363SPhilipp Reisner 			mutex_unlock(&mdev->data.mutex);
1615b411b363SPhilipp Reisner 
1616b411b363SPhilipp Reisner 			intr = down_interruptible(&mdev->data.work.s);
1617b411b363SPhilipp Reisner 
1618b411b363SPhilipp Reisner 			mutex_lock(&mdev->data.mutex);
1619b411b363SPhilipp Reisner 			if (mdev->data.socket  && !mdev->net_conf->no_cork)
1620b411b363SPhilipp Reisner 				drbd_tcp_cork(mdev->data.socket);
1621b411b363SPhilipp Reisner 			mutex_unlock(&mdev->data.mutex);
1622b411b363SPhilipp Reisner 		}
1623b411b363SPhilipp Reisner 
1624b411b363SPhilipp Reisner 		if (intr) {
1625b411b363SPhilipp Reisner 			D_ASSERT(intr == -EINTR);
1626b411b363SPhilipp Reisner 			flush_signals(current);
1627b411b363SPhilipp Reisner 			ERR_IF (get_t_state(thi) == Running)
1628b411b363SPhilipp Reisner 				continue;
1629b411b363SPhilipp Reisner 			break;
1630b411b363SPhilipp Reisner 		}
1631b411b363SPhilipp Reisner 
1632b411b363SPhilipp Reisner 		if (get_t_state(thi) != Running)
1633b411b363SPhilipp Reisner 			break;
1634b411b363SPhilipp Reisner 		/* With this break, we have done a down() but not consumed
1635b411b363SPhilipp Reisner 		   the entry from the list. The cleanup code takes care of
1636b411b363SPhilipp Reisner 		   this...   */
1637b411b363SPhilipp Reisner 
1638b411b363SPhilipp Reisner 		w = NULL;
1639b411b363SPhilipp Reisner 		spin_lock_irq(&mdev->data.work.q_lock);
1640b411b363SPhilipp Reisner 		ERR_IF(list_empty(&mdev->data.work.q)) {
1641b411b363SPhilipp Reisner 			/* something terribly wrong in our logic.
1642b411b363SPhilipp Reisner 			 * we were able to down() the semaphore,
1643b411b363SPhilipp Reisner 			 * but the list is empty... doh.
1644b411b363SPhilipp Reisner 			 *
1645b411b363SPhilipp Reisner 			 * what is the best thing to do now?
1646b411b363SPhilipp Reisner 			 * try again from scratch, restarting the receiver,
1647b411b363SPhilipp Reisner 			 * asender, whatnot? that could break things even more badly,
1648b411b363SPhilipp Reisner 			 * e.g. when we are primary, but no good local data.
1649b411b363SPhilipp Reisner 			 *
1650b411b363SPhilipp Reisner 			 * I'll try to get away just starting over this loop.
1651b411b363SPhilipp Reisner 			 */
1652b411b363SPhilipp Reisner 			spin_unlock_irq(&mdev->data.work.q_lock);
1653b411b363SPhilipp Reisner 			continue;
1654b411b363SPhilipp Reisner 		}
1655b411b363SPhilipp Reisner 		w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
1656b411b363SPhilipp Reisner 		list_del_init(&w->list);
1657b411b363SPhilipp Reisner 		spin_unlock_irq(&mdev->data.work.q_lock);
1658b411b363SPhilipp Reisner 
1659b411b363SPhilipp Reisner 		if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
1660b411b363SPhilipp Reisner 			/* dev_warn(DEV, "worker: a callback failed! \n"); */
1661b411b363SPhilipp Reisner 			if (mdev->state.conn >= C_CONNECTED)
1662b411b363SPhilipp Reisner 				drbd_force_state(mdev,
1663b411b363SPhilipp Reisner 						NS(conn, C_NETWORK_FAILURE));
1664b411b363SPhilipp Reisner 		}
1665b411b363SPhilipp Reisner 	}
1666b411b363SPhilipp Reisner 	D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
1667b411b363SPhilipp Reisner 	D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
1668b411b363SPhilipp Reisner 
1669b411b363SPhilipp Reisner 	spin_lock_irq(&mdev->data.work.q_lock);
1670b411b363SPhilipp Reisner 	i = 0;
1671b411b363SPhilipp Reisner 	while (!list_empty(&mdev->data.work.q)) {
1672b411b363SPhilipp Reisner 		list_splice_init(&mdev->data.work.q, &work_list);
1673b411b363SPhilipp Reisner 		spin_unlock_irq(&mdev->data.work.q_lock);
1674b411b363SPhilipp Reisner 
1675b411b363SPhilipp Reisner 		while (!list_empty(&work_list)) {
1676b411b363SPhilipp Reisner 			w = list_entry(work_list.next, struct drbd_work, list);
1677b411b363SPhilipp Reisner 			list_del_init(&w->list);
1678b411b363SPhilipp Reisner 			w->cb(mdev, w, 1);
1679b411b363SPhilipp Reisner 			i++; /* dead debugging code */
1680b411b363SPhilipp Reisner 		}
1681b411b363SPhilipp Reisner 
1682b411b363SPhilipp Reisner 		spin_lock_irq(&mdev->data.work.q_lock);
1683b411b363SPhilipp Reisner 	}
1684b411b363SPhilipp Reisner 	sema_init(&mdev->data.work.s, 0);
1685b411b363SPhilipp Reisner 	/* DANGEROUS race: if someone queued work while holding the spinlock,
1686b411b363SPhilipp Reisner 	 * but called up() outside of it, we could get an up() on the
1687b411b363SPhilipp Reisner 	 * semaphore without a corresponding list entry.
1688b411b363SPhilipp Reisner 	 * So don't do that.
1689b411b363SPhilipp Reisner 	 */
1690b411b363SPhilipp Reisner 	spin_unlock_irq(&mdev->data.work.q_lock);
1691b411b363SPhilipp Reisner 
1692b411b363SPhilipp Reisner 	D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
1693b411b363SPhilipp Reisner 	/* _drbd_set_state only uses stop_nowait.
1694b411b363SPhilipp Reisner 	 * wait here for the Exiting receiver. */
1695b411b363SPhilipp Reisner 	drbd_thread_stop(&mdev->receiver);
1696b411b363SPhilipp Reisner 	drbd_mdev_cleanup(mdev);
1697b411b363SPhilipp Reisner 
1698b411b363SPhilipp Reisner 	dev_info(DEV, "worker terminated\n");
1699b411b363SPhilipp Reisner 
1700b411b363SPhilipp Reisner 	clear_bit(DEVICE_DYING, &mdev->flags);
1701b411b363SPhilipp Reisner 	clear_bit(CONFIG_PENDING, &mdev->flags);
1702b411b363SPhilipp Reisner 	wake_up(&mdev->state_wait);
1703b411b363SPhilipp Reisner 
1704b411b363SPhilipp Reisner 	return 0;
1705b411b363SPhilipp Reisner }
1706