// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.


*/

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>

#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"

static int make_ov_request(struct drbd_peer_device *, int);
static int make_resync_request(struct drbd_peer_device *, int);

/* endio handlers:
 *   drbd_md_endio (defined here)
 *   drbd_request_endio (defined here)
 *   drbd_peer_request_endio (defined here)
 *   drbd_bm_endio (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
 *
 */

/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
void drbd_md_endio(struct bio *bio)
{
	struct drbd_device *device;

	device = bio->bi_private;
	device->md_io.error = blk_status_to_errno(bio->bi_status);

	/* special case: drbd_md_read() during drbd_adm_attach() */
	if (device->ldev)
		put_ldev(device);
	bio_put(bio);

	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
	 * to timeout on the lower level device, and eventually detach from it.
	 * If this io completion runs after that timeout expired, this
	 * drbd_md_put_buffer() may allow us to finally try and re-attach.
	 * During normal operation, this only puts that extra reference
	 * down to 1 again.
	 * Make sure we first drop the reference, and only then signal
	 * completion, or we may (in drbd_al_read_log()) cycle so fast into the
	 * next drbd_md_sync_page_io(), that we trigger the
	 * ASSERT(atomic_read(&device->md_io_in_use) == 1) there.
	 */
	drbd_md_put_buffer(device);
	device->md_io.done = 1;
	wake_up(&device->misc_wait);
}

/* reads on behalf of the partner,
 * "submitted" by the receiver
 */
static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_peer_device *peer_device = peer_req->peer_device;
	struct drbd_device *device = peer_device->device;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	device->read_cnt += peer_req->i.size >> 9;
	list_del(&peer_req->w.list);
	if (list_empty(&device->read_ee))
		wake_up(&device->ee_wait);
	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);
	put_ldev(device);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage.  */
void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_peer_device *peer_device = peer_req->peer_device;
	struct drbd_device *device = peer_device->device;
	struct drbd_connection *connection = peer_device->connection;
	struct drbd_interval i;
	int do_wake;
	u64 block_id;
	int do_al_complete_io;

	/* after we moved peer_req to done_ee,
	 * we may no longer access it,
	 * it may be freed/reused already!
	 * (as soon as we release the req_lock) */
	i = peer_req->i;
	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
	block_id = peer_req->block_id;
	peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;

	if (peer_req->flags & EE_WAS_ERROR) {
		/* In protocol != C, we usually do not send write acks.
		 * In case of a write error, send the neg ack anyway. */
		if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags))
			inc_unacked(device);
		drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size);
	}

	spin_lock_irqsave(&device->resource->req_lock, flags);
	device->writ_cnt += peer_req->i.size >> 9;
	list_move_tail(&peer_req->w.list, &device->done_ee);

	/*
	 * Do not remove from the write_requests tree here: we did not send the
	 * Ack yet and did not wake possibly waiting conflicting requests.
	 * Removal from the tree happens in "drbd_process_done_ee" within the
	 * appropriate dw.cb (e_end_block/e_end_resync_block) or in
	 * _drbd_clear_done_ee.
	 */

	do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);

	/* FIXME do we want to detach for failed REQ_OP_DISCARD?
	 * ((peer_req->flags & (EE_WAS_ERROR|EE_TRIM)) == EE_WAS_ERROR) */
	if (peer_req->flags & EE_WAS_ERROR)
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);

	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		kref_get(&device->kref); /* put is in drbd_send_acks_wf() */
		if (!queue_work(connection->ack_sender, &peer_device->send_acks_work))
			kref_put(&device->kref, drbd_destroy_device);
	}
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	if (block_id == ID_SYNCER)
		drbd_rs_complete_io(device, i.sector);

	if (do_wake)
		wake_up(&device->ee_wait);

	if (do_al_complete_io)
		drbd_al_complete_io(device, &i);

	put_ldev(device);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
 */
void drbd_peer_request_endio(struct bio *bio)
{
	struct drbd_peer_request *peer_req = bio->bi_private;
	struct drbd_device *device = peer_req->peer_device->device;
	bool is_write = bio_data_dir(bio) == WRITE;
	bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
			  bio_op(bio) == REQ_OP_DISCARD;

	if (bio->bi_status && drbd_ratelimit())
		drbd_warn(device, "%s: error=%d s=%llus\n",
				is_write ? (is_discard ? "discard" : "write")
					: "read", bio->bi_status,
				(unsigned long long)peer_req->i.sector);

	if (bio->bi_status)
		set_bit(__EE_WAS_ERROR, &peer_req->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&peer_req->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(peer_req);
		else
			drbd_endio_read_sec_final(peer_req);
	}
}

static void
drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device)
{
	panic("drbd%u %s/%u potential random memory corruption caused by delayed completion of aborted local request\n",
		device->minor, device->resource->name, device->vnr);
}

/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_request_endio(struct bio *bio)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_device *device = req->device;
	struct bio_and_error m;
	enum drbd_req_event what;

	/* If this request was aborted locally before,
	 * but now was completed "successfully",
	 * chances are that this caused arbitrary data corruption.
	 *
	 * "aborting" requests, or force-detaching the disk, is intended for
	 * completely blocked/hung local backing devices which no longer
	 * complete requests at all, not even do error completions.  In this
	 * situation, usually a hard-reset and failover is the only way out.
	 *
	 * By "aborting", basically faking a local error-completion,
	 * we allow for a more graceful switchover by cleanly migrating services.
	 * Still the affected node has to be rebooted "soon".
	 *
	 * By completing these requests, we allow the upper layers to re-use
	 * the associated data pages.
	 *
	 * If later the local backing device "recovers", and now DMAs some data
	 * from disk into the original request pages, in the best case it will
	 * just put random data into unused pages; but typically it will corrupt
	 * meanwhile completely unrelated data, causing all sorts of damage.
	 *
	 * Which means delayed successful completion,
	 * especially for READ requests,
	 * is a reason to panic().
	 *
	 * We assume that a delayed *error* completion is OK,
	 * though we still will complain noisily about it.
	 */
	if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
		if (drbd_ratelimit())
			drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");

		if (!bio->bi_status)
			drbd_panic_after_delayed_completion_of_aborted_request(device);
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(bio->bi_status)) {
		switch (bio_op(bio)) {
		case REQ_OP_WRITE_ZEROES:
		case REQ_OP_DISCARD:
			if (bio->bi_status == BLK_STS_NOTSUPP)
				what = DISCARD_COMPLETED_NOTSUPP;
			else
				what = DISCARD_COMPLETED_WITH_ERROR;
			break;
		case REQ_OP_READ:
			if (bio->bi_opf & REQ_RAHEAD)
				what = READ_AHEAD_COMPLETED_WITH_ERROR;
			else
				what = READ_COMPLETED_WITH_ERROR;
			break;
		default:
			what = WRITE_COMPLETED_WITH_ERROR;
			break;
		}
	} else {
		what = COMPLETED_OK;
	}

	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
	bio_put(bio);

	/* not req_mod(), we need irqsave here! */
	spin_lock_irqsave(&device->resource->req_lock, flags);
	__req_mod(req, what, NULL, &m);
	spin_unlock_irqrestore(&device->resource->req_lock, flags);
	put_ldev(device);

	if (m.bio)
		complete_master_bio(device, &m);
}

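/* Hash the payload of a peer request page chain: every page but the
 * last is fully used; the last one may be covered only partially,
 * as determined by peer_req->i.size. */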
void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	struct page *page = peer_req->pages;
	struct page *tmp;
	unsigned len;
	void *src;

	desc->tfm = tfm;

	crypto_shash_init(desc);

	src = kmap_atomic(page);
	while ((tmp = page_chain_next(page))) {
		/* all but the last page will be fully used */
		crypto_shash_update(desc, src, PAGE_SIZE);
		kunmap_atomic(src);
		page = tmp;
		src = kmap_atomic(page);
	}
	/* and now the last, possibly only partially used page */
	len = peer_req->i.size & (PAGE_SIZE - 1);
	crypto_shash_update(desc, src, len ?: PAGE_SIZE);
	kunmap_atomic(src);

	crypto_shash_final(desc, digest);
	shash_desc_zero(desc);
}

void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	struct bio_vec bvec;
	struct bvec_iter iter;

	desc->tfm = tfm;

	crypto_shash_init(desc);

	bio_for_each_segment(bvec, bio, iter) {
		u8 *src;

		src = bvec_kmap_local(&bvec);
		crypto_shash_update(desc, src, bvec.bv_len);
		kunmap_local(src);
	}
	crypto_shash_final(desc, digest);
	shash_desc_zero(desc);
}

/* MAYBE merge common code with w_e_end_ov_req */
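/* Checksum-based resync, sender side: digest the locally read block
 * and ask the peer (P_CSUM_RS_REQUEST) whether its data matches. */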
static int w_e_send_csum(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
	struct drbd_peer_device *peer_device = peer_req->peer_device;
	struct drbd_device *device = peer_device->device;
	int digest_size;
	void *digest;
	int err = 0;

	if (unlikely(cancel))
		goto out;

	if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
		goto out;

	digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (digest) {
		sector_t sector = peer_req->i.sector;
		unsigned int size = peer_req->i.size;
		drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
		/* Free peer_req and pages before send.
		 * In case we block on congestion, we could otherwise run into
		 * some distributed deadlock, if the other side blocks on
		 * congestion as well, because our receiver blocks in
		 * drbd_alloc_pages due to pp_in_use > max_buffers. */
		drbd_free_peer_req(device, peer_req);
		peer_req = NULL;
		inc_rs_pending(peer_device);
		err = drbd_send_drequest_csum(peer_device, sector, size,
					      digest, digest_size,
					      P_CSUM_RS_REQUEST);
		kfree(digest);
	} else {
		drbd_err(device, "kmalloc() of digest failed.\n");
		err = -ENOMEM;
	}

out:
	if (peer_req)
		drbd_free_peer_req(device, peer_req);

	if (unlikely(err))
		drbd_err(device, "drbd_send_drequest(..., csum) failed\n");
	return err;
}

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

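/* Kick off a local read of [sector, sector+size) into a new peer
 * request; on completion, w_e_send_csum() turns it into a checksum
 * request to the peer. */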
static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_peer_request *peer_req;

	if (!get_ldev(device))
		return -EIO;

	/* GFP_TRY, because if there is no memory available right now, this may
	 * be rescheduled for later. It is "only" background resync, after all. */
	peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,
				       size, size, GFP_TRY);
	if (!peer_req)
		goto defer;

	peer_req->w.cb = w_e_send_csum;
	peer_req->opf = REQ_OP_READ;
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&peer_req->w.list, &device->read_ee);
	spin_unlock_irq(&device->resource->req_lock);

	atomic_add(size >> 9, &device->rs_sect_ev);
	if (drbd_submit_peer_request(peer_req) == 0)
		return 0;

	/* If it failed because of ENOMEM, retry should help.  If it failed
	 * because bio_add_page failed (probably broken lower level driver),
	 * retry may or may not help.
	 * If it does not, you may need to force disconnect. */
	spin_lock_irq(&device->resource->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&device->resource->req_lock);

	drbd_free_peer_req(device, peer_req);
defer:
	put_ldev(device);
	return -EAGAIN;
}

int w_resync_timer(struct drbd_work *w, int cancel)
{
	struct drbd_device *device =
		container_of(w, struct drbd_device, resync_work);

	switch (device->state.conn) {
	case C_VERIFY_S:
		make_ov_request(first_peer_device(device), cancel);
		break;
	case C_SYNC_TARGET:
		make_resync_request(first_peer_device(device), cancel);
		break;
	}

	return 0;
}

void resync_timer_fn(struct timer_list *t)
{
	struct drbd_device *device = from_timer(device, t, resync_timer);

	drbd_queue_work_if_unqueued(
		&first_peer_device(device)->connection->sender_work,
		&device->resync_work);
}

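/* The fifo_buffer below holds the correction values the resync rate
 * controller has planned for the next fb->size time steps: fifo_set()
 * initializes all slots, fifo_push() retrieves the oldest value while
 * queueing a new one, fifo_add_val() spreads a correction over all
 * planned steps. */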
static void fifo_set(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] = value;
}

static int fifo_push(struct fifo_buffer *fb, int value)
{
	int ov;

	ov = fb->values[fb->head_index];
	fb->values[fb->head_index++] = value;

	if (fb->head_index >= fb->size)
		fb->head_index = 0;

	return ov;
}

static void fifo_add_val(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] += value;
}

struct fifo_buffer *fifo_alloc(unsigned int fifo_size)
{
	struct fifo_buffer *fb;

	fb = kzalloc(struct_size(fb, values, fifo_size), GFP_NOIO);
	if (!fb)
		return NULL;

	fb->head_index = 0;
	fb->size = fifo_size;
	fb->total = 0;

	return fb;
}

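/* Feedback controller deciding how many sectors to request during the
 * next SLEEP_TIME interval: compare the fill target ("want", either
 * c_fill_target or derived from c_delay_target) with what is in flight
 * plus what is already planned, and spread the difference over all
 * plan-ahead steps.  Illustrative example (numbers made up, not from
 * this code): want = 1000 sectors, 600 in flight and 100 planned gives
 * a correction of 300; with 10 steps, 30 sectors are added to each
 * slot of the plan. */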
static int drbd_rs_controller(struct drbd_peer_device *peer_device, unsigned int sect_in)
{
	struct drbd_device *device = peer_device->device;
	struct disk_conf *dc;
	unsigned int want;     /* The number of sectors we want in-flight */
	int req_sect; /* Number of sectors to request in this turn */
	int correction; /* Number of sectors more we need in-flight */
	int cps; /* correction per invocation of drbd_rs_controller() */
	int steps; /* Number of time steps to plan ahead */
	int curr_corr;
	int max_sect;
	struct fifo_buffer *plan;

	dc = rcu_dereference(device->ldev->disk_conf);
	plan = rcu_dereference(device->rs_plan_s);

	steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */

	if (device->rs_in_flight + sect_in == 0) { /* At start of resync */
		want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
	} else { /* normal path */
		want = dc->c_fill_target ? dc->c_fill_target :
			sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
	}

	correction = want - device->rs_in_flight - plan->total;

	/* Plan ahead */
	cps = correction / steps;
	fifo_add_val(plan, cps);
	plan->total += cps * steps;

	/* What we do in this step */
	curr_corr = fifo_push(plan, 0);
	plan->total -= curr_corr;

	req_sect = sect_in + curr_corr;
	if (req_sect < 0)
		req_sect = 0;

	max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ;
	if (req_sect > max_sect)
		req_sect = max_sect;

	/*
	drbd_warn(device, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
		 sect_in, device->rs_in_flight, want, correction,
		 steps, cps, device->rs_planed, curr_corr, req_sect);
	*/

	return req_sect;
}

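/* How many resync requests (in units of BM_BLOCK_SIZE) we may issue in
 * this SLEEP_TIME step: either from the controller above (if a plan is
 * configured) or from the static resync_rate, capped at max-buffers/2
 * minus what is already in flight. */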
static int drbd_rs_number_requests(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
	int number, mxb;

	sect_in = atomic_xchg(&device->rs_sect_in, 0);
	device->rs_in_flight -= sect_in;

	rcu_read_lock();
	mxb = drbd_get_max_buffers(device) / 2;
	if (rcu_dereference(device->rs_plan_s)->size) {
		number = drbd_rs_controller(peer_device, sect_in) >> (BM_BLOCK_SHIFT - 9);
		device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
	} else {
		device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
		number = SLEEP_TIME * device->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
	}
	rcu_read_unlock();

	/* Don't have more than "max-buffers"/2 in-flight.
	 * Otherwise we may cause the remote site to stall on drbd_alloc_pages(),
	 * potentially causing a distributed deadlock on congestion during
	 * online-verify or (checksum-based) resync, if max-buffers,
	 * socket buffer sizes and resync rate settings are mis-configured. */

	/* note that "number" is in units of "BM_BLOCK_SIZE" (which is 4k),
	 * mxb (as used here, and in drbd_alloc_pages on the peer) is
	 * "number of pages" (typically also 4k),
	 * but "rs_in_flight" is in "sectors" (512 Byte). */
	if (mxb - device->rs_in_flight/8 < number)
		number = mxb - device->rs_in_flight/8;

	return number;
}

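/* Walk the bitmap for out-of-sync blocks and send up to
 * drbd_rs_number_requests() resync requests (or checksum reads) to the
 * peer, merging adjacent dirty bits into larger aligned requests where
 * possible. */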
static int make_resync_request(struct drbd_peer_device *const peer_device, int cancel)
{
	struct drbd_device *const device = peer_device->device;
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	unsigned long bit;
	sector_t sector;
	const sector_t capacity = get_capacity(device->vdisk);
	int max_bio_size;
	int number, rollback_i, size;
	int align, requeue = 0;
	int i = 0;
	int discard_granularity = 0;

	if (unlikely(cancel))
		return 0;

	if (device->rs_total == 0) {
		/* empty resync? */
		drbd_resync_finished(peer_device);
		return 0;
	}

	if (!get_ldev(device)) {
		/* Since we only need to access device->rsync,
		   get_ldev_if_state(device, D_FAILED) would be sufficient, but
		   continuing resync with a broken disk makes no sense at
		   all */
		drbd_err(device, "Disk broke down during resync!\n");
		return 0;
	}

	if (connection->agreed_features & DRBD_FF_THIN_RESYNC) {
		rcu_read_lock();
		discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity;
		rcu_read_unlock();
	}

	max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
	number = drbd_rs_number_requests(peer_device);
	if (number <= 0)
		goto requeue;

	for (i = 0; i < number; i++) {
		/* Stop generating RS requests when half of the send buffer is filled,
		 * but notify TCP that we'd like to have more space. */
		mutex_lock(&connection->data.mutex);
		if (connection->data.socket) {
			struct sock *sk = connection->data.socket->sk;
			int queued = sk->sk_wmem_queued;
			int sndbuf = sk->sk_sndbuf;
			if (queued > sndbuf / 2) {
				requeue = 1;
				if (sk->sk_socket)
					set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
		} else
			requeue = 1;
		mutex_unlock(&connection->data.mutex);
		if (requeue)
			goto requeue;

next_sector:
		size = BM_BLOCK_SIZE;
		bit  = drbd_bm_find_next(device, device->bm_resync_fo);

		if (bit == DRBD_END_OF_BITMAP) {
			device->bm_resync_fo = drbd_bm_bits(device);
			put_ldev(device);
			return 0;
		}

		sector = BM_BIT_TO_SECT(bit);

		if (drbd_try_rs_begin_io(peer_device, sector)) {
			device->bm_resync_fo = bit;
			goto requeue;
		}
		device->bm_resync_fo = bit + 1;

		if (unlikely(drbd_bm_test_bit(device, bit) == 0)) {
			drbd_rs_complete_io(device, sector);
			goto next_sector;
		}

#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
		/* try to find some adjacent bits.
		 * we stop if we have already the maximum req size.
		 *
		 * Additionally always align bigger requests, in order to
		 * be prepared for all stripe sizes of software RAIDs.
		 */
		align = 1;
		rollback_i = i;
		while (i < number) {
			if (size + BM_BLOCK_SIZE > max_bio_size)
				break;

			/* Be always aligned */
			if (sector & ((1<<(align+3))-1))
				break;

			if (discard_granularity && size == discard_granularity)
				break;

			/* do not cross extent boundaries */
			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
				break;
			/* now, is it actually dirty, after all?
			 * caution, drbd_bm_test_bit is tri-state for some
			 * obscure reason; ( b == 0 ) would get the out-of-band
			 * only accidentally right because of the "oddly sized"
			 * adjustment below */
			if (drbd_bm_test_bit(device, bit+1) != 1)
				break;
			bit++;
			size += BM_BLOCK_SIZE;
			if ((BM_BLOCK_SIZE << align) <= size)
				align++;
			i++;
		}
		/* if we merged some,
		 * reset the offset to start the next drbd_bm_find_next from */
		if (size > BM_BLOCK_SIZE)
			device->bm_resync_fo = bit + 1;
#endif

		/* adjust very last sectors, in case we are oddly sized */
		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		if (device->use_csums) {
			switch (read_for_csum(peer_device, sector, size)) {
			case -EIO: /* Disk failure */
				put_ldev(device);
				return -EIO;
			case -EAGAIN: /* allocation failed, or ldev busy */
				drbd_rs_complete_io(device, sector);
				device->bm_resync_fo = BM_SECT_TO_BIT(sector);
				i = rollback_i;
				goto requeue;
			case 0:
				/* everything ok */
				break;
			default:
				BUG();
			}
		} else {
			int err;

			inc_rs_pending(peer_device);
			err = drbd_send_drequest(peer_device,
						 size == discard_granularity ? P_RS_THIN_REQ : P_RS_DATA_REQUEST,
						 sector, size, ID_SYNCER);
			if (err) {
				drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
				dec_rs_pending(peer_device);
				put_ldev(device);
				return err;
			}
		}
	}

	if (device->bm_resync_fo >= drbd_bm_bits(device)) {
		/* last syncer _request_ was sent,
		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
		 * next sync group will resume), as soon as we receive the last
		 * resync data block, and the last bit is cleared.
		 * until then resync "work" is "inactive" ...
		 */
		put_ldev(device);
		return 0;
	}

 requeue:
	device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
	put_ldev(device);
	return 0;
}

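/* Online verify: send up to drbd_rs_number_requests() P_OV_REQUEST
 * packets starting at device->ov_position, honoring an optional stop
 * sector. */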
static int make_ov_request(struct drbd_peer_device *peer_device, int cancel)
{
	struct drbd_device *device = peer_device->device;
	int number, i, size;
	sector_t sector;
	const sector_t capacity = get_capacity(device->vdisk);
	bool stop_sector_reached = false;

	if (unlikely(cancel))
		return 1;

	number = drbd_rs_number_requests(peer_device);

	sector = device->ov_position;
	for (i = 0; i < number; i++) {
		if (sector >= capacity)
			return 1;

		/* We check for "finished" only in the reply path:
		 * w_e_end_ov_reply().
		 * We need to send at least one request out. */
		stop_sector_reached = i > 0
			&& verify_can_do_stop_sector(device)
			&& sector >= device->ov_stop_sector;
		if (stop_sector_reached)
			break;

		size = BM_BLOCK_SIZE;

		if (drbd_try_rs_begin_io(peer_device, sector)) {
			device->ov_position = sector;
			goto requeue;
		}

		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		inc_rs_pending(peer_device);
		if (drbd_send_ov_request(first_peer_device(device), sector, size)) {
			dec_rs_pending(peer_device);
			return 0;
		}
		sector += BM_SECT_PER_BIT;
	}
	device->ov_position = sector;

 requeue:
	device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	if (i == 0 || !stop_sector_reached)
		mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
	return 1;
}

int w_ov_finished(struct drbd_work *w, int cancel)
{
	struct drbd_device_work *dw =
		container_of(w, struct drbd_device_work, w);
	struct drbd_device *device = dw->device;
	kfree(dw);
	ov_out_of_sync_print(first_peer_device(device));
	drbd_resync_finished(first_peer_device(device));

	return 0;
}

static int w_resync_finished(struct drbd_work *w, int cancel)
{
	struct drbd_device_work *dw =
		container_of(w, struct drbd_device_work, w);
	struct drbd_device *device = dw->device;
	kfree(dw);

	drbd_resync_finished(first_peer_device(device));

	return 0;
}

static void ping_peer(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;

	clear_bit(GOT_PING_ACK, &connection->flags);
	request_ping(connection);
	wait_event(connection->ping_wait,
		   test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
}

int drbd_resync_finished(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_connection *connection = peer_device->connection;
	unsigned long db, dt, dbdt;
	unsigned long n_oos;
	union drbd_state os, ns;
	struct drbd_device_work *dw;
	char *khelper_cmd = NULL;
	int verify_done = 0;

	/* Remove all elements from the resync LRU. Since future actions
	 * might set bits in the (main) bitmap, then the entries in the
	 * resync LRU would be wrong. */
	if (drbd_rs_del_all(device)) {
		/* In case this is not possible now, most probably because
		 * there are P_RS_DATA_REPLY packets lingering on the worker's
		 * queue (or even the read operations for those packets
		 * are not finished by now).  Retry in 100ms. */

		schedule_timeout_interruptible(HZ / 10);
		dw = kmalloc(sizeof(struct drbd_device_work), GFP_ATOMIC);
		if (dw) {
			dw->w.cb = w_resync_finished;
			dw->device = device;
			drbd_queue_work(&connection->sender_work, &dw->w);
			return 1;
		}
		drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(dw).\n");
	}

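	/* Average throughput of this run: dt = elapsed wall-clock time minus
	 * paused time, in seconds; db = bitmap bits covered; dbdt = KiB/s. */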
883b30ab791SAndreas Gruenbacher 	dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
884b411b363SPhilipp Reisner 	if (dt <= 0)
885b411b363SPhilipp Reisner 		dt = 1;
88658ffa580SLars Ellenberg 
887b30ab791SAndreas Gruenbacher 	db = device->rs_total;
88858ffa580SLars Ellenberg 	/* adjust for verify start and stop sectors, respective reached position */
889b30ab791SAndreas Gruenbacher 	if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
890b30ab791SAndreas Gruenbacher 		db -= device->ov_left;
89158ffa580SLars Ellenberg 
892b411b363SPhilipp Reisner 	dbdt = Bit2KB(db/dt);
893b30ab791SAndreas Gruenbacher 	device->rs_paused /= HZ;
894b411b363SPhilipp Reisner 
895b30ab791SAndreas Gruenbacher 	if (!get_ldev(device))
896b411b363SPhilipp Reisner 		goto out;
897b411b363SPhilipp Reisner 
898b30ab791SAndreas Gruenbacher 	ping_peer(device);
899af85e8e8SLars Ellenberg 
9000500813fSAndreas Gruenbacher 	spin_lock_irq(&device->resource->req_lock);
901b30ab791SAndreas Gruenbacher 	os = drbd_read_state(device);
902b411b363SPhilipp Reisner 
90326525618SLars Ellenberg 	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
90426525618SLars Ellenberg 
905b411b363SPhilipp Reisner 	/* This protects us against multiple calls (that can happen in the presence
906b411b363SPhilipp Reisner 	   of application IO), and against connectivity loss just before we arrive here. */
907b411b363SPhilipp Reisner 	if (os.conn <= C_CONNECTED)
908b411b363SPhilipp Reisner 		goto out_unlock;
909b411b363SPhilipp Reisner 
910b411b363SPhilipp Reisner 	ns = os;
911b411b363SPhilipp Reisner 	ns.conn = C_CONNECTED;
912b411b363SPhilipp Reisner 
913d0180171SAndreas Gruenbacher 	drbd_info(device, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
91426525618SLars Ellenberg 	     verify_done ? "Online verify" : "Resync",
915b30ab791SAndreas Gruenbacher 	     dt + device->rs_paused, device->rs_paused, dbdt);
916b411b363SPhilipp Reisner 
917b30ab791SAndreas Gruenbacher 	n_oos = drbd_bm_total_weight(device);
918b411b363SPhilipp Reisner 
919b411b363SPhilipp Reisner 	if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
920b411b363SPhilipp Reisner 		if (n_oos) {
921d0180171SAndreas Gruenbacher 			drbd_alert(device, "Online verify found %lu %dk block out of sync!\n",
922b411b363SPhilipp Reisner 			      n_oos, Bit2KB(1));
923b411b363SPhilipp Reisner 			khelper_cmd = "out-of-sync";
924b411b363SPhilipp Reisner 		}
925b411b363SPhilipp Reisner 	} else {
9260b0ba1efSAndreas Gruenbacher 		D_ASSERT(device, (n_oos - device->rs_failed) == 0);
927b411b363SPhilipp Reisner 
928b411b363SPhilipp Reisner 		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
929b411b363SPhilipp Reisner 			khelper_cmd = "after-resync-target";
930b411b363SPhilipp Reisner 
931aaaba345SLars Ellenberg 		if (device->use_csums && device->rs_total) {
932b30ab791SAndreas Gruenbacher 			const unsigned long s = device->rs_same_csum;
933b30ab791SAndreas Gruenbacher 			const unsigned long t = device->rs_total;
934b411b363SPhilipp Reisner 			const int ratio =
935b411b363SPhilipp Reisner 				(t == 0)     ? 0 :
936b411b363SPhilipp Reisner 			(t < 100000) ? ((s*100)/t) : (s/(t/100));
937d0180171SAndreas Gruenbacher 			drbd_info(device, "%u %% had equal checksums, eliminated: %luK; "
938b411b363SPhilipp Reisner 			     "transferred %luK total %luK\n",
939b411b363SPhilipp Reisner 			     ratio,
940b30ab791SAndreas Gruenbacher 			     Bit2KB(device->rs_same_csum),
941b30ab791SAndreas Gruenbacher 			     Bit2KB(device->rs_total - device->rs_same_csum),
942b30ab791SAndreas Gruenbacher 			     Bit2KB(device->rs_total));
943b411b363SPhilipp Reisner 		}
944b411b363SPhilipp Reisner 	}
945b411b363SPhilipp Reisner 
946b30ab791SAndreas Gruenbacher 	if (device->rs_failed) {
947d0180171SAndreas Gruenbacher 		drbd_info(device, "            %lu failed blocks\n", device->rs_failed);
948b411b363SPhilipp Reisner 
949b411b363SPhilipp Reisner 		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
950b411b363SPhilipp Reisner 			ns.disk = D_INCONSISTENT;
951b411b363SPhilipp Reisner 			ns.pdsk = D_UP_TO_DATE;
952b411b363SPhilipp Reisner 		} else {
953b411b363SPhilipp Reisner 			ns.disk = D_UP_TO_DATE;
954b411b363SPhilipp Reisner 			ns.pdsk = D_INCONSISTENT;
955b411b363SPhilipp Reisner 		}
956b411b363SPhilipp Reisner 	} else {
957b411b363SPhilipp Reisner 		ns.disk = D_UP_TO_DATE;
958b411b363SPhilipp Reisner 		ns.pdsk = D_UP_TO_DATE;
959b411b363SPhilipp Reisner 
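		/* As former sync target, adopt the peer's UUIDs: copy bitmap
		 * and history UUIDs, rotate our current UUID into the bitmap
		 * slot, then take over the peer's current UUID, so both nodes
		 * agree on the data generation afterwards. */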
960b411b363SPhilipp Reisner 		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
961b30ab791SAndreas Gruenbacher 			if (device->p_uuid) {
962b411b363SPhilipp Reisner 				int i;
963b411b363SPhilipp Reisner 				for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
964b30ab791SAndreas Gruenbacher 					_drbd_uuid_set(device, i, device->p_uuid[i]);
965b30ab791SAndreas Gruenbacher 				drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]);
966b30ab791SAndreas Gruenbacher 				_drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]);
967b411b363SPhilipp Reisner 			} else {
968d0180171SAndreas Gruenbacher 				drbd_err(device, "device->p_uuid is NULL! BUG\n");
969b411b363SPhilipp Reisner 			}
970b411b363SPhilipp Reisner 		}
971b411b363SPhilipp Reisner 
97262b0da3aSLars Ellenberg 		if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
97362b0da3aSLars Ellenberg 			/* for verify runs, we don't update uuids here,
97462b0da3aSLars Ellenberg 			 * so there would be nothing to report. */
975b30ab791SAndreas Gruenbacher 			drbd_uuid_set_bm(device, 0UL);
976b30ab791SAndreas Gruenbacher 			drbd_print_uuids(device, "updated UUIDs");
977b30ab791SAndreas Gruenbacher 			if (device->p_uuid) {
978b411b363SPhilipp Reisner 				/* Now the two UUID sets are equal, update what we
979b411b363SPhilipp Reisner 				 * know of the peer. */
980b411b363SPhilipp Reisner 				int i;
981b411b363SPhilipp Reisner 				for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
982b30ab791SAndreas Gruenbacher 					device->p_uuid[i] = device->ldev->md.uuid[i];
983b411b363SPhilipp Reisner 			}
984b411b363SPhilipp Reisner 		}
98562b0da3aSLars Ellenberg 	}
986b411b363SPhilipp Reisner 
987b30ab791SAndreas Gruenbacher 	_drbd_set_state(device, ns, CS_VERBOSE, NULL);
988b411b363SPhilipp Reisner out_unlock:
9890500813fSAndreas Gruenbacher 	spin_unlock_irq(&device->resource->req_lock);
99026a96110SLars Ellenberg 
99126a96110SLars Ellenberg 	/* If we have been sync source, and have an effective fencing-policy,
99226a96110SLars Ellenberg 	 * once *all* volumes are back in sync, call "unfence". */
99326a96110SLars Ellenberg 	if (os.conn == C_SYNC_SOURCE) {
99426a96110SLars Ellenberg 		enum drbd_disk_state disk_state = D_MASK;
99526a96110SLars Ellenberg 		enum drbd_disk_state pdsk_state = D_MASK;
99626a96110SLars Ellenberg 		enum drbd_fencing_p fp = FP_DONT_CARE;
99726a96110SLars Ellenberg 
99826a96110SLars Ellenberg 		rcu_read_lock();
99926a96110SLars Ellenberg 		fp = rcu_dereference(device->ldev->disk_conf)->fencing;
100026a96110SLars Ellenberg 		if (fp != FP_DONT_CARE) {
100126a96110SLars Ellenberg 			struct drbd_peer_device *peer_device;
100226a96110SLars Ellenberg 			int vnr;
100326a96110SLars Ellenberg 			idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
100426a96110SLars Ellenberg 				struct drbd_device *device = peer_device->device;
100526a96110SLars Ellenberg 				disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
100626a96110SLars Ellenberg 				pdsk_state = min_t(enum drbd_disk_state, pdsk_state, device->state.pdsk);
100726a96110SLars Ellenberg 			}
100826a96110SLars Ellenberg 		}
100926a96110SLars Ellenberg 		rcu_read_unlock();
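		/* disk_state and pdsk_state now hold the minimum over all
		 * volumes; unfence only if every local and peer disk is
		 * D_UP_TO_DATE. */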
101026a96110SLars Ellenberg 		if (disk_state == D_UP_TO_DATE && pdsk_state == D_UP_TO_DATE)
101126a96110SLars Ellenberg 			conn_khelper(connection, "unfence-peer");
101226a96110SLars Ellenberg 	}
101326a96110SLars Ellenberg 
1014b30ab791SAndreas Gruenbacher 	put_ldev(device);
1015b411b363SPhilipp Reisner out:
1016b30ab791SAndreas Gruenbacher 	device->rs_total  = 0;
1017b30ab791SAndreas Gruenbacher 	device->rs_failed = 0;
1018b30ab791SAndreas Gruenbacher 	device->rs_paused = 0;
101958ffa580SLars Ellenberg 
102058ffa580SLars Ellenberg 	/* reset start sector, if we reached end of device */
1021b30ab791SAndreas Gruenbacher 	if (verify_done && device->ov_left == 0)
1022b30ab791SAndreas Gruenbacher 		device->ov_start_sector = 0;
1023b411b363SPhilipp Reisner 
1024b30ab791SAndreas Gruenbacher 	drbd_md_sync(device);
102513d42685SLars Ellenberg 
1026b411b363SPhilipp Reisner 	if (khelper_cmd)
1027b30ab791SAndreas Gruenbacher 		drbd_khelper(device, khelper_cmd);
1028b411b363SPhilipp Reisner 
1029b411b363SPhilipp Reisner 	return 1;
1030b411b363SPhilipp Reisner }
1031b411b363SPhilipp Reisner 
1032b411b363SPhilipp Reisner /* helper */
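/* Peer requests whose pages may still be referenced by the network layer
 * (sendpage not yet complete) move from pp_in_use to pp_in_use_by_net and
 * are parked on net_ee for later cleanup instead of being freed here. */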
1033b30ab791SAndreas Gruenbacher static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
1034b411b363SPhilipp Reisner {
1035045417f7SAndreas Gruenbacher 	if (drbd_peer_req_has_active_page(peer_req)) {
1036b411b363SPhilipp Reisner 		/* This might happen if sendpage() has not finished */
1037ba6bee98SCai Huoqing 		int i = PFN_UP(peer_req->i.size);
1038b30ab791SAndreas Gruenbacher 		atomic_add(i, &device->pp_in_use_by_net);
1039b30ab791SAndreas Gruenbacher 		atomic_sub(i, &device->pp_in_use);
10400500813fSAndreas Gruenbacher 		spin_lock_irq(&device->resource->req_lock);
1041a8cd15baSAndreas Gruenbacher 		list_add_tail(&peer_req->w.list, &device->net_ee);
10420500813fSAndreas Gruenbacher 		spin_unlock_irq(&device->resource->req_lock);
1043435f0740SLars Ellenberg 		wake_up(&drbd_pp_wait);
1044b411b363SPhilipp Reisner 	} else
1045b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1046b411b363SPhilipp Reisner }
1047b411b363SPhilipp Reisner 
1048b411b363SPhilipp Reisner /**
1049b411b363SPhilipp Reisner  * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
1050b411b363SPhilipp Reisner  * @w:		work object.
1051b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyways
1052b411b363SPhilipp Reisner  */
105399920dc5SAndreas Gruenbacher int w_e_end_data_req(struct drbd_work *w, int cancel)
1054b411b363SPhilipp Reisner {
1055a8cd15baSAndreas Gruenbacher 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
10566780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
10576780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
105899920dc5SAndreas Gruenbacher 	int err;
1059b411b363SPhilipp Reisner 
1060b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1061b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1062b30ab791SAndreas Gruenbacher 		dec_unacked(device);
106399920dc5SAndreas Gruenbacher 		return 0;
1064b411b363SPhilipp Reisner 	}
1065b411b363SPhilipp Reisner 
1066db830c46SAndreas Gruenbacher 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
10676780139cSAndreas Gruenbacher 		err = drbd_send_block(peer_device, P_DATA_REPLY, peer_req);
1068b411b363SPhilipp Reisner 	} else {
1069e3fa02d7SChristoph Böhmwalder 		if (drbd_ratelimit())
1070d0180171SAndreas Gruenbacher 			drbd_err(device, "Sending NegDReply. sector=%llus.\n",
1071db830c46SAndreas Gruenbacher 			    (unsigned long long)peer_req->i.sector);
1072b411b363SPhilipp Reisner 
10736780139cSAndreas Gruenbacher 		err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
1074b411b363SPhilipp Reisner 	}
1075b411b363SPhilipp Reisner 
1076b30ab791SAndreas Gruenbacher 	dec_unacked(device);
1077b411b363SPhilipp Reisner 
1078b30ab791SAndreas Gruenbacher 	move_to_net_ee_or_free(device, peer_req);
1079b411b363SPhilipp Reisner 
108099920dc5SAndreas Gruenbacher 	if (unlikely(err))
1081d0180171SAndreas Gruenbacher 		drbd_err(device, "drbd_send_block() failed\n");
108299920dc5SAndreas Gruenbacher 	return err;
1083b411b363SPhilipp Reisner }
1084b411b363SPhilipp Reisner 
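/* Scan the peer request's page chain one long at a time; used below to
 * report all-zero thin-provisioning resync replies as P_RS_DEALLOCATED
 * instead of shipping the zeroed payload. */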
1085700ca8c0SPhilipp Reisner static bool all_zero(struct drbd_peer_request *peer_req)
1086700ca8c0SPhilipp Reisner {
1087700ca8c0SPhilipp Reisner 	struct page *page = peer_req->pages;
1088700ca8c0SPhilipp Reisner 	unsigned int len = peer_req->i.size;
1089700ca8c0SPhilipp Reisner 
1090700ca8c0SPhilipp Reisner 	page_chain_for_each(page) {
1091700ca8c0SPhilipp Reisner 		unsigned int l = min_t(unsigned int, len, PAGE_SIZE);
1092700ca8c0SPhilipp Reisner 		unsigned int i, words = l / sizeof(long);
1093700ca8c0SPhilipp Reisner 		unsigned long *d;
1094700ca8c0SPhilipp Reisner 
1095700ca8c0SPhilipp Reisner 		d = kmap_atomic(page);
1096700ca8c0SPhilipp Reisner 		for (i = 0; i < words; i++) {
1097700ca8c0SPhilipp Reisner 			if (d[i]) {
1098700ca8c0SPhilipp Reisner 				kunmap_atomic(d);
1099700ca8c0SPhilipp Reisner 				return false;
1100700ca8c0SPhilipp Reisner 			}
1101700ca8c0SPhilipp Reisner 		}
1102700ca8c0SPhilipp Reisner 		kunmap_atomic(d);
1103700ca8c0SPhilipp Reisner 		len -= l;
1104700ca8c0SPhilipp Reisner 	}
1105700ca8c0SPhilipp Reisner 
1106700ca8c0SPhilipp Reisner 	return true;
1107700ca8c0SPhilipp Reisner }
1108700ca8c0SPhilipp Reisner 
1109b411b363SPhilipp Reisner /**
1110a209b4aeSAndreas Gruenbacher  * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
1111b411b363SPhilipp Reisner  * @w:		work object.
1112b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyways
1113b411b363SPhilipp Reisner  */
111499920dc5SAndreas Gruenbacher int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
1115b411b363SPhilipp Reisner {
1116a8cd15baSAndreas Gruenbacher 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
11176780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
11186780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
111999920dc5SAndreas Gruenbacher 	int err;
1120b411b363SPhilipp Reisner 
1121b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1122b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1123b30ab791SAndreas Gruenbacher 		dec_unacked(device);
112499920dc5SAndreas Gruenbacher 		return 0;
1125b411b363SPhilipp Reisner 	}
1126b411b363SPhilipp Reisner 
1127b30ab791SAndreas Gruenbacher 	if (get_ldev_if_state(device, D_FAILED)) {
1128b30ab791SAndreas Gruenbacher 		drbd_rs_complete_io(device, peer_req->i.sector);
1129b30ab791SAndreas Gruenbacher 		put_ldev(device);
1130b411b363SPhilipp Reisner 	}
1131b411b363SPhilipp Reisner 
1132b30ab791SAndreas Gruenbacher 	if (device->state.conn == C_AHEAD) {
11336780139cSAndreas Gruenbacher 		err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req);
1134db830c46SAndreas Gruenbacher 	} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1135b30ab791SAndreas Gruenbacher 		if (likely(device->state.pdsk >= D_INCONSISTENT)) {
1136*0d11f3cfSChristoph Böhmwalder 			inc_rs_pending(peer_device);
1137700ca8c0SPhilipp Reisner 			if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req))
1138700ca8c0SPhilipp Reisner 				err = drbd_send_rs_deallocated(peer_device, peer_req);
1139700ca8c0SPhilipp Reisner 			else
11406780139cSAndreas Gruenbacher 				err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
1141b411b363SPhilipp Reisner 		} else {
1142e3fa02d7SChristoph Böhmwalder 			if (drbd_ratelimit())
1143d0180171SAndreas Gruenbacher 				drbd_err(device, "Not sending RSDataReply, "
1144b411b363SPhilipp Reisner 				    "partner DISKLESS!\n");
114599920dc5SAndreas Gruenbacher 			err = 0;
1146b411b363SPhilipp Reisner 		}
1147b411b363SPhilipp Reisner 	} else {
1148e3fa02d7SChristoph Böhmwalder 		if (drbd_ratelimit())
1149d0180171SAndreas Gruenbacher 			drbd_err(device, "Sending NegRSDReply. sector %llus.\n",
1150db830c46SAndreas Gruenbacher 			    (unsigned long long)peer_req->i.sector);
1151b411b363SPhilipp Reisner 
11526780139cSAndreas Gruenbacher 		err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
1153b411b363SPhilipp Reisner 
1154b411b363SPhilipp Reisner 		/* update resync data with failure */
1155*0d11f3cfSChristoph Böhmwalder 		drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
1156b411b363SPhilipp Reisner 	}
1157b411b363SPhilipp Reisner 
1158b30ab791SAndreas Gruenbacher 	dec_unacked(device);
1159b411b363SPhilipp Reisner 
1160b30ab791SAndreas Gruenbacher 	move_to_net_ee_or_free(device, peer_req);
1161b411b363SPhilipp Reisner 
116299920dc5SAndreas Gruenbacher 	if (unlikely(err))
1163d0180171SAndreas Gruenbacher 		drbd_err(device, "drbd_send_block() failed\n");
116499920dc5SAndreas Gruenbacher 	return err;
1165b411b363SPhilipp Reisner }
1166b411b363SPhilipp Reisner 
116799920dc5SAndreas Gruenbacher int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
1168b411b363SPhilipp Reisner {
1169a8cd15baSAndreas Gruenbacher 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
11706780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
11716780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
1172b411b363SPhilipp Reisner 	struct digest_info *di;
1173b411b363SPhilipp Reisner 	int digest_size;
1174b411b363SPhilipp Reisner 	void *digest = NULL;
117599920dc5SAndreas Gruenbacher 	int err, eq = 0;
1176b411b363SPhilipp Reisner 
1177b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1178b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1179b30ab791SAndreas Gruenbacher 		dec_unacked(device);
118099920dc5SAndreas Gruenbacher 		return 0;
1181b411b363SPhilipp Reisner 	}
1182b411b363SPhilipp Reisner 
1183b30ab791SAndreas Gruenbacher 	if (get_ldev(device)) {
1184b30ab791SAndreas Gruenbacher 		drbd_rs_complete_io(device, peer_req->i.sector);
1185b30ab791SAndreas Gruenbacher 		put_ldev(device);
11861d53f09eSLars Ellenberg 	}
1187b411b363SPhilipp Reisner 
1188db830c46SAndreas Gruenbacher 	di = peer_req->digest;
1189b411b363SPhilipp Reisner 
1190db830c46SAndreas Gruenbacher 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1191b411b363SPhilipp Reisner 		/* quick hack to try to avoid a race against reconfiguration.
1192b411b363SPhilipp Reisner 		 * a real fix would be much more involved,
1193b411b363SPhilipp Reisner 		 * introducing more locking mechanisms */
11946780139cSAndreas Gruenbacher 		if (peer_device->connection->csums_tfm) {
11953d0e6375SKees Cook 			digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
11960b0ba1efSAndreas Gruenbacher 			D_ASSERT(device, digest_size == di->digest_size);
1197b411b363SPhilipp Reisner 			digest = kmalloc(digest_size, GFP_NOIO);
1198b411b363SPhilipp Reisner 		}
1199b411b363SPhilipp Reisner 		if (digest) {
12006780139cSAndreas Gruenbacher 			drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
1201b411b363SPhilipp Reisner 			eq = !memcmp(digest, di->digest, digest_size);
1202b411b363SPhilipp Reisner 			kfree(digest);
1203b411b363SPhilipp Reisner 		}
1204b411b363SPhilipp Reisner 
1205b411b363SPhilipp Reisner 		if (eq) {
1206*0d11f3cfSChristoph Böhmwalder 			drbd_set_in_sync(peer_device, peer_req->i.sector, peer_req->i.size);
1207676396d5SLars Ellenberg 			/* rs_same_csums unit is BM_BLOCK_SIZE */
1208b30ab791SAndreas Gruenbacher 			device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
12096780139cSAndreas Gruenbacher 			err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req);
1210b411b363SPhilipp Reisner 		} else {
1211*0d11f3cfSChristoph Böhmwalder 			inc_rs_pending(peer_device);
1212db830c46SAndreas Gruenbacher 			peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
1213db830c46SAndreas Gruenbacher 			peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
1214204bba99SPhilipp Reisner 			kfree(di);
12156780139cSAndreas Gruenbacher 			err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
1216b411b363SPhilipp Reisner 		}
1217b411b363SPhilipp Reisner 	} else {
12186780139cSAndreas Gruenbacher 		err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
1219e3fa02d7SChristoph Böhmwalder 		if (drbd_ratelimit())
1220d0180171SAndreas Gruenbacher 			drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
1221b411b363SPhilipp Reisner 	}
1222b411b363SPhilipp Reisner 
1223b30ab791SAndreas Gruenbacher 	dec_unacked(device);
1224b30ab791SAndreas Gruenbacher 	move_to_net_ee_or_free(device, peer_req);
1225b411b363SPhilipp Reisner 
122699920dc5SAndreas Gruenbacher 	if (unlikely(err))
1227d0180171SAndreas Gruenbacher 		drbd_err(device, "drbd_send_block/ack() failed\n");
122899920dc5SAndreas Gruenbacher 	return err;
1229b411b363SPhilipp Reisner }
1230b411b363SPhilipp Reisner 
123199920dc5SAndreas Gruenbacher int w_e_end_ov_req(struct drbd_work *w, int cancel)
1232b411b363SPhilipp Reisner {
1233a8cd15baSAndreas Gruenbacher 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
12346780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
12356780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
1236db830c46SAndreas Gruenbacher 	sector_t sector = peer_req->i.sector;
1237db830c46SAndreas Gruenbacher 	unsigned int size = peer_req->i.size;
1238b411b363SPhilipp Reisner 	int digest_size;
1239b411b363SPhilipp Reisner 	void *digest;
124099920dc5SAndreas Gruenbacher 	int err = 0;
1241b411b363SPhilipp Reisner 
1242b411b363SPhilipp Reisner 	if (unlikely(cancel))
1243b411b363SPhilipp Reisner 		goto out;
1244b411b363SPhilipp Reisner 
12453d0e6375SKees Cook 	digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
1246b411b363SPhilipp Reisner 	digest = kmalloc(digest_size, GFP_NOIO);
12478f21420eSPhilipp Reisner 	if (!digest) {
124899920dc5SAndreas Gruenbacher 		err = 1;	/* terminate the connection in case the allocation failed */
12498f21420eSPhilipp Reisner 		goto out;
12508f21420eSPhilipp Reisner 	}
12518f21420eSPhilipp Reisner 
1252db830c46SAndreas Gruenbacher 	if (likely(!(peer_req->flags & EE_WAS_ERROR)))
12536780139cSAndreas Gruenbacher 		drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
12548f21420eSPhilipp Reisner 	else
12558f21420eSPhilipp Reisner 		memset(digest, 0, digest_size);
12568f21420eSPhilipp Reisner 
125753ea4331SLars Ellenberg 	/* Free peer_req and pages before send.
125853ea4331SLars Ellenberg 	 * In case we block on congestion, we could otherwise run into
125953ea4331SLars Ellenberg 	 * some distributed deadlock, if the other side blocks on
126053ea4331SLars Ellenberg 	 * congestion as well, because our receiver blocks in
1261c37c8ecfSAndreas Gruenbacher 	 * drbd_alloc_pages due to pp_in_use > max_buffers. */
1262b30ab791SAndreas Gruenbacher 	drbd_free_peer_req(device, peer_req);
1263db830c46SAndreas Gruenbacher 	peer_req = NULL;
1264*0d11f3cfSChristoph Böhmwalder 	inc_rs_pending(peer_device);
12656780139cSAndreas Gruenbacher 	err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY);
126699920dc5SAndreas Gruenbacher 	if (err)
1267*0d11f3cfSChristoph Böhmwalder 		dec_rs_pending(peer_device);
1268b411b363SPhilipp Reisner 	kfree(digest);
1269b411b363SPhilipp Reisner 
1270b411b363SPhilipp Reisner out:
1271db830c46SAndreas Gruenbacher 	if (peer_req)
1272b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1273b30ab791SAndreas Gruenbacher 	dec_unacked(device);
127499920dc5SAndreas Gruenbacher 	return err;
1275b411b363SPhilipp Reisner }
1276b411b363SPhilipp Reisner 
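/* Coalesce out-of-sync ranges found by online verify: extend the last
 * recorded range if the new one is adjacent, otherwise start a new one. */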
1277*0d11f3cfSChristoph Böhmwalder void drbd_ov_out_of_sync_found(struct drbd_peer_device *peer_device, sector_t sector, int size)
1278b411b363SPhilipp Reisner {
1279*0d11f3cfSChristoph Böhmwalder 	struct drbd_device *device = peer_device->device;
1280b30ab791SAndreas Gruenbacher 	if (device->ov_last_oos_start + device->ov_last_oos_size == sector) {
1281b30ab791SAndreas Gruenbacher 		device->ov_last_oos_size += size>>9;
1282b411b363SPhilipp Reisner 	} else {
1283b30ab791SAndreas Gruenbacher 		device->ov_last_oos_start = sector;
1284b30ab791SAndreas Gruenbacher 		device->ov_last_oos_size = size>>9;
1285b411b363SPhilipp Reisner 	}
1286*0d11f3cfSChristoph Böhmwalder 	drbd_set_out_of_sync(peer_device, sector, size);
1287b411b363SPhilipp Reisner }
1288b411b363SPhilipp Reisner 
128999920dc5SAndreas Gruenbacher int w_e_end_ov_reply(struct drbd_work *w, int cancel)
1290b411b363SPhilipp Reisner {
1291a8cd15baSAndreas Gruenbacher 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
12926780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
12936780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
1294b411b363SPhilipp Reisner 	struct digest_info *di;
1295b411b363SPhilipp Reisner 	void *digest;
1296db830c46SAndreas Gruenbacher 	sector_t sector = peer_req->i.sector;
1297db830c46SAndreas Gruenbacher 	unsigned int size = peer_req->i.size;
129853ea4331SLars Ellenberg 	int digest_size;
129999920dc5SAndreas Gruenbacher 	int err, eq = 0;
130058ffa580SLars Ellenberg 	bool stop_sector_reached = false;
1301b411b363SPhilipp Reisner 
1302b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1303b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1304b30ab791SAndreas Gruenbacher 		dec_unacked(device);
130599920dc5SAndreas Gruenbacher 		return 0;
1306b411b363SPhilipp Reisner 	}
1307b411b363SPhilipp Reisner 
1308b411b363SPhilipp Reisner 	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
1309b411b363SPhilipp Reisner 	 * the resync lru has been cleaned up already */
1310b30ab791SAndreas Gruenbacher 	if (get_ldev(device)) {
1311b30ab791SAndreas Gruenbacher 		drbd_rs_complete_io(device, peer_req->i.sector);
1312b30ab791SAndreas Gruenbacher 		put_ldev(device);
13131d53f09eSLars Ellenberg 	}
1314b411b363SPhilipp Reisner 
1315db830c46SAndreas Gruenbacher 	di = peer_req->digest;
1316b411b363SPhilipp Reisner 
1317db830c46SAndreas Gruenbacher 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
13183d0e6375SKees Cook 		digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
1319b411b363SPhilipp Reisner 		digest = kmalloc(digest_size, GFP_NOIO);
1320b411b363SPhilipp Reisner 		if (digest) {
13216780139cSAndreas Gruenbacher 			drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
1322b411b363SPhilipp Reisner 
13230b0ba1efSAndreas Gruenbacher 			D_ASSERT(device, digest_size == di->digest_size);
1324b411b363SPhilipp Reisner 			eq = !memcmp(digest, di->digest, digest_size);
1325b411b363SPhilipp Reisner 			kfree(digest);
1326b411b363SPhilipp Reisner 		}
1327b411b363SPhilipp Reisner 	}
1328b411b363SPhilipp Reisner 
13299676c760SLars Ellenberg 	/* Free peer_req and pages before send.
133053ea4331SLars Ellenberg 	 * In case we block on congestion, we could otherwise run into
133153ea4331SLars Ellenberg 	 * some distributed deadlock, if the other side blocks on
133253ea4331SLars Ellenberg 	 * congestion as well, because our receiver blocks in
1333c37c8ecfSAndreas Gruenbacher 	 * drbd_alloc_pages due to pp_in_use > max_buffers. */
1334b30ab791SAndreas Gruenbacher 	drbd_free_peer_req(device, peer_req);
1335b411b363SPhilipp Reisner 	if (!eq)
1336*0d11f3cfSChristoph Böhmwalder 		drbd_ov_out_of_sync_found(peer_device, sector, size);
1337b411b363SPhilipp Reisner 	else
1338*0d11f3cfSChristoph Böhmwalder 		ov_out_of_sync_print(peer_device);
1339b411b363SPhilipp Reisner 
13406780139cSAndreas Gruenbacher 	err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size,
1341b411b363SPhilipp Reisner 			       eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
1342b411b363SPhilipp Reisner 
1343b30ab791SAndreas Gruenbacher 	dec_unacked(device);
1344b411b363SPhilipp Reisner 
1345b30ab791SAndreas Gruenbacher 	--device->ov_left;
1346ea5442afSLars Ellenberg 
1347ea5442afSLars Ellenberg 	/* let's advance progress step marks only for every other megabyte */
1348b30ab791SAndreas Gruenbacher 	if ((device->ov_left & 0x200) == 0x200)
1349*0d11f3cfSChristoph Böhmwalder 		drbd_advance_rs_marks(peer_device, device->ov_left);
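	/* ov_left counts bitmap bits of BM_BLOCK_SIZE (4 KiB) each, so bit 9
	 * flips once every 0x200 * 4 KiB = 2 MiB of verified data. */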
1350ea5442afSLars Ellenberg 
1351b30ab791SAndreas Gruenbacher 	stop_sector_reached = verify_can_do_stop_sector(device) &&
1352b30ab791SAndreas Gruenbacher 		(sector + (size>>9)) >= device->ov_stop_sector;
135358ffa580SLars Ellenberg 
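	/* An online verify may have been given a stop sector; reaching it
	 * finishes the run just as running out of blocks does. */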
1354b30ab791SAndreas Gruenbacher 	if (device->ov_left == 0 || stop_sector_reached) {
1355*0d11f3cfSChristoph Böhmwalder 		ov_out_of_sync_print(peer_device);
1356*0d11f3cfSChristoph Böhmwalder 		drbd_resync_finished(peer_device);
1357b411b363SPhilipp Reisner 	}
1358b411b363SPhilipp Reisner 
135999920dc5SAndreas Gruenbacher 	return err;
1360b411b363SPhilipp Reisner }
1361b411b363SPhilipp Reisner 
1362b6dd1a89SLars Ellenberg /* FIXME
1363b6dd1a89SLars Ellenberg  * We need to track the number of pending barrier acks,
1364b6dd1a89SLars Ellenberg  * and to be able to wait for them.
1365b6dd1a89SLars Ellenberg  * See also comment in drbd_adm_attach before drbd_suspend_io.
1366b6dd1a89SLars Ellenberg  */
1367bde89a9eSAndreas Gruenbacher static int drbd_send_barrier(struct drbd_connection *connection)
1368b411b363SPhilipp Reisner {
13699f5bdc33SAndreas Gruenbacher 	struct p_barrier *p;
1370b6dd1a89SLars Ellenberg 	struct drbd_socket *sock;
1371b411b363SPhilipp Reisner 
1372bde89a9eSAndreas Gruenbacher 	sock = &connection->data;
1373bde89a9eSAndreas Gruenbacher 	p = conn_prepare_command(connection, sock);
13749f5bdc33SAndreas Gruenbacher 	if (!p)
13759f5bdc33SAndreas Gruenbacher 		return -EIO;
1376bde89a9eSAndreas Gruenbacher 	p->barrier = connection->send.current_epoch_nr;
1377b6dd1a89SLars Ellenberg 	p->pad = 0;
1378bde89a9eSAndreas Gruenbacher 	connection->send.current_epoch_writes = 0;
137984d34f2fSLars Ellenberg 	connection->send.last_sent_barrier_jif = jiffies;
1380b6dd1a89SLars Ellenberg 
1381bde89a9eSAndreas Gruenbacher 	return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
1382b411b363SPhilipp Reisner }
1383b411b363SPhilipp Reisner 
1384c51a0ef3SLars Ellenberg static int pd_send_unplug_remote(struct drbd_peer_device *pd)
1385c51a0ef3SLars Ellenberg {
1386c51a0ef3SLars Ellenberg 	struct drbd_socket *sock = &pd->connection->data;
1387c51a0ef3SLars Ellenberg 	if (!drbd_prepare_command(pd, sock))
1388c51a0ef3SLars Ellenberg 		return -EIO;
1389c51a0ef3SLars Ellenberg 	return drbd_send_command(pd, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
1390c51a0ef3SLars Ellenberg }
1391c51a0ef3SLars Ellenberg 
139299920dc5SAndreas Gruenbacher int w_send_write_hint(struct drbd_work *w, int cancel)
1393b411b363SPhilipp Reisner {
139484b8c06bSAndreas Gruenbacher 	struct drbd_device *device =
139584b8c06bSAndreas Gruenbacher 		container_of(w, struct drbd_device, unplug_work);
13969f5bdc33SAndreas Gruenbacher 
1397b411b363SPhilipp Reisner 	if (cancel)
139899920dc5SAndreas Gruenbacher 		return 0;
1399c51a0ef3SLars Ellenberg 	return pd_send_unplug_remote(first_peer_device(device));
1400b411b363SPhilipp Reisner }
1401b411b363SPhilipp Reisner 
1402bde89a9eSAndreas Gruenbacher static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
14034eb9b3cbSLars Ellenberg {
1404bde89a9eSAndreas Gruenbacher 	if (!connection->send.seen_any_write_yet) {
1405bde89a9eSAndreas Gruenbacher 		connection->send.seen_any_write_yet = true;
1406bde89a9eSAndreas Gruenbacher 		connection->send.current_epoch_nr = epoch;
1407bde89a9eSAndreas Gruenbacher 		connection->send.current_epoch_writes = 0;
140884d34f2fSLars Ellenberg 		connection->send.last_sent_barrier_jif = jiffies;
14094eb9b3cbSLars Ellenberg 	}
14104eb9b3cbSLars Ellenberg }
14114eb9b3cbSLars Ellenberg 
1412bde89a9eSAndreas Gruenbacher static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch)
14134eb9b3cbSLars Ellenberg {
14144eb9b3cbSLars Ellenberg 	/* re-init if first write on this connection */
1415bde89a9eSAndreas Gruenbacher 	if (!connection->send.seen_any_write_yet)
14164eb9b3cbSLars Ellenberg 		return;
1417bde89a9eSAndreas Gruenbacher 	if (connection->send.current_epoch_nr != epoch) {
1418bde89a9eSAndreas Gruenbacher 		if (connection->send.current_epoch_writes)
1419bde89a9eSAndreas Gruenbacher 			drbd_send_barrier(connection);
1420bde89a9eSAndreas Gruenbacher 		connection->send.current_epoch_nr = epoch;
14214eb9b3cbSLars Ellenberg 	}
14224eb9b3cbSLars Ellenberg }
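/* Together, re_init_if_first_write() and maybe_send_barrier() ensure that a
 * P_BARRIER separates the writes of consecutive epochs on the wire: a barrier
 * is sent only when the epoch number changes and the old epoch saw writes. */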
14234eb9b3cbSLars Ellenberg 
14248f7bed77SAndreas Gruenbacher int w_send_out_of_sync(struct drbd_work *w, int cancel)
142573a01a18SPhilipp Reisner {
142673a01a18SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
142784b8c06bSAndreas Gruenbacher 	struct drbd_device *device = req->device;
142844a4d551SLars Ellenberg 	struct drbd_peer_device *const peer_device = first_peer_device(device);
142944a4d551SLars Ellenberg 	struct drbd_connection *const connection = peer_device->connection;
143099920dc5SAndreas Gruenbacher 	int err;
143173a01a18SPhilipp Reisner 
143273a01a18SPhilipp Reisner 	if (unlikely(cancel)) {
1433ad878a0dSChristoph Böhmwalder 		req_mod(req, SEND_CANCELED, peer_device);
143499920dc5SAndreas Gruenbacher 		return 0;
143573a01a18SPhilipp Reisner 	}
1436e5f891b2SLars Ellenberg 	req->pre_send_jif = jiffies;
143773a01a18SPhilipp Reisner 
1438bde89a9eSAndreas Gruenbacher 	/* this time, no connection->send.current_epoch_writes++;
1439b6dd1a89SLars Ellenberg 	 * If it was sent, it was the closing barrier for the last
1440b6dd1a89SLars Ellenberg 	 * replicated epoch, before we went into AHEAD mode.
1441b6dd1a89SLars Ellenberg 	 * No more barriers will be sent, until we leave AHEAD mode again. */
1442bde89a9eSAndreas Gruenbacher 	maybe_send_barrier(connection, req->epoch);
1443b6dd1a89SLars Ellenberg 
144444a4d551SLars Ellenberg 	err = drbd_send_out_of_sync(peer_device, req);
1445ad878a0dSChristoph Böhmwalder 	req_mod(req, OOS_HANDED_TO_NETWORK, peer_device);
144673a01a18SPhilipp Reisner 
144799920dc5SAndreas Gruenbacher 	return err;
144873a01a18SPhilipp Reisner }
144973a01a18SPhilipp Reisner 
1450b411b363SPhilipp Reisner /**
1451b411b363SPhilipp Reisner  * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
1452b411b363SPhilipp Reisner  * @w:		work object.
1453b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyways
1454b411b363SPhilipp Reisner  */
145599920dc5SAndreas Gruenbacher int w_send_dblock(struct drbd_work *w, int cancel)
1456b411b363SPhilipp Reisner {
1457b411b363SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
145884b8c06bSAndreas Gruenbacher 	struct drbd_device *device = req->device;
145944a4d551SLars Ellenberg 	struct drbd_peer_device *const peer_device = first_peer_device(device);
146044a4d551SLars Ellenberg 	struct drbd_connection *connection = peer_device->connection;
1461c51a0ef3SLars Ellenberg 	bool do_send_unplug = req->rq_state & RQ_UNPLUG;
146299920dc5SAndreas Gruenbacher 	int err;
1463b411b363SPhilipp Reisner 
1464b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1465ad878a0dSChristoph Böhmwalder 		req_mod(req, SEND_CANCELED, peer_device);
146699920dc5SAndreas Gruenbacher 		return 0;
1467b411b363SPhilipp Reisner 	}
1468e5f891b2SLars Ellenberg 	req->pre_send_jif = jiffies;
1469b411b363SPhilipp Reisner 
1470bde89a9eSAndreas Gruenbacher 	re_init_if_first_write(connection, req->epoch);
1471bde89a9eSAndreas Gruenbacher 	maybe_send_barrier(connection, req->epoch);
1472bde89a9eSAndreas Gruenbacher 	connection->send.current_epoch_writes++;
1473b6dd1a89SLars Ellenberg 
147444a4d551SLars Ellenberg 	err = drbd_send_dblock(peer_device, req);
1475ad878a0dSChristoph Böhmwalder 	req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK, peer_device);
1476b411b363SPhilipp Reisner 
1477c51a0ef3SLars Ellenberg 	if (do_send_unplug && !err)
1478c51a0ef3SLars Ellenberg 		pd_send_unplug_remote(peer_device);
1479c51a0ef3SLars Ellenberg 
148099920dc5SAndreas Gruenbacher 	return err;
1481b411b363SPhilipp Reisner }
1482b411b363SPhilipp Reisner 
1483b411b363SPhilipp Reisner /**
1484b411b363SPhilipp Reisner  * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
1485b411b363SPhilipp Reisner  * @w:		work object.
1486b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyways
1487b411b363SPhilipp Reisner  */
148899920dc5SAndreas Gruenbacher int w_send_read_req(struct drbd_work *w, int cancel)
1489b411b363SPhilipp Reisner {
1490b411b363SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
149184b8c06bSAndreas Gruenbacher 	struct drbd_device *device = req->device;
149244a4d551SLars Ellenberg 	struct drbd_peer_device *const peer_device = first_peer_device(device);
149344a4d551SLars Ellenberg 	struct drbd_connection *connection = peer_device->connection;
1494c51a0ef3SLars Ellenberg 	bool do_send_unplug = req->rq_state & RQ_UNPLUG;
149599920dc5SAndreas Gruenbacher 	int err;
1496b411b363SPhilipp Reisner 
1497b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1498ad878a0dSChristoph Böhmwalder 		req_mod(req, SEND_CANCELED, peer_device);
149999920dc5SAndreas Gruenbacher 		return 0;
1500b411b363SPhilipp Reisner 	}
1501e5f891b2SLars Ellenberg 	req->pre_send_jif = jiffies;
1502b411b363SPhilipp Reisner 
1503b6dd1a89SLars Ellenberg 	/* Even read requests may close a write epoch,
1504b6dd1a89SLars Ellenberg 	 * if there was any yet. */
1505bde89a9eSAndreas Gruenbacher 	maybe_send_barrier(connection, req->epoch);
1506b6dd1a89SLars Ellenberg 
150744a4d551SLars Ellenberg 	err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size,
1508b411b363SPhilipp Reisner 				 (unsigned long)req);
1509b411b363SPhilipp Reisner 
1510ad878a0dSChristoph Böhmwalder 	req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK, peer_device);
1511b411b363SPhilipp Reisner 
1512c51a0ef3SLars Ellenberg 	if (do_send_unplug && !err)
1513c51a0ef3SLars Ellenberg 		pd_send_unplug_remote(peer_device);
1514c51a0ef3SLars Ellenberg 
151599920dc5SAndreas Gruenbacher 	return err;
1516b411b363SPhilipp Reisner }
1517b411b363SPhilipp Reisner 
151899920dc5SAndreas Gruenbacher int w_restart_disk_io(struct drbd_work *w, int cancel)
1519265be2d0SPhilipp Reisner {
1520265be2d0SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
152184b8c06bSAndreas Gruenbacher 	struct drbd_device *device = req->device;
1522265be2d0SPhilipp Reisner 
15230778286aSPhilipp Reisner 	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
15244dd726f0SLars Ellenberg 		drbd_al_begin_io(device, &req->i);
1525265be2d0SPhilipp Reisner 
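	/* Re-clone the master bio against our backing device and resubmit it;
	 * for writes, the activity log extent was (re)activated above. */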
1526abfc426dSChristoph Hellwig 	req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
1527abfc426dSChristoph Hellwig 					   req->master_bio, GFP_NOIO,
1528ae7153f1SChristoph Hellwig 					  &drbd_io_bio_set);
1529ae7153f1SChristoph Hellwig 	req->private_bio->bi_private = req;
1530ae7153f1SChristoph Hellwig 	req->private_bio->bi_end_io = drbd_request_endio;
1531ed00aabdSChristoph Hellwig 	submit_bio_noacct(req->private_bio);
1532265be2d0SPhilipp Reisner 
153399920dc5SAndreas Gruenbacher 	return 0;
1534265be2d0SPhilipp Reisner }
1535265be2d0SPhilipp Reisner 
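/* Walk the resync-after dependency chain: return 0 if any device we are
 * configured to resync after is itself syncing or paused, 1 otherwise. */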
1536b30ab791SAndreas Gruenbacher static int _drbd_may_sync_now(struct drbd_device *device)
1537b411b363SPhilipp Reisner {
1538b30ab791SAndreas Gruenbacher 	struct drbd_device *odev = device;
153995f8efd0SAndreas Gruenbacher 	int resync_after;
1540b411b363SPhilipp Reisner 
1541b411b363SPhilipp Reisner 	while (1) {
1542a3f8f7dcSLars Ellenberg 		if (!odev->ldev || odev->state.disk == D_DISKLESS)
1543438c8374SPhilipp Reisner 			return 1;
1544daeda1ccSPhilipp Reisner 		rcu_read_lock();
154595f8efd0SAndreas Gruenbacher 		resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
1546daeda1ccSPhilipp Reisner 		rcu_read_unlock();
154795f8efd0SAndreas Gruenbacher 		if (resync_after == -1)
1548b411b363SPhilipp Reisner 			return 1;
1549b30ab791SAndreas Gruenbacher 		odev = minor_to_device(resync_after);
1550a3f8f7dcSLars Ellenberg 		if (!odev)
1551841ce241SAndreas Gruenbacher 			return 1;
1552b411b363SPhilipp Reisner 		if ((odev->state.conn >= C_SYNC_SOURCE &&
1553b411b363SPhilipp Reisner 		     odev->state.conn <= C_PAUSED_SYNC_T) ||
1554b411b363SPhilipp Reisner 		    odev->state.aftr_isp || odev->state.peer_isp ||
1555b411b363SPhilipp Reisner 		    odev->state.user_isp)
1556b411b363SPhilipp Reisner 			return 0;
1557b411b363SPhilipp Reisner 	}
1558b411b363SPhilipp Reisner }
1559b411b363SPhilipp Reisner 
1560b411b363SPhilipp Reisner /**
156128bc3b8cSAndreas Gruenbacher  * drbd_pause_after() - Pause resync on all devices that may not resync now
1562b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
1563b411b363SPhilipp Reisner  *
1564b411b363SPhilipp Reisner  * Called from process context only (admin command and after_state_ch).
1565b411b363SPhilipp Reisner  */
156628bc3b8cSAndreas Gruenbacher static bool drbd_pause_after(struct drbd_device *device)
1567b411b363SPhilipp Reisner {
156828bc3b8cSAndreas Gruenbacher 	bool changed = false;
156954761697SAndreas Gruenbacher 	struct drbd_device *odev;
157028bc3b8cSAndreas Gruenbacher 	int i;
1571b411b363SPhilipp Reisner 
1572695d08faSPhilipp Reisner 	rcu_read_lock();
157305a10ec7SAndreas Gruenbacher 	idr_for_each_entry(&drbd_devices, odev, i) {
1574b411b363SPhilipp Reisner 		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1575b411b363SPhilipp Reisner 			continue;
157628bc3b8cSAndreas Gruenbacher 		if (!_drbd_may_sync_now(odev) &&
157728bc3b8cSAndreas Gruenbacher 		    _drbd_set_state(_NS(odev, aftr_isp, 1),
157828bc3b8cSAndreas Gruenbacher 				    CS_HARD, NULL) != SS_NOTHING_TO_DO)
157928bc3b8cSAndreas Gruenbacher 			changed = true;
1580b411b363SPhilipp Reisner 	}
1581695d08faSPhilipp Reisner 	rcu_read_unlock();
1582b411b363SPhilipp Reisner 
158328bc3b8cSAndreas Gruenbacher 	return changed;
1584b411b363SPhilipp Reisner }
1585b411b363SPhilipp Reisner 
1586b411b363SPhilipp Reisner /**
158728bc3b8cSAndreas Gruenbacher  * drbd_resume_next() - Resume resync on all devices that may resync now
1588b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
1589b411b363SPhilipp Reisner  *
1590b411b363SPhilipp Reisner  * Called from process context only (admin command and worker).
1591b411b363SPhilipp Reisner  */
159228bc3b8cSAndreas Gruenbacher static bool drbd_resume_next(struct drbd_device *device)
1593b411b363SPhilipp Reisner {
159428bc3b8cSAndreas Gruenbacher 	bool changed = false;
159554761697SAndreas Gruenbacher 	struct drbd_device *odev;
159628bc3b8cSAndreas Gruenbacher 	int i;
1597b411b363SPhilipp Reisner 
1598695d08faSPhilipp Reisner 	rcu_read_lock();
159905a10ec7SAndreas Gruenbacher 	idr_for_each_entry(&drbd_devices, odev, i) {
1600b411b363SPhilipp Reisner 		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1601b411b363SPhilipp Reisner 			continue;
1602b411b363SPhilipp Reisner 		if (odev->state.aftr_isp) {
160328bc3b8cSAndreas Gruenbacher 			if (_drbd_may_sync_now(odev) &&
160428bc3b8cSAndreas Gruenbacher 			    _drbd_set_state(_NS(odev, aftr_isp, 0),
160528bc3b8cSAndreas Gruenbacher 					    CS_HARD, NULL) != SS_NOTHING_TO_DO)
160628bc3b8cSAndreas Gruenbacher 				changed = true;
1607b411b363SPhilipp Reisner 		}
1608b411b363SPhilipp Reisner 	}
1609695d08faSPhilipp Reisner 	rcu_read_unlock();
161028bc3b8cSAndreas Gruenbacher 	return changed;
1611b411b363SPhilipp Reisner }
1612b411b363SPhilipp Reisner 
1613b30ab791SAndreas Gruenbacher void resume_next_sg(struct drbd_device *device)
1614b411b363SPhilipp Reisner {
161528bc3b8cSAndreas Gruenbacher 	lock_all_resources();
161628bc3b8cSAndreas Gruenbacher 	drbd_resume_next(device);
161728bc3b8cSAndreas Gruenbacher 	unlock_all_resources();
1618b411b363SPhilipp Reisner }
1619b411b363SPhilipp Reisner 
1620b30ab791SAndreas Gruenbacher void suspend_other_sg(struct drbd_device *device)
1621b411b363SPhilipp Reisner {
162228bc3b8cSAndreas Gruenbacher 	lock_all_resources();
162328bc3b8cSAndreas Gruenbacher 	drbd_pause_after(device);
162428bc3b8cSAndreas Gruenbacher 	unlock_all_resources();
1625b411b363SPhilipp Reisner }
1626b411b363SPhilipp Reisner 
162728bc3b8cSAndreas Gruenbacher /* caller must lock_all_resources() */
1628b30ab791SAndreas Gruenbacher enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor)
1629b411b363SPhilipp Reisner {
163054761697SAndreas Gruenbacher 	struct drbd_device *odev;
163195f8efd0SAndreas Gruenbacher 	int resync_after;
1632b411b363SPhilipp Reisner 
1633b411b363SPhilipp Reisner 	if (o_minor == -1)
1634b411b363SPhilipp Reisner 		return NO_ERROR;
1635a3f8f7dcSLars Ellenberg 	if (o_minor < -1 || o_minor > MINORMASK)
163695f8efd0SAndreas Gruenbacher 		return ERR_RESYNC_AFTER;
1637b411b363SPhilipp Reisner 
1638b411b363SPhilipp Reisner 	/* check for loops */
1639b30ab791SAndreas Gruenbacher 	odev = minor_to_device(o_minor);
1640b411b363SPhilipp Reisner 	while (1) {
1641b30ab791SAndreas Gruenbacher 		if (odev == device)
164295f8efd0SAndreas Gruenbacher 			return ERR_RESYNC_AFTER_CYCLE;
1643b411b363SPhilipp Reisner 
1644a3f8f7dcSLars Ellenberg 		/* You are free to depend on diskless, non-existing,
1645a3f8f7dcSLars Ellenberg 		 * or not yet/no longer existing minors.
1646a3f8f7dcSLars Ellenberg 		 * We only reject dependency loops.
1647a3f8f7dcSLars Ellenberg 		 * We cannot follow the dependency chain beyond a detached or
1648a3f8f7dcSLars Ellenberg 		 * missing minor.
1649a3f8f7dcSLars Ellenberg 		 */
1650a3f8f7dcSLars Ellenberg 		if (!odev || !odev->ldev || odev->state.disk == D_DISKLESS)
1651a3f8f7dcSLars Ellenberg 			return NO_ERROR;
1652a3f8f7dcSLars Ellenberg 
1653daeda1ccSPhilipp Reisner 		rcu_read_lock();
165495f8efd0SAndreas Gruenbacher 		resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
1655daeda1ccSPhilipp Reisner 		rcu_read_unlock();
1656b411b363SPhilipp Reisner 		/* dependency chain ends here, no cycles. */
165795f8efd0SAndreas Gruenbacher 		if (resync_after == -1)
1658b411b363SPhilipp Reisner 			return NO_ERROR;
1659b411b363SPhilipp Reisner 
1660b411b363SPhilipp Reisner 		/* follow the dependency chain */
1661b30ab791SAndreas Gruenbacher 		odev = minor_to_device(resync_after);
1662b411b363SPhilipp Reisner 	}
1663b411b363SPhilipp Reisner }
1664b411b363SPhilipp Reisner 
166528bc3b8cSAndreas Gruenbacher /* caller must lock_all_resources() */
1666b30ab791SAndreas Gruenbacher void drbd_resync_after_changed(struct drbd_device *device)
1667b411b363SPhilipp Reisner {
166828bc3b8cSAndreas Gruenbacher 	int changed;
1669b411b363SPhilipp Reisner 
1670b411b363SPhilipp Reisner 	do {
167128bc3b8cSAndreas Gruenbacher 		changed  = drbd_pause_after(device);
167228bc3b8cSAndreas Gruenbacher 		changed |= drbd_resume_next(device);
167328bc3b8cSAndreas Gruenbacher 	} while (changed);
1674b411b363SPhilipp Reisner }
1675b411b363SPhilipp Reisner 
1676*0d11f3cfSChristoph Böhmwalder void drbd_rs_controller_reset(struct drbd_peer_device *peer_device)
16779bd28d3cSLars Ellenberg {
1678*0d11f3cfSChristoph Böhmwalder 	struct drbd_device *device = peer_device->device;
16798c40c7c4SChristoph Hellwig 	struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
1680813472ceSPhilipp Reisner 	struct fifo_buffer *plan;
1681813472ceSPhilipp Reisner 
1682b30ab791SAndreas Gruenbacher 	atomic_set(&device->rs_sect_in, 0);
1683b30ab791SAndreas Gruenbacher 	atomic_set(&device->rs_sect_ev, 0);
1684b30ab791SAndreas Gruenbacher 	device->rs_in_flight = 0;
1685cb8432d6SChristoph Hellwig 	device->rs_last_events =
16868446fe92SChristoph Hellwig 		(int)part_stat_read_accum(disk->part0, sectors);
1687813472ceSPhilipp Reisner 
1688813472ceSPhilipp Reisner 	/* Updating the RCU protected object in place is necessary since
1689813472ceSPhilipp Reisner 	   this function gets called from atomic context.
1690813472ceSPhilipp Reisner 	   It is valid since all other updates also lead to a completely
1691813472ceSPhilipp Reisner 	   empty fifo */
1692813472ceSPhilipp Reisner 	rcu_read_lock();
1693b30ab791SAndreas Gruenbacher 	plan = rcu_dereference(device->rs_plan_s);
1694813472ceSPhilipp Reisner 	plan->total = 0;
1695813472ceSPhilipp Reisner 	fifo_set(plan, 0);
1696813472ceSPhilipp Reisner 	rcu_read_unlock();
16979bd28d3cSLars Ellenberg }
16989bd28d3cSLars Ellenberg 
16992bccef39SKees Cook void start_resync_timer_fn(struct timer_list *t)
17001f04af33SPhilipp Reisner {
17012bccef39SKees Cook 	struct drbd_device *device = from_timer(device, t, start_resync_timer);
1702ac0acb9eSLars Ellenberg 	drbd_device_post_work(device, RS_START);
17031f04af33SPhilipp Reisner }
17041f04af33SPhilipp Reisner 
1705ac0acb9eSLars Ellenberg static void do_start_resync(struct drbd_device *device)
17061f04af33SPhilipp Reisner {
1707b30ab791SAndreas Gruenbacher 	if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) {
1708ac0acb9eSLars Ellenberg 		drbd_warn(device, "postponing start_resync ...\n");
1709b30ab791SAndreas Gruenbacher 		device->start_resync_timer.expires = jiffies + HZ/10;
1710b30ab791SAndreas Gruenbacher 		add_timer(&device->start_resync_timer);
1711ac0acb9eSLars Ellenberg 		return;
17121f04af33SPhilipp Reisner 	}
17131f04af33SPhilipp Reisner 
1714b30ab791SAndreas Gruenbacher 	drbd_start_resync(device, C_SYNC_SOURCE);
1715b30ab791SAndreas Gruenbacher 	clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags);
17161f04af33SPhilipp Reisner }
17171f04af33SPhilipp Reisner 
1718aaaba345SLars Ellenberg static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device)
1719aaaba345SLars Ellenberg {
1720aaaba345SLars Ellenberg 	bool csums_after_crash_only;
1721aaaba345SLars Ellenberg 	rcu_read_lock();
1722aaaba345SLars Ellenberg 	csums_after_crash_only = rcu_dereference(connection->net_conf)->csums_after_crash_only;
1723aaaba345SLars Ellenberg 	rcu_read_unlock();
1724aaaba345SLars Ellenberg 	return connection->agreed_pro_version >= 89 &&		/* supported? */
1725aaaba345SLars Ellenberg 		connection->csums_tfm &&			/* configured? */
17267e5fec31SFabian Frederick 		(csums_after_crash_only == false		/* use for each resync? */
1727aaaba345SLars Ellenberg 		 || test_bit(CRASHED_PRIMARY, &device->flags));	/* or only after Primary crash? */
1728aaaba345SLars Ellenberg }
1729aaaba345SLars Ellenberg 
1730b411b363SPhilipp Reisner /**
1731b411b363SPhilipp Reisner  * drbd_start_resync() - Start the resync process
1732b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
1733b411b363SPhilipp Reisner  * @side:	Either C_SYNC_SOURCE or C_SYNC_TARGET
1734b411b363SPhilipp Reisner  *
1735b411b363SPhilipp Reisner  * This function might bring you directly into one of the
1736b411b363SPhilipp Reisner  * C_PAUSED_SYNC_* states.
1737b411b363SPhilipp Reisner  */
1738b30ab791SAndreas Gruenbacher void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1739b411b363SPhilipp Reisner {
174044a4d551SLars Ellenberg 	struct drbd_peer_device *peer_device = first_peer_device(device);
174144a4d551SLars Ellenberg 	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
1742b411b363SPhilipp Reisner 	union drbd_state ns;
1743b411b363SPhilipp Reisner 	int r;
1744b411b363SPhilipp Reisner 
1745b30ab791SAndreas Gruenbacher 	if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) {
1746d0180171SAndreas Gruenbacher 		drbd_err(device, "Resync already running!\n");
1747b411b363SPhilipp Reisner 		return;
1748b411b363SPhilipp Reisner 	}
1749b411b363SPhilipp Reisner 
1750d3d2948fSRoland Kammerer 	if (!connection) {
1751d3d2948fSRoland Kammerer 		drbd_err(device, "No connection to peer, aborting!\n");
1752d3d2948fSRoland Kammerer 		return;
1753d3d2948fSRoland Kammerer 	}
1754d3d2948fSRoland Kammerer 
1755b30ab791SAndreas Gruenbacher 	if (!test_bit(B_RS_H_DONE, &device->flags)) {
1756b411b363SPhilipp Reisner 		if (side == C_SYNC_TARGET) {
1757b411b363SPhilipp Reisner 			/* Since application IO was locked out during C_WF_BITMAP_T and
1758b411b363SPhilipp Reisner 			   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET,
1759b411b363SPhilipp Reisner 			   give the handler a chance to veto, since we are about to make the data inconsistent. */
1760b30ab791SAndreas Gruenbacher 			r = drbd_khelper(device, "before-resync-target");
1761b411b363SPhilipp Reisner 			r = (r >> 8) & 0xff;
1762b411b363SPhilipp Reisner 			if (r > 0) {
1763d0180171SAndreas Gruenbacher 				drbd_info(device, "before-resync-target handler returned %d, "
1764b411b363SPhilipp Reisner 					 "dropping connection.\n", r);
176544a4d551SLars Ellenberg 				conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
1766b411b363SPhilipp Reisner 				return;
1767b411b363SPhilipp Reisner 			}
176809b9e797SPhilipp Reisner 		} else /* C_SYNC_SOURCE */ {
1769b30ab791SAndreas Gruenbacher 			r = drbd_khelper(device, "before-resync-source");
177009b9e797SPhilipp Reisner 			r = (r >> 8) & 0xff;
177109b9e797SPhilipp Reisner 			if (r > 0) {
177209b9e797SPhilipp Reisner 				if (r == 3) {
1773d0180171SAndreas Gruenbacher 					drbd_info(device, "before-resync-source handler returned %d, "
177409b9e797SPhilipp Reisner 						 "ignoring. Old userland tools?", r);
177509b9e797SPhilipp Reisner 				} else {
1776d0180171SAndreas Gruenbacher 					drbd_info(device, "before-resync-source handler returned %d, "
177709b9e797SPhilipp Reisner 						 "dropping connection.\n", r);
177844a4d551SLars Ellenberg 					conn_request_state(connection,
1779a6b32bc3SAndreas Gruenbacher 							   NS(conn, C_DISCONNECTING), CS_HARD);
178009b9e797SPhilipp Reisner 					return;
178109b9e797SPhilipp Reisner 				}
178209b9e797SPhilipp Reisner 			}
1783b411b363SPhilipp Reisner 		}
1784e64a3294SPhilipp Reisner 	}
1785b411b363SPhilipp Reisner 
178644a4d551SLars Ellenberg 	if (current == connection->worker.task) {
1787dad20554SPhilipp Reisner 		/* The worker should not sleep waiting for state_mutex,
1788e64a3294SPhilipp Reisner 		   that can take long */
1789b30ab791SAndreas Gruenbacher 		if (!mutex_trylock(device->state_mutex)) {
1790b30ab791SAndreas Gruenbacher 			set_bit(B_RS_H_DONE, &device->flags);
1791b30ab791SAndreas Gruenbacher 			device->start_resync_timer.expires = jiffies + HZ/5;
1792b30ab791SAndreas Gruenbacher 			add_timer(&device->start_resync_timer);
1793e64a3294SPhilipp Reisner 			return;
1794e64a3294SPhilipp Reisner 		}
1795e64a3294SPhilipp Reisner 	} else {
1796b30ab791SAndreas Gruenbacher 		mutex_lock(device->state_mutex);
1797e64a3294SPhilipp Reisner 	}
1798b411b363SPhilipp Reisner 
179928bc3b8cSAndreas Gruenbacher 	lock_all_resources();
180028bc3b8cSAndreas Gruenbacher 	clear_bit(B_RS_H_DONE, &device->flags);
1801a700471bSPhilipp Reisner 	/* Did some connection breakage or IO error race with us? */
1802b30ab791SAndreas Gruenbacher 	if (device->state.conn < C_CONNECTED
1803b30ab791SAndreas Gruenbacher 	|| !get_ldev_if_state(device, D_NEGOTIATING)) {
180428bc3b8cSAndreas Gruenbacher 		unlock_all_resources();
180528bc3b8cSAndreas Gruenbacher 		goto out;
1806b411b363SPhilipp Reisner 	}
1807b411b363SPhilipp Reisner 
1808b30ab791SAndreas Gruenbacher 	ns = drbd_read_state(device);
1809b411b363SPhilipp Reisner 
1810b30ab791SAndreas Gruenbacher 	ns.aftr_isp = !_drbd_may_sync_now(device);
1811b411b363SPhilipp Reisner 
1812b411b363SPhilipp Reisner 	ns.conn = side;
1813b411b363SPhilipp Reisner 
1814b411b363SPhilipp Reisner 	if (side == C_SYNC_TARGET)
1815b411b363SPhilipp Reisner 		ns.disk = D_INCONSISTENT;
1816b411b363SPhilipp Reisner 	else /* side == C_SYNC_SOURCE */
1817b411b363SPhilipp Reisner 		ns.pdsk = D_INCONSISTENT;
1818b411b363SPhilipp Reisner 
181928bc3b8cSAndreas Gruenbacher 	r = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
1820b30ab791SAndreas Gruenbacher 	ns = drbd_read_state(device);
1821b411b363SPhilipp Reisner 
1822b411b363SPhilipp Reisner 	if (ns.conn < C_CONNECTED)
1823b411b363SPhilipp Reisner 		r = SS_UNKNOWN_ERROR;
1824b411b363SPhilipp Reisner 
1825b411b363SPhilipp Reisner 	if (r == SS_SUCCESS) {
1826b30ab791SAndreas Gruenbacher 		unsigned long tw = drbd_bm_total_weight(device);
18271d7734a0SLars Ellenberg 		unsigned long now = jiffies;
18281d7734a0SLars Ellenberg 		int i;
18291d7734a0SLars Ellenberg 
1830b30ab791SAndreas Gruenbacher 		device->rs_failed    = 0;
1831b30ab791SAndreas Gruenbacher 		device->rs_paused    = 0;
1832b30ab791SAndreas Gruenbacher 		device->rs_same_csum = 0;
1833b30ab791SAndreas Gruenbacher 		device->rs_last_sect_ev = 0;
1834b30ab791SAndreas Gruenbacher 		device->rs_total     = tw;
1835b30ab791SAndreas Gruenbacher 		device->rs_start     = now;
18361d7734a0SLars Ellenberg 		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1837b30ab791SAndreas Gruenbacher 			device->rs_mark_left[i] = tw;
1838b30ab791SAndreas Gruenbacher 			device->rs_mark_time[i] = now;
18391d7734a0SLars Ellenberg 		}
184028bc3b8cSAndreas Gruenbacher 		drbd_pause_after(device);
18415ab7d2c0SLars Ellenberg 		/* Forget potentially stale cached per resync extent bit-counts.
18425ab7d2c0SLars Ellenberg 		 * Open coded drbd_rs_cancel_all(device); we already have IRQs
18435ab7d2c0SLars Ellenberg 		 * disabled, and know the disk state is ok. */
18445ab7d2c0SLars Ellenberg 		spin_lock(&device->al_lock);
18455ab7d2c0SLars Ellenberg 		lc_reset(device->resync);
18465ab7d2c0SLars Ellenberg 		device->resync_locked = 0;
18475ab7d2c0SLars Ellenberg 		device->resync_wenr = LC_FREE;
18485ab7d2c0SLars Ellenberg 		spin_unlock(&device->al_lock);
1849b411b363SPhilipp Reisner 	}
185028bc3b8cSAndreas Gruenbacher 	unlock_all_resources();
18515a22db89SLars Ellenberg 
18526c922ed5SLars Ellenberg 	if (r == SS_SUCCESS) {
18535ab7d2c0SLars Ellenberg 		wake_up(&device->al_wait); /* for lc_reset() above */
1854328e0f12SPhilipp Reisner 		/* reset rs_last_bcast when a resync or verify is started,
1855328e0f12SPhilipp Reisner 		 * to deal with potential jiffies wrap. */
1856b30ab791SAndreas Gruenbacher 		device->rs_last_bcast = jiffies - HZ;
1857328e0f12SPhilipp Reisner 
1858d0180171SAndreas Gruenbacher 		drbd_info(device, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
18596c922ed5SLars Ellenberg 		     drbd_conn_str(ns.conn),
1860b30ab791SAndreas Gruenbacher 		     (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10),
1861b30ab791SAndreas Gruenbacher 		     (unsigned long) device->rs_total);
1862aaaba345SLars Ellenberg 		if (side == C_SYNC_TARGET) {
1863b30ab791SAndreas Gruenbacher 			device->bm_resync_fo = 0;
1864aaaba345SLars Ellenberg 			device->use_csums = use_checksum_based_resync(connection, device);
1865aaaba345SLars Ellenberg 		} else {
18667e5fec31SFabian Frederick 			device->use_csums = false;
1867aaaba345SLars Ellenberg 		}
18685a22db89SLars Ellenberg 
18695a22db89SLars Ellenberg 		/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
18705a22db89SLars Ellenberg 		 * with w_send_oos, or the sync target will get confused as to
18715a22db89SLars Ellenberg 		 * how much bits to resync.  We cannot do that always, because for an
18725a22db89SLars Ellenberg 		 * empty resync and protocol < 95, we need to do it here, as we call
18735a22db89SLars Ellenberg 		 * drbd_resync_finished from here in that case.
18745a22db89SLars Ellenberg 		 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
18755a22db89SLars Ellenberg 		 * and from after_state_ch otherwise. */
187644a4d551SLars Ellenberg 		if (side == C_SYNC_SOURCE && connection->agreed_pro_version < 96)
187744a4d551SLars Ellenberg 			drbd_gen_and_send_sync_uuid(peer_device);
1878b411b363SPhilipp Reisner 
187944a4d551SLars Ellenberg 		if (connection->agreed_pro_version < 95 && device->rs_total == 0) {
1880af85e8e8SLars Ellenberg 			/* This still has a race (about when exactly the peers
1881af85e8e8SLars Ellenberg 			 * detect connection loss) that can lead to a full sync
1882af85e8e8SLars Ellenberg 			 * on next handshake. In 8.3.9 we fixed this with explicit
1883af85e8e8SLars Ellenberg 			 * resync-finished notifications, but the fix
1884af85e8e8SLars Ellenberg 			 * introduced a protocol change.  Sleeping for some
1885af85e8e8SLars Ellenberg 			 * time longer than the ping interval + timeout on the
1886af85e8e8SLars Ellenberg 			 * SyncSource, to give the SyncTarget the chance to
1887af85e8e8SLars Ellenberg 			 * detect connection loss, then waiting for a ping
1888af85e8e8SLars Ellenberg 			 * response (implicit in drbd_resync_finished) reduces
1889af85e8e8SLars Ellenberg 			 * the race considerably, but does not solve it. */
189044ed167dSPhilipp Reisner 			if (side == C_SYNC_SOURCE) {
189144ed167dSPhilipp Reisner 				struct net_conf *nc;
189244ed167dSPhilipp Reisner 				int timeo;
189344ed167dSPhilipp Reisner 
189444ed167dSPhilipp Reisner 				rcu_read_lock();
189544a4d551SLars Ellenberg 				nc = rcu_dereference(connection->net_conf);
189644ed167dSPhilipp Reisner 				timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
189744ed167dSPhilipp Reisner 				rcu_read_unlock();
189844ed167dSPhilipp Reisner 				schedule_timeout_interruptible(timeo);
189944ed167dSPhilipp Reisner 			}
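			/* Worked example (illustrative numbers, not verified
			 * defaults): with ping_int = 10 (seconds) and
			 * ping_timeo = 5 (tenths of a second), the sleep above
			 * is 10*HZ + 5*HZ/9, about 10.56 seconds, just over
			 * ping_int + ping_timeo/10 = 10.5 seconds, so the
			 * peer's ping timeout should already have expired by
			 * the time we proceed. */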
1900*0d11f3cfSChristoph Böhmwalder 			drbd_resync_finished(peer_device);
1901b411b363SPhilipp Reisner 		}
1902b411b363SPhilipp Reisner 
1903*0d11f3cfSChristoph Böhmwalder 		drbd_rs_controller_reset(peer_device);
1904b30ab791SAndreas Gruenbacher 		/* ns.conn may already differ from device->state.conn:
1905b411b363SPhilipp Reisner 		 * we may have been paused in between, or may become paused until
1906b411b363SPhilipp Reisner 		 * the timer triggers.
1907b411b363SPhilipp Reisner 		 * No matter; that is handled in resync_timer_fn() */
1908b411b363SPhilipp Reisner 		if (ns.conn == C_SYNC_TARGET)
1909b30ab791SAndreas Gruenbacher 			mod_timer(&device->resync_timer, jiffies);
1910b411b363SPhilipp Reisner 
1911b30ab791SAndreas Gruenbacher 		drbd_md_sync(device);
1912b411b363SPhilipp Reisner 	}
1913b30ab791SAndreas Gruenbacher 	put_ldev(device);
191428bc3b8cSAndreas Gruenbacher out:
1915b30ab791SAndreas Gruenbacher 	mutex_unlock(device->state_mutex);
1916b411b363SPhilipp Reisner }
1917b411b363SPhilipp Reisner 
1918*0d11f3cfSChristoph Böhmwalder static void update_on_disk_bitmap(struct drbd_peer_device *peer_device, bool resync_done)
1919c7a58db4SLars Ellenberg {
1920*0d11f3cfSChristoph Böhmwalder 	struct drbd_device *device = peer_device->device;
1921c7a58db4SLars Ellenberg 	struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
1922c7a58db4SLars Ellenberg 	device->rs_last_bcast = jiffies;
1923c7a58db4SLars Ellenberg 
1924c7a58db4SLars Ellenberg 	if (!get_ldev(device))
1925c7a58db4SLars Ellenberg 		return;
1926c7a58db4SLars Ellenberg 
1927c7a58db4SLars Ellenberg 	drbd_bm_write_lazy(device, 0);
19285ab7d2c0SLars Ellenberg 	if (resync_done && is_sync_state(device->state.conn))
1929*0d11f3cfSChristoph Böhmwalder 		drbd_resync_finished(peer_device);
19305ab7d2c0SLars Ellenberg 
1931c7a58db4SLars Ellenberg 	drbd_bcast_event(device, &sib);
1932c7a58db4SLars Ellenberg 	/* update the timestamp, in case the bitmap writeout above took a while */
1933c7a58db4SLars Ellenberg 	device->rs_last_bcast = jiffies;
1934c7a58db4SLars Ellenberg 	put_ldev(device);
1935c7a58db4SLars Ellenberg }
1936c7a58db4SLars Ellenberg 
1937e334f550SLars Ellenberg static void drbd_ldev_destroy(struct drbd_device *device)
1938e334f550SLars Ellenberg {
1939e334f550SLars Ellenberg 	lc_destroy(device->resync);
1940e334f550SLars Ellenberg 	device->resync = NULL;
1941e334f550SLars Ellenberg 	lc_destroy(device->act_log);
1942e334f550SLars Ellenberg 	device->act_log = NULL;
1943d1b80853SAndreas Gruenbacher 
1944d1b80853SAndreas Gruenbacher 	__acquire(local);
194563a7c8adSLars Ellenberg 	drbd_backing_dev_free(device, device->ldev);
1946d1b80853SAndreas Gruenbacher 	device->ldev = NULL;
1947d1b80853SAndreas Gruenbacher 	__release(local);
1948d1b80853SAndreas Gruenbacher 
1949e334f550SLars Ellenberg 	clear_bit(GOING_DISKLESS, &device->flags);
1950e334f550SLars Ellenberg 	wake_up(&device->misc_wait);
1951e334f550SLars Ellenberg }
1952e334f550SLars Ellenberg 
1953e334f550SLars Ellenberg static void go_diskless(struct drbd_device *device)
1954e334f550SLars Ellenberg {
19558164dd6cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = first_peer_device(device);
1956e334f550SLars Ellenberg 	D_ASSERT(device, device->state.disk == D_FAILED);
1957e334f550SLars Ellenberg 	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
1958e334f550SLars Ellenberg 	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
1959e334f550SLars Ellenberg 	 * the protected members anymore, though, so once local_cnt reaches zero
1960e334f550SLars Ellenberg 	 * again (via put_ldev), it will be safe to free them. */
1961e334f550SLars Ellenberg 
1962e334f550SLars Ellenberg 	/* Try to write changed bitmap pages; read errors may have just
1963e334f550SLars Ellenberg 	 * set some bits outside the area covered by the activity log.
1964e334f550SLars Ellenberg 	 *
1965e334f550SLars Ellenberg 	 * If we have an IO error during the bitmap writeout,
1966e334f550SLars Ellenberg 	 * we will want a full sync next time, just in case.
1967e334f550SLars Ellenberg 	 * (Do we want a specific meta data flag for this?)
1968e334f550SLars Ellenberg 	 *
1969e334f550SLars Ellenberg 	 * If that does not make it to stable storage either,
1970e334f550SLars Ellenberg 	 * we cannot do anything about that anymore.
1971e334f550SLars Ellenberg 	 *
1972e334f550SLars Ellenberg 	 * We still need to check if both bitmap and ldev are present, we may
1973e334f550SLars Ellenberg 	 * end up here after a failed attach, before ldev was even assigned.
1974e334f550SLars Ellenberg 	 */
1975e334f550SLars Ellenberg 	if (device->bitmap && device->ldev) {
1976e334f550SLars Ellenberg 		/* An interrupted resync or similar is allowed to recount bits
1977e334f550SLars Ellenberg 		 * while we detach.
1978e334f550SLars Ellenberg 		 * Any modifications would not be expected anymore, though.
1979e334f550SLars Ellenberg 		 */
1980e334f550SLars Ellenberg 		if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
19818164dd6cSAndreas Gruenbacher 					"detach", BM_LOCKED_TEST_ALLOWED, peer_device)) {
1982e334f550SLars Ellenberg 			if (test_bit(WAS_READ_ERROR, &device->flags)) {
1983e334f550SLars Ellenberg 				drbd_md_set_flag(device, MDF_FULL_SYNC);
1984e334f550SLars Ellenberg 				drbd_md_sync(device);
1985e334f550SLars Ellenberg 			}
1986e334f550SLars Ellenberg 		}
1987e334f550SLars Ellenberg 	}
1988e334f550SLars Ellenberg 
1989e334f550SLars Ellenberg 	drbd_force_state(device, NS(disk, D_DISKLESS));
1990e334f550SLars Ellenberg }
1991e334f550SLars Ellenberg 
1992ac0acb9eSLars Ellenberg static int do_md_sync(struct drbd_device *device)
1993ac0acb9eSLars Ellenberg {
1994ac0acb9eSLars Ellenberg 	drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
1995ac0acb9eSLars Ellenberg 	drbd_md_sync(device);
1996ac0acb9eSLars Ellenberg 	return 0;
1997ac0acb9eSLars Ellenberg }
1998ac0acb9eSLars Ellenberg 
1999944410e9SLars Ellenberg /* only called from drbd_worker thread, no locking */
2000944410e9SLars Ellenberg void __update_timing_details(
2001944410e9SLars Ellenberg 		struct drbd_thread_timing_details *tdp,
2002944410e9SLars Ellenberg 		unsigned int *cb_nr,
2003944410e9SLars Ellenberg 		void *cb,
2004944410e9SLars Ellenberg 		const char *fn, const unsigned int line)
2005944410e9SLars Ellenberg {
2006944410e9SLars Ellenberg 	unsigned int i = *cb_nr % DRBD_THREAD_DETAILS_HIST;
2007944410e9SLars Ellenberg 	struct drbd_thread_timing_details *td = tdp + i;
2008944410e9SLars Ellenberg 
2009944410e9SLars Ellenberg 	td->start_jif = jiffies;
2010944410e9SLars Ellenberg 	td->cb_addr = cb;
2011944410e9SLars Ellenberg 	td->caller_fn = fn;
2012944410e9SLars Ellenberg 	td->line = line;
2013944410e9SLars Ellenberg 	td->cb_nr = *cb_nr;
2014944410e9SLars Ellenberg 
2015944410e9SLars Ellenberg 	i = (i+1) % DRBD_THREAD_DETAILS_HIST;
2016944410e9SLars Ellenberg 	td = tdp + i;
2017944410e9SLars Ellenberg 	memset(td, 0, sizeof(*td));
2018944410e9SLars Ellenberg 
2019944410e9SLars Ellenberg 	++(*cb_nr);
2020944410e9SLars Ellenberg }
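/* A minimal sketch (hypothetical, not part of DRBD) of how the history
 * above could be read back; the newest entry is at index
 * (*cb_nr - 1) % DRBD_THREAD_DETAILS_HIST, and the slot just past it is
 * kept zeroed as an end marker.  The real consumer is the debugfs code. */
#if 0
static void dump_timing_details(struct drbd_thread_timing_details *tdp)
{
	unsigned int i;

	for (i = 0; i < DRBD_THREAD_DETAILS_HIST; i++) {
		struct drbd_thread_timing_details *td = tdp + i;

		if (!td->cb_addr)	/* the zeroed end-marker slot */
			continue;
		pr_info("#%u %s:%u cb %ps start_jif %lu\n",
			td->cb_nr, td->caller_fn, td->line,
			td->cb_addr, td->start_jif);
	}
}
#endif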
2021944410e9SLars Ellenberg 
2022e334f550SLars Ellenberg static void do_device_work(struct drbd_device *device, const unsigned long todo)
2023e334f550SLars Ellenberg {
2024b47a06d1SAndreas Gruenbacher 	if (test_bit(MD_SYNC, &todo))
2025ac0acb9eSLars Ellenberg 		do_md_sync(device);
2026b47a06d1SAndreas Gruenbacher 	if (test_bit(RS_DONE, &todo) ||
2027b47a06d1SAndreas Gruenbacher 	    test_bit(RS_PROGRESS, &todo))
2028*0d11f3cfSChristoph Böhmwalder 		update_on_disk_bitmap(first_peer_device(device), test_bit(RS_DONE, &todo));
2029b47a06d1SAndreas Gruenbacher 	if (test_bit(GO_DISKLESS, &todo))
2030e334f550SLars Ellenberg 		go_diskless(device);
2031b47a06d1SAndreas Gruenbacher 	if (test_bit(DESTROY_DISK, &todo))
2032e334f550SLars Ellenberg 		drbd_ldev_destroy(device);
2033b47a06d1SAndreas Gruenbacher 	if (test_bit(RS_START, &todo))
2034ac0acb9eSLars Ellenberg 		do_start_resync(device);
2035e334f550SLars Ellenberg }
2036e334f550SLars Ellenberg 
2037e334f550SLars Ellenberg #define DRBD_DEVICE_WORK_MASK	\
2038e334f550SLars Ellenberg 	((1UL << GO_DISKLESS)	\
2039e334f550SLars Ellenberg 	|(1UL << DESTROY_DISK)	\
2040ac0acb9eSLars Ellenberg 	|(1UL << MD_SYNC)	\
2041ac0acb9eSLars Ellenberg 	|(1UL << RS_START)	\
2042e334f550SLars Ellenberg 	|(1UL << RS_PROGRESS)	\
2043e334f550SLars Ellenberg 	|(1UL << RS_DONE)	\
2044e334f550SLars Ellenberg 	)
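/* Producer side, roughly (see drbd_device_post_work() in drbd_int.h for
 * the real helper; this sketch may differ in detail):
 *
 *	set_bit(RS_DONE, &device->flags);
 *	set_bit(DEVICE_WORK_PENDING, &connection->flags);
 *	wake_up(&connection->sender_work.q_wait);
 *
 * The worker thread then collects the bits via get_work_bits() below. */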
2045e334f550SLars Ellenberg 
2046e334f550SLars Ellenberg static unsigned long get_work_bits(unsigned long *flags)
2047e334f550SLars Ellenberg {
2048e334f550SLars Ellenberg 	unsigned long old, new;
2049e334f550SLars Ellenberg 	do {
2050e334f550SLars Ellenberg 		old = *flags;
2051e334f550SLars Ellenberg 		new = old & ~DRBD_DEVICE_WORK_MASK;
2052e334f550SLars Ellenberg 	} while (cmpxchg(flags, old, new) != old);
2053e334f550SLars Ellenberg 	return old & DRBD_DEVICE_WORK_MASK;
2054e334f550SLars Ellenberg }
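/* get_work_bits() is an atomic fetch-and-clear of the work-mask bits:
 * a concurrent set_bit() either lands before the cmpxchg (and is
 * returned and cleared here) or after it (and survives in *flags for a
 * later call), so no posted work bit is lost.  A hypothetical
 * equivalent with the atomic_long API (sketch only; the flags word is
 * not declared atomic_long_t in DRBD):
 *
 *	old = atomic_long_fetch_and(~DRBD_DEVICE_WORK_MASK,
 *				    (atomic_long_t *)flags);
 *	return old & DRBD_DEVICE_WORK_MASK;
 */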
2055e334f550SLars Ellenberg 
2056e334f550SLars Ellenberg static void do_unqueued_work(struct drbd_connection *connection)
2057c7a58db4SLars Ellenberg {
2058c7a58db4SLars Ellenberg 	struct drbd_peer_device *peer_device;
2059c7a58db4SLars Ellenberg 	int vnr;
2060c7a58db4SLars Ellenberg 
2061c7a58db4SLars Ellenberg 	rcu_read_lock();
2062c7a58db4SLars Ellenberg 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2063c7a58db4SLars Ellenberg 		struct drbd_device *device = peer_device->device;
2064e334f550SLars Ellenberg 		unsigned long todo = get_work_bits(&device->flags);
2065e334f550SLars Ellenberg 		if (!todo)
2066c7a58db4SLars Ellenberg 			continue;
20675ab7d2c0SLars Ellenberg 
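		/* do_device_work() may block, so it must not run under
		 * rcu_read_lock(); the kref keeps the device alive across
		 * the unlocked section, and idr_for_each_entry() resumes
		 * from the next vnr after we re-acquire the read lock. */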
2068c7a58db4SLars Ellenberg 		kref_get(&device->kref);
2069c7a58db4SLars Ellenberg 		rcu_read_unlock();
2070e334f550SLars Ellenberg 		do_device_work(device, todo);
2071c7a58db4SLars Ellenberg 		kref_put(&device->kref, drbd_destroy_device);
2072c7a58db4SLars Ellenberg 		rcu_read_lock();
2073c7a58db4SLars Ellenberg 	}
2074c7a58db4SLars Ellenberg 	rcu_read_unlock();
2075c7a58db4SLars Ellenberg }
2076c7a58db4SLars Ellenberg 
2077a186e478SRashika Kheria static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
20788c0785a5SLars Ellenberg {
20798c0785a5SLars Ellenberg 	spin_lock_irq(&queue->q_lock);
208015e26f6aSLars Ellenberg 	list_splice_tail_init(&queue->q, work_list);
20818c0785a5SLars Ellenberg 	spin_unlock_irq(&queue->q_lock);
20828c0785a5SLars Ellenberg 	return !list_empty(work_list);
20838c0785a5SLars Ellenberg }
20848c0785a5SLars Ellenberg 
2085bde89a9eSAndreas Gruenbacher static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
2086b6dd1a89SLars Ellenberg {
2087b6dd1a89SLars Ellenberg 	DEFINE_WAIT(wait);
2088b6dd1a89SLars Ellenberg 	struct net_conf *nc;
2089b6dd1a89SLars Ellenberg 	int uncork, cork;
2090b6dd1a89SLars Ellenberg 
2091abde9cc6SLars Ellenberg 	dequeue_work_batch(&connection->sender_work, work_list);
2092b6dd1a89SLars Ellenberg 	if (!list_empty(work_list))
2093b6dd1a89SLars Ellenberg 		return;
2094b6dd1a89SLars Ellenberg 
2095b6dd1a89SLars Ellenberg 	/* Still nothing to do?
2096b6dd1a89SLars Ellenberg 	 * Maybe we still need to close the current epoch,
2097b6dd1a89SLars Ellenberg 	 * even if no new requests are queued yet.
2098b6dd1a89SLars Ellenberg 	 *
2099b6dd1a89SLars Ellenberg 	 * Also, poke TCP, just in case.
2100b6dd1a89SLars Ellenberg 	 * Then wait for new work (or signal). */
2101b6dd1a89SLars Ellenberg 	rcu_read_lock();
2102b6dd1a89SLars Ellenberg 	nc = rcu_dereference(connection->net_conf);
2103b6dd1a89SLars Ellenberg 	uncork = nc ? nc->tcp_cork : 0;
2104b6dd1a89SLars Ellenberg 	rcu_read_unlock();
2105b6dd1a89SLars Ellenberg 	if (uncork) {
2106b6dd1a89SLars Ellenberg 		mutex_lock(&connection->data.mutex);
2107b6dd1a89SLars Ellenberg 		if (connection->data.socket)
2108db10538aSChristoph Hellwig 			tcp_sock_set_cork(connection->data.socket->sk, false);
2109b6dd1a89SLars Ellenberg 		mutex_unlock(&connection->data.mutex);
2110b6dd1a89SLars Ellenberg 	}
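	/* Uncorking pokes TCP so that anything already queued is flushed
	 * to the peer before we potentially sleep below; the cork is
	 * re-applied (if still configured) after the wait, see the end of
	 * this function. */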
2111b6dd1a89SLars Ellenberg 
2112b6dd1a89SLars Ellenberg 	for (;;) {
2113b6dd1a89SLars Ellenberg 		int send_barrier;
2114b6dd1a89SLars Ellenberg 		prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
21150500813fSAndreas Gruenbacher 		spin_lock_irq(&connection->resource->req_lock);
2116b6dd1a89SLars Ellenberg 		spin_lock(&connection->sender_work.q_lock);	/* FIXME get rid of this one? */
2117bc317a9eSLars Ellenberg 		if (!list_empty(&connection->sender_work.q))
21184dd726f0SLars Ellenberg 			list_splice_tail_init(&connection->sender_work.q, work_list);
2119b6dd1a89SLars Ellenberg 		spin_unlock(&connection->sender_work.q_lock);	/* FIXME get rid of this one? */
2120b6dd1a89SLars Ellenberg 		if (!list_empty(work_list) || signal_pending(current)) {
21210500813fSAndreas Gruenbacher 			spin_unlock_irq(&connection->resource->req_lock);
2122b6dd1a89SLars Ellenberg 			break;
2123b6dd1a89SLars Ellenberg 		}
2124f9c78128SLars Ellenberg 
2125f9c78128SLars Ellenberg 		/* We found nothing new to do, no to-be-communicated request,
2126f9c78128SLars Ellenberg 		 * no other work item.  We may still need to close the last
2127f9c78128SLars Ellenberg 		 * epoch.  The next incoming request will be tagged with the
2128f9c78128SLars Ellenberg 		 * connection's current transfer log epoch number.  If that is different
2129f9c78128SLars Ellenberg 		 * from the epoch of the last request we communicated, it is
2130f9c78128SLars Ellenberg 		 * safe to send the epoch-separating barrier now.
2131f9c78128SLars Ellenberg 		 */
2132f9c78128SLars Ellenberg 		send_barrier =
2133f9c78128SLars Ellenberg 			atomic_read(&connection->current_tle_nr) !=
2134f9c78128SLars Ellenberg 			connection->send.current_epoch_nr;
21350500813fSAndreas Gruenbacher 		spin_unlock_irq(&connection->resource->req_lock);
2136f9c78128SLars Ellenberg 
2137f9c78128SLars Ellenberg 		if (send_barrier)
2138f9c78128SLars Ellenberg 			maybe_send_barrier(connection,
2139f9c78128SLars Ellenberg 					connection->send.current_epoch_nr + 1);
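		/* Illustration: if the last request we communicated belonged
		 * to epoch 7 (send.current_epoch_nr == 7) and the transfer
		 * log has meanwhile started epoch 8 (current_tle_nr == 8),
		 * maybe_send_barrier() sends the barrier closing epoch 7 (if
		 * it saw any writes) and advances send.current_epoch_nr. */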
21405ab7d2c0SLars Ellenberg 
2141e334f550SLars Ellenberg 		if (test_bit(DEVICE_WORK_PENDING, &connection->flags))
21425ab7d2c0SLars Ellenberg 			break;
21435ab7d2c0SLars Ellenberg 
2144a80ca1aeSLars Ellenberg 		/* drbd_send() may have called flush_signals() */
2145a80ca1aeSLars Ellenberg 		if (get_t_state(&connection->worker) != RUNNING)
2146a80ca1aeSLars Ellenberg 			break;
21475ab7d2c0SLars Ellenberg 
2148b6dd1a89SLars Ellenberg 		schedule();
2149b6dd1a89SLars Ellenberg 		/* We may be woken up for things other than new work, too,
2150b6dd1a89SLars Ellenberg 		 * e.g. if the current epoch got closed,
2151b6dd1a89SLars Ellenberg 		 * in which case we send the barrier above. */
2152b6dd1a89SLars Ellenberg 	}
2153b6dd1a89SLars Ellenberg 	finish_wait(&connection->sender_work.q_wait, &wait);
2154b6dd1a89SLars Ellenberg 
2155b6dd1a89SLars Ellenberg 	/* someone may have changed the config while we have been waiting above. */
2156b6dd1a89SLars Ellenberg 	rcu_read_lock();
2157b6dd1a89SLars Ellenberg 	nc = rcu_dereference(connection->net_conf);
2158b6dd1a89SLars Ellenberg 	cork = nc ? nc->tcp_cork : 0;
2159b6dd1a89SLars Ellenberg 	rcu_read_unlock();
2160b6dd1a89SLars Ellenberg 	mutex_lock(&connection->data.mutex);
2161b6dd1a89SLars Ellenberg 	if (connection->data.socket) {
2162b6dd1a89SLars Ellenberg 		if (cork)
2163db10538aSChristoph Hellwig 			tcp_sock_set_cork(connection->data.socket->sk, true);
2164b6dd1a89SLars Ellenberg 		else if (!uncork)
2165db10538aSChristoph Hellwig 			tcp_sock_set_cork(connection->data.socket->sk, false);
2166b6dd1a89SLars Ellenberg 	}
2167b6dd1a89SLars Ellenberg 	mutex_unlock(&connection->data.mutex);
2168b6dd1a89SLars Ellenberg }
2169b6dd1a89SLars Ellenberg 
2170b411b363SPhilipp Reisner int drbd_worker(struct drbd_thread *thi)
2171b411b363SPhilipp Reisner {
2172bde89a9eSAndreas Gruenbacher 	struct drbd_connection *connection = thi->connection;
21736db7e50aSAndreas Gruenbacher 	struct drbd_work *w = NULL;
2174c06ece6bSAndreas Gruenbacher 	struct drbd_peer_device *peer_device;
2175b411b363SPhilipp Reisner 	LIST_HEAD(work_list);
21768c0785a5SLars Ellenberg 	int vnr;
2177b411b363SPhilipp Reisner 
2178e77a0a5cSAndreas Gruenbacher 	while (get_t_state(thi) == RUNNING) {
217980822284SPhilipp Reisner 		drbd_thread_current_set_cpu(thi);
2180b411b363SPhilipp Reisner 
2181944410e9SLars Ellenberg 		if (list_empty(&work_list)) {
2182944410e9SLars Ellenberg 			update_worker_timing_details(connection, wait_for_work);
2183bde89a9eSAndreas Gruenbacher 			wait_for_work(connection, &work_list);
2184944410e9SLars Ellenberg 		}
2185b411b363SPhilipp Reisner 
2186944410e9SLars Ellenberg 		if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) {
2187944410e9SLars Ellenberg 			update_worker_timing_details(connection, do_unqueued_work);
2188e334f550SLars Ellenberg 			do_unqueued_work(connection);
2189944410e9SLars Ellenberg 		}
21905ab7d2c0SLars Ellenberg 
21918c0785a5SLars Ellenberg 		if (signal_pending(current)) {
2192b411b363SPhilipp Reisner 			flush_signals(current);
219319393e10SPhilipp Reisner 			if (get_t_state(thi) == RUNNING) {
21941ec861ebSAndreas Gruenbacher 				drbd_warn(connection, "Worker got an unexpected signal\n");
2195b411b363SPhilipp Reisner 				continue;
219619393e10SPhilipp Reisner 			}
2197b411b363SPhilipp Reisner 			break;
2198b411b363SPhilipp Reisner 		}
2199b411b363SPhilipp Reisner 
2200e77a0a5cSAndreas Gruenbacher 		if (get_t_state(thi) != RUNNING)
2201b411b363SPhilipp Reisner 			break;
2202b411b363SPhilipp Reisner 
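		/* Run one work item.  The callback sees cancel != 0 once the
		 * connection has dropped below C_WF_REPORT_PARAMS; a nonzero
		 * return value while still connected is treated as a network
		 * failure. */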
2203729e8b87SLars Ellenberg 		if (!list_empty(&work_list)) {
22046db7e50aSAndreas Gruenbacher 			w = list_first_entry(&work_list, struct drbd_work, list);
22056db7e50aSAndreas Gruenbacher 			list_del_init(&w->list);
2206944410e9SLars Ellenberg 			update_worker_timing_details(connection, w->cb);
22076db7e50aSAndreas Gruenbacher 			if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
22088c0785a5SLars Ellenberg 				continue;
2209bde89a9eSAndreas Gruenbacher 			if (connection->cstate >= C_WF_REPORT_PARAMS)
2210bde89a9eSAndreas Gruenbacher 				conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
2211b411b363SPhilipp Reisner 		}
2212b411b363SPhilipp Reisner 	}
2213b411b363SPhilipp Reisner 
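	/* Shutdown drain: run any remaining work items with cancel set and
	 * flush still-pending device work, so nothing is left queued when
	 * the thread exits. */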
22148c0785a5SLars Ellenberg 	do {
2215944410e9SLars Ellenberg 		if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) {
2216944410e9SLars Ellenberg 			update_worker_timing_details(connection, do_unqueued_work);
2217e334f550SLars Ellenberg 			do_unqueued_work(connection);
2218944410e9SLars Ellenberg 		}
2219729e8b87SLars Ellenberg 		if (!list_empty(&work_list)) {
22206db7e50aSAndreas Gruenbacher 			w = list_first_entry(&work_list, struct drbd_work, list);
22216db7e50aSAndreas Gruenbacher 			list_del_init(&w->list);
2222944410e9SLars Ellenberg 			update_worker_timing_details(connection, w->cb);
22236db7e50aSAndreas Gruenbacher 			w->cb(w, 1);
2224729e8b87SLars Ellenberg 		} else
2225bde89a9eSAndreas Gruenbacher 			dequeue_work_batch(&connection->sender_work, &work_list);
2226e334f550SLars Ellenberg 	} while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags));
2227b411b363SPhilipp Reisner 
2228c141ebdaSPhilipp Reisner 	rcu_read_lock();
2229c06ece6bSAndreas Gruenbacher 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2230c06ece6bSAndreas Gruenbacher 		struct drbd_device *device = peer_device->device;
22310b0ba1efSAndreas Gruenbacher 		D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
2232b30ab791SAndreas Gruenbacher 		kref_get(&device->kref);
2233c141ebdaSPhilipp Reisner 		rcu_read_unlock();
2234b30ab791SAndreas Gruenbacher 		drbd_device_cleanup(device);
223505a10ec7SAndreas Gruenbacher 		kref_put(&device->kref, drbd_destroy_device);
2236c141ebdaSPhilipp Reisner 		rcu_read_lock();
22370e29d163SPhilipp Reisner 	}
2238c141ebdaSPhilipp Reisner 	rcu_read_unlock();
2239b411b363SPhilipp Reisner 
2240b411b363SPhilipp Reisner 	return 0;
2241b411b363SPhilipp Reisner }
2242