xref: /openbmc/linux/drivers/block/drbd/drbd_worker.c (revision 8164dd6c)
193c68cc4SChristoph Böhmwalder // SPDX-License-Identifier: GPL-2.0-only
2b411b363SPhilipp Reisner /*
3b411b363SPhilipp Reisner    drbd_worker.c
4b411b363SPhilipp Reisner 
5b411b363SPhilipp Reisner    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6b411b363SPhilipp Reisner 
7b411b363SPhilipp Reisner    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
8b411b363SPhilipp Reisner    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9b411b363SPhilipp Reisner    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10b411b363SPhilipp Reisner 
11b411b363SPhilipp Reisner 
12b411b363SPhilipp Reisner */
13b411b363SPhilipp Reisner 
14b411b363SPhilipp Reisner #include <linux/module.h>
15b411b363SPhilipp Reisner #include <linux/drbd.h>
16174cd4b1SIngo Molnar #include <linux/sched/signal.h>
17b411b363SPhilipp Reisner #include <linux/wait.h>
18b411b363SPhilipp Reisner #include <linux/mm.h>
19b411b363SPhilipp Reisner #include <linux/memcontrol.h>
20b411b363SPhilipp Reisner #include <linux/mm_inline.h>
21b411b363SPhilipp Reisner #include <linux/slab.h>
22b411b363SPhilipp Reisner #include <linux/random.h>
23b411b363SPhilipp Reisner #include <linux/string.h>
24b411b363SPhilipp Reisner #include <linux/scatterlist.h>
25c6a564ffSChristoph Hellwig #include <linux/part_stat.h>
26b411b363SPhilipp Reisner 
27b411b363SPhilipp Reisner #include "drbd_int.h"
28a3603a6eSAndreas Gruenbacher #include "drbd_protocol.h"
29b411b363SPhilipp Reisner #include "drbd_req.h"
30b411b363SPhilipp Reisner 
31d448a2e1SAndreas Gruenbacher static int make_ov_request(struct drbd_device *, int);
32d448a2e1SAndreas Gruenbacher static int make_resync_request(struct drbd_device *, int);
33b411b363SPhilipp Reisner 
34c5a91619SAndreas Gruenbacher /* endio handlers:
35ed15b795SAndreas Gruenbacher  *   drbd_md_endio (defined here)
36fcefa62eSAndreas Gruenbacher  *   drbd_request_endio (defined here)
37fcefa62eSAndreas Gruenbacher  *   drbd_peer_request_endio (defined here)
38ed15b795SAndreas Gruenbacher  *   drbd_bm_endio (defined in drbd_bitmap.c)
39c5a91619SAndreas Gruenbacher  *
40b411b363SPhilipp Reisner  * For all these callbacks, note the following:
41b411b363SPhilipp Reisner  * The callbacks may be called in hard-irq context (e.g. by IDE drivers),
42b411b363SPhilipp Reisner  * or in softirq/tasklet/BH context (e.g. by SCSI drivers).
43b411b363SPhilipp Reisner  * Try to get the locking right :)
44b411b363SPhilipp Reisner  *
45b411b363SPhilipp Reisner  */
46b411b363SPhilipp Reisner 
47b411b363SPhilipp Reisner /* used for synchronous meta data and bitmap IO
48b411b363SPhilipp Reisner  * submitted by drbd_md_sync_page_io()
49b411b363SPhilipp Reisner  */
504246a0b6SChristoph Hellwig void drbd_md_endio(struct bio *bio)
51b411b363SPhilipp Reisner {
52b30ab791SAndreas Gruenbacher 	struct drbd_device *device;
53b411b363SPhilipp Reisner 
54e37d2438SLars Ellenberg 	device = bio->bi_private;
554e4cbee9SChristoph Hellwig 	device->md_io.error = blk_status_to_errno(bio->bi_status);
56b411b363SPhilipp Reisner 
577c752ed3SLars Ellenberg 	/* special case: drbd_md_read() during drbd_adm_attach() */
587c752ed3SLars Ellenberg 	if (device->ldev)
597c752ed3SLars Ellenberg 		put_ldev(device);
607c752ed3SLars Ellenberg 	bio_put(bio);
617c752ed3SLars Ellenberg 
620cfac5ddSPhilipp Reisner 	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
630cfac5ddSPhilipp Reisner 	 * to timeout on the lower level device, and eventually detach from it.
640cfac5ddSPhilipp Reisner 	 * If this io completion runs after that timeout expired, this
650cfac5ddSPhilipp Reisner 	 * drbd_md_put_buffer() may allow us to finally try and re-attach.
660cfac5ddSPhilipp Reisner 	 * During normal operation, this only puts that extra reference
670cfac5ddSPhilipp Reisner 	 * down to 1 again.
680cfac5ddSPhilipp Reisner 	 * Make sure we first drop the reference, and only then signal
690cfac5ddSPhilipp Reisner 	 * completion, or we may (in drbd_al_read_log()) cycle so fast into the
700cfac5ddSPhilipp Reisner 	 * next drbd_md_sync_page_io(), that we trigger the
71b30ab791SAndreas Gruenbacher 	 * ASSERT(atomic_read(&device->md_io_in_use) == 1) there.
720cfac5ddSPhilipp Reisner 	 */
73b30ab791SAndreas Gruenbacher 	drbd_md_put_buffer(device);
74e37d2438SLars Ellenberg 	device->md_io.done = 1;
75b30ab791SAndreas Gruenbacher 	wake_up(&device->misc_wait);
76b411b363SPhilipp Reisner }
77b411b363SPhilipp Reisner 
78b411b363SPhilipp Reisner /* reads on behalf of the partner,
79b411b363SPhilipp Reisner  * "submitted" by the receiver
80b411b363SPhilipp Reisner  */
81a186e478SRashika Kheria static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
82b411b363SPhilipp Reisner {
83b411b363SPhilipp Reisner 	unsigned long flags = 0;
846780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
856780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
86b411b363SPhilipp Reisner 
870500813fSAndreas Gruenbacher 	spin_lock_irqsave(&device->resource->req_lock, flags);
88b30ab791SAndreas Gruenbacher 	device->read_cnt += peer_req->i.size >> 9;
89a8cd15baSAndreas Gruenbacher 	list_del(&peer_req->w.list);
90b30ab791SAndreas Gruenbacher 	if (list_empty(&device->read_ee))
91b30ab791SAndreas Gruenbacher 		wake_up(&device->ee_wait);
92db830c46SAndreas Gruenbacher 	if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
93b30ab791SAndreas Gruenbacher 		__drbd_chk_io_error(device, DRBD_READ_ERROR);
940500813fSAndreas Gruenbacher 	spin_unlock_irqrestore(&device->resource->req_lock, flags);
95b411b363SPhilipp Reisner 
966780139cSAndreas Gruenbacher 	drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);
97b30ab791SAndreas Gruenbacher 	put_ldev(device);
98b411b363SPhilipp Reisner }
99b411b363SPhilipp Reisner 
100b411b363SPhilipp Reisner /* writes on behalf of the partner, or resync writes,
10145bb912bSLars Ellenberg  * "submitted" by the receiver, final stage.  */
102a0fb3c47SLars Ellenberg void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
103b411b363SPhilipp Reisner {
104b411b363SPhilipp Reisner 	unsigned long flags = 0;
1056780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
1066780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
107668700b4SPhilipp Reisner 	struct drbd_connection *connection = peer_device->connection;
108181286adSLars Ellenberg 	struct drbd_interval i;
109b411b363SPhilipp Reisner 	int do_wake;
110579b57edSAndreas Gruenbacher 	u64 block_id;
111b411b363SPhilipp Reisner 	int do_al_complete_io;
112b411b363SPhilipp Reisner 
113db830c46SAndreas Gruenbacher 	/* after we moved peer_req to done_ee,
114b411b363SPhilipp Reisner 	 * we may no longer access it,
115b411b363SPhilipp Reisner 	 * it may be freed/reused already!
116b411b363SPhilipp Reisner 	 * (as soon as we release the req_lock) */
117181286adSLars Ellenberg 	i = peer_req->i;
118db830c46SAndreas Gruenbacher 	do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
119db830c46SAndreas Gruenbacher 	block_id = peer_req->block_id;
12021ae5d7fSLars Ellenberg 	peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
121b411b363SPhilipp Reisner 
122e1fbc4caSLars Ellenberg 	if (peer_req->flags & EE_WAS_ERROR) {
123e1fbc4caSLars Ellenberg 		/* In protocols other than C we usually do not send write acks.
124e1fbc4caSLars Ellenberg 		 * In case of a write error, send the negative ack anyway. */
125e1fbc4caSLars Ellenberg 		if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags))
126e1fbc4caSLars Ellenberg 			inc_unacked(device);
127e1fbc4caSLars Ellenberg 		drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
128e1fbc4caSLars Ellenberg 	}
129e1fbc4caSLars Ellenberg 
1300500813fSAndreas Gruenbacher 	spin_lock_irqsave(&device->resource->req_lock, flags);
131b30ab791SAndreas Gruenbacher 	device->writ_cnt += peer_req->i.size >> 9;
132a8cd15baSAndreas Gruenbacher 	list_move_tail(&peer_req->w.list, &device->done_ee);
133b411b363SPhilipp Reisner 
134bb3bfe96SAndreas Gruenbacher 	/*
1355e472264SAndreas Gruenbacher 	 * Do not remove from the write_requests tree here: we did not send the
136bb3bfe96SAndreas Gruenbacher 	 * Ack yet and did not wake possibly waiting conflicting requests.
137bb3bfe96SAndreas Gruenbacher 	 * The entry is removed from the tree in "drbd_process_done_ee" within the
13884b8c06bSAndreas Gruenbacher 	 * appropriate dw.cb (e_end_block/e_end_resync_block) or in
139bb3bfe96SAndreas Gruenbacher 	 * _drbd_clear_done_ee.
140bb3bfe96SAndreas Gruenbacher 	 */
141b411b363SPhilipp Reisner 
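	/* resync writes are tracked on sync_ee, application writes on active_ee;
	 * waiters on device->ee_wait are woken further below once the
	 * respective list has drained. */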
142b30ab791SAndreas Gruenbacher 	do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);
143b411b363SPhilipp Reisner 
1449305455aSBart Van Assche 	/* FIXME do we want to detach for failed REQ_OP_DISCARD?
145f31e583aSLars Ellenberg 	 * ((peer_req->flags & (EE_WAS_ERROR|EE_TRIM)) == EE_WAS_ERROR) */
146a0fb3c47SLars Ellenberg 	if (peer_req->flags & EE_WAS_ERROR)
147b30ab791SAndreas Gruenbacher 		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
148668700b4SPhilipp Reisner 
149668700b4SPhilipp Reisner 	if (connection->cstate >= C_WF_REPORT_PARAMS) {
150668700b4SPhilipp Reisner 		kref_get(&device->kref); /* put is in drbd_send_acks_wf() */
151668700b4SPhilipp Reisner 		if (!queue_work(connection->ack_sender, &peer_device->send_acks_work))
152668700b4SPhilipp Reisner 			kref_put(&device->kref, drbd_destroy_device);
153668700b4SPhilipp Reisner 	}
1540500813fSAndreas Gruenbacher 	spin_unlock_irqrestore(&device->resource->req_lock, flags);
155b411b363SPhilipp Reisner 
156579b57edSAndreas Gruenbacher 	if (block_id == ID_SYNCER)
157b30ab791SAndreas Gruenbacher 		drbd_rs_complete_io(device, i.sector);
158b411b363SPhilipp Reisner 
159b411b363SPhilipp Reisner 	if (do_wake)
160b30ab791SAndreas Gruenbacher 		wake_up(&device->ee_wait);
161b411b363SPhilipp Reisner 
162b411b363SPhilipp Reisner 	if (do_al_complete_io)
163b30ab791SAndreas Gruenbacher 		drbd_al_complete_io(device, &i);
164b411b363SPhilipp Reisner 
165b30ab791SAndreas Gruenbacher 	put_ldev(device);
16645bb912bSLars Ellenberg }
167b411b363SPhilipp Reisner 
16845bb912bSLars Ellenberg /* writes on behalf of the partner, or resync writes,
16945bb912bSLars Ellenberg  * "submitted" by the receiver.
17045bb912bSLars Ellenberg  */
1714246a0b6SChristoph Hellwig void drbd_peer_request_endio(struct bio *bio)
17245bb912bSLars Ellenberg {
173db830c46SAndreas Gruenbacher 	struct drbd_peer_request *peer_req = bio->bi_private;
174a8cd15baSAndreas Gruenbacher 	struct drbd_device *device = peer_req->peer_device->device;
1757e5fec31SFabian Frederick 	bool is_write = bio_data_dir(bio) == WRITE;
17645c21793SChristoph Hellwig 	bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
17745c21793SChristoph Hellwig 			  bio_op(bio) == REQ_OP_DISCARD;
17845bb912bSLars Ellenberg 
179e3fa02d7SChristoph Böhmwalder 	if (bio->bi_status && drbd_ratelimit())
180d0180171SAndreas Gruenbacher 		drbd_warn(device, "%s: error=%d s=%llus\n",
181a0fb3c47SLars Ellenberg 				is_write ? (is_discard ? "discard" : "write")
1824e4cbee9SChristoph Hellwig 					: "read", bio->bi_status,
183db830c46SAndreas Gruenbacher 				(unsigned long long)peer_req->i.sector);
18445bb912bSLars Ellenberg 
1854e4cbee9SChristoph Hellwig 	if (bio->bi_status)
186db830c46SAndreas Gruenbacher 		set_bit(__EE_WAS_ERROR, &peer_req->flags);
18745bb912bSLars Ellenberg 
18845bb912bSLars Ellenberg 	bio_put(bio); /* no need for the bio anymore */
189db830c46SAndreas Gruenbacher 	if (atomic_dec_and_test(&peer_req->pending_bios)) {
19045bb912bSLars Ellenberg 		if (is_write)
191db830c46SAndreas Gruenbacher 			drbd_endio_write_sec_final(peer_req);
19245bb912bSLars Ellenberg 		else
193db830c46SAndreas Gruenbacher 			drbd_endio_read_sec_final(peer_req);
19445bb912bSLars Ellenberg 	}
195b411b363SPhilipp Reisner }
196b411b363SPhilipp Reisner 
1971ffa7bfaSBaoyou Xie static void
1981ffa7bfaSBaoyou Xie drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device)
199142207f7SLars Ellenberg {
200142207f7SLars Ellenberg 	panic("drbd%u %s/%u potential random memory corruption caused by delayed completion of aborted local request\n",
201142207f7SLars Ellenberg 		device->minor, device->resource->name, device->vnr);
202142207f7SLars Ellenberg }
203142207f7SLars Ellenberg 
204b411b363SPhilipp Reisner /* read, readA or write requests on R_PRIMARY coming from drbd_make_request
205b411b363SPhilipp Reisner  */
2064246a0b6SChristoph Hellwig void drbd_request_endio(struct bio *bio)
207b411b363SPhilipp Reisner {
208a115413dSLars Ellenberg 	unsigned long flags;
209b411b363SPhilipp Reisner 	struct drbd_request *req = bio->bi_private;
21084b8c06bSAndreas Gruenbacher 	struct drbd_device *device = req->device;
211a115413dSLars Ellenberg 	struct bio_and_error m;
212b411b363SPhilipp Reisner 	enum drbd_req_event what;
2131b6dd252SPhilipp Reisner 
2141b6dd252SPhilipp Reisner 	/* If this request was aborted locally before,
2151b6dd252SPhilipp Reisner 	 * but now was completed "successfully",
2161b6dd252SPhilipp Reisner 	 * chances are that this caused arbitrary data corruption.
2171b6dd252SPhilipp Reisner 	 *
2181b6dd252SPhilipp Reisner 	 * "aborting" requests, or force-detaching the disk, is intended for
2191b6dd252SPhilipp Reisner 	 * completely blocked/hung local backing devices which no longer
2201b6dd252SPhilipp Reisner 	 * complete requests at all, not even with error completions.  In this
2211b6dd252SPhilipp Reisner 	 * situation, usually a hard-reset and failover is the only way out.
2221b6dd252SPhilipp Reisner 	 *
2231b6dd252SPhilipp Reisner 	 * By "aborting", basically faking a local error-completion,
2241b6dd252SPhilipp Reisner 	 * we allow for a more graceful switchover by cleanly migrating services.
2251b6dd252SPhilipp Reisner 	 * Still the affected node has to be rebooted "soon".
2261b6dd252SPhilipp Reisner 	 *
2271b6dd252SPhilipp Reisner 	 * By completing these requests, we allow the upper layers to re-use
2281b6dd252SPhilipp Reisner 	 * the associated data pages.
2291b6dd252SPhilipp Reisner 	 *
2301b6dd252SPhilipp Reisner 	 * If later the local backing device "recovers", and now DMAs some data
2311b6dd252SPhilipp Reisner 	 * from disk into the original request pages, in the best case it will
2321b6dd252SPhilipp Reisner 	 * just put random data into unused pages; but typically it will corrupt
2331b6dd252SPhilipp Reisner 	 * completely unrelated data in the meantime, causing all sorts of damage.
2341b6dd252SPhilipp Reisner 	 *
2351b6dd252SPhilipp Reisner 	 * Which means delayed successful completion,
2361b6dd252SPhilipp Reisner 	 * especially for READ requests,
2371b6dd252SPhilipp Reisner 	 * is a reason to panic().
2381b6dd252SPhilipp Reisner 	 *
2391b6dd252SPhilipp Reisner 	 * We assume that a delayed *error* completion is OK,
2401b6dd252SPhilipp Reisner 	 * though we still will complain noisily about it.
2411b6dd252SPhilipp Reisner 	 */
2421b6dd252SPhilipp Reisner 	if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
243e3fa02d7SChristoph Böhmwalder 		if (drbd_ratelimit())
244d0180171SAndreas Gruenbacher 			drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
2451b6dd252SPhilipp Reisner 
2464e4cbee9SChristoph Hellwig 		if (!bio->bi_status)
247142207f7SLars Ellenberg 			drbd_panic_after_delayed_completion_of_aborted_request(device);
2481b6dd252SPhilipp Reisner 	}
2491b6dd252SPhilipp Reisner 
250b411b363SPhilipp Reisner 	/* to avoid recursion in __req_mod */
2514e4cbee9SChristoph Hellwig 	if (unlikely(bio->bi_status)) {
25270246286SChristoph Hellwig 		switch (bio_op(bio)) {
25345c21793SChristoph Hellwig 		case REQ_OP_WRITE_ZEROES:
25470246286SChristoph Hellwig 		case REQ_OP_DISCARD:
2554e4cbee9SChristoph Hellwig 			if (bio->bi_status == BLK_STS_NOTSUPP)
25670246286SChristoph Hellwig 				what = DISCARD_COMPLETED_NOTSUPP;
2572f632aebSLars Ellenberg 			else
25870246286SChristoph Hellwig 				what = DISCARD_COMPLETED_WITH_ERROR;
25970246286SChristoph Hellwig 			break;
26070246286SChristoph Hellwig 		case REQ_OP_READ:
2611eff9d32SJens Axboe 			if (bio->bi_opf & REQ_RAHEAD)
26270246286SChristoph Hellwig 				what = READ_AHEAD_COMPLETED_WITH_ERROR;
26370246286SChristoph Hellwig 			else
26470246286SChristoph Hellwig 				what = READ_COMPLETED_WITH_ERROR;
26570246286SChristoph Hellwig 			break;
26670246286SChristoph Hellwig 		default:
26770246286SChristoph Hellwig 			what = WRITE_COMPLETED_WITH_ERROR;
26870246286SChristoph Hellwig 			break;
26970246286SChristoph Hellwig 		}
27070246286SChristoph Hellwig 	} else {
2718554df1cSAndreas Gruenbacher 		what = COMPLETED_OK;
27270246286SChristoph Hellwig 	}
273b411b363SPhilipp Reisner 
2744e4cbee9SChristoph Hellwig 	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
27564dafbc9SLars Ellenberg 	bio_put(bio);
276b411b363SPhilipp Reisner 
277a115413dSLars Ellenberg 	/* not req_mod(), we need irqsave here! */
2780500813fSAndreas Gruenbacher 	spin_lock_irqsave(&device->resource->req_lock, flags);
279a115413dSLars Ellenberg 	__req_mod(req, what, &m);
2800500813fSAndreas Gruenbacher 	spin_unlock_irqrestore(&device->resource->req_lock, flags);
281b30ab791SAndreas Gruenbacher 	put_ldev(device);
282a115413dSLars Ellenberg 
283a115413dSLars Ellenberg 	if (m.bio)
284b30ab791SAndreas Gruenbacher 		complete_master_bio(device, &m);
285b411b363SPhilipp Reisner }
286b411b363SPhilipp Reisner 
2873d0e6375SKees Cook void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest)
28845bb912bSLars Ellenberg {
2893d0e6375SKees Cook 	SHASH_DESC_ON_STACK(desc, tfm);
290db830c46SAndreas Gruenbacher 	struct page *page = peer_req->pages;
29145bb912bSLars Ellenberg 	struct page *tmp;
29245bb912bSLars Ellenberg 	unsigned len;
2933d0e6375SKees Cook 	void *src;
29445bb912bSLars Ellenberg 
2953d0e6375SKees Cook 	desc->tfm = tfm;
29645bb912bSLars Ellenberg 
2973d0e6375SKees Cook 	crypto_shash_init(desc);
29845bb912bSLars Ellenberg 
2993d0e6375SKees Cook 	src = kmap_atomic(page);
30045bb912bSLars Ellenberg 	while ((tmp = page_chain_next(page))) {
30145bb912bSLars Ellenberg 		/* all but the last page will be fully used */
3023d0e6375SKees Cook 		crypto_shash_update(desc, src, PAGE_SIZE);
3033d0e6375SKees Cook 		kunmap_atomic(src);
30445bb912bSLars Ellenberg 		page = tmp;
3053d0e6375SKees Cook 		src = kmap_atomic(page);
30645bb912bSLars Ellenberg 	}
30745bb912bSLars Ellenberg 	/* and now the last, possibly only partially used page */
308db830c46SAndreas Gruenbacher 	len = peer_req->i.size & (PAGE_SIZE - 1);
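	/* len == 0 means i.size is a multiple of PAGE_SIZE, so this last page is in fact full */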
3093d0e6375SKees Cook 	crypto_shash_update(desc, src, len ?: PAGE_SIZE);
3103d0e6375SKees Cook 	kunmap_atomic(src);
3113d0e6375SKees Cook 
3123d0e6375SKees Cook 	crypto_shash_final(desc, digest);
3133d0e6375SKees Cook 	shash_desc_zero(desc);
31445bb912bSLars Ellenberg }
31545bb912bSLars Ellenberg 
3163d0e6375SKees Cook void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
317b411b363SPhilipp Reisner {
3183d0e6375SKees Cook 	SHASH_DESC_ON_STACK(desc, tfm);
3197988613bSKent Overstreet 	struct bio_vec bvec;
3207988613bSKent Overstreet 	struct bvec_iter iter;
321b411b363SPhilipp Reisner 
3223d0e6375SKees Cook 	desc->tfm = tfm;
323b411b363SPhilipp Reisner 
3243d0e6375SKees Cook 	crypto_shash_init(desc);
325b411b363SPhilipp Reisner 
3267988613bSKent Overstreet 	bio_for_each_segment(bvec, bio, iter) {
3273d0e6375SKees Cook 		u8 *src;
3283d0e6375SKees Cook 
32947227850SChristoph Hellwig 		src = bvec_kmap_local(&bvec);
33047227850SChristoph Hellwig 		crypto_shash_update(desc, src, bvec.bv_len);
33147227850SChristoph Hellwig 		kunmap_local(src);
332b411b363SPhilipp Reisner 	}
3333d0e6375SKees Cook 	crypto_shash_final(desc, digest);
3343d0e6375SKees Cook 	shash_desc_zero(desc);
335b411b363SPhilipp Reisner }
336b411b363SPhilipp Reisner 
3379676c760SLars Ellenberg /* MAYBE merge common code with w_e_end_ov_req */
33899920dc5SAndreas Gruenbacher static int w_e_send_csum(struct drbd_work *w, int cancel)
339b411b363SPhilipp Reisner {
340a8cd15baSAndreas Gruenbacher 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
3416780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
3426780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
343b411b363SPhilipp Reisner 	int digest_size;
344b411b363SPhilipp Reisner 	void *digest;
34599920dc5SAndreas Gruenbacher 	int err = 0;
346b411b363SPhilipp Reisner 
34753ea4331SLars Ellenberg 	if (unlikely(cancel))
34853ea4331SLars Ellenberg 		goto out;
349b411b363SPhilipp Reisner 
3509676c760SLars Ellenberg 	if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
35153ea4331SLars Ellenberg 		goto out;
35253ea4331SLars Ellenberg 
3533d0e6375SKees Cook 	digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
354b411b363SPhilipp Reisner 	digest = kmalloc(digest_size, GFP_NOIO);
355b411b363SPhilipp Reisner 	if (digest) {
356db830c46SAndreas Gruenbacher 		sector_t sector = peer_req->i.sector;
357db830c46SAndreas Gruenbacher 		unsigned int size = peer_req->i.size;
3586780139cSAndreas Gruenbacher 		drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
3599676c760SLars Ellenberg 		/* Free peer_req and pages before send.
36053ea4331SLars Ellenberg 		 * In case we block on congestion, we could otherwise run into
36153ea4331SLars Ellenberg 		 * some distributed deadlock, if the other side blocks on
36253ea4331SLars Ellenberg 		 * congestion as well, because our receiver blocks in
363c37c8ecfSAndreas Gruenbacher 		 * drbd_alloc_pages due to pp_in_use > max_buffers. */
364b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
365db830c46SAndreas Gruenbacher 		peer_req = NULL;
366b30ab791SAndreas Gruenbacher 		inc_rs_pending(device);
3676780139cSAndreas Gruenbacher 		err = drbd_send_drequest_csum(peer_device, sector, size,
36853ea4331SLars Ellenberg 					      digest, digest_size,
369b411b363SPhilipp Reisner 					      P_CSUM_RS_REQUEST);
370b411b363SPhilipp Reisner 		kfree(digest);
371b411b363SPhilipp Reisner 	} else {
372d0180171SAndreas Gruenbacher 		drbd_err(device, "kmalloc() of digest failed.\n");
37399920dc5SAndreas Gruenbacher 		err = -ENOMEM;
374b411b363SPhilipp Reisner 	}
375b411b363SPhilipp Reisner 
37653ea4331SLars Ellenberg out:
377db830c46SAndreas Gruenbacher 	if (peer_req)
378b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
379b411b363SPhilipp Reisner 
38099920dc5SAndreas Gruenbacher 	if (unlikely(err))
381d0180171SAndreas Gruenbacher 		drbd_err(device, "drbd_send_drequest(..., csum) failed\n");
38299920dc5SAndreas Gruenbacher 	return err;
383b411b363SPhilipp Reisner }
384b411b363SPhilipp Reisner 
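/* Allocation flags for opportunistic resync buffers: highmem pages are fine,
 * and an allocation failure is not worth a warning -- callers just retry later. */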
385b411b363SPhilipp Reisner #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
386b411b363SPhilipp Reisner 
38769a22773SAndreas Gruenbacher static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size)
388b411b363SPhilipp Reisner {
38969a22773SAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
390db830c46SAndreas Gruenbacher 	struct drbd_peer_request *peer_req;
391b411b363SPhilipp Reisner 
392b30ab791SAndreas Gruenbacher 	if (!get_ldev(device))
39380a40e43SLars Ellenberg 		return -EIO;
394b411b363SPhilipp Reisner 
395b411b363SPhilipp Reisner 	/* GFP_TRY, because if there is no memory available right now, this may
396b411b363SPhilipp Reisner 	 * be rescheduled for later. It is "only" background resync, after all. */
39769a22773SAndreas Gruenbacher 	peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,
3989104d31aSLars Ellenberg 				       size, size, GFP_TRY);
399db830c46SAndreas Gruenbacher 	if (!peer_req)
40080a40e43SLars Ellenberg 		goto defer;
401b411b363SPhilipp Reisner 
402a8cd15baSAndreas Gruenbacher 	peer_req->w.cb = w_e_send_csum;
403ce668b6dSChristoph Böhmwalder 	peer_req->opf = REQ_OP_READ;
4040500813fSAndreas Gruenbacher 	spin_lock_irq(&device->resource->req_lock);
405b9ed7080SLars Ellenberg 	list_add_tail(&peer_req->w.list, &device->read_ee);
4060500813fSAndreas Gruenbacher 	spin_unlock_irq(&device->resource->req_lock);
407b411b363SPhilipp Reisner 
408b30ab791SAndreas Gruenbacher 	atomic_add(size >> 9, &device->rs_sect_ev);
409ce668b6dSChristoph Böhmwalder 	if (drbd_submit_peer_request(peer_req) == 0)
41080a40e43SLars Ellenberg 		return 0;
41145bb912bSLars Ellenberg 
41210f6d992SLars Ellenberg 	/* If it failed because of ENOMEM, retry should help.  If it failed
41310f6d992SLars Ellenberg 	 * because bio_add_page failed (probably broken lower level driver),
41410f6d992SLars Ellenberg 	 * retry may or may not help.
41510f6d992SLars Ellenberg 	 * If it does not, you may need to force disconnect. */
4160500813fSAndreas Gruenbacher 	spin_lock_irq(&device->resource->req_lock);
417a8cd15baSAndreas Gruenbacher 	list_del(&peer_req->w.list);
4180500813fSAndreas Gruenbacher 	spin_unlock_irq(&device->resource->req_lock);
41922cc37a9SLars Ellenberg 
420b30ab791SAndreas Gruenbacher 	drbd_free_peer_req(device, peer_req);
42180a40e43SLars Ellenberg defer:
422b30ab791SAndreas Gruenbacher 	put_ldev(device);
42380a40e43SLars Ellenberg 	return -EAGAIN;
424b411b363SPhilipp Reisner }
425b411b363SPhilipp Reisner 
42699920dc5SAndreas Gruenbacher int w_resync_timer(struct drbd_work *w, int cancel)
427794abb75SPhilipp Reisner {
42884b8c06bSAndreas Gruenbacher 	struct drbd_device *device =
42984b8c06bSAndreas Gruenbacher 		container_of(w, struct drbd_device, resync_work);
43084b8c06bSAndreas Gruenbacher 
431b30ab791SAndreas Gruenbacher 	switch (device->state.conn) {
432794abb75SPhilipp Reisner 	case C_VERIFY_S:
433d448a2e1SAndreas Gruenbacher 		make_ov_request(device, cancel);
434794abb75SPhilipp Reisner 		break;
435794abb75SPhilipp Reisner 	case C_SYNC_TARGET:
436d448a2e1SAndreas Gruenbacher 		make_resync_request(device, cancel);
437794abb75SPhilipp Reisner 		break;
438794abb75SPhilipp Reisner 	}
439794abb75SPhilipp Reisner 
44099920dc5SAndreas Gruenbacher 	return 0;
441794abb75SPhilipp Reisner }
442794abb75SPhilipp Reisner 
4432bccef39SKees Cook void resync_timer_fn(struct timer_list *t)
444b411b363SPhilipp Reisner {
4452bccef39SKees Cook 	struct drbd_device *device = from_timer(device, t, resync_timer);
446b411b363SPhilipp Reisner 
44715e26f6aSLars Ellenberg 	drbd_queue_work_if_unqueued(
44815e26f6aSLars Ellenberg 		&first_peer_device(device)->connection->sender_work,
44984b8c06bSAndreas Gruenbacher 		&device->resync_work);
450b411b363SPhilipp Reisner }
451b411b363SPhilipp Reisner 
452778f271dSPhilipp Reisner static void fifo_set(struct fifo_buffer *fb, int value)
453778f271dSPhilipp Reisner {
454778f271dSPhilipp Reisner 	int i;
455778f271dSPhilipp Reisner 
456778f271dSPhilipp Reisner 	for (i = 0; i < fb->size; i++)
457f10f2623SPhilipp Reisner 		fb->values[i] = value;
458778f271dSPhilipp Reisner }
459778f271dSPhilipp Reisner 
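/* Push @value into the ring buffer; the oldest entry is overwritten and returned. */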
460778f271dSPhilipp Reisner static int fifo_push(struct fifo_buffer *fb, int value)
461778f271dSPhilipp Reisner {
462778f271dSPhilipp Reisner 	int ov;
463778f271dSPhilipp Reisner 
464778f271dSPhilipp Reisner 	ov = fb->values[fb->head_index];
465778f271dSPhilipp Reisner 	fb->values[fb->head_index++] = value;
466778f271dSPhilipp Reisner 
467778f271dSPhilipp Reisner 	if (fb->head_index >= fb->size)
468778f271dSPhilipp Reisner 		fb->head_index = 0;
469778f271dSPhilipp Reisner 
470778f271dSPhilipp Reisner 	return ov;
471778f271dSPhilipp Reisner }
472778f271dSPhilipp Reisner 
473778f271dSPhilipp Reisner static void fifo_add_val(struct fifo_buffer *fb, int value)
474778f271dSPhilipp Reisner {
475778f271dSPhilipp Reisner 	int i;
476778f271dSPhilipp Reisner 
477778f271dSPhilipp Reisner 	for (i = 0; i < fb->size; i++)
478778f271dSPhilipp Reisner 		fb->values[i] += value;
479778f271dSPhilipp Reisner }
480778f271dSPhilipp Reisner 
4816a365874SStephen Kitt struct fifo_buffer *fifo_alloc(unsigned int fifo_size)
4829958c857SPhilipp Reisner {
4839958c857SPhilipp Reisner 	struct fifo_buffer *fb;
4849958c857SPhilipp Reisner 
4856a365874SStephen Kitt 	fb = kzalloc(struct_size(fb, values, fifo_size), GFP_NOIO);
4869958c857SPhilipp Reisner 	if (!fb)
4879958c857SPhilipp Reisner 		return NULL;
4889958c857SPhilipp Reisner 
4899958c857SPhilipp Reisner 	fb->head_index = 0;
4909958c857SPhilipp Reisner 	fb->size = fifo_size;
4919958c857SPhilipp Reisner 	fb->total = 0;
4929958c857SPhilipp Reisner 
4939958c857SPhilipp Reisner 	return fb;
4949958c857SPhilipp Reisner }
4959958c857SPhilipp Reisner 
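/* Dynamic resync rate controller, called once per SLEEP_TIME interval with the
 * number of sectors whose resync completed during that interval (sect_in).
 * It estimates how many sectors we want in flight ("want": c_fill_target if
 * configured, otherwise derived from the recent completion rate so that the
 * request delay stays near c_delay_target), spreads the shortfall
 * (want - in flight - already planned) over the next "steps" intervals via the
 * plan fifo, and returns the number of sectors to request now, clamped to at
 * most c_max_rate worth of sectors per interval.
 * E.g. with c_fill_target = 1000 sectors, 200 sectors in flight and an empty
 * plan, the 800 sector shortfall is distributed over the planning window. */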
4960e49d7b0SLars Ellenberg static int drbd_rs_controller(struct drbd_device *device, unsigned int sect_in)
497778f271dSPhilipp Reisner {
498daeda1ccSPhilipp Reisner 	struct disk_conf *dc;
4997f34f614SLars Ellenberg 	unsigned int want;     /* The number of sectors we want in-flight */
500778f271dSPhilipp Reisner 	int req_sect; /* Number of sectors to request in this turn */
5017f34f614SLars Ellenberg 	int correction; /* Number of sectors more we need in-flight */
502778f271dSPhilipp Reisner 	int cps; /* correction per invocation of drbd_rs_controller() */
503778f271dSPhilipp Reisner 	int steps; /* Number of time steps to plan ahead */
504778f271dSPhilipp Reisner 	int curr_corr;
505778f271dSPhilipp Reisner 	int max_sect;
506813472ceSPhilipp Reisner 	struct fifo_buffer *plan;
507778f271dSPhilipp Reisner 
508b30ab791SAndreas Gruenbacher 	dc = rcu_dereference(device->ldev->disk_conf);
509b30ab791SAndreas Gruenbacher 	plan = rcu_dereference(device->rs_plan_s);
510778f271dSPhilipp Reisner 
511813472ceSPhilipp Reisner 	steps = plan->size; /* (dc->c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
512778f271dSPhilipp Reisner 
513b30ab791SAndreas Gruenbacher 	if (device->rs_in_flight + sect_in == 0) { /* At start of resync */
514daeda1ccSPhilipp Reisner 		want = ((dc->resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
515778f271dSPhilipp Reisner 	} else { /* normal path */
516daeda1ccSPhilipp Reisner 		want = dc->c_fill_target ? dc->c_fill_target :
517daeda1ccSPhilipp Reisner 			sect_in * dc->c_delay_target * HZ / (SLEEP_TIME * 10);
518778f271dSPhilipp Reisner 	}
519778f271dSPhilipp Reisner 
520b30ab791SAndreas Gruenbacher 	correction = want - device->rs_in_flight - plan->total;
521778f271dSPhilipp Reisner 
522778f271dSPhilipp Reisner 	/* Plan ahead */
523778f271dSPhilipp Reisner 	cps = correction / steps;
524813472ceSPhilipp Reisner 	fifo_add_val(plan, cps);
525813472ceSPhilipp Reisner 	plan->total += cps * steps;
526778f271dSPhilipp Reisner 
527778f271dSPhilipp Reisner 	/* What we do in this step */
528813472ceSPhilipp Reisner 	curr_corr = fifo_push(plan, 0);
529813472ceSPhilipp Reisner 	plan->total -= curr_corr;
530778f271dSPhilipp Reisner 
531778f271dSPhilipp Reisner 	req_sect = sect_in + curr_corr;
532778f271dSPhilipp Reisner 	if (req_sect < 0)
533778f271dSPhilipp Reisner 		req_sect = 0;
534778f271dSPhilipp Reisner 
535daeda1ccSPhilipp Reisner 	max_sect = (dc->c_max_rate * 2 * SLEEP_TIME) / HZ;
536778f271dSPhilipp Reisner 	if (req_sect > max_sect)
537778f271dSPhilipp Reisner 		req_sect = max_sect;
538778f271dSPhilipp Reisner 
539778f271dSPhilipp Reisner 	/*
540d0180171SAndreas Gruenbacher 	drbd_warn(device, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
541b30ab791SAndreas Gruenbacher 		 sect_in, device->rs_in_flight, want, correction,
542b30ab791SAndreas Gruenbacher 		 steps, cps, device->rs_planed, curr_corr, req_sect);
543778f271dSPhilipp Reisner 	*/
544778f271dSPhilipp Reisner 
545778f271dSPhilipp Reisner 	return req_sect;
546778f271dSPhilipp Reisner }
547778f271dSPhilipp Reisner 
548b30ab791SAndreas Gruenbacher static int drbd_rs_number_requests(struct drbd_device *device)
549e65f440dSLars Ellenberg {
5500e49d7b0SLars Ellenberg 	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
5510e49d7b0SLars Ellenberg 	int number, mxb;
5520e49d7b0SLars Ellenberg 
5530e49d7b0SLars Ellenberg 	sect_in = atomic_xchg(&device->rs_sect_in, 0);
5540e49d7b0SLars Ellenberg 	device->rs_in_flight -= sect_in;
555813472ceSPhilipp Reisner 
556813472ceSPhilipp Reisner 	rcu_read_lock();
5570e49d7b0SLars Ellenberg 	mxb = drbd_get_max_buffers(device) / 2;
558b30ab791SAndreas Gruenbacher 	if (rcu_dereference(device->rs_plan_s)->size) {
5590e49d7b0SLars Ellenberg 		number = drbd_rs_controller(device, sect_in) >> (BM_BLOCK_SHIFT - 9);
560b30ab791SAndreas Gruenbacher 		device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
561e65f440dSLars Ellenberg 	} else {
562b30ab791SAndreas Gruenbacher 		device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate;
563b30ab791SAndreas Gruenbacher 		number = SLEEP_TIME * device->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
564e65f440dSLars Ellenberg 	}
565813472ceSPhilipp Reisner 	rcu_read_unlock();
566e65f440dSLars Ellenberg 
5670e49d7b0SLars Ellenberg 	/* Don't have more than "max-buffers"/2 in-flight.
5680e49d7b0SLars Ellenberg 	 * Otherwise we may cause the remote site to stall on drbd_alloc_pages(),
5690e49d7b0SLars Ellenberg 	 * potentially causing a distributed deadlock on congestion during
5700e49d7b0SLars Ellenberg 	 * online-verify or (checksum-based) resync, if max-buffers,
5710e49d7b0SLars Ellenberg 	 * socket buffer sizes and resync rate settings are mis-configured. */
5727f34f614SLars Ellenberg 
5737f34f614SLars Ellenberg 	/* note that "number" is in units of "BM_BLOCK_SIZE" (which is 4k),
5747f34f614SLars Ellenberg 	 * mxb (as used here, and in drbd_alloc_pages on the peer) is
5757f34f614SLars Ellenberg 	 * "number of pages" (typically also 4k),
5767f34f614SLars Ellenberg 	 * but "rs_in_flight" is in "sectors" (512 Byte). */
5777f34f614SLars Ellenberg 	if (mxb - device->rs_in_flight/8 < number)
5787f34f614SLars Ellenberg 		number = mxb - device->rs_in_flight/8;
5790e49d7b0SLars Ellenberg 
580e65f440dSLars Ellenberg 	return number;
581e65f440dSLars Ellenberg }
582e65f440dSLars Ellenberg 
58344a4d551SLars Ellenberg static int make_resync_request(struct drbd_device *const device, int cancel)
584b411b363SPhilipp Reisner {
58544a4d551SLars Ellenberg 	struct drbd_peer_device *const peer_device = first_peer_device(device);
58644a4d551SLars Ellenberg 	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
587b411b363SPhilipp Reisner 	unsigned long bit;
588b411b363SPhilipp Reisner 	sector_t sector;
589155bd9d1SChristoph Hellwig 	const sector_t capacity = get_capacity(device->vdisk);
5901816a2b4SLars Ellenberg 	int max_bio_size;
591e65f440dSLars Ellenberg 	int number, rollback_i, size;
592506afb62SLars Ellenberg 	int align, requeue = 0;
5930f0601f4SLars Ellenberg 	int i = 0;
59492d94ae6SPhilipp Reisner 	int discard_granularity = 0;
595b411b363SPhilipp Reisner 
596b411b363SPhilipp Reisner 	if (unlikely(cancel))
59799920dc5SAndreas Gruenbacher 		return 0;
598b411b363SPhilipp Reisner 
599b30ab791SAndreas Gruenbacher 	if (device->rs_total == 0) {
600af85e8e8SLars Ellenberg 		/* empty resync? */
601b30ab791SAndreas Gruenbacher 		drbd_resync_finished(device);
60299920dc5SAndreas Gruenbacher 		return 0;
603af85e8e8SLars Ellenberg 	}
604af85e8e8SLars Ellenberg 
605b30ab791SAndreas Gruenbacher 	if (!get_ldev(device)) {
606b30ab791SAndreas Gruenbacher 		/* Since we only need to access device->rsync, a
607b30ab791SAndreas Gruenbacher 		   get_ldev_if_state(device, D_FAILED) would be sufficient, but
608b411b363SPhilipp Reisner 		   continuing resync with a broken disk makes no sense at
609b411b363SPhilipp Reisner 		   all */
610d0180171SAndreas Gruenbacher 		drbd_err(device, "Disk broke down during resync!\n");
61199920dc5SAndreas Gruenbacher 		return 0;
612b411b363SPhilipp Reisner 	}
613b411b363SPhilipp Reisner 
6149104d31aSLars Ellenberg 	if (connection->agreed_features & DRBD_FF_THIN_RESYNC) {
61592d94ae6SPhilipp Reisner 		rcu_read_lock();
61692d94ae6SPhilipp Reisner 		discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity;
61792d94ae6SPhilipp Reisner 		rcu_read_unlock();
61892d94ae6SPhilipp Reisner 	}
61992d94ae6SPhilipp Reisner 
620b30ab791SAndreas Gruenbacher 	max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
621b30ab791SAndreas Gruenbacher 	number = drbd_rs_number_requests(device);
6220e49d7b0SLars Ellenberg 	if (number <= 0)
6230f0601f4SLars Ellenberg 		goto requeue;
624b411b363SPhilipp Reisner 
625b411b363SPhilipp Reisner 	for (i = 0; i < number; i++) {
626506afb62SLars Ellenberg 		/* Stop generating RS requests when half of the send buffer is filled,
627506afb62SLars Ellenberg 		 * but notify TCP that we'd like to have more space. */
62844a4d551SLars Ellenberg 		mutex_lock(&connection->data.mutex);
62944a4d551SLars Ellenberg 		if (connection->data.socket) {
630506afb62SLars Ellenberg 			struct sock *sk = connection->data.socket->sk;
631506afb62SLars Ellenberg 			int queued = sk->sk_wmem_queued;
632506afb62SLars Ellenberg 			int sndbuf = sk->sk_sndbuf;
633506afb62SLars Ellenberg 			if (queued > sndbuf / 2) {
634506afb62SLars Ellenberg 				requeue = 1;
635506afb62SLars Ellenberg 				if (sk->sk_socket)
636506afb62SLars Ellenberg 					set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
637b411b363SPhilipp Reisner 			}
638506afb62SLars Ellenberg 		} else
639506afb62SLars Ellenberg 			requeue = 1;
64044a4d551SLars Ellenberg 		mutex_unlock(&connection->data.mutex);
641506afb62SLars Ellenberg 		if (requeue)
642b411b363SPhilipp Reisner 			goto requeue;
643b411b363SPhilipp Reisner 
644b411b363SPhilipp Reisner next_sector:
645b411b363SPhilipp Reisner 		size = BM_BLOCK_SIZE;
646b30ab791SAndreas Gruenbacher 		bit  = drbd_bm_find_next(device, device->bm_resync_fo);
647b411b363SPhilipp Reisner 
6484b0715f0SLars Ellenberg 		if (bit == DRBD_END_OF_BITMAP) {
649b30ab791SAndreas Gruenbacher 			device->bm_resync_fo = drbd_bm_bits(device);
650b30ab791SAndreas Gruenbacher 			put_ldev(device);
65199920dc5SAndreas Gruenbacher 			return 0;
652b411b363SPhilipp Reisner 		}
653b411b363SPhilipp Reisner 
654b411b363SPhilipp Reisner 		sector = BM_BIT_TO_SECT(bit);
655b411b363SPhilipp Reisner 
656ad3fee79SLars Ellenberg 		if (drbd_try_rs_begin_io(device, sector)) {
657b30ab791SAndreas Gruenbacher 			device->bm_resync_fo = bit;
658b411b363SPhilipp Reisner 			goto requeue;
659b411b363SPhilipp Reisner 		}
660b30ab791SAndreas Gruenbacher 		device->bm_resync_fo = bit + 1;
661b411b363SPhilipp Reisner 
662b30ab791SAndreas Gruenbacher 		if (unlikely(drbd_bm_test_bit(device, bit) == 0)) {
663b30ab791SAndreas Gruenbacher 			drbd_rs_complete_io(device, sector);
664b411b363SPhilipp Reisner 			goto next_sector;
665b411b363SPhilipp Reisner 		}
666b411b363SPhilipp Reisner 
6671816a2b4SLars Ellenberg #if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
668b411b363SPhilipp Reisner 		/* try to find some adjacent bits.
669b411b363SPhilipp Reisner 		 * we stop once we have reached the maximum request size.
670b411b363SPhilipp Reisner 		 *
671b411b363SPhilipp Reisner 		 * Additionally always align bigger requests, in order to
672b411b363SPhilipp Reisner 		 * be prepared for all stripe sizes of software RAIDs.
673b411b363SPhilipp Reisner 		 */
674b411b363SPhilipp Reisner 		align = 1;
675d207450cSPhilipp Reisner 		rollback_i = i;
6766377b923SLars Ellenberg 		while (i < number) {
6771816a2b4SLars Ellenberg 			if (size + BM_BLOCK_SIZE > max_bio_size)
678b411b363SPhilipp Reisner 				break;
679b411b363SPhilipp Reisner 
680b411b363SPhilipp Reisner 			/* always stay aligned: only keep merging while the start sector is a multiple of 1 << (align + 3) sectors */
681b411b363SPhilipp Reisner 			if (sector & ((1<<(align+3))-1))
682b411b363SPhilipp Reisner 				break;
683b411b363SPhilipp Reisner 
68492d94ae6SPhilipp Reisner 			if (discard_granularity && size == discard_granularity)
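			/* a chunk of exactly rs_discard_granularity is sent as
			 * P_RS_THIN_REQ further below; do not grow it beyond that */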
68592d94ae6SPhilipp Reisner 				break;
68692d94ae6SPhilipp Reisner 
687b411b363SPhilipp Reisner 			/* do not cross extent boundaries */
688b411b363SPhilipp Reisner 			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
689b411b363SPhilipp Reisner 				break;
690b411b363SPhilipp Reisner 			/* now, is it actually dirty, after all?
691b411b363SPhilipp Reisner 			 * caution: drbd_bm_test_bit is tri-state for some
692b411b363SPhilipp Reisner 			 * obscure reason; a test for ( b == 0 ) would handle the
693b411b363SPhilipp Reisner 			 * out-of-band value only accidentally right, because of
694b411b363SPhilipp Reisner 			 * the "oddly sized" adjustment below */
695b30ab791SAndreas Gruenbacher 			if (drbd_bm_test_bit(device, bit+1) != 1)
696b411b363SPhilipp Reisner 				break;
697b411b363SPhilipp Reisner 			bit++;
698b411b363SPhilipp Reisner 			size += BM_BLOCK_SIZE;
699b411b363SPhilipp Reisner 			if ((BM_BLOCK_SIZE << align) <= size)
700b411b363SPhilipp Reisner 				align++;
701b411b363SPhilipp Reisner 			i++;
702b411b363SPhilipp Reisner 		}
703b411b363SPhilipp Reisner 		/* if we merged some,
704b411b363SPhilipp Reisner 		 * reset the offset to start the next drbd_bm_find_next from */
705b411b363SPhilipp Reisner 		if (size > BM_BLOCK_SIZE)
706b30ab791SAndreas Gruenbacher 			device->bm_resync_fo = bit + 1;
707b411b363SPhilipp Reisner #endif
708b411b363SPhilipp Reisner 
709b411b363SPhilipp Reisner 		/* adjust very last sectors, in case we are oddly sized */
710b411b363SPhilipp Reisner 		if (sector + (size>>9) > capacity)
711b411b363SPhilipp Reisner 			size = (capacity-sector)<<9;
712aaaba345SLars Ellenberg 
713aaaba345SLars Ellenberg 		if (device->use_csums) {
71444a4d551SLars Ellenberg 			switch (read_for_csum(peer_device, sector, size)) {
71580a40e43SLars Ellenberg 			case -EIO: /* Disk failure */
716b30ab791SAndreas Gruenbacher 				put_ldev(device);
71799920dc5SAndreas Gruenbacher 				return -EIO;
71880a40e43SLars Ellenberg 			case -EAGAIN: /* allocation failed, or ldev busy */
719b30ab791SAndreas Gruenbacher 				drbd_rs_complete_io(device, sector);
720b30ab791SAndreas Gruenbacher 				device->bm_resync_fo = BM_SECT_TO_BIT(sector);
721d207450cSPhilipp Reisner 				i = rollback_i;
722b411b363SPhilipp Reisner 				goto requeue;
72380a40e43SLars Ellenberg 			case 0:
72480a40e43SLars Ellenberg 				/* everything ok */
72580a40e43SLars Ellenberg 				break;
72680a40e43SLars Ellenberg 			default:
72780a40e43SLars Ellenberg 				BUG();
728b411b363SPhilipp Reisner 			}
729b411b363SPhilipp Reisner 		} else {
73099920dc5SAndreas Gruenbacher 			int err;
73199920dc5SAndreas Gruenbacher 
732b30ab791SAndreas Gruenbacher 			inc_rs_pending(device);
73392d94ae6SPhilipp Reisner 			err = drbd_send_drequest(peer_device,
73492d94ae6SPhilipp Reisner 						 size == discard_granularity ? P_RS_THIN_REQ : P_RS_DATA_REQUEST,
73599920dc5SAndreas Gruenbacher 						 sector, size, ID_SYNCER);
73699920dc5SAndreas Gruenbacher 			if (err) {
737d0180171SAndreas Gruenbacher 				drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
738b30ab791SAndreas Gruenbacher 				dec_rs_pending(device);
739b30ab791SAndreas Gruenbacher 				put_ldev(device);
74099920dc5SAndreas Gruenbacher 				return err;
741b411b363SPhilipp Reisner 			}
742b411b363SPhilipp Reisner 		}
743b411b363SPhilipp Reisner 	}
744b411b363SPhilipp Reisner 
745b30ab791SAndreas Gruenbacher 	if (device->bm_resync_fo >= drbd_bm_bits(device)) {
746b411b363SPhilipp Reisner 		/* last syncer _request_ was sent,
747b411b363SPhilipp Reisner 		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
748b411b363SPhilipp Reisner 		 * next sync group will resume), as soon as we receive the last
749b411b363SPhilipp Reisner 		 * resync data block, and the last bit is cleared.
750b411b363SPhilipp Reisner 		 * until then resync "work" is "inactive" ...
751b411b363SPhilipp Reisner 		 */
752b30ab791SAndreas Gruenbacher 		put_ldev(device);
75399920dc5SAndreas Gruenbacher 		return 0;
754b411b363SPhilipp Reisner 	}
755b411b363SPhilipp Reisner 
756b411b363SPhilipp Reisner  requeue:
757b30ab791SAndreas Gruenbacher 	device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
758b30ab791SAndreas Gruenbacher 	mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
759b30ab791SAndreas Gruenbacher 	put_ldev(device);
76099920dc5SAndreas Gruenbacher 	return 0;
761b411b363SPhilipp Reisner }
762b411b363SPhilipp Reisner 
763d448a2e1SAndreas Gruenbacher static int make_ov_request(struct drbd_device *device, int cancel)
764b411b363SPhilipp Reisner {
765b411b363SPhilipp Reisner 	int number, i, size;
766b411b363SPhilipp Reisner 	sector_t sector;
767155bd9d1SChristoph Hellwig 	const sector_t capacity = get_capacity(device->vdisk);
76858ffa580SLars Ellenberg 	bool stop_sector_reached = false;
769b411b363SPhilipp Reisner 
770b411b363SPhilipp Reisner 	if (unlikely(cancel))
771b411b363SPhilipp Reisner 		return 1;
772b411b363SPhilipp Reisner 
773b30ab791SAndreas Gruenbacher 	number = drbd_rs_number_requests(device);
774b411b363SPhilipp Reisner 
775b30ab791SAndreas Gruenbacher 	sector = device->ov_position;
776b411b363SPhilipp Reisner 	for (i = 0; i < number; i++) {
77758ffa580SLars Ellenberg 		if (sector >= capacity)
778b411b363SPhilipp Reisner 			return 1;
77958ffa580SLars Ellenberg 
78058ffa580SLars Ellenberg 		/* We check for "finished" only in the reply path:
78158ffa580SLars Ellenberg 		 * w_e_end_ov_reply().
78258ffa580SLars Ellenberg 		 * We need to send at least one request out. */
78358ffa580SLars Ellenberg 		stop_sector_reached = i > 0
784b30ab791SAndreas Gruenbacher 			&& verify_can_do_stop_sector(device)
785b30ab791SAndreas Gruenbacher 			&& sector >= device->ov_stop_sector;
78658ffa580SLars Ellenberg 		if (stop_sector_reached)
78758ffa580SLars Ellenberg 			break;
788b411b363SPhilipp Reisner 
789b411b363SPhilipp Reisner 		size = BM_BLOCK_SIZE;
790b411b363SPhilipp Reisner 
791ad3fee79SLars Ellenberg 		if (drbd_try_rs_begin_io(device, sector)) {
792b30ab791SAndreas Gruenbacher 			device->ov_position = sector;
793b411b363SPhilipp Reisner 			goto requeue;
794b411b363SPhilipp Reisner 		}
795b411b363SPhilipp Reisner 
796b411b363SPhilipp Reisner 		if (sector + (size>>9) > capacity)
797b411b363SPhilipp Reisner 			size = (capacity-sector)<<9;
798b411b363SPhilipp Reisner 
799b30ab791SAndreas Gruenbacher 		inc_rs_pending(device);
80069a22773SAndreas Gruenbacher 		if (drbd_send_ov_request(first_peer_device(device), sector, size)) {
801b30ab791SAndreas Gruenbacher 			dec_rs_pending(device);
802b411b363SPhilipp Reisner 			return 0;
803b411b363SPhilipp Reisner 		}
804b411b363SPhilipp Reisner 		sector += BM_SECT_PER_BIT;
805b411b363SPhilipp Reisner 	}
806b30ab791SAndreas Gruenbacher 	device->ov_position = sector;
807b411b363SPhilipp Reisner 
808b411b363SPhilipp Reisner  requeue:
809b30ab791SAndreas Gruenbacher 	device->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
81058ffa580SLars Ellenberg 	if (i == 0 || !stop_sector_reached)
811b30ab791SAndreas Gruenbacher 		mod_timer(&device->resync_timer, jiffies + SLEEP_TIME);
812b411b363SPhilipp Reisner 	return 1;
813b411b363SPhilipp Reisner }
814b411b363SPhilipp Reisner 
81599920dc5SAndreas Gruenbacher int w_ov_finished(struct drbd_work *w, int cancel)
816b411b363SPhilipp Reisner {
81784b8c06bSAndreas Gruenbacher 	struct drbd_device_work *dw =
81884b8c06bSAndreas Gruenbacher 		container_of(w, struct drbd_device_work, w);
81984b8c06bSAndreas Gruenbacher 	struct drbd_device *device = dw->device;
82084b8c06bSAndreas Gruenbacher 	kfree(dw);
821b30ab791SAndreas Gruenbacher 	ov_out_of_sync_print(device);
822b30ab791SAndreas Gruenbacher 	drbd_resync_finished(device);
823b411b363SPhilipp Reisner 
82499920dc5SAndreas Gruenbacher 	return 0;
825b411b363SPhilipp Reisner }
826b411b363SPhilipp Reisner 
82799920dc5SAndreas Gruenbacher static int w_resync_finished(struct drbd_work *w, int cancel)
828b411b363SPhilipp Reisner {
82984b8c06bSAndreas Gruenbacher 	struct drbd_device_work *dw =
83084b8c06bSAndreas Gruenbacher 		container_of(w, struct drbd_device_work, w);
83184b8c06bSAndreas Gruenbacher 	struct drbd_device *device = dw->device;
83284b8c06bSAndreas Gruenbacher 	kfree(dw);
833b411b363SPhilipp Reisner 
834b30ab791SAndreas Gruenbacher 	drbd_resync_finished(device);
835b411b363SPhilipp Reisner 
83699920dc5SAndreas Gruenbacher 	return 0;
837b411b363SPhilipp Reisner }
838b411b363SPhilipp Reisner 
839b30ab791SAndreas Gruenbacher static void ping_peer(struct drbd_device *device)
840af85e8e8SLars Ellenberg {
841a6b32bc3SAndreas Gruenbacher 	struct drbd_connection *connection = first_peer_device(device)->connection;
8422a67d8b9SPhilipp Reisner 
843bde89a9eSAndreas Gruenbacher 	clear_bit(GOT_PING_ACK, &connection->flags);
844bde89a9eSAndreas Gruenbacher 	request_ping(connection);
845bde89a9eSAndreas Gruenbacher 	wait_event(connection->ping_wait,
846bde89a9eSAndreas Gruenbacher 		   test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
847af85e8e8SLars Ellenberg }
848af85e8e8SLars Ellenberg 
849b30ab791SAndreas Gruenbacher int drbd_resync_finished(struct drbd_device *device)
850b411b363SPhilipp Reisner {
85126a96110SLars Ellenberg 	struct drbd_connection *connection = first_peer_device(device)->connection;
852b411b363SPhilipp Reisner 	unsigned long db, dt, dbdt;
853b411b363SPhilipp Reisner 	unsigned long n_oos;
854b411b363SPhilipp Reisner 	union drbd_state os, ns;
85584b8c06bSAndreas Gruenbacher 	struct drbd_device_work *dw;
856b411b363SPhilipp Reisner 	char *khelper_cmd = NULL;
85726525618SLars Ellenberg 	int verify_done = 0;
858b411b363SPhilipp Reisner 
859b411b363SPhilipp Reisner 	/* Remove all elements from the resync LRU. Since future actions
860b411b363SPhilipp Reisner 	 * might set bits in the (main) bitmap, the entries in the
861b411b363SPhilipp Reisner 	 * resync LRU would otherwise be wrong. */
862b30ab791SAndreas Gruenbacher 	if (drbd_rs_del_all(device)) {
863b411b363SPhilipp Reisner 		/* This is not possible right now, most probably because
864b411b363SPhilipp Reisner 		 * there are P_RS_DATA_REPLY packets lingering on the worker's
865b411b363SPhilipp Reisner 		 * queue (or even the read operations for those packets
866b411b363SPhilipp Reisner 		 * are not finished by now).  Retry in 100ms. */
867b411b363SPhilipp Reisner 
86820ee6390SPhilipp Reisner 		schedule_timeout_interruptible(HZ / 10);
86984b8c06bSAndreas Gruenbacher 		dw = kmalloc(sizeof(struct drbd_device_work), GFP_ATOMIC);
87084b8c06bSAndreas Gruenbacher 		if (dw) {
87184b8c06bSAndreas Gruenbacher 			dw->w.cb = w_resync_finished;
87284b8c06bSAndreas Gruenbacher 			dw->device = device;
87326a96110SLars Ellenberg 			drbd_queue_work(&connection->sender_work, &dw->w);
874b411b363SPhilipp Reisner 			return 1;
875b411b363SPhilipp Reisner 		}
87684b8c06bSAndreas Gruenbacher 		drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(dw).\n");
877b411b363SPhilipp Reisner 	}
878b411b363SPhilipp Reisner 
879b30ab791SAndreas Gruenbacher 	dt = (jiffies - device->rs_start - device->rs_paused) / HZ;
880b411b363SPhilipp Reisner 	if (dt <= 0)
881b411b363SPhilipp Reisner 		dt = 1;
88258ffa580SLars Ellenberg 
883b30ab791SAndreas Gruenbacher 	db = device->rs_total;
88458ffa580SLars Ellenberg 	/* adjust for verify start and stop sectors, respective reached position */
885b30ab791SAndreas Gruenbacher 	if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
886b30ab791SAndreas Gruenbacher 		db -= device->ov_left;
88758ffa580SLars Ellenberg 
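	/* db counts bitmap bits (resync units), dt is in seconds;
	 * Bit2KB() turns dbdt into the average throughput in KiB/s */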
888b411b363SPhilipp Reisner 	dbdt = Bit2KB(db/dt);
889b30ab791SAndreas Gruenbacher 	device->rs_paused /= HZ;
890b411b363SPhilipp Reisner 
891b30ab791SAndreas Gruenbacher 	if (!get_ldev(device))
892b411b363SPhilipp Reisner 		goto out;
893b411b363SPhilipp Reisner 
894b30ab791SAndreas Gruenbacher 	ping_peer(device);
895af85e8e8SLars Ellenberg 
8960500813fSAndreas Gruenbacher 	spin_lock_irq(&device->resource->req_lock);
897b30ab791SAndreas Gruenbacher 	os = drbd_read_state(device);
898b411b363SPhilipp Reisner 
89926525618SLars Ellenberg 	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
90026525618SLars Ellenberg 
901b411b363SPhilipp Reisner 	/* This protects us against multiple calls (that can happen in the presence
902b411b363SPhilipp Reisner 	   of application IO), and against connectivity loss just before we arrive here. */
903b411b363SPhilipp Reisner 	if (os.conn <= C_CONNECTED)
904b411b363SPhilipp Reisner 		goto out_unlock;
905b411b363SPhilipp Reisner 
906b411b363SPhilipp Reisner 	ns = os;
907b411b363SPhilipp Reisner 	ns.conn = C_CONNECTED;
908b411b363SPhilipp Reisner 
909d0180171SAndreas Gruenbacher 	drbd_info(device, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
91026525618SLars Ellenberg 	     verify_done ? "Online verify" : "Resync",
911b30ab791SAndreas Gruenbacher 	     dt + device->rs_paused, device->rs_paused, dbdt);
912b411b363SPhilipp Reisner 
913b30ab791SAndreas Gruenbacher 	n_oos = drbd_bm_total_weight(device);
914b411b363SPhilipp Reisner 
915b411b363SPhilipp Reisner 	if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
916b411b363SPhilipp Reisner 		if (n_oos) {
917d0180171SAndreas Gruenbacher 			drbd_alert(device, "Online verify found %lu %dk block out of sync!\n",
918b411b363SPhilipp Reisner 			      n_oos, Bit2KB(1));
919b411b363SPhilipp Reisner 			khelper_cmd = "out-of-sync";
920b411b363SPhilipp Reisner 		}
921b411b363SPhilipp Reisner 	} else {
9220b0ba1efSAndreas Gruenbacher 		D_ASSERT(device, (n_oos - device->rs_failed) == 0);
923b411b363SPhilipp Reisner 
924b411b363SPhilipp Reisner 		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
925b411b363SPhilipp Reisner 			khelper_cmd = "after-resync-target";
926b411b363SPhilipp Reisner 
927aaaba345SLars Ellenberg 		if (device->use_csums && device->rs_total) {
928b30ab791SAndreas Gruenbacher 			const unsigned long s = device->rs_same_csum;
929b30ab791SAndreas Gruenbacher 			const unsigned long t = device->rs_total;
930b411b363SPhilipp Reisner 			const int ratio =
931b411b363SPhilipp Reisner 				(t == 0)     ? 0 :
932b411b363SPhilipp Reisner 			(t < 100000) ? ((s*100)/t) : (s/(t/100));
933d0180171SAndreas Gruenbacher 			drbd_info(device, "%u %% had equal checksums, eliminated: %luK; "
934b411b363SPhilipp Reisner 			     "transferred %luK total %luK\n",
935b411b363SPhilipp Reisner 			     ratio,
936b30ab791SAndreas Gruenbacher 			     Bit2KB(device->rs_same_csum),
937b30ab791SAndreas Gruenbacher 			     Bit2KB(device->rs_total - device->rs_same_csum),
938b30ab791SAndreas Gruenbacher 			     Bit2KB(device->rs_total));
939b411b363SPhilipp Reisner 		}
940b411b363SPhilipp Reisner 	}
941b411b363SPhilipp Reisner 
942b30ab791SAndreas Gruenbacher 	if (device->rs_failed) {
943d0180171SAndreas Gruenbacher 		drbd_info(device, "            %lu failed blocks\n", device->rs_failed);
944b411b363SPhilipp Reisner 
945b411b363SPhilipp Reisner 		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
946b411b363SPhilipp Reisner 			ns.disk = D_INCONSISTENT;
947b411b363SPhilipp Reisner 			ns.pdsk = D_UP_TO_DATE;
948b411b363SPhilipp Reisner 		} else {
949b411b363SPhilipp Reisner 			ns.disk = D_UP_TO_DATE;
950b411b363SPhilipp Reisner 			ns.pdsk = D_INCONSISTENT;
951b411b363SPhilipp Reisner 		}
952b411b363SPhilipp Reisner 	} else {
953b411b363SPhilipp Reisner 		ns.disk = D_UP_TO_DATE;
954b411b363SPhilipp Reisner 		ns.pdsk = D_UP_TO_DATE;
955b411b363SPhilipp Reisner 
956b411b363SPhilipp Reisner 		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
957b30ab791SAndreas Gruenbacher 			if (device->p_uuid) {
958b411b363SPhilipp Reisner 				int i;
959b411b363SPhilipp Reisner 				for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
960b30ab791SAndreas Gruenbacher 					_drbd_uuid_set(device, i, device->p_uuid[i]);
961b30ab791SAndreas Gruenbacher 				drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]);
962b30ab791SAndreas Gruenbacher 				_drbd_uuid_set(device, UI_CURRENT, device->p_uuid[UI_CURRENT]);
963b411b363SPhilipp Reisner 			} else {
964d0180171SAndreas Gruenbacher 				drbd_err(device, "device->p_uuid is NULL! BUG\n");
965b411b363SPhilipp Reisner 			}
966b411b363SPhilipp Reisner 		}
967b411b363SPhilipp Reisner 
96862b0da3aSLars Ellenberg 		if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
96962b0da3aSLars Ellenberg 			/* for verify runs, we don't update uuids here,
97062b0da3aSLars Ellenberg 			 * so there would be nothing to report. */
971b30ab791SAndreas Gruenbacher 			drbd_uuid_set_bm(device, 0UL);
972b30ab791SAndreas Gruenbacher 			drbd_print_uuids(device, "updated UUIDs");
973b30ab791SAndreas Gruenbacher 			if (device->p_uuid) {
974b411b363SPhilipp Reisner 				/* Now the two UUID sets are equal, update what we
975b411b363SPhilipp Reisner 				 * know of the peer. */
976b411b363SPhilipp Reisner 				int i;
977b411b363SPhilipp Reisner 				for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
978b30ab791SAndreas Gruenbacher 					device->p_uuid[i] = device->ldev->md.uuid[i];
979b411b363SPhilipp Reisner 			}
980b411b363SPhilipp Reisner 		}
98162b0da3aSLars Ellenberg 	}
982b411b363SPhilipp Reisner 
983b30ab791SAndreas Gruenbacher 	_drbd_set_state(device, ns, CS_VERBOSE, NULL);
984b411b363SPhilipp Reisner out_unlock:
9850500813fSAndreas Gruenbacher 	spin_unlock_irq(&device->resource->req_lock);
98626a96110SLars Ellenberg 
98726a96110SLars Ellenberg 	/* If we have been sync source, and have an effective fencing-policy,
98826a96110SLars Ellenberg 	 * once *all* volumes are back in sync, call "unfence". */
98926a96110SLars Ellenberg 	if (os.conn == C_SYNC_SOURCE) {
99026a96110SLars Ellenberg 		enum drbd_disk_state disk_state = D_MASK;
99126a96110SLars Ellenberg 		enum drbd_disk_state pdsk_state = D_MASK;
99226a96110SLars Ellenberg 		enum drbd_fencing_p fp = FP_DONT_CARE;
99326a96110SLars Ellenberg 
99426a96110SLars Ellenberg 		rcu_read_lock();
99526a96110SLars Ellenberg 		fp = rcu_dereference(device->ldev->disk_conf)->fencing;
99626a96110SLars Ellenberg 		if (fp != FP_DONT_CARE) {
99726a96110SLars Ellenberg 			struct drbd_peer_device *peer_device;
99826a96110SLars Ellenberg 			int vnr;
99926a96110SLars Ellenberg 			idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
100026a96110SLars Ellenberg 				struct drbd_device *device = peer_device->device;
100126a96110SLars Ellenberg 				disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
100226a96110SLars Ellenberg 				pdsk_state = min_t(enum drbd_disk_state, pdsk_state, device->state.pdsk);
100326a96110SLars Ellenberg 			}
100426a96110SLars Ellenberg 		}
100526a96110SLars Ellenberg 		rcu_read_unlock();
100626a96110SLars Ellenberg 		if (disk_state == D_UP_TO_DATE && pdsk_state == D_UP_TO_DATE)
100726a96110SLars Ellenberg 			conn_khelper(connection, "unfence-peer");
100826a96110SLars Ellenberg 	}
100926a96110SLars Ellenberg 
1010b30ab791SAndreas Gruenbacher 	put_ldev(device);
1011b411b363SPhilipp Reisner out:
1012b30ab791SAndreas Gruenbacher 	device->rs_total  = 0;
1013b30ab791SAndreas Gruenbacher 	device->rs_failed = 0;
1014b30ab791SAndreas Gruenbacher 	device->rs_paused = 0;
101558ffa580SLars Ellenberg 
101658ffa580SLars Ellenberg 	/* reset start sector, if we reached end of device */
1017b30ab791SAndreas Gruenbacher 	if (verify_done && device->ov_left == 0)
1018b30ab791SAndreas Gruenbacher 		device->ov_start_sector = 0;
1019b411b363SPhilipp Reisner 
1020b30ab791SAndreas Gruenbacher 	drbd_md_sync(device);
102113d42685SLars Ellenberg 
1022b411b363SPhilipp Reisner 	if (khelper_cmd)
1023b30ab791SAndreas Gruenbacher 		drbd_khelper(device, khelper_cmd);
1024b411b363SPhilipp Reisner 
1025b411b363SPhilipp Reisner 	return 1;
1026b411b363SPhilipp Reisner }
1027b411b363SPhilipp Reisner 
1028b411b363SPhilipp Reisner /* helper: park peer requests whose pages may still be in flight via sendpage() on net_ee; free them right away otherwise */
1029b30ab791SAndreas Gruenbacher static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
1030b411b363SPhilipp Reisner {
1031045417f7SAndreas Gruenbacher 	if (drbd_peer_req_has_active_page(peer_req)) {
1032b411b363SPhilipp Reisner 		/* This might happen if sendpage() has not finished */
1033ba6bee98SCai Huoqing 		int i = PFN_UP(peer_req->i.size);
1034b30ab791SAndreas Gruenbacher 		atomic_add(i, &device->pp_in_use_by_net);
1035b30ab791SAndreas Gruenbacher 		atomic_sub(i, &device->pp_in_use);
10360500813fSAndreas Gruenbacher 		spin_lock_irq(&device->resource->req_lock);
1037a8cd15baSAndreas Gruenbacher 		list_add_tail(&peer_req->w.list, &device->net_ee);
10380500813fSAndreas Gruenbacher 		spin_unlock_irq(&device->resource->req_lock);
1039435f0740SLars Ellenberg 		wake_up(&drbd_pp_wait);
1040b411b363SPhilipp Reisner 	} else
1041b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1042b411b363SPhilipp Reisner }
1043b411b363SPhilipp Reisner 
1044b411b363SPhilipp Reisner /**
1045b411b363SPhilipp Reisner  * w_e_end_data_req() - Worker callback to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
1046b411b363SPhilipp Reisner  * @w:		work object.
1047b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyway
1048b411b363SPhilipp Reisner  */
104999920dc5SAndreas Gruenbacher int w_e_end_data_req(struct drbd_work *w, int cancel)
1050b411b363SPhilipp Reisner {
1051a8cd15baSAndreas Gruenbacher 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
10526780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
10536780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
105499920dc5SAndreas Gruenbacher 	int err;
1055b411b363SPhilipp Reisner 
1056b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1057b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1058b30ab791SAndreas Gruenbacher 		dec_unacked(device);
105999920dc5SAndreas Gruenbacher 		return 0;
1060b411b363SPhilipp Reisner 	}
1061b411b363SPhilipp Reisner 
1062db830c46SAndreas Gruenbacher 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
10636780139cSAndreas Gruenbacher 		err = drbd_send_block(peer_device, P_DATA_REPLY, peer_req);
1064b411b363SPhilipp Reisner 	} else {
1065e3fa02d7SChristoph Böhmwalder 		if (drbd_ratelimit())
1066d0180171SAndreas Gruenbacher 			drbd_err(device, "Sending NegDReply. sector=%llus.\n",
1067db830c46SAndreas Gruenbacher 			    (unsigned long long)peer_req->i.sector);
1068b411b363SPhilipp Reisner 
10696780139cSAndreas Gruenbacher 		err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
1070b411b363SPhilipp Reisner 	}
1071b411b363SPhilipp Reisner 
1072b30ab791SAndreas Gruenbacher 	dec_unacked(device);
1073b411b363SPhilipp Reisner 
1074b30ab791SAndreas Gruenbacher 	move_to_net_ee_or_free(device, peer_req);
1075b411b363SPhilipp Reisner 
107699920dc5SAndreas Gruenbacher 	if (unlikely(err))
1077d0180171SAndreas Gruenbacher 		drbd_err(device, "drbd_send_block() failed\n");
107899920dc5SAndreas Gruenbacher 	return err;
1079b411b363SPhilipp Reisner }
1080b411b363SPhilipp Reisner 
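/* Scan the peer request's page chain word by word; return true only if the
 * whole payload is zero.  Used by w_e_end_rsdata_req() below to answer thin
 * resync requests (EE_RS_THIN_REQ) with drbd_send_rs_deallocated() instead of
 * shipping an all-zero block. */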
1081700ca8c0SPhilipp Reisner static bool all_zero(struct drbd_peer_request *peer_req)
1082700ca8c0SPhilipp Reisner {
1083700ca8c0SPhilipp Reisner 	struct page *page = peer_req->pages;
1084700ca8c0SPhilipp Reisner 	unsigned int len = peer_req->i.size;
1085700ca8c0SPhilipp Reisner 
1086700ca8c0SPhilipp Reisner 	page_chain_for_each(page) {
1087700ca8c0SPhilipp Reisner 		unsigned int l = min_t(unsigned int, len, PAGE_SIZE);
1088700ca8c0SPhilipp Reisner 		unsigned int i, words = l / sizeof(long);
1089700ca8c0SPhilipp Reisner 		unsigned long *d;
1090700ca8c0SPhilipp Reisner 
1091700ca8c0SPhilipp Reisner 		d = kmap_atomic(page);
1092700ca8c0SPhilipp Reisner 		for (i = 0; i < words; i++) {
1093700ca8c0SPhilipp Reisner 			if (d[i]) {
1094700ca8c0SPhilipp Reisner 				kunmap_atomic(d);
1095700ca8c0SPhilipp Reisner 				return false;
1096700ca8c0SPhilipp Reisner 			}
1097700ca8c0SPhilipp Reisner 		}
1098700ca8c0SPhilipp Reisner 		kunmap_atomic(d);
1099700ca8c0SPhilipp Reisner 		len -= l;
1100700ca8c0SPhilipp Reisner 	}
1101700ca8c0SPhilipp Reisner 
1102700ca8c0SPhilipp Reisner 	return true;
1103700ca8c0SPhilipp Reisner }
1104700ca8c0SPhilipp Reisner 
1105b411b363SPhilipp Reisner /**
1106a209b4aeSAndreas Gruenbacher  * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
1107b411b363SPhilipp Reisner  * @w:		work object.
1108b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyway
1109b411b363SPhilipp Reisner  */
111099920dc5SAndreas Gruenbacher int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
1111b411b363SPhilipp Reisner {
1112a8cd15baSAndreas Gruenbacher 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
11136780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
11146780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
111599920dc5SAndreas Gruenbacher 	int err;
1116b411b363SPhilipp Reisner 
1117b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1118b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1119b30ab791SAndreas Gruenbacher 		dec_unacked(device);
112099920dc5SAndreas Gruenbacher 		return 0;
1121b411b363SPhilipp Reisner 	}
1122b411b363SPhilipp Reisner 
1123b30ab791SAndreas Gruenbacher 	if (get_ldev_if_state(device, D_FAILED)) {
1124b30ab791SAndreas Gruenbacher 		drbd_rs_complete_io(device, peer_req->i.sector);
1125b30ab791SAndreas Gruenbacher 		put_ldev(device);
1126b411b363SPhilipp Reisner 	}
1127b411b363SPhilipp Reisner 
1128b30ab791SAndreas Gruenbacher 	if (device->state.conn == C_AHEAD) {
11296780139cSAndreas Gruenbacher 		err = drbd_send_ack(peer_device, P_RS_CANCEL, peer_req);
1130db830c46SAndreas Gruenbacher 	} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1131b30ab791SAndreas Gruenbacher 		if (likely(device->state.pdsk >= D_INCONSISTENT)) {
1132b30ab791SAndreas Gruenbacher 			inc_rs_pending(device);
1133700ca8c0SPhilipp Reisner 			if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req))
1134700ca8c0SPhilipp Reisner 				err = drbd_send_rs_deallocated(peer_device, peer_req);
1135700ca8c0SPhilipp Reisner 			else
11366780139cSAndreas Gruenbacher 				err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
1137b411b363SPhilipp Reisner 		} else {
1138e3fa02d7SChristoph Böhmwalder 			if (drbd_ratelimit())
1139d0180171SAndreas Gruenbacher 				drbd_err(device, "Not sending RSDataReply, "
1140b411b363SPhilipp Reisner 				    "partner DISKLESS!\n");
114199920dc5SAndreas Gruenbacher 			err = 0;
1142b411b363SPhilipp Reisner 		}
1143b411b363SPhilipp Reisner 	} else {
1144e3fa02d7SChristoph Böhmwalder 		if (drbd_ratelimit())
1145d0180171SAndreas Gruenbacher 			drbd_err(device, "Sending NegRSDReply. sector %llus.\n",
1146db830c46SAndreas Gruenbacher 			    (unsigned long long)peer_req->i.sector);
1147b411b363SPhilipp Reisner 
11486780139cSAndreas Gruenbacher 		err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
1149b411b363SPhilipp Reisner 
1150b411b363SPhilipp Reisner 		/* update resync data with failure */
1151b30ab791SAndreas Gruenbacher 		drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size);
1152b411b363SPhilipp Reisner 	}
1153b411b363SPhilipp Reisner 
1154b30ab791SAndreas Gruenbacher 	dec_unacked(device);
1155b411b363SPhilipp Reisner 
1156b30ab791SAndreas Gruenbacher 	move_to_net_ee_or_free(device, peer_req);
1157b411b363SPhilipp Reisner 
115899920dc5SAndreas Gruenbacher 	if (unlikely(err))
1159d0180171SAndreas Gruenbacher 		drbd_err(device, "drbd_send_block() failed\n");
116099920dc5SAndreas Gruenbacher 	return err;
1161b411b363SPhilipp Reisner }
1162b411b363SPhilipp Reisner 
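/* Worker callback for checksum-based resync requests: recompute the digest
 * over the local data and either acknowledge the block as already in sync
 * (P_RS_IS_IN_SYNC) or ship the full block (P_RS_DATA_REPLY). */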
116399920dc5SAndreas Gruenbacher int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
1164b411b363SPhilipp Reisner {
1165a8cd15baSAndreas Gruenbacher 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
11666780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
11676780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
1168b411b363SPhilipp Reisner 	struct digest_info *di;
1169b411b363SPhilipp Reisner 	int digest_size;
1170b411b363SPhilipp Reisner 	void *digest = NULL;
117199920dc5SAndreas Gruenbacher 	int err, eq = 0;
1172b411b363SPhilipp Reisner 
1173b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1174b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1175b30ab791SAndreas Gruenbacher 		dec_unacked(device);
117699920dc5SAndreas Gruenbacher 		return 0;
1177b411b363SPhilipp Reisner 	}
1178b411b363SPhilipp Reisner 
1179b30ab791SAndreas Gruenbacher 	if (get_ldev(device)) {
1180b30ab791SAndreas Gruenbacher 		drbd_rs_complete_io(device, peer_req->i.sector);
1181b30ab791SAndreas Gruenbacher 		put_ldev(device);
11821d53f09eSLars Ellenberg 	}
1183b411b363SPhilipp Reisner 
1184db830c46SAndreas Gruenbacher 	di = peer_req->digest;
1185b411b363SPhilipp Reisner 
1186db830c46SAndreas Gruenbacher 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1187b411b363SPhilipp Reisner 		/* quick hack to try to avoid a race against reconfiguration.
1188b411b363SPhilipp Reisner 		 * a real fix would be much more involved,
1189b411b363SPhilipp Reisner 		 * introducing more locking mechanisms */
11906780139cSAndreas Gruenbacher 		if (peer_device->connection->csums_tfm) {
11913d0e6375SKees Cook 			digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
11920b0ba1efSAndreas Gruenbacher 			D_ASSERT(device, digest_size == di->digest_size);
1193b411b363SPhilipp Reisner 			digest = kmalloc(digest_size, GFP_NOIO);
1194b411b363SPhilipp Reisner 		}
1195b411b363SPhilipp Reisner 		if (digest) {
11966780139cSAndreas Gruenbacher 			drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
1197b411b363SPhilipp Reisner 			eq = !memcmp(digest, di->digest, digest_size);
1198b411b363SPhilipp Reisner 			kfree(digest);
1199b411b363SPhilipp Reisner 		}
1200b411b363SPhilipp Reisner 
1201b411b363SPhilipp Reisner 		if (eq) {
1202b30ab791SAndreas Gruenbacher 			drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size);
1203676396d5SLars Ellenberg 			/* rs_same_csum unit is BM_BLOCK_SIZE */
1204b30ab791SAndreas Gruenbacher 			device->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
12056780139cSAndreas Gruenbacher 			err = drbd_send_ack(peer_device, P_RS_IS_IN_SYNC, peer_req);
1206b411b363SPhilipp Reisner 		} else {
1207b30ab791SAndreas Gruenbacher 			inc_rs_pending(device);
1208db830c46SAndreas Gruenbacher 			peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
1209db830c46SAndreas Gruenbacher 			peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
1210204bba99SPhilipp Reisner 			kfree(di);
12116780139cSAndreas Gruenbacher 			err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
1212b411b363SPhilipp Reisner 		}
1213b411b363SPhilipp Reisner 	} else {
12146780139cSAndreas Gruenbacher 		err = drbd_send_ack(peer_device, P_NEG_RS_DREPLY, peer_req);
1215e3fa02d7SChristoph Böhmwalder 		if (drbd_ratelimit())
1216d0180171SAndreas Gruenbacher 			drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
1217b411b363SPhilipp Reisner 	}
1218b411b363SPhilipp Reisner 
1219b30ab791SAndreas Gruenbacher 	dec_unacked(device);
1220b30ab791SAndreas Gruenbacher 	move_to_net_ee_or_free(device, peer_req);
1221b411b363SPhilipp Reisner 
122299920dc5SAndreas Gruenbacher 	if (unlikely(err))
1223d0180171SAndreas Gruenbacher 		drbd_err(device, "drbd_send_block/ack() failed\n");
122499920dc5SAndreas Gruenbacher 	return err;
1225b411b363SPhilipp Reisner }
1226b411b363SPhilipp Reisner 
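/* Worker callback for an online-verify request: compute a digest over the
 * local block and send it to the peer with P_OV_REPLY.  On a local read error
 * an all-zero digest is sent instead. */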
122799920dc5SAndreas Gruenbacher int w_e_end_ov_req(struct drbd_work *w, int cancel)
1228b411b363SPhilipp Reisner {
1229a8cd15baSAndreas Gruenbacher 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
12306780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
12316780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
1232db830c46SAndreas Gruenbacher 	sector_t sector = peer_req->i.sector;
1233db830c46SAndreas Gruenbacher 	unsigned int size = peer_req->i.size;
1234b411b363SPhilipp Reisner 	int digest_size;
1235b411b363SPhilipp Reisner 	void *digest;
123699920dc5SAndreas Gruenbacher 	int err = 0;
1237b411b363SPhilipp Reisner 
1238b411b363SPhilipp Reisner 	if (unlikely(cancel))
1239b411b363SPhilipp Reisner 		goto out;
1240b411b363SPhilipp Reisner 
12413d0e6375SKees Cook 	digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
1242b411b363SPhilipp Reisner 	digest = kmalloc(digest_size, GFP_NOIO);
12438f21420eSPhilipp Reisner 	if (!digest) {
124499920dc5SAndreas Gruenbacher 		err = 1;	/* terminate the connection in case the allocation failed */
12458f21420eSPhilipp Reisner 		goto out;
12468f21420eSPhilipp Reisner 	}
12478f21420eSPhilipp Reisner 
1248db830c46SAndreas Gruenbacher 	if (likely(!(peer_req->flags & EE_WAS_ERROR)))
12496780139cSAndreas Gruenbacher 		drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
12508f21420eSPhilipp Reisner 	else
12518f21420eSPhilipp Reisner 		memset(digest, 0, digest_size);
12528f21420eSPhilipp Reisner 
125353ea4331SLars Ellenberg 	/* Free peer_req and pages before send.
125453ea4331SLars Ellenberg 	 * In case we block on congestion, we could otherwise run into
125553ea4331SLars Ellenberg 	 * some distributed deadlock, if the other side blocks on
125653ea4331SLars Ellenberg 	 * congestion as well, because our receiver blocks in
1257c37c8ecfSAndreas Gruenbacher 	 * drbd_alloc_pages due to pp_in_use > max_buffers. */
1258b30ab791SAndreas Gruenbacher 	drbd_free_peer_req(device, peer_req);
1259db830c46SAndreas Gruenbacher 	peer_req = NULL;
1260b30ab791SAndreas Gruenbacher 	inc_rs_pending(device);
12616780139cSAndreas Gruenbacher 	err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY);
126299920dc5SAndreas Gruenbacher 	if (err)
1263b30ab791SAndreas Gruenbacher 		dec_rs_pending(device);
1264b411b363SPhilipp Reisner 	kfree(digest);
1265b411b363SPhilipp Reisner 
1266b411b363SPhilipp Reisner out:
1267db830c46SAndreas Gruenbacher 	if (peer_req)
1268b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1269b30ab791SAndreas Gruenbacher 	dec_unacked(device);
127099920dc5SAndreas Gruenbacher 	return err;
1271b411b363SPhilipp Reisner }
1272b411b363SPhilipp Reisner 
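/* Record a verify mismatch: extend the currently tracked out-of-sync range if
 * the new sector is contiguous with it, otherwise start a new range, and mark
 * the blocks out of sync in the bitmap. */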
1273b30ab791SAndreas Gruenbacher void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size)
1274b411b363SPhilipp Reisner {
1275b30ab791SAndreas Gruenbacher 	if (device->ov_last_oos_start + device->ov_last_oos_size == sector) {
1276b30ab791SAndreas Gruenbacher 		device->ov_last_oos_size += size>>9;
1277b411b363SPhilipp Reisner 	} else {
1278b30ab791SAndreas Gruenbacher 		device->ov_last_oos_start = sector;
1279b30ab791SAndreas Gruenbacher 		device->ov_last_oos_size = size>>9;
1280b411b363SPhilipp Reisner 	}
1281b30ab791SAndreas Gruenbacher 	drbd_set_out_of_sync(device, sector, size);
1282b411b363SPhilipp Reisner }
1283b411b363SPhilipp Reisner 
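/* Worker callback for a verify reply: compare the digest received from the
 * peer with one computed over the local data, record mismatches, answer with
 * P_OV_RESULT, and finish the verify run once ov_left reaches zero or the
 * configured stop sector has been passed. */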
128499920dc5SAndreas Gruenbacher int w_e_end_ov_reply(struct drbd_work *w, int cancel)
1285b411b363SPhilipp Reisner {
1286a8cd15baSAndreas Gruenbacher 	struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
12876780139cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = peer_req->peer_device;
12886780139cSAndreas Gruenbacher 	struct drbd_device *device = peer_device->device;
1289b411b363SPhilipp Reisner 	struct digest_info *di;
1290b411b363SPhilipp Reisner 	void *digest;
1291db830c46SAndreas Gruenbacher 	sector_t sector = peer_req->i.sector;
1292db830c46SAndreas Gruenbacher 	unsigned int size = peer_req->i.size;
129353ea4331SLars Ellenberg 	int digest_size;
129499920dc5SAndreas Gruenbacher 	int err, eq = 0;
129558ffa580SLars Ellenberg 	bool stop_sector_reached = false;
1296b411b363SPhilipp Reisner 
1297b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
1298b30ab791SAndreas Gruenbacher 		drbd_free_peer_req(device, peer_req);
1299b30ab791SAndreas Gruenbacher 		dec_unacked(device);
130099920dc5SAndreas Gruenbacher 		return 0;
1301b411b363SPhilipp Reisner 	}
1302b411b363SPhilipp Reisner 
1303b411b363SPhilipp Reisner 	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
1304b411b363SPhilipp Reisner 	 * the resync lru has been cleaned up already */
1305b30ab791SAndreas Gruenbacher 	if (get_ldev(device)) {
1306b30ab791SAndreas Gruenbacher 		drbd_rs_complete_io(device, peer_req->i.sector);
1307b30ab791SAndreas Gruenbacher 		put_ldev(device);
13081d53f09eSLars Ellenberg 	}
1309b411b363SPhilipp Reisner 
1310db830c46SAndreas Gruenbacher 	di = peer_req->digest;
1311b411b363SPhilipp Reisner 
1312db830c46SAndreas Gruenbacher 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
13133d0e6375SKees Cook 		digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
1314b411b363SPhilipp Reisner 		digest = kmalloc(digest_size, GFP_NOIO);
1315b411b363SPhilipp Reisner 		if (digest) {
13166780139cSAndreas Gruenbacher 			drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
1317b411b363SPhilipp Reisner 
13180b0ba1efSAndreas Gruenbacher 			D_ASSERT(device, digest_size == di->digest_size);
1319b411b363SPhilipp Reisner 			eq = !memcmp(digest, di->digest, digest_size);
1320b411b363SPhilipp Reisner 			kfree(digest);
1321b411b363SPhilipp Reisner 		}
1322b411b363SPhilipp Reisner 	}
1323b411b363SPhilipp Reisner 
13249676c760SLars Ellenberg 	/* Free peer_req and pages before send.
132553ea4331SLars Ellenberg 	 * In case we block on congestion, we could otherwise run into
132653ea4331SLars Ellenberg 	 * some distributed deadlock, if the other side blocks on
132753ea4331SLars Ellenberg 	 * congestion as well, because our receiver blocks in
1328c37c8ecfSAndreas Gruenbacher 	 * drbd_alloc_pages due to pp_in_use > max_buffers. */
1329b30ab791SAndreas Gruenbacher 	drbd_free_peer_req(device, peer_req);
1330b411b363SPhilipp Reisner 	if (!eq)
1331b30ab791SAndreas Gruenbacher 		drbd_ov_out_of_sync_found(device, sector, size);
1332b411b363SPhilipp Reisner 	else
1333b30ab791SAndreas Gruenbacher 		ov_out_of_sync_print(device);
1334b411b363SPhilipp Reisner 
13356780139cSAndreas Gruenbacher 	err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size,
1336b411b363SPhilipp Reisner 			       eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
1337b411b363SPhilipp Reisner 
1338b30ab791SAndreas Gruenbacher 	dec_unacked(device);
1339b411b363SPhilipp Reisner 
1340b30ab791SAndreas Gruenbacher 	--device->ov_left;
1341ea5442afSLars Ellenberg 
1342ea5442afSLars Ellenberg 	/* let's advance progress step marks only for every other megabyte */
1343b30ab791SAndreas Gruenbacher 	if ((device->ov_left & 0x200) == 0x200)
1344b30ab791SAndreas Gruenbacher 		drbd_advance_rs_marks(device, device->ov_left);
1345ea5442afSLars Ellenberg 
1346b30ab791SAndreas Gruenbacher 	stop_sector_reached = verify_can_do_stop_sector(device) &&
1347b30ab791SAndreas Gruenbacher 		(sector + (size>>9)) >= device->ov_stop_sector;
134858ffa580SLars Ellenberg 
1349b30ab791SAndreas Gruenbacher 	if (device->ov_left == 0 || stop_sector_reached) {
1350b30ab791SAndreas Gruenbacher 		ov_out_of_sync_print(device);
1351b30ab791SAndreas Gruenbacher 		drbd_resync_finished(device);
1352b411b363SPhilipp Reisner 	}
1353b411b363SPhilipp Reisner 
135499920dc5SAndreas Gruenbacher 	return err;
1355b411b363SPhilipp Reisner }
1356b411b363SPhilipp Reisner 
1357b6dd1a89SLars Ellenberg /* FIXME
1358b6dd1a89SLars Ellenberg  * We need to track the number of pending barrier acks,
1359b6dd1a89SLars Ellenberg  * and to be able to wait for them.
1360b6dd1a89SLars Ellenberg  * See also comment in drbd_adm_attach before drbd_suspend_io.
1361b6dd1a89SLars Ellenberg  */
1362bde89a9eSAndreas Gruenbacher static int drbd_send_barrier(struct drbd_connection *connection)
1363b411b363SPhilipp Reisner {
13649f5bdc33SAndreas Gruenbacher 	struct p_barrier *p;
1365b6dd1a89SLars Ellenberg 	struct drbd_socket *sock;
1366b411b363SPhilipp Reisner 
1367bde89a9eSAndreas Gruenbacher 	sock = &connection->data;
1368bde89a9eSAndreas Gruenbacher 	p = conn_prepare_command(connection, sock);
13699f5bdc33SAndreas Gruenbacher 	if (!p)
13709f5bdc33SAndreas Gruenbacher 		return -EIO;
1371bde89a9eSAndreas Gruenbacher 	p->barrier = connection->send.current_epoch_nr;
1372b6dd1a89SLars Ellenberg 	p->pad = 0;
1373bde89a9eSAndreas Gruenbacher 	connection->send.current_epoch_writes = 0;
137484d34f2fSLars Ellenberg 	connection->send.last_sent_barrier_jif = jiffies;
1375b6dd1a89SLars Ellenberg 
1376bde89a9eSAndreas Gruenbacher 	return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
1377b411b363SPhilipp Reisner }
1378b411b363SPhilipp Reisner 
1379c51a0ef3SLars Ellenberg static int pd_send_unplug_remote(struct drbd_peer_device *pd)
1380c51a0ef3SLars Ellenberg {
1381c51a0ef3SLars Ellenberg 	struct drbd_socket *sock = &pd->connection->data;
1382c51a0ef3SLars Ellenberg 	if (!drbd_prepare_command(pd, sock))
1383c51a0ef3SLars Ellenberg 		return -EIO;
1384c51a0ef3SLars Ellenberg 	return drbd_send_command(pd, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
1385c51a0ef3SLars Ellenberg }
1386c51a0ef3SLars Ellenberg 
138799920dc5SAndreas Gruenbacher int w_send_write_hint(struct drbd_work *w, int cancel)
1388b411b363SPhilipp Reisner {
138984b8c06bSAndreas Gruenbacher 	struct drbd_device *device =
139084b8c06bSAndreas Gruenbacher 		container_of(w, struct drbd_device, unplug_work);
13919f5bdc33SAndreas Gruenbacher 
1392b411b363SPhilipp Reisner 	if (cancel)
139399920dc5SAndreas Gruenbacher 		return 0;
1394c51a0ef3SLars Ellenberg 	return pd_send_unplug_remote(first_peer_device(device));
1395b411b363SPhilipp Reisner }
1396b411b363SPhilipp Reisner 
1397bde89a9eSAndreas Gruenbacher static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
13984eb9b3cbSLars Ellenberg {
1399bde89a9eSAndreas Gruenbacher 	if (!connection->send.seen_any_write_yet) {
1400bde89a9eSAndreas Gruenbacher 		connection->send.seen_any_write_yet = true;
1401bde89a9eSAndreas Gruenbacher 		connection->send.current_epoch_nr = epoch;
1402bde89a9eSAndreas Gruenbacher 		connection->send.current_epoch_writes = 0;
140384d34f2fSLars Ellenberg 		connection->send.last_sent_barrier_jif = jiffies;
14044eb9b3cbSLars Ellenberg 	}
14054eb9b3cbSLars Ellenberg }
14064eb9b3cbSLars Ellenberg 
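/* A P_BARRIER closes the previous write epoch.  It is sent only when the
 * request's epoch differs from the epoch we are currently sending for, and
 * only if that previous epoch actually contained writes. */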
1407bde89a9eSAndreas Gruenbacher static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch)
14084eb9b3cbSLars Ellenberg {
14094eb9b3cbSLars Ellenberg 	/* re-init if first write on this connection */
1410bde89a9eSAndreas Gruenbacher 	if (!connection->send.seen_any_write_yet)
14114eb9b3cbSLars Ellenberg 		return;
1412bde89a9eSAndreas Gruenbacher 	if (connection->send.current_epoch_nr != epoch) {
1413bde89a9eSAndreas Gruenbacher 		if (connection->send.current_epoch_writes)
1414bde89a9eSAndreas Gruenbacher 			drbd_send_barrier(connection);
1415bde89a9eSAndreas Gruenbacher 		connection->send.current_epoch_nr = epoch;
14164eb9b3cbSLars Ellenberg 	}
14174eb9b3cbSLars Ellenberg }
14184eb9b3cbSLars Ellenberg 
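/* Worker callback used in Ahead/Behind mode: instead of mirroring the write
 * itself, only tell the peer which blocks are now out of sync (see
 * drbd_send_out_of_sync()). */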
14198f7bed77SAndreas Gruenbacher int w_send_out_of_sync(struct drbd_work *w, int cancel)
142073a01a18SPhilipp Reisner {
142173a01a18SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
142284b8c06bSAndreas Gruenbacher 	struct drbd_device *device = req->device;
142344a4d551SLars Ellenberg 	struct drbd_peer_device *const peer_device = first_peer_device(device);
142444a4d551SLars Ellenberg 	struct drbd_connection *const connection = peer_device->connection;
142599920dc5SAndreas Gruenbacher 	int err;
142673a01a18SPhilipp Reisner 
142773a01a18SPhilipp Reisner 	if (unlikely(cancel)) {
14288554df1cSAndreas Gruenbacher 		req_mod(req, SEND_CANCELED);
142999920dc5SAndreas Gruenbacher 		return 0;
143073a01a18SPhilipp Reisner 	}
1431e5f891b2SLars Ellenberg 	req->pre_send_jif = jiffies;
143273a01a18SPhilipp Reisner 
1433bde89a9eSAndreas Gruenbacher 	/* this time, no connection->send.current_epoch_writes++;
1434b6dd1a89SLars Ellenberg 	 * If it was sent, it was the closing barrier for the last
1435b6dd1a89SLars Ellenberg 	 * replicated epoch, before we went into AHEAD mode.
1436b6dd1a89SLars Ellenberg 	 * No more barriers will be sent, until we leave AHEAD mode again. */
1437bde89a9eSAndreas Gruenbacher 	maybe_send_barrier(connection, req->epoch);
1438b6dd1a89SLars Ellenberg 
143944a4d551SLars Ellenberg 	err = drbd_send_out_of_sync(peer_device, req);
14408554df1cSAndreas Gruenbacher 	req_mod(req, OOS_HANDED_TO_NETWORK);
144173a01a18SPhilipp Reisner 
144299920dc5SAndreas Gruenbacher 	return err;
144373a01a18SPhilipp Reisner }
144473a01a18SPhilipp Reisner 
1445b411b363SPhilipp Reisner /**
1446b411b363SPhilipp Reisner  * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
1447b411b363SPhilipp Reisner  * @w:		work object.
1448b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyway
1449b411b363SPhilipp Reisner  */
145099920dc5SAndreas Gruenbacher int w_send_dblock(struct drbd_work *w, int cancel)
1451b411b363SPhilipp Reisner {
1452b411b363SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
145384b8c06bSAndreas Gruenbacher 	struct drbd_device *device = req->device;
145444a4d551SLars Ellenberg 	struct drbd_peer_device *const peer_device = first_peer_device(device);
145544a4d551SLars Ellenberg 	struct drbd_connection *connection = peer_device->connection;
1456c51a0ef3SLars Ellenberg 	bool do_send_unplug = req->rq_state & RQ_UNPLUG;
145799920dc5SAndreas Gruenbacher 	int err;
1458b411b363SPhilipp Reisner 
1459b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
14608554df1cSAndreas Gruenbacher 		req_mod(req, SEND_CANCELED);
146199920dc5SAndreas Gruenbacher 		return 0;
1462b411b363SPhilipp Reisner 	}
1463e5f891b2SLars Ellenberg 	req->pre_send_jif = jiffies;
1464b411b363SPhilipp Reisner 
1465bde89a9eSAndreas Gruenbacher 	re_init_if_first_write(connection, req->epoch);
1466bde89a9eSAndreas Gruenbacher 	maybe_send_barrier(connection, req->epoch);
1467bde89a9eSAndreas Gruenbacher 	connection->send.current_epoch_writes++;
1468b6dd1a89SLars Ellenberg 
146944a4d551SLars Ellenberg 	err = drbd_send_dblock(peer_device, req);
147099920dc5SAndreas Gruenbacher 	req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
1471b411b363SPhilipp Reisner 
1472c51a0ef3SLars Ellenberg 	if (do_send_unplug && !err)
1473c51a0ef3SLars Ellenberg 		pd_send_unplug_remote(peer_device);
1474c51a0ef3SLars Ellenberg 
147599920dc5SAndreas Gruenbacher 	return err;
1476b411b363SPhilipp Reisner }
1477b411b363SPhilipp Reisner 
1478b411b363SPhilipp Reisner /**
1479b411b363SPhilipp Reisner  * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
1480b411b363SPhilipp Reisner  * @w:		work object.
1481b411b363SPhilipp Reisner  * @cancel:	The connection will be closed anyway
1482b411b363SPhilipp Reisner  */
148399920dc5SAndreas Gruenbacher int w_send_read_req(struct drbd_work *w, int cancel)
1484b411b363SPhilipp Reisner {
1485b411b363SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
148684b8c06bSAndreas Gruenbacher 	struct drbd_device *device = req->device;
148744a4d551SLars Ellenberg 	struct drbd_peer_device *const peer_device = first_peer_device(device);
148844a4d551SLars Ellenberg 	struct drbd_connection *connection = peer_device->connection;
1489c51a0ef3SLars Ellenberg 	bool do_send_unplug = req->rq_state & RQ_UNPLUG;
149099920dc5SAndreas Gruenbacher 	int err;
1491b411b363SPhilipp Reisner 
1492b411b363SPhilipp Reisner 	if (unlikely(cancel)) {
14938554df1cSAndreas Gruenbacher 		req_mod(req, SEND_CANCELED);
149499920dc5SAndreas Gruenbacher 		return 0;
1495b411b363SPhilipp Reisner 	}
1496e5f891b2SLars Ellenberg 	req->pre_send_jif = jiffies;
1497b411b363SPhilipp Reisner 
1498b6dd1a89SLars Ellenberg 	/* Even read requests may close a write epoch,
1499b6dd1a89SLars Ellenberg 	 * if there has been any write yet. */
1500bde89a9eSAndreas Gruenbacher 	maybe_send_barrier(connection, req->epoch);
1501b6dd1a89SLars Ellenberg 
150244a4d551SLars Ellenberg 	err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size,
1503b411b363SPhilipp Reisner 				 (unsigned long)req);
1504b411b363SPhilipp Reisner 
150599920dc5SAndreas Gruenbacher 	req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
1506b411b363SPhilipp Reisner 
1507c51a0ef3SLars Ellenberg 	if (do_send_unplug && !err)
1508c51a0ef3SLars Ellenberg 		pd_send_unplug_remote(peer_device);
1509c51a0ef3SLars Ellenberg 
151099920dc5SAndreas Gruenbacher 	return err;
1511b411b363SPhilipp Reisner }
1512b411b363SPhilipp Reisner 
151399920dc5SAndreas Gruenbacher int w_restart_disk_io(struct drbd_work *w, int cancel)
1514265be2d0SPhilipp Reisner {
1515265be2d0SPhilipp Reisner 	struct drbd_request *req = container_of(w, struct drbd_request, w);
151684b8c06bSAndreas Gruenbacher 	struct drbd_device *device = req->device;
1517265be2d0SPhilipp Reisner 
15180778286aSPhilipp Reisner 	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
15194dd726f0SLars Ellenberg 		drbd_al_begin_io(device, &req->i);
1520265be2d0SPhilipp Reisner 
1521abfc426dSChristoph Hellwig 	req->private_bio = bio_alloc_clone(device->ldev->backing_bdev,
1522abfc426dSChristoph Hellwig 					   req->master_bio, GFP_NOIO,
1523ae7153f1SChristoph Hellwig 					  &drbd_io_bio_set);
1524ae7153f1SChristoph Hellwig 	req->private_bio->bi_private = req;
1525ae7153f1SChristoph Hellwig 	req->private_bio->bi_end_io = drbd_request_endio;
1526ed00aabdSChristoph Hellwig 	submit_bio_noacct(req->private_bio);
1527265be2d0SPhilipp Reisner 
152899920dc5SAndreas Gruenbacher 	return 0;
1529265be2d0SPhilipp Reisner }
1530265be2d0SPhilipp Reisner 
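/* Walk the resync-after dependency chain starting at the given device; return
 * 0 if any device we depend on is itself resyncing or has its resync paused,
 * and 1 otherwise. */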
1531b30ab791SAndreas Gruenbacher static int _drbd_may_sync_now(struct drbd_device *device)
1532b411b363SPhilipp Reisner {
1533b30ab791SAndreas Gruenbacher 	struct drbd_device *odev = device;
153495f8efd0SAndreas Gruenbacher 	int resync_after;
1535b411b363SPhilipp Reisner 
1536b411b363SPhilipp Reisner 	while (1) {
1537a3f8f7dcSLars Ellenberg 		if (!odev->ldev || odev->state.disk == D_DISKLESS)
1538438c8374SPhilipp Reisner 			return 1;
1539daeda1ccSPhilipp Reisner 		rcu_read_lock();
154095f8efd0SAndreas Gruenbacher 		resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
1541daeda1ccSPhilipp Reisner 		rcu_read_unlock();
154295f8efd0SAndreas Gruenbacher 		if (resync_after == -1)
1543b411b363SPhilipp Reisner 			return 1;
1544b30ab791SAndreas Gruenbacher 		odev = minor_to_device(resync_after);
1545a3f8f7dcSLars Ellenberg 		if (!odev)
1546841ce241SAndreas Gruenbacher 			return 1;
1547b411b363SPhilipp Reisner 		if ((odev->state.conn >= C_SYNC_SOURCE &&
1548b411b363SPhilipp Reisner 		     odev->state.conn <= C_PAUSED_SYNC_T) ||
1549b411b363SPhilipp Reisner 		    odev->state.aftr_isp || odev->state.peer_isp ||
1550b411b363SPhilipp Reisner 		    odev->state.user_isp)
1551b411b363SPhilipp Reisner 			return 0;
1552b411b363SPhilipp Reisner 	}
1553b411b363SPhilipp Reisner }
1554b411b363SPhilipp Reisner 
1555b411b363SPhilipp Reisner /**
155628bc3b8cSAndreas Gruenbacher  * drbd_pause_after() - Pause resync on all devices that may not resync now
1557b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
1558b411b363SPhilipp Reisner  *
1559b411b363SPhilipp Reisner  * Called from process context only (admin command and after_state_ch).
1560b411b363SPhilipp Reisner  */
156128bc3b8cSAndreas Gruenbacher static bool drbd_pause_after(struct drbd_device *device)
1562b411b363SPhilipp Reisner {
156328bc3b8cSAndreas Gruenbacher 	bool changed = false;
156454761697SAndreas Gruenbacher 	struct drbd_device *odev;
156528bc3b8cSAndreas Gruenbacher 	int i;
1566b411b363SPhilipp Reisner 
1567695d08faSPhilipp Reisner 	rcu_read_lock();
156805a10ec7SAndreas Gruenbacher 	idr_for_each_entry(&drbd_devices, odev, i) {
1569b411b363SPhilipp Reisner 		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1570b411b363SPhilipp Reisner 			continue;
157128bc3b8cSAndreas Gruenbacher 		if (!_drbd_may_sync_now(odev) &&
157228bc3b8cSAndreas Gruenbacher 		    _drbd_set_state(_NS(odev, aftr_isp, 1),
157328bc3b8cSAndreas Gruenbacher 				    CS_HARD, NULL) != SS_NOTHING_TO_DO)
157428bc3b8cSAndreas Gruenbacher 			changed = true;
1575b411b363SPhilipp Reisner 	}
1576695d08faSPhilipp Reisner 	rcu_read_unlock();
1577b411b363SPhilipp Reisner 
157828bc3b8cSAndreas Gruenbacher 	return changed;
1579b411b363SPhilipp Reisner }
1580b411b363SPhilipp Reisner 
1581b411b363SPhilipp Reisner /**
158228bc3b8cSAndreas Gruenbacher  * drbd_resume_next() - Resume resync on all devices that may resync now
1583b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
1584b411b363SPhilipp Reisner  *
1585b411b363SPhilipp Reisner  * Called from process context only (admin command and worker).
1586b411b363SPhilipp Reisner  */
158728bc3b8cSAndreas Gruenbacher static bool drbd_resume_next(struct drbd_device *device)
1588b411b363SPhilipp Reisner {
158928bc3b8cSAndreas Gruenbacher 	bool changed = false;
159054761697SAndreas Gruenbacher 	struct drbd_device *odev;
159128bc3b8cSAndreas Gruenbacher 	int i;
1592b411b363SPhilipp Reisner 
1593695d08faSPhilipp Reisner 	rcu_read_lock();
159405a10ec7SAndreas Gruenbacher 	idr_for_each_entry(&drbd_devices, odev, i) {
1595b411b363SPhilipp Reisner 		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1596b411b363SPhilipp Reisner 			continue;
1597b411b363SPhilipp Reisner 		if (odev->state.aftr_isp) {
159828bc3b8cSAndreas Gruenbacher 			if (_drbd_may_sync_now(odev) &&
159928bc3b8cSAndreas Gruenbacher 			    _drbd_set_state(_NS(odev, aftr_isp, 0),
160028bc3b8cSAndreas Gruenbacher 					    CS_HARD, NULL) != SS_NOTHING_TO_DO)
160128bc3b8cSAndreas Gruenbacher 				changed = true;
1602b411b363SPhilipp Reisner 		}
1603b411b363SPhilipp Reisner 	}
1604695d08faSPhilipp Reisner 	rcu_read_unlock();
160528bc3b8cSAndreas Gruenbacher 	return changed;
1606b411b363SPhilipp Reisner }
1607b411b363SPhilipp Reisner 
1608b30ab791SAndreas Gruenbacher void resume_next_sg(struct drbd_device *device)
1609b411b363SPhilipp Reisner {
161028bc3b8cSAndreas Gruenbacher 	lock_all_resources();
161128bc3b8cSAndreas Gruenbacher 	drbd_resume_next(device);
161228bc3b8cSAndreas Gruenbacher 	unlock_all_resources();
1613b411b363SPhilipp Reisner }
1614b411b363SPhilipp Reisner 
1615b30ab791SAndreas Gruenbacher void suspend_other_sg(struct drbd_device *device)
1616b411b363SPhilipp Reisner {
161728bc3b8cSAndreas Gruenbacher 	lock_all_resources();
161828bc3b8cSAndreas Gruenbacher 	drbd_pause_after(device);
161928bc3b8cSAndreas Gruenbacher 	unlock_all_resources();
1620b411b363SPhilipp Reisner }
1621b411b363SPhilipp Reisner 
162228bc3b8cSAndreas Gruenbacher /* caller must lock_all_resources() */
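/* Validate a proposed resync-after dependency on minor o_minor.  Only
 * dependency cycles are rejected; e.g. (hypothetical minors) configuring
 * minor 0 to resync after minor 1 while minor 1 already resyncs after
 * minor 0 returns ERR_RESYNC_AFTER_CYCLE. */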
1623b30ab791SAndreas Gruenbacher enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor)
1624b411b363SPhilipp Reisner {
162554761697SAndreas Gruenbacher 	struct drbd_device *odev;
162695f8efd0SAndreas Gruenbacher 	int resync_after;
1627b411b363SPhilipp Reisner 
1628b411b363SPhilipp Reisner 	if (o_minor == -1)
1629b411b363SPhilipp Reisner 		return NO_ERROR;
1630a3f8f7dcSLars Ellenberg 	if (o_minor < -1 || o_minor > MINORMASK)
163195f8efd0SAndreas Gruenbacher 		return ERR_RESYNC_AFTER;
1632b411b363SPhilipp Reisner 
1633b411b363SPhilipp Reisner 	/* check for loops */
1634b30ab791SAndreas Gruenbacher 	odev = minor_to_device(o_minor);
1635b411b363SPhilipp Reisner 	while (1) {
1636b30ab791SAndreas Gruenbacher 		if (odev == device)
163795f8efd0SAndreas Gruenbacher 			return ERR_RESYNC_AFTER_CYCLE;
1638b411b363SPhilipp Reisner 
1639a3f8f7dcSLars Ellenberg 		/* You are free to depend on diskless, non-existing,
1640a3f8f7dcSLars Ellenberg 		 * or not yet/no longer existing minors.
1641a3f8f7dcSLars Ellenberg 		 * We only reject dependency loops.
1642a3f8f7dcSLars Ellenberg 		 * We cannot follow the dependency chain beyond a detached or
1643a3f8f7dcSLars Ellenberg 		 * missing minor.
1644a3f8f7dcSLars Ellenberg 		 */
1645a3f8f7dcSLars Ellenberg 		if (!odev || !odev->ldev || odev->state.disk == D_DISKLESS)
1646a3f8f7dcSLars Ellenberg 			return NO_ERROR;
1647a3f8f7dcSLars Ellenberg 
1648daeda1ccSPhilipp Reisner 		rcu_read_lock();
164995f8efd0SAndreas Gruenbacher 		resync_after = rcu_dereference(odev->ldev->disk_conf)->resync_after;
1650daeda1ccSPhilipp Reisner 		rcu_read_unlock();
1651b411b363SPhilipp Reisner 		/* dependency chain ends here, no cycles. */
165295f8efd0SAndreas Gruenbacher 		if (resync_after == -1)
1653b411b363SPhilipp Reisner 			return NO_ERROR;
1654b411b363SPhilipp Reisner 
1655b411b363SPhilipp Reisner 		/* follow the dependency chain */
1656b30ab791SAndreas Gruenbacher 		odev = minor_to_device(resync_after);
1657b411b363SPhilipp Reisner 	}
1658b411b363SPhilipp Reisner }
1659b411b363SPhilipp Reisner 
166028bc3b8cSAndreas Gruenbacher /* caller must lock_all_resources() */
1661b30ab791SAndreas Gruenbacher void drbd_resync_after_changed(struct drbd_device *device)
1662b411b363SPhilipp Reisner {
166328bc3b8cSAndreas Gruenbacher 	int changed;
1664b411b363SPhilipp Reisner 
1665b411b363SPhilipp Reisner 	do {
166628bc3b8cSAndreas Gruenbacher 		changed  = drbd_pause_after(device);
166728bc3b8cSAndreas Gruenbacher 		changed |= drbd_resume_next(device);
166828bc3b8cSAndreas Gruenbacher 	} while (changed);
1669b411b363SPhilipp Reisner }
1670b411b363SPhilipp Reisner 
1671b30ab791SAndreas Gruenbacher void drbd_rs_controller_reset(struct drbd_device *device)
16729bd28d3cSLars Ellenberg {
16738c40c7c4SChristoph Hellwig 	struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
1674813472ceSPhilipp Reisner 	struct fifo_buffer *plan;
1675813472ceSPhilipp Reisner 
1676b30ab791SAndreas Gruenbacher 	atomic_set(&device->rs_sect_in, 0);
1677b30ab791SAndreas Gruenbacher 	atomic_set(&device->rs_sect_ev, 0);
1678b30ab791SAndreas Gruenbacher 	device->rs_in_flight = 0;
1679cb8432d6SChristoph Hellwig 	device->rs_last_events =
16808446fe92SChristoph Hellwig 		(int)part_stat_read_accum(disk->part0, sectors);
1681813472ceSPhilipp Reisner 
1682813472ceSPhilipp Reisner 	/* Updating the RCU protected object in place is necessary since
1683813472ceSPhilipp Reisner 	   this function gets called from atomic context.
1684813472ceSPhilipp Reisner 	   It is valid since all other updates also lead to a completely
1685813472ceSPhilipp Reisner 	   empty fifo */
1686813472ceSPhilipp Reisner 	rcu_read_lock();
1687b30ab791SAndreas Gruenbacher 	plan = rcu_dereference(device->rs_plan_s);
1688813472ceSPhilipp Reisner 	plan->total = 0;
1689813472ceSPhilipp Reisner 	fifo_set(plan, 0);
1690813472ceSPhilipp Reisner 	rcu_read_unlock();
16919bd28d3cSLars Ellenberg }
16929bd28d3cSLars Ellenberg 
16932bccef39SKees Cook void start_resync_timer_fn(struct timer_list *t)
16941f04af33SPhilipp Reisner {
16952bccef39SKees Cook 	struct drbd_device *device = from_timer(device, t, start_resync_timer);
1696ac0acb9eSLars Ellenberg 	drbd_device_post_work(device, RS_START);
16971f04af33SPhilipp Reisner }
16981f04af33SPhilipp Reisner 
1699ac0acb9eSLars Ellenberg static void do_start_resync(struct drbd_device *device)
17001f04af33SPhilipp Reisner {
1701b30ab791SAndreas Gruenbacher 	if (atomic_read(&device->unacked_cnt) || atomic_read(&device->rs_pending_cnt)) {
1702ac0acb9eSLars Ellenberg 		drbd_warn(device, "postponing start_resync ...\n");
1703b30ab791SAndreas Gruenbacher 		device->start_resync_timer.expires = jiffies + HZ/10;
1704b30ab791SAndreas Gruenbacher 		add_timer(&device->start_resync_timer);
1705ac0acb9eSLars Ellenberg 		return;
17061f04af33SPhilipp Reisner 	}
17071f04af33SPhilipp Reisner 
1708b30ab791SAndreas Gruenbacher 	drbd_start_resync(device, C_SYNC_SOURCE);
1709b30ab791SAndreas Gruenbacher 	clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags);
17101f04af33SPhilipp Reisner }
17111f04af33SPhilipp Reisner 
1712aaaba345SLars Ellenberg static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device)
1713aaaba345SLars Ellenberg {
1714aaaba345SLars Ellenberg 	bool csums_after_crash_only;
1715aaaba345SLars Ellenberg 	rcu_read_lock();
1716aaaba345SLars Ellenberg 	csums_after_crash_only = rcu_dereference(connection->net_conf)->csums_after_crash_only;
1717aaaba345SLars Ellenberg 	rcu_read_unlock();
1718aaaba345SLars Ellenberg 	return connection->agreed_pro_version >= 89 &&		/* supported? */
1719aaaba345SLars Ellenberg 		connection->csums_tfm &&			/* configured? */
17207e5fec31SFabian Frederick 		(csums_after_crash_only == false		/* use for each resync? */
1721aaaba345SLars Ellenberg 		 || test_bit(CRASHED_PRIMARY, &device->flags));	/* or only after Primary crash? */
1722aaaba345SLars Ellenberg }
1723aaaba345SLars Ellenberg 
1724b411b363SPhilipp Reisner /**
1725b411b363SPhilipp Reisner  * drbd_start_resync() - Start the resync process
1726b30ab791SAndreas Gruenbacher  * @device:	DRBD device.
1727b411b363SPhilipp Reisner  * @side:	Either C_SYNC_SOURCE or C_SYNC_TARGET
1728b411b363SPhilipp Reisner  *
1729b411b363SPhilipp Reisner  * This function might bring you directly into one of the
1730b411b363SPhilipp Reisner  * C_PAUSED_SYNC_* states.
1731b411b363SPhilipp Reisner  */
1732b30ab791SAndreas Gruenbacher void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
1733b411b363SPhilipp Reisner {
173444a4d551SLars Ellenberg 	struct drbd_peer_device *peer_device = first_peer_device(device);
173544a4d551SLars Ellenberg 	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
1736b411b363SPhilipp Reisner 	union drbd_state ns;
1737b411b363SPhilipp Reisner 	int r;
1738b411b363SPhilipp Reisner 
1739b30ab791SAndreas Gruenbacher 	if (device->state.conn >= C_SYNC_SOURCE && device->state.conn < C_AHEAD) {
1740d0180171SAndreas Gruenbacher 		drbd_err(device, "Resync already running!\n");
1741b411b363SPhilipp Reisner 		return;
1742b411b363SPhilipp Reisner 	}
1743b411b363SPhilipp Reisner 
1744d3d2948fSRoland Kammerer 	if (!connection) {
1745d3d2948fSRoland Kammerer 		drbd_err(device, "No connection to peer, aborting!\n");
1746d3d2948fSRoland Kammerer 		return;
1747d3d2948fSRoland Kammerer 	}
1748d3d2948fSRoland Kammerer 
1749b30ab791SAndreas Gruenbacher 	if (!test_bit(B_RS_H_DONE, &device->flags)) {
1750b411b363SPhilipp Reisner 		if (side == C_SYNC_TARGET) {
1751b411b363SPhilipp Reisner 			/* Since application IO was locked out during C_WF_BITMAP_T and
1752b411b363SPhilipp Reisner 			   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
1753b411b363SPhilipp Reisner 			   the before-resync-target handler is asked whether we may make the data inconsistent. */
1754b30ab791SAndreas Gruenbacher 			r = drbd_khelper(device, "before-resync-target");
1755b411b363SPhilipp Reisner 			r = (r >> 8) & 0xff;
1756b411b363SPhilipp Reisner 			if (r > 0) {
1757d0180171SAndreas Gruenbacher 				drbd_info(device, "before-resync-target handler returned %d, "
1758b411b363SPhilipp Reisner 					 "dropping connection.\n", r);
175944a4d551SLars Ellenberg 				conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
1760b411b363SPhilipp Reisner 				return;
1761b411b363SPhilipp Reisner 			}
176209b9e797SPhilipp Reisner 		} else /* C_SYNC_SOURCE */ {
1763b30ab791SAndreas Gruenbacher 			r = drbd_khelper(device, "before-resync-source");
176409b9e797SPhilipp Reisner 			r = (r >> 8) & 0xff;
176509b9e797SPhilipp Reisner 			if (r > 0) {
176609b9e797SPhilipp Reisner 				if (r == 3) {
1767d0180171SAndreas Gruenbacher 					drbd_info(device, "before-resync-source handler returned %d, "
176809b9e797SPhilipp Reisner 						 "ignoring. Old userland tools?", r);
176909b9e797SPhilipp Reisner 				} else {
1770d0180171SAndreas Gruenbacher 					drbd_info(device, "before-resync-source handler returned %d, "
177109b9e797SPhilipp Reisner 						 "dropping connection.\n", r);
177244a4d551SLars Ellenberg 					conn_request_state(connection,
1773a6b32bc3SAndreas Gruenbacher 							   NS(conn, C_DISCONNECTING), CS_HARD);
177409b9e797SPhilipp Reisner 					return;
177509b9e797SPhilipp Reisner 				}
177609b9e797SPhilipp Reisner 			}
1777b411b363SPhilipp Reisner 		}
1778e64a3294SPhilipp Reisner 	}
1779b411b363SPhilipp Reisner 
178044a4d551SLars Ellenberg 	if (current == connection->worker.task) {
1781dad20554SPhilipp Reisner 		/* The worker should not sleep waiting for state_mutex,
1782e64a3294SPhilipp Reisner 		   that can take long */
1783b30ab791SAndreas Gruenbacher 		if (!mutex_trylock(device->state_mutex)) {
1784b30ab791SAndreas Gruenbacher 			set_bit(B_RS_H_DONE, &device->flags);
1785b30ab791SAndreas Gruenbacher 			device->start_resync_timer.expires = jiffies + HZ/5;
1786b30ab791SAndreas Gruenbacher 			add_timer(&device->start_resync_timer);
1787e64a3294SPhilipp Reisner 			return;
1788e64a3294SPhilipp Reisner 		}
1789e64a3294SPhilipp Reisner 	} else {
1790b30ab791SAndreas Gruenbacher 		mutex_lock(device->state_mutex);
1791e64a3294SPhilipp Reisner 	}
1792b411b363SPhilipp Reisner 
179328bc3b8cSAndreas Gruenbacher 	lock_all_resources();
179428bc3b8cSAndreas Gruenbacher 	clear_bit(B_RS_H_DONE, &device->flags);
1795a700471bSPhilipp Reisner 	/* Did some connection breakage or IO error race with us? */
1796b30ab791SAndreas Gruenbacher 	if (device->state.conn < C_CONNECTED
1797b30ab791SAndreas Gruenbacher 	|| !get_ldev_if_state(device, D_NEGOTIATING)) {
179828bc3b8cSAndreas Gruenbacher 		unlock_all_resources();
179928bc3b8cSAndreas Gruenbacher 		goto out;
1800b411b363SPhilipp Reisner 	}
1801b411b363SPhilipp Reisner 
1802b30ab791SAndreas Gruenbacher 	ns = drbd_read_state(device);
1803b411b363SPhilipp Reisner 
1804b30ab791SAndreas Gruenbacher 	ns.aftr_isp = !_drbd_may_sync_now(device);
1805b411b363SPhilipp Reisner 
1806b411b363SPhilipp Reisner 	ns.conn = side;
1807b411b363SPhilipp Reisner 
1808b411b363SPhilipp Reisner 	if (side == C_SYNC_TARGET)
1809b411b363SPhilipp Reisner 		ns.disk = D_INCONSISTENT;
1810b411b363SPhilipp Reisner 	else /* side == C_SYNC_SOURCE */
1811b411b363SPhilipp Reisner 		ns.pdsk = D_INCONSISTENT;
1812b411b363SPhilipp Reisner 
181328bc3b8cSAndreas Gruenbacher 	r = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
1814b30ab791SAndreas Gruenbacher 	ns = drbd_read_state(device);
1815b411b363SPhilipp Reisner 
1816b411b363SPhilipp Reisner 	if (ns.conn < C_CONNECTED)
1817b411b363SPhilipp Reisner 		r = SS_UNKNOWN_ERROR;
1818b411b363SPhilipp Reisner 
1819b411b363SPhilipp Reisner 	if (r == SS_SUCCESS) {
1820b30ab791SAndreas Gruenbacher 		unsigned long tw = drbd_bm_total_weight(device);
18211d7734a0SLars Ellenberg 		unsigned long now = jiffies;
18221d7734a0SLars Ellenberg 		int i;
18231d7734a0SLars Ellenberg 
1824b30ab791SAndreas Gruenbacher 		device->rs_failed    = 0;
1825b30ab791SAndreas Gruenbacher 		device->rs_paused    = 0;
1826b30ab791SAndreas Gruenbacher 		device->rs_same_csum = 0;
1827b30ab791SAndreas Gruenbacher 		device->rs_last_sect_ev = 0;
1828b30ab791SAndreas Gruenbacher 		device->rs_total     = tw;
1829b30ab791SAndreas Gruenbacher 		device->rs_start     = now;
18301d7734a0SLars Ellenberg 		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1831b30ab791SAndreas Gruenbacher 			device->rs_mark_left[i] = tw;
1832b30ab791SAndreas Gruenbacher 			device->rs_mark_time[i] = now;
18331d7734a0SLars Ellenberg 		}
183428bc3b8cSAndreas Gruenbacher 		drbd_pause_after(device);
18355ab7d2c0SLars Ellenberg 		/* Forget potentially stale cached per resync extent bit-counts.
18365ab7d2c0SLars Ellenberg 		 * Open-coded drbd_rs_cancel_all(device); we already have IRQs
18375ab7d2c0SLars Ellenberg 		 * disabled, and know the disk state is ok. */
18385ab7d2c0SLars Ellenberg 		spin_lock(&device->al_lock);
18395ab7d2c0SLars Ellenberg 		lc_reset(device->resync);
18405ab7d2c0SLars Ellenberg 		device->resync_locked = 0;
18415ab7d2c0SLars Ellenberg 		device->resync_wenr = LC_FREE;
18425ab7d2c0SLars Ellenberg 		spin_unlock(&device->al_lock);
1843b411b363SPhilipp Reisner 	}
184428bc3b8cSAndreas Gruenbacher 	unlock_all_resources();
18455a22db89SLars Ellenberg 
18466c922ed5SLars Ellenberg 	if (r == SS_SUCCESS) {
18475ab7d2c0SLars Ellenberg 		wake_up(&device->al_wait); /* for lc_reset() above */
1848328e0f12SPhilipp Reisner 		/* reset rs_last_bcast when a resync or verify is started,
1849328e0f12SPhilipp Reisner 		 * to deal with potential jiffies wrap. */
1850b30ab791SAndreas Gruenbacher 		device->rs_last_bcast = jiffies - HZ;
1851328e0f12SPhilipp Reisner 
1852d0180171SAndreas Gruenbacher 		drbd_info(device, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
18536c922ed5SLars Ellenberg 		     drbd_conn_str(ns.conn),
1854b30ab791SAndreas Gruenbacher 		     (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10),
1855b30ab791SAndreas Gruenbacher 		     (unsigned long) device->rs_total);
1856aaaba345SLars Ellenberg 		if (side == C_SYNC_TARGET) {
1857b30ab791SAndreas Gruenbacher 			device->bm_resync_fo = 0;
1858aaaba345SLars Ellenberg 			device->use_csums = use_checksum_based_resync(connection, device);
1859aaaba345SLars Ellenberg 		} else {
18607e5fec31SFabian Frederick 			device->use_csums = false;
1861aaaba345SLars Ellenberg 		}
18625a22db89SLars Ellenberg 
18635a22db89SLars Ellenberg 		/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
18645a22db89SLars Ellenberg 		 * with w_send_oos, or the sync target will get confused as to
18655a22db89SLars Ellenberg 		 * how much bits to resync.  We cannot do that always, because for an
18665a22db89SLars Ellenberg 		 * empty resync and protocol < 95, we need to do it here, as we call
18675a22db89SLars Ellenberg 		 * drbd_resync_finished from here in that case.
18685a22db89SLars Ellenberg 		 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
18695a22db89SLars Ellenberg 		 * and from after_state_ch otherwise. */
187044a4d551SLars Ellenberg 		if (side == C_SYNC_SOURCE && connection->agreed_pro_version < 96)
187144a4d551SLars Ellenberg 			drbd_gen_and_send_sync_uuid(peer_device);
1872b411b363SPhilipp Reisner 
187344a4d551SLars Ellenberg 		if (connection->agreed_pro_version < 95 && device->rs_total == 0) {
1874af85e8e8SLars Ellenberg 			/* This still has a race (about when exactly the peers
1875af85e8e8SLars Ellenberg 			 * detect connection loss) that can lead to a full sync
1876af85e8e8SLars Ellenberg 			 * on next handshake. In 8.3.9 we fixed this with explicit
1877af85e8e8SLars Ellenberg 			 * resync-finished notifications, but the fix
1878af85e8e8SLars Ellenberg 			 * introduces a protocol change.  Sleeping for some
1879af85e8e8SLars Ellenberg 			 * time longer than the ping interval + timeout on the
1880af85e8e8SLars Ellenberg 			 * SyncSource, to give the SyncTarget the chance to
1881af85e8e8SLars Ellenberg 			 * detect connection loss, then waiting for a ping
1882af85e8e8SLars Ellenberg 			 * response (implicit in drbd_resync_finished) reduces
1883af85e8e8SLars Ellenberg 			 * the race considerably, but does not solve it. */
188444ed167dSPhilipp Reisner 			if (side == C_SYNC_SOURCE) {
188544ed167dSPhilipp Reisner 				struct net_conf *nc;
188644ed167dSPhilipp Reisner 				int timeo;
188744ed167dSPhilipp Reisner 
188844ed167dSPhilipp Reisner 				rcu_read_lock();
188944a4d551SLars Ellenberg 				nc = rcu_dereference(connection->net_conf);
189044ed167dSPhilipp Reisner 				timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
189144ed167dSPhilipp Reisner 				rcu_read_unlock();
189244ed167dSPhilipp Reisner 				schedule_timeout_interruptible(timeo);
189344ed167dSPhilipp Reisner 			}
1894b30ab791SAndreas Gruenbacher 			drbd_resync_finished(device);
1895b411b363SPhilipp Reisner 		}
1896b411b363SPhilipp Reisner 
1897b30ab791SAndreas Gruenbacher 		drbd_rs_controller_reset(device);
1898b30ab791SAndreas Gruenbacher 		/* ns.conn may already be != device->state.conn,
1899b411b363SPhilipp Reisner 		 * we may have been paused in between, or become paused until
1900b411b363SPhilipp Reisner 		 * the timer triggers.
1901b411b363SPhilipp Reisner 		 * No matter, that is handled in resync_timer_fn() */
1902b411b363SPhilipp Reisner 		if (ns.conn == C_SYNC_TARGET)
1903b30ab791SAndreas Gruenbacher 			mod_timer(&device->resync_timer, jiffies);
1904b411b363SPhilipp Reisner 
1905b30ab791SAndreas Gruenbacher 		drbd_md_sync(device);
1906b411b363SPhilipp Reisner 	}
1907b30ab791SAndreas Gruenbacher 	put_ldev(device);
190828bc3b8cSAndreas Gruenbacher out:
1909b30ab791SAndreas Gruenbacher 	mutex_unlock(device->state_mutex);
1910b411b363SPhilipp Reisner }
1911b411b363SPhilipp Reisner 
1912e334f550SLars Ellenberg static void update_on_disk_bitmap(struct drbd_device *device, bool resync_done)
1913c7a58db4SLars Ellenberg {
1914c7a58db4SLars Ellenberg 	struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
1915c7a58db4SLars Ellenberg 	device->rs_last_bcast = jiffies;
1916c7a58db4SLars Ellenberg 
1917c7a58db4SLars Ellenberg 	if (!get_ldev(device))
1918c7a58db4SLars Ellenberg 		return;
1919c7a58db4SLars Ellenberg 
1920c7a58db4SLars Ellenberg 	drbd_bm_write_lazy(device, 0);
19215ab7d2c0SLars Ellenberg 	if (resync_done && is_sync_state(device->state.conn))
1922c7a58db4SLars Ellenberg 		drbd_resync_finished(device);
19235ab7d2c0SLars Ellenberg 
1924c7a58db4SLars Ellenberg 	drbd_bcast_event(device, &sib);
1925c7a58db4SLars Ellenberg 	/* update timestamp, in case it took a while to write out stuff */
1926c7a58db4SLars Ellenberg 	device->rs_last_bcast = jiffies;
1927c7a58db4SLars Ellenberg 	put_ldev(device);
1928c7a58db4SLars Ellenberg }
1929c7a58db4SLars Ellenberg 
1930e334f550SLars Ellenberg static void drbd_ldev_destroy(struct drbd_device *device)
1931e334f550SLars Ellenberg {
1932e334f550SLars Ellenberg 	lc_destroy(device->resync);
1933e334f550SLars Ellenberg 	device->resync = NULL;
1934e334f550SLars Ellenberg 	lc_destroy(device->act_log);
1935e334f550SLars Ellenberg 	device->act_log = NULL;
1936d1b80853SAndreas Gruenbacher 
1937d1b80853SAndreas Gruenbacher 	__acquire(local);
193863a7c8adSLars Ellenberg 	drbd_backing_dev_free(device, device->ldev);
1939d1b80853SAndreas Gruenbacher 	device->ldev = NULL;
1940d1b80853SAndreas Gruenbacher 	__release(local);
1941d1b80853SAndreas Gruenbacher 
1942e334f550SLars Ellenberg 	clear_bit(GOING_DISKLESS, &device->flags);
1943e334f550SLars Ellenberg 	wake_up(&device->misc_wait);
1944e334f550SLars Ellenberg }
1945e334f550SLars Ellenberg 
1946e334f550SLars Ellenberg static void go_diskless(struct drbd_device *device)
1947e334f550SLars Ellenberg {
1948*8164dd6cSAndreas Gruenbacher 	struct drbd_peer_device *peer_device = first_peer_device(device);
1949e334f550SLars Ellenberg 	D_ASSERT(device, device->state.disk == D_FAILED);
1950e334f550SLars Ellenberg 	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
1951e334f550SLars Ellenberg 	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
1952e334f550SLars Ellenberg 	 * the protected members anymore, though, so once local_cnt reaches zero
1953e334f550SLars Ellenberg 	 * again (via put_ldev), it will be safe to free them. */
1954e334f550SLars Ellenberg 
1955e334f550SLars Ellenberg 	/* Try to write changed bitmap pages; read errors may have just
1956e334f550SLars Ellenberg 	 * set some bits outside the area covered by the activity log.
1957e334f550SLars Ellenberg 	 *
1958e334f550SLars Ellenberg 	 * If we have an IO error during the bitmap writeout,
1959e334f550SLars Ellenberg 	 * we will want a full sync next time, just in case.
1960e334f550SLars Ellenberg 	 * (Do we want a specific meta data flag for this?)
1961e334f550SLars Ellenberg 	 *
1962e334f550SLars Ellenberg 	 * If that does not make it to stable storage either,
1963e334f550SLars Ellenberg 	 * we cannot do anything about that anymore.
1964e334f550SLars Ellenberg 	 *
1965e334f550SLars Ellenberg 	 * We still need to check if both bitmap and ldev are present, we may
1966e334f550SLars Ellenberg 	 * end up here after a failed attach, before ldev was even assigned.
1967e334f550SLars Ellenberg 	 */
1968e334f550SLars Ellenberg 	if (device->bitmap && device->ldev) {
1969e334f550SLars Ellenberg 		/* An interrupted resync or similar is allowed to recount bits
1970e334f550SLars Ellenberg 		 * while we detach.
1971e334f550SLars Ellenberg 		 * Any modifications would not be expected anymore, though.
1972e334f550SLars Ellenberg 		 */
1973e334f550SLars Ellenberg 		if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
1974*8164dd6cSAndreas Gruenbacher 					"detach", BM_LOCKED_TEST_ALLOWED, peer_device)) {
1975e334f550SLars Ellenberg 			if (test_bit(WAS_READ_ERROR, &device->flags)) {
1976e334f550SLars Ellenberg 				drbd_md_set_flag(device, MDF_FULL_SYNC);
1977e334f550SLars Ellenberg 				drbd_md_sync(device);
1978e334f550SLars Ellenberg 			}
1979e334f550SLars Ellenberg 		}
1980e334f550SLars Ellenberg 	}
1981e334f550SLars Ellenberg 
1982e334f550SLars Ellenberg 	drbd_force_state(device, NS(disk, D_DISKLESS));
1983e334f550SLars Ellenberg }
1984e334f550SLars Ellenberg 
1985ac0acb9eSLars Ellenberg static int do_md_sync(struct drbd_device *device)
1986ac0acb9eSLars Ellenberg {
1987ac0acb9eSLars Ellenberg 	drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
1988ac0acb9eSLars Ellenberg 	drbd_md_sync(device);
1989ac0acb9eSLars Ellenberg 	return 0;
1990ac0acb9eSLars Ellenberg }
1991ac0acb9eSLars Ellenberg 
1992944410e9SLars Ellenberg /* only called from drbd_worker thread, no locking */
1993944410e9SLars Ellenberg void __update_timing_details(
1994944410e9SLars Ellenberg 		struct drbd_thread_timing_details *tdp,
1995944410e9SLars Ellenberg 		unsigned int *cb_nr,
1996944410e9SLars Ellenberg 		void *cb,
1997944410e9SLars Ellenberg 		const char *fn, const unsigned int line)
1998944410e9SLars Ellenberg {
1999944410e9SLars Ellenberg 	unsigned int i = *cb_nr % DRBD_THREAD_DETAILS_HIST;
2000944410e9SLars Ellenberg 	struct drbd_thread_timing_details *td = tdp + i;
2001944410e9SLars Ellenberg 
2002944410e9SLars Ellenberg 	td->start_jif = jiffies;
2003944410e9SLars Ellenberg 	td->cb_addr = cb;
2004944410e9SLars Ellenberg 	td->caller_fn = fn;
2005944410e9SLars Ellenberg 	td->line = line;
2006944410e9SLars Ellenberg 	td->cb_nr = *cb_nr;
2007944410e9SLars Ellenberg 
2008944410e9SLars Ellenberg 	i = (i+1) % DRBD_THREAD_DETAILS_HIST;
2009944410e9SLars Ellenberg 	td = tdp + i;
2010944410e9SLars Ellenberg 	memset(td, 0, sizeof(*td));
2011944410e9SLars Ellenberg 
2012944410e9SLars Ellenberg 	++(*cb_nr);
2013944410e9SLars Ellenberg }
2014944410e9SLars Ellenberg 
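/*
 * Editor's note: a standalone sketch of the ring-buffer bookkeeping used
 * by __update_timing_details() above.  The depth, struct layout and names
 * here are illustrative, not DRBD's; the point is that the slot following
 * the newest record is zeroed, so a reader scanning the ring can tell
 * where the recorded history currently ends.
 */
#include <stdio.h>
#include <string.h>

#define HIST 16

struct timing_detail {
	unsigned int seq;
	const char *caller;
	unsigned int line;
};

static struct timing_detail ring[HIST];
static unsigned int next_seq;

static void record_timing(const char *caller, unsigned int line)
{
	unsigned int i = next_seq % HIST;

	ring[i].seq = next_seq;
	ring[i].caller = caller;
	ring[i].line = line;

	/* zero the following slot as the end-of-history marker */
	memset(&ring[(i + 1) % HIST], 0, sizeof(ring[0]));
	next_seq++;
}

int main(void)
{
	record_timing(__func__, __LINE__);
	printf("newest entry: %s:%u (seq %u)\n",
	       ring[0].caller, ring[0].line, ring[0].seq);
	return 0;
}
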
2015e334f550SLars Ellenberg static void do_device_work(struct drbd_device *device, const unsigned long todo)
2016e334f550SLars Ellenberg {
2017b47a06d1SAndreas Gruenbacher 	if (test_bit(MD_SYNC, &todo))
2018ac0acb9eSLars Ellenberg 		do_md_sync(device);
2019b47a06d1SAndreas Gruenbacher 	if (test_bit(RS_DONE, &todo) ||
2020b47a06d1SAndreas Gruenbacher 	    test_bit(RS_PROGRESS, &todo))
2021b47a06d1SAndreas Gruenbacher 		update_on_disk_bitmap(device, test_bit(RS_DONE, &todo));
2022b47a06d1SAndreas Gruenbacher 	if (test_bit(GO_DISKLESS, &todo))
2023e334f550SLars Ellenberg 		go_diskless(device);
2024b47a06d1SAndreas Gruenbacher 	if (test_bit(DESTROY_DISK, &todo))
2025e334f550SLars Ellenberg 		drbd_ldev_destroy(device);
2026b47a06d1SAndreas Gruenbacher 	if (test_bit(RS_START, &todo))
2027ac0acb9eSLars Ellenberg 		do_start_resync(device);
2028e334f550SLars Ellenberg }
2029e334f550SLars Ellenberg 
2030e334f550SLars Ellenberg #define DRBD_DEVICE_WORK_MASK	\
2031e334f550SLars Ellenberg 	((1UL << GO_DISKLESS)	\
2032e334f550SLars Ellenberg 	|(1UL << DESTROY_DISK)	\
2033ac0acb9eSLars Ellenberg 	|(1UL << MD_SYNC)	\
2034ac0acb9eSLars Ellenberg 	|(1UL << RS_START)	\
2035e334f550SLars Ellenberg 	|(1UL << RS_PROGRESS)	\
2036e334f550SLars Ellenberg 	|(1UL << RS_DONE)	\
2037e334f550SLars Ellenberg 	)
2038e334f550SLars Ellenberg 
2039e334f550SLars Ellenberg static unsigned long get_work_bits(unsigned long *flags)
2040e334f550SLars Ellenberg {
2041e334f550SLars Ellenberg 	unsigned long old, new;
2042e334f550SLars Ellenberg 	do {
2043e334f550SLars Ellenberg 		old = *flags;
2044e334f550SLars Ellenberg 		new = old & ~DRBD_DEVICE_WORK_MASK;
2045e334f550SLars Ellenberg 	} while (cmpxchg(flags, old, new) != old);
2046e334f550SLars Ellenberg 	return old & DRBD_DEVICE_WORK_MASK;
2047e334f550SLars Ellenberg }
2048e334f550SLars Ellenberg 
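/*
 * Editor's note: get_work_bits() above atomically "takes" all pending work
 * bits by clearing them with a cmpxchg() retry loop and returning the ones
 * that were set.  Below is a minimal userspace sketch of the same pattern
 * using C11 atomics; the flag names and mask are made up for illustration.
 */
#include <stdatomic.h>
#include <stdio.h>

#define WORK_A		(1UL << 0)
#define WORK_B		(1UL << 1)
#define WORK_MASK	(WORK_A | WORK_B)

static unsigned long take_work_bits(_Atomic unsigned long *flags)
{
	unsigned long old = atomic_load(flags);
	unsigned long new;

	do {
		new = old & ~WORK_MASK;
		/* on failure, atomic_compare_exchange_weak reloads 'old' */
	} while (!atomic_compare_exchange_weak(flags, &old, new));

	return old & WORK_MASK;
}

int main(void)
{
	_Atomic unsigned long flags = WORK_A | (1UL << 7);

	printf("took %#lx, remaining flags %#lx\n",
	       take_work_bits(&flags), atomic_load(&flags));
	return 0;
}
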
2049e334f550SLars Ellenberg static void do_unqueued_work(struct drbd_connection *connection)
2050c7a58db4SLars Ellenberg {
2051c7a58db4SLars Ellenberg 	struct drbd_peer_device *peer_device;
2052c7a58db4SLars Ellenberg 	int vnr;
2053c7a58db4SLars Ellenberg 
2054c7a58db4SLars Ellenberg 	rcu_read_lock();
2055c7a58db4SLars Ellenberg 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2056c7a58db4SLars Ellenberg 		struct drbd_device *device = peer_device->device;
2057e334f550SLars Ellenberg 		unsigned long todo = get_work_bits(&device->flags);
2058e334f550SLars Ellenberg 		if (!todo)
2059c7a58db4SLars Ellenberg 			continue;
20605ab7d2c0SLars Ellenberg 
2061c7a58db4SLars Ellenberg 		kref_get(&device->kref);
2062c7a58db4SLars Ellenberg 		rcu_read_unlock();
2063e334f550SLars Ellenberg 		do_device_work(device, todo);
2064c7a58db4SLars Ellenberg 		kref_put(&device->kref, drbd_destroy_device);
2065c7a58db4SLars Ellenberg 		rcu_read_lock();
2066c7a58db4SLars Ellenberg 	}
2067c7a58db4SLars Ellenberg 	rcu_read_unlock();
2068c7a58db4SLars Ellenberg }
2069c7a58db4SLars Ellenberg 
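/*
 * Editor's note: in do_unqueued_work() above, the per-device work may block
 * (bitmap writeout, state changes), so it must not run under rcu_read_lock();
 * the kref_get()/kref_put() pair keeps the device alive while the RCU read
 * lock is temporarily dropped around do_device_work().
 */
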
2070a186e478SRashika Kheria static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
20718c0785a5SLars Ellenberg {
20728c0785a5SLars Ellenberg 	spin_lock_irq(&queue->q_lock);
207315e26f6aSLars Ellenberg 	list_splice_tail_init(&queue->q, work_list);
20748c0785a5SLars Ellenberg 	spin_unlock_irq(&queue->q_lock);
20758c0785a5SLars Ellenberg 	return !list_empty(work_list);
20768c0785a5SLars Ellenberg }
20778c0785a5SLars Ellenberg 
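/*
 * Editor's note: dequeue_work_batch() above splices the whole queue onto a
 * caller-provided list while holding the queue lock, so the work items can
 * then be processed without the lock held.  A userspace sketch of the same
 * idea, with a plain pthread mutex and a hand-rolled list (names invented):
 */
#include <pthread.h>
#include <stdio.h>

struct work_item {
	struct work_item *next;
	int payload;
};

struct work_queue {
	pthread_mutex_t lock;
	struct work_item *head;
};

static struct work_item *dequeue_batch(struct work_queue *q)
{
	struct work_item *batch;

	pthread_mutex_lock(&q->lock);
	batch = q->head;	/* take every queued item at once ... */
	q->head = NULL;		/* ... leaving the queue empty */
	pthread_mutex_unlock(&q->lock);

	return batch;		/* process the batch without the lock */
}

int main(void)
{
	struct work_item b = { NULL, 2 }, a = { &b, 1 };
	struct work_queue q = { PTHREAD_MUTEX_INITIALIZER, &a };

	for (struct work_item *w = dequeue_batch(&q); w; w = w->next)
		printf("handling work item %d\n", w->payload);
	return 0;
}
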
2078bde89a9eSAndreas Gruenbacher static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
2079b6dd1a89SLars Ellenberg {
2080b6dd1a89SLars Ellenberg 	DEFINE_WAIT(wait);
2081b6dd1a89SLars Ellenberg 	struct net_conf *nc;
2082b6dd1a89SLars Ellenberg 	int uncork, cork;
2083b6dd1a89SLars Ellenberg 
2084abde9cc6SLars Ellenberg 	dequeue_work_batch(&connection->sender_work, work_list);
2085b6dd1a89SLars Ellenberg 	if (!list_empty(work_list))
2086b6dd1a89SLars Ellenberg 		return;
2087b6dd1a89SLars Ellenberg 
2088b6dd1a89SLars Ellenberg 	/* Still nothing to do?
2089b6dd1a89SLars Ellenberg 	 * Maybe we still need to close the current epoch,
2090b6dd1a89SLars Ellenberg 	 * even if no new requests are queued yet.
2091b6dd1a89SLars Ellenberg 	 *
2092b6dd1a89SLars Ellenberg 	 * Also, poke TCP, just in case.
2093b6dd1a89SLars Ellenberg 	 * Then wait for new work (or signal). */
2094b6dd1a89SLars Ellenberg 	rcu_read_lock();
2095b6dd1a89SLars Ellenberg 	nc = rcu_dereference(connection->net_conf);
2096b6dd1a89SLars Ellenberg 	uncork = nc ? nc->tcp_cork : 0;
2097b6dd1a89SLars Ellenberg 	rcu_read_unlock();
2098b6dd1a89SLars Ellenberg 	if (uncork) {
2099b6dd1a89SLars Ellenberg 		mutex_lock(&connection->data.mutex);
2100b6dd1a89SLars Ellenberg 		if (connection->data.socket)
2101db10538aSChristoph Hellwig 			tcp_sock_set_cork(connection->data.socket->sk, false);
2102b6dd1a89SLars Ellenberg 		mutex_unlock(&connection->data.mutex);
2103b6dd1a89SLars Ellenberg 	}
2104b6dd1a89SLars Ellenberg 
2105b6dd1a89SLars Ellenberg 	for (;;) {
2106b6dd1a89SLars Ellenberg 		int send_barrier;
2107b6dd1a89SLars Ellenberg 		prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
21080500813fSAndreas Gruenbacher 		spin_lock_irq(&connection->resource->req_lock);
2109b6dd1a89SLars Ellenberg 		spin_lock(&connection->sender_work.q_lock);	/* FIXME get rid of this one? */
2110bc317a9eSLars Ellenberg 		if (!list_empty(&connection->sender_work.q))
21114dd726f0SLars Ellenberg 			list_splice_tail_init(&connection->sender_work.q, work_list);
2112b6dd1a89SLars Ellenberg 		spin_unlock(&connection->sender_work.q_lock);	/* FIXME get rid of this one? */
2113b6dd1a89SLars Ellenberg 		if (!list_empty(work_list) || signal_pending(current)) {
21140500813fSAndreas Gruenbacher 			spin_unlock_irq(&connection->resource->req_lock);
2115b6dd1a89SLars Ellenberg 			break;
2116b6dd1a89SLars Ellenberg 		}
2117f9c78128SLars Ellenberg 
2118f9c78128SLars Ellenberg 		/* We found nothing new to do, no to-be-communicated request,
2119f9c78128SLars Ellenberg 		 * no other work item.  We may still need to close the last
2120f9c78128SLars Ellenberg 		 * epoch.  Next incoming request epoch will be connection ->
2121f9c78128SLars Ellenberg 		 * current transfer log epoch number.  If that is different
2122f9c78128SLars Ellenberg 		 * from the epoch of the last request we communicated, it is
2123f9c78128SLars Ellenberg 		 * safe to send the epoch separating barrier now.
2124f9c78128SLars Ellenberg 		 */
2125f9c78128SLars Ellenberg 		send_barrier =
2126f9c78128SLars Ellenberg 			atomic_read(&connection->current_tle_nr) !=
2127f9c78128SLars Ellenberg 			connection->send.current_epoch_nr;
21280500813fSAndreas Gruenbacher 		spin_unlock_irq(&connection->resource->req_lock);
2129f9c78128SLars Ellenberg 
2130f9c78128SLars Ellenberg 		if (send_barrier)
2131f9c78128SLars Ellenberg 			maybe_send_barrier(connection,
2132f9c78128SLars Ellenberg 					connection->send.current_epoch_nr + 1);
21335ab7d2c0SLars Ellenberg 
2134e334f550SLars Ellenberg 		if (test_bit(DEVICE_WORK_PENDING, &connection->flags))
21355ab7d2c0SLars Ellenberg 			break;
21365ab7d2c0SLars Ellenberg 
2137a80ca1aeSLars Ellenberg 		/* drbd_send() may have called flush_signals() */
2138a80ca1aeSLars Ellenberg 		if (get_t_state(&connection->worker) != RUNNING)
2139a80ca1aeSLars Ellenberg 			break;
21405ab7d2c0SLars Ellenberg 
2141b6dd1a89SLars Ellenberg 		schedule();
2142b6dd1a89SLars Ellenberg 		/* may be woken up for things other than new work, too,
2143b6dd1a89SLars Ellenberg 		 * e.g. if the current epoch got closed,
2144b6dd1a89SLars Ellenberg 		 * in which case we send the barrier above. */
2145b6dd1a89SLars Ellenberg 	}
2146b6dd1a89SLars Ellenberg 	finish_wait(&connection->sender_work.q_wait, &wait);
2147b6dd1a89SLars Ellenberg 
2148b6dd1a89SLars Ellenberg 	/* someone may have changed the config while we have been waiting above. */
2149b6dd1a89SLars Ellenberg 	rcu_read_lock();
2150b6dd1a89SLars Ellenberg 	nc = rcu_dereference(connection->net_conf);
2151b6dd1a89SLars Ellenberg 	cork = nc ? nc->tcp_cork : 0;
2152b6dd1a89SLars Ellenberg 	rcu_read_unlock();
2153b6dd1a89SLars Ellenberg 	mutex_lock(&connection->data.mutex);
2154b6dd1a89SLars Ellenberg 	if (connection->data.socket) {
2155b6dd1a89SLars Ellenberg 		if (cork)
2156db10538aSChristoph Hellwig 			tcp_sock_set_cork(connection->data.socket->sk, true);
2157b6dd1a89SLars Ellenberg 		else if (!uncork)
2158db10538aSChristoph Hellwig 			tcp_sock_set_cork(connection->data.socket->sk, false);
2159b6dd1a89SLars Ellenberg 	}
2160b6dd1a89SLars Ellenberg 	mutex_unlock(&connection->data.mutex);
2161b6dd1a89SLars Ellenberg }
2162b6dd1a89SLars Ellenberg 
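/*
 * Editor's note: wait_for_work() above toggles TCP corking on the data
 * socket via the in-kernel helper tcp_sock_set_cork().  For readers more
 * familiar with userspace sockets, this is the rough equivalent with
 * setsockopt(); the socket here is only created, not connected.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static int set_cork(int fd, int on)
{
	/* while corked, small writes are batched into full-sized segments */
	return setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 || set_cork(fd, 1) < 0)
		perror("cork");
	/* ... several small sends would be queued here ... */
	if (set_cork(fd, 0) < 0)	/* uncork: flush whatever is pending */
		perror("uncork");
	close(fd);
	return 0;
}
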
2163b411b363SPhilipp Reisner int drbd_worker(struct drbd_thread *thi)
2164b411b363SPhilipp Reisner {
2165bde89a9eSAndreas Gruenbacher 	struct drbd_connection *connection = thi->connection;
21666db7e50aSAndreas Gruenbacher 	struct drbd_work *w = NULL;
2167c06ece6bSAndreas Gruenbacher 	struct drbd_peer_device *peer_device;
2168b411b363SPhilipp Reisner 	LIST_HEAD(work_list);
21698c0785a5SLars Ellenberg 	int vnr;
2170b411b363SPhilipp Reisner 
2171e77a0a5cSAndreas Gruenbacher 	while (get_t_state(thi) == RUNNING) {
217280822284SPhilipp Reisner 		drbd_thread_current_set_cpu(thi);
2173b411b363SPhilipp Reisner 
2174944410e9SLars Ellenberg 		if (list_empty(&work_list)) {
2175944410e9SLars Ellenberg 			update_worker_timing_details(connection, wait_for_work);
2176bde89a9eSAndreas Gruenbacher 			wait_for_work(connection, &work_list);
2177944410e9SLars Ellenberg 		}
2178b411b363SPhilipp Reisner 
2179944410e9SLars Ellenberg 		if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) {
2180944410e9SLars Ellenberg 			update_worker_timing_details(connection, do_unqueued_work);
2181e334f550SLars Ellenberg 			do_unqueued_work(connection);
2182944410e9SLars Ellenberg 		}
21835ab7d2c0SLars Ellenberg 
21848c0785a5SLars Ellenberg 		if (signal_pending(current)) {
2185b411b363SPhilipp Reisner 			flush_signals(current);
218619393e10SPhilipp Reisner 			if (get_t_state(thi) == RUNNING) {
21871ec861ebSAndreas Gruenbacher 				drbd_warn(connection, "Worker got an unexpected signal\n");
2188b411b363SPhilipp Reisner 				continue;
218919393e10SPhilipp Reisner 			}
2190b411b363SPhilipp Reisner 			break;
2191b411b363SPhilipp Reisner 		}
2192b411b363SPhilipp Reisner 
2193e77a0a5cSAndreas Gruenbacher 		if (get_t_state(thi) != RUNNING)
2194b411b363SPhilipp Reisner 			break;
2195b411b363SPhilipp Reisner 
2196729e8b87SLars Ellenberg 		if (!list_empty(&work_list)) {
21976db7e50aSAndreas Gruenbacher 			w = list_first_entry(&work_list, struct drbd_work, list);
21986db7e50aSAndreas Gruenbacher 			list_del_init(&w->list);
2199944410e9SLars Ellenberg 			update_worker_timing_details(connection, w->cb);
22006db7e50aSAndreas Gruenbacher 			if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
22018c0785a5SLars Ellenberg 				continue;
2202bde89a9eSAndreas Gruenbacher 			if (connection->cstate >= C_WF_REPORT_PARAMS)
2203bde89a9eSAndreas Gruenbacher 				conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
2204b411b363SPhilipp Reisner 		}
2205b411b363SPhilipp Reisner 	}
2206b411b363SPhilipp Reisner 
22078c0785a5SLars Ellenberg 	do {
2208944410e9SLars Ellenberg 		if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) {
2209944410e9SLars Ellenberg 			update_worker_timing_details(connection, do_unqueued_work);
2210e334f550SLars Ellenberg 			do_unqueued_work(connection);
2211944410e9SLars Ellenberg 		}
2212729e8b87SLars Ellenberg 		if (!list_empty(&work_list)) {
22136db7e50aSAndreas Gruenbacher 			w = list_first_entry(&work_list, struct drbd_work, list);
22146db7e50aSAndreas Gruenbacher 			list_del_init(&w->list);
2215944410e9SLars Ellenberg 			update_worker_timing_details(connection, w->cb);
22166db7e50aSAndreas Gruenbacher 			w->cb(w, 1);
2217729e8b87SLars Ellenberg 		} else
2218bde89a9eSAndreas Gruenbacher 			dequeue_work_batch(&connection->sender_work, &work_list);
2219e334f550SLars Ellenberg 	} while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags));
2220b411b363SPhilipp Reisner 
2221c141ebdaSPhilipp Reisner 	rcu_read_lock();
2222c06ece6bSAndreas Gruenbacher 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2223c06ece6bSAndreas Gruenbacher 		struct drbd_device *device = peer_device->device;
22240b0ba1efSAndreas Gruenbacher 		D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
2225b30ab791SAndreas Gruenbacher 		kref_get(&device->kref);
2226c141ebdaSPhilipp Reisner 		rcu_read_unlock();
2227b30ab791SAndreas Gruenbacher 		drbd_device_cleanup(device);
222805a10ec7SAndreas Gruenbacher 		kref_put(&device->kref, drbd_destroy_device);
2229c141ebdaSPhilipp Reisner 		rcu_read_lock();
22300e29d163SPhilipp Reisner 	}
2231c141ebdaSPhilipp Reisner 	rcu_read_unlock();
2232b411b363SPhilipp Reisner 
2233b411b363SPhilipp Reisner 	return 0;
2234b411b363SPhilipp Reisner }
2235